Repository: andrewgaul/s3proxy Branch: master Commit: dc0df5caac8b Files: 125 Total size: 1014.3 KB Directory structure: gitextract_jwamyj7y/ ├── .dockerignore ├── .github/ │ ├── dependabot.yml │ └── workflows/ │ └── ci-main.yml ├── .gitignore ├── .gitmodules ├── .mailmap ├── .mvn/ │ └── maven.config ├── .releaserc ├── Dockerfile ├── LICENSE ├── README.md ├── docs/ │ ├── Encryption.md │ └── Logging.md ├── pom.xml └── src/ ├── main/ │ ├── assembly/ │ │ └── jar-with-dependencies.xml │ ├── config/ │ │ └── logback.xml │ ├── java/ │ │ └── org/ │ │ └── gaul/ │ │ └── s3proxy/ │ │ ├── AccessControlPolicy.java │ │ ├── AliasBlobStore.java │ │ ├── AuthenticationType.java │ │ ├── AwsHttpHeaders.java │ │ ├── AwsSignature.java │ │ ├── BlobStoreLocator.java │ │ ├── CaseInsensitiveImmutableMultimap.java │ │ ├── ChunkedInputStream.java │ │ ├── CompleteMultipartUploadRequest.java │ │ ├── CreateBucketRequest.java │ │ ├── CrossOriginResourceSharing.java │ │ ├── DeleteMultipleObjectsRequest.java │ │ ├── EncryptedBlobStore.java │ │ ├── EventualBlobStore.java │ │ ├── GlobBlobStoreLocator.java │ │ ├── LatencyBlobStore.java │ │ ├── Main.java │ │ ├── MetricsHandler.java │ │ ├── NoCacheBlobStore.java │ │ ├── NullBlobStore.java │ │ ├── PrefixBlobStore.java │ │ ├── PutOptions2.java │ │ ├── Quirks.java │ │ ├── ReadOnlyBlobStore.java │ │ ├── RegexBlobStore.java │ │ ├── S3AuthorizationHeader.java │ │ ├── S3ErrorCode.java │ │ ├── S3Exception.java │ │ ├── S3Operation.java │ │ ├── S3Proxy.java │ │ ├── S3ProxyConstants.java │ │ ├── S3ProxyHandler.java │ │ ├── S3ProxyHandlerJetty.java │ │ ├── S3ProxyMetrics.java │ │ ├── ShardedBlobStore.java │ │ ├── StorageClassBlobStore.java │ │ ├── ThrottledInputStream.java │ │ ├── UserMetadataReplacerBlobStore.java │ │ ├── awssdk/ │ │ │ ├── AwsS3SdkApiMetadata.java │ │ │ ├── AwsS3SdkBlobStore.java │ │ │ ├── AwsS3SdkBlobStoreContextModule.java │ │ │ └── AwsS3SdkProviderMetadata.java │ │ ├── azureblob/ │ │ │ ├── AzureBlobApiMetadata.java │ │ │ ├── 
AzureBlobProviderMetadata.java │ │ │ ├── AzureBlobStore.java │ │ │ └── AzureBlobStoreContextModule.java │ │ ├── crypto/ │ │ │ ├── Constants.java │ │ │ ├── Decryption.java │ │ │ ├── DecryptionInputStream.java │ │ │ ├── Encryption.java │ │ │ ├── EncryptionInputStream.java │ │ │ └── PartPadding.java │ │ ├── gcloudsdk/ │ │ │ ├── GCloudApiMetadata.java │ │ │ ├── GCloudBlobStore.java │ │ │ ├── GCloudBlobStoreContextModule.java │ │ │ └── GCloudProviderMetadata.java │ │ ├── junit/ │ │ │ ├── S3ProxyExtension.java │ │ │ ├── S3ProxyJunitCore.java │ │ │ └── S3ProxyRule.java │ │ └── nio2blob/ │ │ ├── AbstractNio2BlobStore.java │ │ ├── FilesystemNio2BlobApiMetadata.java │ │ ├── FilesystemNio2BlobProviderMetadata.java │ │ ├── FilesystemNio2BlobStore.java │ │ ├── FilesystemNio2BlobStoreContextModule.java │ │ ├── TransientNio2BlobApiMetadata.java │ │ ├── TransientNio2BlobProviderMetadata.java │ │ ├── TransientNio2BlobStore.java │ │ └── TransientNio2BlobStoreContextModule.java │ └── resources/ │ ├── checkstyle.xml │ ├── copyright_header.txt │ └── run-docker-container.sh └── test/ ├── java/ │ └── org/ │ └── gaul/ │ └── s3proxy/ │ ├── AliasBlobStoreTest.java │ ├── AwsS3SdkBlobStoreTest.java │ ├── AwsSdk2Test.java │ ├── AwsSdkAnonymousTest.java │ ├── AwsSdkTest.java │ ├── CrossOriginResourceSharingAllowAllResponseTest.java │ ├── CrossOriginResourceSharingResponseTest.java │ ├── CrossOriginResourceSharingRuleTest.java │ ├── EncryptedBlobStoreTest.java │ ├── EventualBlobStoreTest.java │ ├── GlobBlobStoreLocatorTest.java │ ├── LatencyBlobStoreTest.java │ ├── NoCacheBlobStoreTest.java │ ├── NullBlobStoreTest.java │ ├── PrefixBlobStoreTest.java │ ├── ReadOnlyBlobStoreTest.java │ ├── RegexBlobStoreTest.java │ ├── ShardedBlobStoreTest.java │ ├── TestUtils.java │ ├── TierBlobStoreTest.java │ ├── UserMetadataReplacerBlobStoreTest.java │ └── junit/ │ ├── S3ProxyExtensionTest.java │ └── S3ProxyRuleTest.java └── resources/ ├── keystore.jks ├── logback.xml ├── run-s3-tests.sh ├── s3-tests.conf ├── 
s3proxy-anonymous.conf ├── s3proxy-azurite.conf ├── s3proxy-cors-allow-all.conf ├── s3proxy-cors.conf ├── s3proxy-encryption.conf ├── s3proxy-fake-gcs-server.conf ├── s3proxy-filesystem-nio2.conf ├── s3proxy-localstack-aws-s3-sdk.conf ├── s3proxy-localstack-s3.conf ├── s3proxy-transient-nio2.conf └── s3proxy.conf ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ # Exclude everything from context that is not used by COPY steps in the Dockerfile * !/target/s3proxy !/src/main/resources/run-docker-container.sh ================================================ FILE: .github/dependabot.yml ================================================ # To get started with Dependabot version updates, you'll need to specify which # package ecosystems to update and where the package manifests are located. # Please see the documentation for all configuration options: # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "monthly" - package-ecosystem: "maven" directory: "/" # Location of package manifests schedule: interval: "monthly" open-pull-requests-limit: 20 ================================================ FILE: .github/workflows/ci-main.yml ================================================ name: Main CI on: push: branches: - "master" tags: - "*" pull_request: branches: - "*" permissions: contents: read env: dockerhub_publish: ${{ secrets.DOCKER_PASS != '' }} jobs: meta: runs-on: ubuntu-24.04-arm outputs: container_tags: ${{ steps.docker_action_meta.outputs.tags }} container_labels: ${{ steps.docker_action_meta.outputs.labels }} container_buildtime: ${{ fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.created'] }} container_version: ${{ 
fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.version'] }} container_revision: ${{ fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.revision'] }} steps: - name: Checkout uses: actions/checkout@v6 with: submodules: false persist-credentials: false - name: Docker meta id: docker_action_meta uses: docker/metadata-action@v6.0.0 with: images: | name=ghcr.io/${{ github.repository }}/container name=andrewgaul/s3proxy,enable=${{ env.dockerhub_publish }} flavor: | latest=auto tags: | type=sha,format=long type=sha type=match,pattern=s3proxy-(.*),group=1 type=ref,event=branch type=ref,event=pr type=ref,event=tag labels: | org.opencontainers.image.licenses=Apache-2.0 runTests: runs-on: ubuntu-24.04-arm needs: [meta] steps: - uses: actions/checkout@v6 with: submodules: "recursive" - uses: actions/setup-java@v5 with: distribution: "temurin" java-version: "17" cache: "maven" - uses: actions/setup-python@v6 with: python-version: "3.11" cache: "pip" #Run tests - name: Maven Set version run: | mvn versions:set -DnewVersion=${{ needs.meta.outputs.version }} - name: Maven Package run: | mvn verify -DskipTests - name: Maven Test run: | mvn test - name: Maven Test with transient-nio2 run: | # TODO: run other test classes mvn test -Ds3proxy.test.conf=s3proxy-transient-nio2.conf -Dtest=AwsSdkTest - name: Maven Test with filesystem-nio2 run: | # TODO: run other test classes mkdir /tmp/blobstore mvn test -Ds3proxy.test.conf=s3proxy-filesystem-nio2.conf -Dtest=AwsSdkTest - name: Install s3-tests run: | python -m pip install --upgrade pip pip install tox tox-gh-actions - name: Run s3-tests run: | ./src/test/resources/run-s3-tests.sh - name: Run s3-tests with transient-nio2 run: | ./src/test/resources/run-s3-tests.sh s3proxy-transient-nio2.conf #Store the target - uses: actions/upload-artifact@v7 with: name: s3proxy path: target/s3proxy - uses: actions/upload-artifact@v7 with: name: pom path: pom.xml azuriteTests: runs-on: 
ubuntu-24.04-arm needs: [meta] steps: - uses: actions/checkout@v6 with: submodules: "recursive" - uses: actions/setup-java@v5 with: distribution: "temurin" java-version: "17" cache: "maven" - uses: actions/setup-python@v6 with: python-version: "3.11" cache: "pip" - name: Maven Package run: | mvn package -DskipTests - name: Install Azurite run: npx --yes --loglevel info azurite@3.35 --version - name: Start Azurite shell: bash run: npx --yes --package azurite@3.35 azurite-blob & - name: Maven Test with Azurite run: | # TODO: run other test classes mvn test -Ds3proxy.test.conf=s3proxy-azurite.conf -Dtest=AwsSdkTest - name: Install s3-tests run: | python -m pip install --upgrade pip pip install tox tox-gh-actions - name: Run s3-tests with Azurite run: | ./src/test/resources/run-s3-tests.sh s3proxy-azurite.conf kill $(pidof node) localstackTests: runs-on: ubuntu-24.04-arm needs: [meta] steps: - uses: actions/checkout@v6 with: submodules: "recursive" - uses: actions/setup-java@v5 with: distribution: "temurin" java-version: "17" cache: "maven" - uses: actions/setup-python@v6 with: python-version: "3.11" cache: "pip" - name: Maven Package run: | mvn package -DskipTests - name: Install LocalStack run: docker pull localstack/localstack:4.11.1 - name: Start LocalStack run: | docker run -d --name localstack -p 4566:4566 localstack/localstack:4.11.1 # Wait for LocalStack to be ready for i in $(seq 30); do if curl -s http://127.0.0.1:4566/_localstack/health | grep -q '"s3"'; then break fi sleep 1 done - name: Maven Test with LocalStack (s3) run: | # TODO: run other test classes mvn test -Ds3proxy.test.conf=s3proxy-localstack-s3.conf -Dtest=AwsSdkTest - name: Maven Test with LocalStack (aws-s3-sdk) run: | # TODO: run other test classes mvn test -Ds3proxy.test.conf=s3proxy-localstack-aws-s3-sdk.conf -Dtest=AwsSdkTest - name: Install s3-tests run: | python -m pip install --upgrade pip pip install tox tox-gh-actions - name: Run s3-tests with LocalStack (s3) run: | 
./src/test/resources/run-s3-tests.sh s3proxy-localstack-s3.conf - name: Run s3-tests with LocalStack (aws-s3-sdk) run: | ./src/test/resources/run-s3-tests.sh s3proxy-localstack-aws-s3-sdk.conf docker stop localstack fakeGcsServerTests: runs-on: ubuntu-24.04-arm needs: [meta] steps: - uses: actions/checkout@v6 with: submodules: "recursive" - uses: actions/setup-java@v5 with: distribution: "temurin" java-version: "17" cache: "maven" - uses: actions/setup-python@v6 with: python-version: "3.11" cache: "pip" - name: Maven Package run: | mvn package -DskipTests - name: Install fake-gcs-server run: go install github.com/fsouza/fake-gcs-server@latest - name: Start fake-gcs-server run: $HOME/go/bin/fake-gcs-server -backend memory -scheme http -host 127.0.0.1 & - name: Maven Test with fake-gcs-server run: | # TODO: run other test classes STORAGE_EMULATOR_HOST=http://localhost:4443 mvn test -Ds3proxy.test.conf=s3proxy-fake-gcs-server.conf -Dtest=AwsSdkTest - name: Install s3-tests run: | python -m pip install --upgrade pip pip install tox tox-gh-actions - name: Run s3-tests with fake-gcs-server run: | # TODO: #STORAGE_EMULATOR_HOST=http://localhost:4443 ./src/test/resources/run-s3-tests.sh s3proxy-fake-gcs-server.conf kill $(pidof fake-gcs-server) Containerize: runs-on: ubuntu-24.04-arm needs: [runTests, azuriteTests, localstackTests, fakeGcsServerTests, meta] permissions: contents: read packages: write steps: - uses: actions/checkout@v6 - uses: actions/download-artifact@v8 with: name: s3proxy path: target - uses: actions/download-artifact@v8 with: name: pom path: . 
- name: Set up QEMU uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v4 - name: Login to DockerHub uses: docker/login-action@v4 if: github.event_name != 'pull_request' && env.dockerhub_publish == 'true' with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_PASS }} - name: Login to GHCR uses: docker/login-action@v4 if: github.event_name != 'pull_request' with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v7 with: context: . platforms: linux/amd64,linux/arm64 push: ${{ github.event_name != 'pull_request' }} tags: ${{ needs.meta.outputs.container_tags }} labels: ${{ needs.meta.outputs.container_labels }} build-args: | BUILDTIME=${{ needs.meta.outputs.container_buildtime }} VERSION=${{ needs.meta.outputs.container_version }} REVISION=${{ needs.meta.outputs.container_revision }} cache-from: type=gha cache-to: type=gha,mode=max ================================================ FILE: .gitignore ================================================ s3proxy.iml .idea/ # Eclipse project configuration files .classpath .project .settings # MAC stuff .DS_Store # below is default github .ignore for java *.class # Mobile Tools for Java (J2ME) .mtj.tmp/ # Package Files # *.jar *.war *.ear # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml hs_err_pid* target/ # files created during tests __blobstorage__/ AzuriteConfig __azurite_db* ================================================ FILE: .gitmodules ================================================ [submodule "s3-tests"] path = s3-tests url = https://github.com/gaul/s3-tests.git ================================================ FILE: .mailmap ================================================ Hironao Sekine Sheng Hu ================================================ FILE: .mvn/maven.config ================================================ 
-Daether.checksums.algorithms=SHA-512,SHA-256,SHA-1,MD5 ================================================ FILE: .releaserc ================================================ { "tagFormat": 's3proxy-${version}', "branches": [ { "name": 'master', prerelease: false }, { "name": 'releases\/+([0-9])?(\.\d+)(\.\d+|z|$)', prerelease: false }, { "name": 'next', prerelease: false }, { name: 'next-major', prerelease: true }, { name: 'develop', prerelease: true }, { name: 'develop\/.*', prerelease: true } ] } ================================================ FILE: Dockerfile ================================================ FROM docker.io/library/eclipse-temurin:21-jre LABEL maintainer="Andrew Gaul " WORKDIR /opt/s3proxy RUN apt-get update && \ apt-get install -y dumb-init && \ rm -rf /var/lib/apt/lists/* COPY \ target/s3proxy \ src/main/resources/run-docker-container.sh \ /opt/s3proxy/ ENV \ LOG_LEVEL="info" \ S3PROXY_AUTHORIZATION="aws-v2-or-v4" \ S3PROXY_ENDPOINT="http://0.0.0.0:80" \ S3PROXY_IDENTITY="local-identity" \ S3PROXY_CREDENTIAL="local-credential" \ S3PROXY_VIRTUALHOST="" \ S3PROXY_KEYSTORE_PATH="keystore.jks" \ S3PROXY_KEYSTORE_PASSWORD="password" \ S3PROXY_CORS_ALLOW_ALL="false" \ S3PROXY_CORS_ALLOW_ORIGINS="" \ S3PROXY_CORS_ALLOW_METHODS="" \ S3PROXY_CORS_ALLOW_HEADERS="" \ S3PROXY_CORS_ALLOW_CREDENTIAL="" \ S3PROXY_V4_MAX_CHUNK_SIZE="16777216" \ S3PROXY_IGNORE_UNKNOWN_HEADERS="false" \ S3PROXY_ENCRYPTED_BLOBSTORE="" \ S3PROXY_ENCRYPTED_BLOBSTORE_PASSWORD="" \ S3PROXY_ENCRYPTED_BLOBSTORE_SALT="" \ S3PROXY_READ_ONLY_BLOBSTORE="false" \ S3PROXY_METRICS_ENABLED="false" \ S3PROXY_METRICS_PORT="9090" \ S3PROXY_METRICS_HOST="0.0.0.0" \ JCLOUDS_PROVIDER="filesystem-nio2" \ JCLOUDS_ENDPOINT="" \ JCLOUDS_REGION="" \ JCLOUDS_REGIONS="us-east-1" \ JCLOUDS_IDENTITY="remote-identity" \ JCLOUDS_CREDENTIAL="remote-credential" \ JCLOUDS_KEYSTONE_VERSION="" \ JCLOUDS_KEYSTONE_SCOPE="" \ JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME="" \ JCLOUDS_FILESYSTEM_BASEDIR="/data" EXPOSE 80 443 
ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/opt/s3proxy/run-docker-container.sh"] ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: README.md ================================================ # S3Proxy [![Github All Releases](https://img.shields.io/github/downloads/gaul/s3proxy/total.svg)](https://github.com/gaul/s3proxy/releases/) [![Docker Pulls](https://img.shields.io/docker/pulls/andrewgaul/s3proxy.svg)](https://hub.docker.com/r/andrewgaul/s3proxy/) [![Maven Central](https://img.shields.io/maven-central/v/org.gaul/s3proxy.svg)](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22s3proxy%22) [![Twitter Follow](https://img.shields.io/twitter/follow/S3Proxy.svg?style=social&label=Follow)](https://twitter.com/S3Proxy) S3Proxy implements the [S3 API](https://en.wikipedia.org/wiki/Amazon_S3#S3_API_and_competing_services) and *proxies* requests, enabling several use cases: * translation from S3 to Backblaze B2, EMC Atmos, Google Cloud, Microsoft Azure, and OpenStack Swift * testing without Amazon by using the local filesystem * extension via middlewares * embedding into Java applications ## Usage with Docker [Docker Hub](https://hub.docker.com/r/andrewgaul/s3proxy/) hosts a Docker image and has instructions on how to run it. ## Usage without Docker Users can [download releases](https://github.com/gaul/s3proxy/releases) from GitHub. Developers can build the project by running `mvn package` which produces a binary at `target/s3proxy`. S3Proxy requires Java 17 or newer to run. Configure S3Proxy via a properties file. An example using the local file system as the storage backend with anonymous access: ``` s3proxy.authorization=none s3proxy.endpoint=http://127.0.0.1:8080 jclouds.provider=filesystem jclouds.filesystem.basedir=/tmp/s3proxy ``` First create the filesystem basedir: ``` mkdir /tmp/s3proxy ``` Next run S3Proxy. 
Linux and Mac OS X users can run the executable jar: ``` chmod +x s3proxy s3proxy --properties s3proxy.conf ``` Windows users must explicitly invoke java: ``` java -jar s3proxy --properties s3proxy.conf ``` Finally test by creating a bucket then listing all the buckets: ``` $ curl --request PUT http://localhost:8080/testbucket $ curl http://localhost:8080/ 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06aCustomersName@amazon.comtestbucket2015-08-05T22:16:24.000Z ``` ## Usage with Java Maven Central hosts S3Proxy artifacts and the wiki has [instructions on Java use](https://github.com/gaul/s3proxy/wiki/Using-S3Proxy-in-Java-projects). ## Supported storage backends * atmos * aws-s3 (Amazon-only, deprecated) * aws-s3-sdk (S3-compatible backends via AWS SDK, recommended) * azureblob (deprecated) * azureblob-sdk (recommended) * b2 * filesystem (on-disk storage, deprecated) * filesystem-nio2 (on-disk storage, recommended) * google-cloud-storage (deprecated) * google-cloud-storage-sdk (recommended) * openstack-swift * rackspace-cloudfiles-uk and rackspace-cloudfiles-us * s3 (non-Amazon, deprecated) * transient (in-memory storage, deprecated) * transient-nio2 (in-memory storage, recommended) See the wiki for [examples of configurations](https://github.com/gaul/s3proxy/wiki/Storage-backend-examples). ## Assigning buckets to backends S3Proxy can be configured to assign buckets to different backends with the same credentials. The configuration in the properties file is as follows: ``` s3proxy.bucket-locator.1=bucket s3proxy.bucket-locator.2=another-bucket ``` In addition to the explicit names, [glob syntax](https://docs.oracle.com/javase/tutorial/essential/io/fileOps.html#glob) can be used to configure many buckets for a given backend. A bucket (or a glob) cannot be assigned to multiple backends. 
## Middlewares S3Proxy can modify its behavior based on middlewares: * [bucket aliasing](https://github.com/gaul/s3proxy/wiki/Middleware-alias-blobstore) * [bucket prefix scoping](https://github.com/gaul/s3proxy/wiki/Middleware-prefix-blobstore) * [bucket locator](https://github.com/gaul/s3proxy/wiki/Middleware-bucket-locator) * [eventual consistency modeling](https://github.com/gaul/s3proxy/wiki/Middleware---eventual-consistency) * [large object mocking](https://github.com/gaul/s3proxy/wiki/Middleware-large-object-mocking) * [latency](https://github.com/gaul/s3proxy/wiki/Middleware-latency) * [read-only](https://github.com/gaul/s3proxy/wiki/Middleware-read-only) * [regex rename blobs](https://github.com/gaul/s3proxy/wiki/Middleware-regex) * [sharded backend containers](https://github.com/gaul/s3proxy/wiki/Middleware-sharded-backend) * [storage class override](https://github.com/gaul/s3proxy/wiki/Middleware-storage-class-override) * [user metadata replacer](https://github.com/gaul/s3proxy/wiki/Middleware-user-metadata-replacer) * [no cache override](https://github.com/gaul/s3proxy/wiki/Middleware-no-cache) ## SSL Support S3Proxy can listen on HTTPS by setting the `secure-endpoint` and [configuring a keystore](http://wiki.eclipse.org/Jetty/Howto/Configure_SSL#Generating_Keys_and_Certificates_with_JDK_keytool). You can read more about how to configure S3Proxy for SSL Support in [the dedicated wiki page](https://github.com/gaul/s3proxy/wiki/SSL-support) with Docker, Kubernetes or simply Java. ## Limitations S3Proxy has broad compatibility with the S3 API; however, it does not support: * ACLs other than private and public-read * BitTorrent hosting * bucket logging * bucket policies * [CORS bucket operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html#how-do-i-enable-cors) like getting or setting the CORS configuration for a bucket. S3Proxy only supports a static configuration (see below).
* hosting static websites * object server-side encryption * object tagging * object versioning, see [#74](https://github.com/gaul/s3proxy/issues/74) * POST upload policies, see [#73](https://github.com/gaul/s3proxy/issues/73) * requester pays buckets * [select object content](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) S3Proxy emulates the following operations: * conditional PUT object when using If-Match or If-None-Match, unless the `azureblob-sdk` provider is used * copy multi-part objects, see [#76](https://github.com/gaul/s3proxy/issues/76) S3Proxy has basic CORS preflight and actual request/response handling. It can be configured within the properties file (and corresponding ENV variables for Docker): ``` s3proxy.cors-allow-origins=https://example\.com https://.+\.example\.com https://example\.cloud s3proxy.cors-allow-methods=GET PUT s3proxy.cors-allow-headers=Accept Content-Type s3proxy.cors-allow-credential=true ``` CORS cannot be configured per bucket. `s3proxy.cors-allow-all=true` will accept any origin and header. Actual CORS requests are supported for GET, PUT, POST, HEAD and DELETE methods. The wiki collects [compatibility notes](https://github.com/gaul/s3proxy/wiki/Storage-backend-compatibility) for specific storage backends. 
## Support * [GitHub issues](https://github.com/gaul/s3proxy/issues) * [Stack Overflow](https://stackoverflow.com/questions/tagged/s3proxy) * [commercial support](mailto:andrew@gaul.org) ## References * [Apache jclouds](https://jclouds.apache.org/) provides storage backend support for S3Proxy * [Ceph s3-tests](https://github.com/ceph/s3-tests) help maintain and improve compatibility with the S3 API * [fake-s3](https://github.com/jubos/fake-s3), [gofakes3](https://github.com/johannesboyne/gofakes3), [minio](https://github.com/minio/minio), [S3 ninja](https://github.com/scireum/s3ninja), and [s3rver](https://github.com/jamhall/s3rver) provide functionality similar to S3Proxy when using the filesystem backend * [GlacierProxy](https://github.com/bouncestorage/glacier-proxy) and [SwiftProxy](https://github.com/bouncestorage/swiftproxy) provide similar functionality for the Amazon Glacier and OpenStack Swift APIs * [s3mock](https://github.com/adobe/S3Mock) - Adobe's s3 mock implementation * [sbt-s3](https://github.com/localytics/sbt-s3) runs S3Proxy via the Scala Build Tool * [swift3](https://github.com/openstack/swift3) provides an S3 middleware for OpenStack Swift * [Zenko](https://www.zenko.io/) provide similar multi-cloud functionality ## License Copyright (C) 2014-2026 Andrew Gaul Licensed under the Apache License, Version 2.0 ================================================ FILE: docs/Encryption.md ================================================ S3Proxy # Encryption ## Motivation The motivation behind this implementation is to provide a fully transparent and secure encryption to the s3 client while having the ability to write into different clouds. ## Cipher mode The chosen cipher is ```AES/CFB/NoPadding``` because it provides the ability to read from an offset like in the middle of a ```Blob```. While reading from an offset the decryption process needs to consider the previous 16 bytes of the AES block. 
### Key generation The encryption uses a 128-bit key that will be derived from a given password and salt in combination with random initialization vector that will be stored in each part padding. ## How a blob is encrypted Every uploaded part get a padding of 64 bytes that includes the necessary information for decryption. The input stream from a s3 client is passed through ```CipherInputStream``` and piped to append the 64 byte part padding at the end the encrypted stream. The encrypted input stream is then processed by the ```BlobStore``` to save the ```Blob```. | Name | Byte size | Description | |-----------|-----------|----------------------------------------------------------------| | Delimiter | 8 byte | The delimiter is used to detect if the ```Blob``` is encrypted | | IV | 16 byte | AES initialization vector | | Part | 4 byte | The part number | | Size | 8 byte | The unencrypted size of the ```Blob``` | | Version | 2 byte | Version can be used in the future if changes are necessary | | Reserved | 26 byte | Reserved for future use | ### Multipart handling A single ```Blob``` can be uploaded by the client into multiple parts. After the completion all parts are concatenated into a single ```Blob```. This procedure will result in multiple parts and paddings being held by a single ```Blob```. ### Single blob example ``` ------------------------------------- | ENCRYPTED BYTES | PADDING | ------------------------------------- ``` ### Multipart blob example ``` ------------------------------------------------------------------------------------- | ENCRYPTED BYTES | PADDING | ENCRYPTED BYTES | PADDING | ENCRYPTED BYTES | PADDING | ------------------------------------------------------------------------------------- ``` ## How a blob is decrypted The decryption is way more complex than the encryption. 
The decryption process needs to take care of the following circumstances: - decryption of the entire ```Blob``` - decryption from a specific offset by skipping initial bytes - decryption of bytes by reading from the end (tail) - decryption of a specific byte range like the middle of the ```Blob``` - decryption of all previous situations by considering an underlying multipart ```Blob``` ### Single blob decryption First the ```BlobMetadata``` is requested to get the encrypted ```Blob``` size. The last 64 bytes of ```PartPadding``` are fetched and inspected to detect if a decryption is necessary. The cipher is then initialized with the IV and the key. ### Multipart blob decryption The process is similar to the single ```Blob``` decryption but with the difference that a list of parts is computed by fetching all ```PartPadding``` from end to the beginning. ## Blob suffix Each stored ```Blob``` will get a suffix named ```.s3enc```; this helps to determine if a ```Blob``` is encrypted. For the s3 client the ```.s3enc``` suffix is not visible and the ```Blob``` size will always show the unencrypted size. 
## Tested jClouds provider - S3 - Minio - OBS from OpenTelekomCloud - AWS S3 - Azure - GCP - Local ## Limitation - All blobs are encrypted with the same key that is derived from a given password - No support for re-encryption - Returned eTag always differs therefore clients should not verify it - Decryption of a ```Blob``` will always result in multiple calls against the backend for instance a GET will result in a HEAD + GET because the size of the blob needs to be determined ================================================ FILE: docs/Logging.md ================================================ # Logging ## Configuration The following environment variables can be used to configure logging * LOG_LEVEL default value "info" used to configure log level * LOG_APPENDER default value "STDOUT" produce string formatted logs "CONTAINER" used to produce json formatted logs ================================================ FILE: pom.xml ================================================ 4.0.0 org.gaul s3proxy 3.2.0-SNAPSHOT jar S3Proxy https://github.com/gaul/s3proxy Access other storage backends via the S3 API The Apache Software License, Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.txt repo scm:git:git@github.com:gaul/s3proxy.git scm:git:git@github.com:gaul/s3proxy.git git@github.com:gaul/s3proxy.git Andrew Gaul gaul andrew@gaul.org sonatype-central-portal Sonatype Central Portal https://central.sonatype.com/repository/maven-snapshots/ sonatype-central-portal Sonatype Central Portal https://repo.maven.apache.org/maven2/ release org.apache.maven.plugins maven-gpg-plugin 3.2.8 sign-artifacts verify sign eu.maveniverse.maven.njord extension ${njord.version} eu.maveniverse.maven.plugins njord ${njord.version} org.apache.maven.plugins maven-enforcer-plugin 3.6.2 enforce-maven enforce 3.6.3 org.apache.maven.plugins maven-clean-plugin 3.5.0 org.apache.maven.plugins maven-install-plugin 3.1.4 org.apache.maven.plugins maven-deploy-plugin 3.1.4 org.apache.maven.plugins 
maven-checkstyle-plugin 3.6.0 check verify check src/main/resources/checkstyle.xml src/main/resources/copyright_header.txt true warning true com.puppycrawl.tools checkstyle 12.3.1 org.apache.maven.plugins maven-resources-plugin 3.5.0 org.apache.maven.plugins maven-compiler-plugin 3.15.0 ${java.version} ${java.version} true true true -Xlint -XDcompilePolicy=simple --should-stop=ifError=FLOW -Xplugin:ErrorProne -Xep:JavaUtilDate:OFF -Xep:DefaultCharset:OFF -Xep:StringCaseLocaleUsage:OFF -Xep:ProtectedMembersInFinalClass:OFF -Xep:JavaTimeDefaultTimeZone:OFF -J--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED -J--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED -J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED -J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED com.google.auto.service auto-service 1.1.1 com.google.errorprone error_prone_core 2.36.0 org.apache.maven.plugins maven-jar-plugin 3.5.0 true true io.github.git-commit-id git-commit-id-maven-plugin 9.0.2 revision true git.commit.id.abbrev git.commit.id org.apache.maven.plugins maven-javadoc-plugin 3.12.0 attach-javadocs jar all,-missing org.apache.maven.plugins maven-shade-plugin 3.6.2 package shade false org.eclipse.jetty:* META-INF/MANIFEST.MF META-INF/LICENSE META-INF/NOTICE.txt about.html org.eclipse.jetty.ee10:* META-INF/MANIFEST.MF META-INF/LICENSE META-INF/NOTICE.txt about.html org.eclipse.jetty:* org.eclipse.jetty.ee10:* org.eclipse.jetty ${shade.prefix}.org.eclipse.jetty 
org.apache.maven.plugins maven-assembly-plugin 3.8.0 src/main/assembly/jar-with-dependencies.xml org.gaul.s3proxy.Main true make-assembly package single org.apache.maven.plugins maven-source-plugin 3.4.0 attach-sources jar-no-fork org.apache.maven.plugins maven-surefire-plugin ${surefire.version} org.apache.maven.surefire surefire-junit47 ${surefire.version} org.apache.maven.surefire surefire-junit-platform ${surefire.version} classes 1 -Xmx512m true 1800 random false junit false com.github.spotbugs spotbugs-maven-plugin 4.9.8.3 Max CrossSiteScripting,DefaultEncodingDetector,FindNullDeref jp.skypencil.findbugs.slf4j bug-pattern 1.5.0 org.skife.maven really-executable-jar-maven-plugin 2.1.1 target/s3proxy-${project.version}-jar-with-dependencies.jar s3proxy package really-executable-jar org.gaul modernizer-maven-plugin ${modernizer.version} modernizer verify modernizer ${java.version} UTF-8 17 1.12.797 2.42.31 2.7.0 12.1.8 3.3.0 0.7.5 1.60.1 1.40.0 2.0.17 ${project.groupId}.shaded 3.5.5 com.fasterxml.jackson jackson-bom 2.21.2 pom import io.opentelemetry opentelemetry-bom ${opentelemetry.version} pom import org.junit junit-bom 6.0.3 pom import io.opentelemetry opentelemetry-api io.opentelemetry opentelemetry-sdk io.opentelemetry opentelemetry-exporter-prometheus 1.60.1-alpha io.opentelemetry.semconv opentelemetry-semconv ${opentelemetry-semconv.version} com.amazonaws aws-java-sdk-s3 ${aws-sdk.version} test commons-logging commons-logging com.amazonaws aws-java-sdk-sts ${aws-sdk.version} args4j args4j 2.37 ch.qos.logback logback-classic 1.5.32 com.google.cloud google-cloud-storage 2.64.1 com.azure azure-storage-blob 12.32.0 com.azure azure-identity 1.18.2 com.google.auto.service auto-service 1.1.1 com.google.guava guava 33.5.0-jre com.google.jimfs jimfs 1.3.1 javax.xml.bind jaxb-api 2.3.1 junit junit 4.13.2 provided org.junit.jupiter junit-jupiter 6.0.3 provided org.junit.platform junit-platform-launcher test com.fasterxml.jackson.dataformat jackson-dataformat-xml 
2.21.2 com.github.spotbugs spotbugs-annotations 4.9.8 provided org.jspecify jspecify 1.0.0 org.apache.jclouds jclouds-allblobstore ${jclouds.version} org.apache.jclouds.api filesystem ${jclouds.version} org.apache.jclouds.driver jclouds-slf4j ${jclouds.version} org.assertj assertj-core test 3.27.7 org.eclipse.jetty.ee10 jetty-ee10-servlet ${jetty.version} org.gaul modernizer-maven-annotations ${modernizer.version} org.slf4j slf4j-api ${slf4j.version} org.slf4j jcl-over-slf4j ${slf4j.version} software.amazon.awssdk s3 ${aws-sdkv2.version} software.amazon.awssdk sts ${aws-sdkv2.version} ================================================ FILE: src/main/assembly/jar-with-dependencies.xml ================================================ jar-with-dependencies jar false metaInf-services org.eclipse.jetty:* org.eclipse.jetty.ee10:* / true true runtime ${project.basedir}/src/main/config / logback.xml true ================================================ FILE: src/main/config/logback.xml ================================================ [s3proxy] %.-1p %d{MM-dd HH:mm:ss.SSS} %t %c{30}:%L %X{clientId}|%X{sessionId}:%X{messageId}:%X{fileId}] %m%n ${LOG_LEVEL:-info} ${LOG_LEVEL:-info} ================================================ FILE: src/main/java/org/gaul/s3proxy/AccessControlPolicy.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import java.util.Collection; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; import com.google.common.base.MoreObjects; /** Represent an Amazon AccessControlPolicy for a container or object. */ // CHECKSTYLE:OFF final class AccessControlPolicy { @JacksonXmlProperty(localName = "Owner") Owner owner; @JacksonXmlProperty(localName = "AccessControlList") AccessControlList aclList; @Override public String toString() { return MoreObjects.toStringHelper(AccessControlList.class) .add("owner", owner) .add("aclList", aclList) .toString(); } static final class Owner { @JacksonXmlProperty(localName = "ID") String id; @JacksonXmlProperty(localName = "DisplayName") String displayName; @Override public String toString() { return MoreObjects.toStringHelper(Owner.class) .add("id", id) .add("displayName", displayName) .toString(); } } static final class AccessControlList { @JacksonXmlProperty(localName = "Grant") @JacksonXmlElementWrapper(useWrapping = false) Collection grants; @Override public String toString() { return MoreObjects.toStringHelper(AccessControlList.class) .add("grants", grants) .toString(); } static final class Grant { @JacksonXmlProperty(localName = "Grantee") Grantee grantee; @JacksonXmlProperty(localName = "Permission") String permission; @Override public String toString() { return MoreObjects.toStringHelper(Grant.class) .add("grantee", grantee) .add("permission", permission) .toString(); } static final class Grantee { @JacksonXmlProperty(namespace = "xsi", localName = "type", isAttribute = true) String type; @JacksonXmlProperty(localName = "ID") String id; @JacksonXmlProperty(localName = "DisplayName") String displayName; @JacksonXmlProperty(localName = "EmailAddress") String emailAddress; @JacksonXmlProperty(localName = "URI") String uri; @Override public String toString() { return MoreObjects.toStringHelper(Grantee.class) 
.add("type", type) .add("id", id) .add("displayName", displayName) .add("emailAddress", emailAddress) .add("uri", uri) .toString(); } } } } } // CHECKSTYLE:ON ================================================ FILE: src/main/java/org/gaul/s3proxy/AliasBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static java.util.Objects.requireNonNull; import static com.google.common.base.Preconditions.checkArgument; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import com.google.common.collect.BiMap; import com.google.common.collect.ImmutableBiMap; import com.google.common.collect.ImmutableList; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.MutableStorageMetadata; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import 
org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.domain.Location; import org.jclouds.io.Payload; /** * This class implements a middleware to alias buckets to a different name. * The aliases are configured as: * s3proxy.alias-blobstore.<alias name> = <backend bucket> * * The aliases appear in bucket listings if the configured * backend buckets are present. Requests for all other buckets are unaffected. */ public final class AliasBlobStore extends ForwardingBlobStore { private final BiMap aliases; private AliasBlobStore(BlobStore delegate, BiMap aliases) { super(delegate); this.aliases = requireNonNull(aliases); } static BlobStore newAliasBlobStore(BlobStore delegate, BiMap aliases) { return new AliasBlobStore(delegate, aliases); } private MultipartUpload getDelegateMpu(MultipartUpload mpu) { return MultipartUpload.create( getContainer(mpu.containerName()), mpu.blobName(), mpu.id(), mpu.blobMetadata(), mpu.putOptions()); } public static ImmutableBiMap parseAliases( Properties properties) { Map backendBuckets = new HashMap<>(); for (String key : properties.stringPropertyNames()) { if (key.startsWith(S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE)) { String virtualBucket = key.substring( S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE.length() + 1); String backendBucket = properties.getProperty(key); checkArgument( !backendBuckets.containsKey(backendBucket), "Backend bucket %s is aliased twice", backendBucket); backendBuckets.put(backendBucket, virtualBucket); } } return ImmutableBiMap.copyOf(backendBuckets).inverse(); } private String getContainer(String container) { return this.aliases.getOrDefault(container, container); } @Override public boolean createContainerInLocation(Location location, String container) { return this.delegate().createContainerInLocation(location, getContainer(container)); } @Override 
public boolean createContainerInLocation( Location location, String container, CreateContainerOptions options) { return delegate().createContainerInLocation( location, getContainer(container), options); } @Override public boolean containerExists(String container) { return delegate().containerExists(getContainer(container)); } @Override public ContainerAccess getContainerAccess(String container) { return delegate().getContainerAccess(getContainer(container)); } @Override public void setContainerAccess(String container, ContainerAccess containerAccess) { delegate().setContainerAccess(getContainer(container), containerAccess); } @Override public PageSet list() { PageSet upstream = this.delegate().list(); var results = new ImmutableList.Builder(); for (StorageMetadata sm : upstream) { if (aliases.containsValue(sm.getName())) { MutableStorageMetadata bucketAlias = new MutableStorageMetadataImpl(); bucketAlias.setName(aliases.inverse().get(sm.getName())); bucketAlias.setCreationDate(sm.getCreationDate()); bucketAlias.setETag(sm.getETag()); bucketAlias.setId(sm.getProviderId()); bucketAlias.setLastModified(sm.getLastModified()); bucketAlias.setLocation(sm.getLocation()); bucketAlias.setSize(sm.getSize()); bucketAlias.setTier(sm.getTier()); bucketAlias.setType(sm.getType()); // TODO: the URI should be rewritten to use the alias bucketAlias.setUri(sm.getUri()); bucketAlias.setUserMetadata(sm.getUserMetadata()); results.add(bucketAlias); } else { results.add(sm); } } return new PageSetImpl<>(results.build(), upstream.getNextMarker()); } @Override public PageSet list(String container) { return delegate().list(getContainer(container)); } @Override public PageSet list( String container, ListContainerOptions options) { return delegate().list(getContainer(container), options); } @Override public void clearContainer(String container) { delegate().clearContainer(getContainer(container)); } @Override public void clearContainer(String container, ListContainerOptions options) { 
delegate().clearContainer(getContainer(container), options); } @Override public void deleteContainer(String container) { delegate().deleteContainer(getContainer(container)); } @Override public boolean deleteContainerIfEmpty(String container) { return delegate().deleteContainerIfEmpty(getContainer(container)); } @Override public boolean blobExists(String container, String name) { return delegate().blobExists(getContainer(container), name); } @Override public BlobMetadata blobMetadata(String container, String name) { return delegate().blobMetadata(getContainer(container), name); } @Override public Blob getBlob(String containerName, String blobName) { return delegate().getBlob(getContainer(containerName), blobName); } @Override public Blob getBlob(String containerName, String blobName, GetOptions getOptions) { return delegate().getBlob(getContainer(containerName), blobName, getOptions); } @Override public String putBlob(String containerName, Blob blob) { return delegate().putBlob(getContainer(containerName), blob); } @Override public String putBlob(final String containerName, Blob blob, final PutOptions options) { return delegate().putBlob(getContainer(containerName), blob, options); } @Override public void removeBlob(final String containerName, final String blobName) { delegate().removeBlob(getContainer(containerName), blobName); } @Override public void removeBlobs(final String containerName, final Iterable blobNames) { delegate().removeBlobs(getContainer(containerName), blobNames); } @Override public String copyBlob(final String fromContainer, final String fromName, final String toContainer, final String toName, final CopyOptions options) { return delegate().copyBlob(getContainer(fromContainer), fromName, getContainer(toContainer), toName, options); } @Override public MultipartUpload initiateMultipartUpload( String container, BlobMetadata blobMetadata, PutOptions options) { MultipartUpload mpu = delegate().initiateMultipartUpload( getContainer(container), 
blobMetadata, options); return MultipartUpload.create(container, blobMetadata.getName(), mpu.id(), mpu.blobMetadata(), mpu.putOptions()); } @Override public void abortMultipartUpload(MultipartUpload mpu) { delegate().abortMultipartUpload(getDelegateMpu(mpu)); } @Override public String completeMultipartUpload(final MultipartUpload mpu, final List parts) { return delegate().completeMultipartUpload(getDelegateMpu(mpu), parts); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { return delegate().uploadMultipartPart(getDelegateMpu(mpu), partNumber, payload); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/AuthenticationType.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import com.google.common.base.CaseFormat; public enum AuthenticationType { AWS_V2, AWS_V4, AWS_V2_OR_V4, NONE; static AuthenticationType fromString(String string) { return AuthenticationType.valueOf(CaseFormat.LOWER_HYPHEN.to( CaseFormat.UPPER_UNDERSCORE, string)); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/AwsHttpHeaders.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; final class AwsHttpHeaders { static final String ACL = "x-amz-acl"; static final String API_VERSION = "x-amz-api-version"; static final String CHECKSUM_ALGORITHM = "x-amz-checksum-algorithm"; static final String CHECKSUM_CRC32 = "x-amz-checksum-crc32"; static final String CHECKSUM_CRC32C = "x-amz-checksum-crc32c"; static final String CHECKSUM_CRC64NVME = "x-amz-checksum-crc64nvme"; static final String CHECKSUM_MODE = "x-amz-checksum-mode"; static final String CHECKSUM_SHA1 = "x-amz-checksum-sha1"; static final String CHECKSUM_SHA256 = "x-amz-checksum-sha256"; static final String CONTENT_SHA256 = "x-amz-content-sha256"; static final String COPY_SOURCE = "x-amz-copy-source"; static final String COPY_SOURCE_IF_MATCH = "x-amz-copy-source-if-match"; static final String COPY_SOURCE_IF_MODIFIED_SINCE = "x-amz-copy-source-if-modified-since"; static final String COPY_SOURCE_IF_NONE_MATCH = "x-amz-copy-source-if-none-match"; static final String COPY_SOURCE_IF_UNMODIFIED_SINCE = "x-amz-copy-source-if-unmodified-since"; static final String COPY_SOURCE_RANGE = "x-amz-copy-source-range"; static final String DATE = "x-amz-date"; static final String DECODED_CONTENT_LENGTH = "x-amz-decoded-content-length"; static final String METADATA_DIRECTIVE = "x-amz-metadata-directive"; static final String REQUEST_ID = "x-amz-request-id"; static final String SDK_CHECKSUM_ALGORITHM = "x-amz-sdk-checksum-algorithm"; static final String STORAGE_CLASS = "x-amz-storage-class"; static final String TRAILER = "x-amz-trailer"; static final String TRANSFER_ENCODING = 
"x-amz-te"; static final String USER_AGENT = "x-amz-user-agent"; private AwsHttpHeaders() { throw new AssertionError("intentionally unimplemented"); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/AwsSignature.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.security.InvalidKeyException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.regex.Pattern; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.SortedSetMultimap; import com.google.common.collect.TreeMultimap; import com.google.common.io.BaseEncoding; import com.google.common.net.HttpHeaders; import com.google.common.net.PercentEscaper; import jakarta.servlet.http.HttpServletRequest; import org.jspecify.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; final class AwsSignature { private static final Logger logger = LoggerFactory.getLogger( AwsSignature.class); private static final 
PercentEscaper AWS_URL_PARAMETER_ESCAPER =
        new PercentEscaper("-_.~", false);
    // Query parameters that participate in the V2 string-to-sign.
    // NOTE(review): generic type parameters (e.g. Set<String>) appear to have
    // been stripped by the source extraction -- confirm against upstream.
    private static final Set SIGNED_SUBRESOURCES = Set.of(
            "acl", "delete", "lifecycle", "location", "logging", "notification",
            "partNumber", "policy", "requestPayment",
            "response-cache-control", "response-content-disposition",
            "response-content-encoding", "response-content-language",
            "response-content-type", "response-expires", "torrent",
            "uploadId", "uploads", "versionId", "versioning", "versions",
            "website"
    );
    // Used to collapse internal whitespace runs during V4 header
    // canonicalization.
    private static final Pattern REPEATING_WHITESPACE =
            Pattern.compile("\\s+");

    private AwsSignature() {
    }

    /**
     * Create Amazon V2 signature. Reference:
     * http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
     *
     * @param request incoming servlet request to canonicalize
     * @param uri canonicalized resource path to sign
     * @param credential the caller's secret key
     * @param queryAuth true for query-string (presigned) auth; the Expires
     *        parameter then replaces the Date header in the string-to-sign
     * @param bothDateHeader true when both Date and x-amz-date are present
     * @return Base64-encoded HmacSHA1 signature
     */
    static String createAuthorizationSignature(
            HttpServletRequest request, String uri, String credential,
            boolean queryAuth, boolean bothDateHeader) {
        // sort Amazon headers
        SortedSetMultimap canonicalizedHeaders = TreeMultimap.create();
        for (String headerName : Collections.list(request.getHeaderNames())) {
            Collection headerValues = Collections.list(
                    request.getHeaders(headerName));
            headerName = headerName.toLowerCase();
            // only x-amz-* headers are canonicalized; x-amz-date is skipped
            // here when Date is also present (it is handled below)
            if (!headerName.startsWith("x-amz-") ||
                    (bothDateHeader &&
                     headerName.equalsIgnoreCase(AwsHttpHeaders.DATE))) {
                continue;
            }
            if (headerValues.isEmpty()) {
                canonicalizedHeaders.put(headerName, "");
            }
            for (String headerValue : headerValues) {
                canonicalizedHeaders.put(headerName,
                        Strings.nullToEmpty(headerValue));
            }
        }

        // Build string to sign
        var builder = new StringBuilder()
                .append(request.getMethod())
                .append('\n')
                .append(Strings.nullToEmpty(request.getHeader(
                        HttpHeaders.CONTENT_MD5)))
                .append('\n')
                .append(Strings.nullToEmpty(request.getHeader(
                        HttpHeaders.CONTENT_TYPE)))
                .append('\n');
        String expires = request.getParameter("Expires");
        if (queryAuth) {
            // If expires is not nil, then it is query string sign
            // If expires is nil, maybe also query string sign
            // So should check other accessid param, presign to judge.
            // not the expires
            builder.append(Strings.nullToEmpty(expires));
        } else {
            if (!bothDateHeader) {
                // when x-amz-date is supplied the Date slot is left empty;
                // the header itself is emitted with the canonicalized headers
                if (canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {
                    builder.append("");
                } else {
                    builder.append(request.getHeader(HttpHeaders.DATE));
                }
            } else {
                if (!canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {
                    builder.append(request.getHeader(AwsHttpHeaders.DATE));
                } else {
                    // panic
                }
            }
        }

        builder.append('\n');
        for (var entry : canonicalizedHeaders.entries()) {
            builder.append(entry.getKey()).append(':')
                    .append(entry.getValue()).append('\n');
        }
        builder.append(uri);

        // append the signed subresources in sorted order
        char separator = '?';
        List subresources = Collections.list(
                request.getParameterNames());
        Collections.sort(subresources);
        for (String subresource : subresources) {
            if (SIGNED_SUBRESOURCES.contains(subresource)) {
                builder.append(separator).append(subresource);

                String value = request.getParameter(subresource);
                if (!"".equals(value)) {
                    builder.append('=').append(value);
                }
                separator = '&';
            }
        }

        String stringToSign = builder.toString();
        logger.trace("stringToSign: {}", stringToSign);

        // Sign string
        Mac mac;
        try {
            mac = Mac.getInstance("HmacSHA1");
            mac.init(new SecretKeySpec(credential.getBytes(
                    StandardCharsets.UTF_8), "HmacSHA1"));
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
        return Base64.getEncoder().encodeToString(mac.doFinal(
                stringToSign.getBytes(StandardCharsets.UTF_8)));
    }

    /** Compute HMAC of data with key using the given JCA algorithm name. */
    private static byte[] signMessage(byte[] data, byte[] key,
            String algorithm)
            throws InvalidKeyException, NoSuchAlgorithmException {
        Mac mac = Mac.getInstance(algorithm);
        mac.init(new SecretKeySpec(key, algorithm));
        return mac.doFinal(data);
    }

    /**
     * Derive the AWS SigV4 signing key from the credential and auth header.
*/
    static byte[] deriveSigningKeyV4(S3AuthorizationHeader authHeader,
            String credential)
            throws InvalidKeyException, NoSuchAlgorithmException {
        String algorithm = authHeader.getHmacAlgorithm();
        // chained HMACs: date -> region -> service -> "aws4_request"
        byte[] dateKey = signMessage(
                authHeader.getDate().getBytes(StandardCharsets.UTF_8),
                ("AWS4" + credential).getBytes(StandardCharsets.UTF_8),
                algorithm);
        byte[] dateRegionKey = signMessage(
                authHeader.getRegion().getBytes(StandardCharsets.UTF_8),
                dateKey, algorithm);
        byte[] dateRegionServiceKey = signMessage(
                authHeader.getService().getBytes(StandardCharsets.UTF_8),
                dateRegionKey, algorithm);
        return signMessage(
                "aws4_request".getBytes(StandardCharsets.UTF_8),
                dateRegionServiceKey, algorithm);
    }

    /** Hash payload with the given algorithm, lowercase-hex-encoded. */
    private static String getMessageDigest(byte[] payload, String algorithm)
            throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance(algorithm);
        byte[] hash = md.digest(payload);
        return BaseEncoding.base16().lowerCase().encode(hash);
    }

    /**
     * Extract the semicolon-separated header names from the
     * "SignedHeaders=" field of an Authorization header.
     * Returns null when the field is absent or not followed by a comma
     * (i.e. SignedHeaders must not be the last field).
     */
    @Nullable
    private static List extractSignedHeaders(String authorization) {
        int index = authorization.indexOf("SignedHeaders=");
        if (index < 0) {
            return null;
        }
        int endSigned = authorization.indexOf(',', index);
        if (endSigned < 0) {
            return null;
        }
        int startHeaders = authorization.indexOf('=', index);
        return Splitter.on(';').splitToList(authorization.substring(
                startHeaders + 1, endSigned));
    }

    /**
     * Build the SigV4 canonical headers block: lowercased, sorted header
     * names, each with trimmed values joined by commas; whitespace runs
     * inside unquoted values are collapsed to a single space.
     */
    private static String buildCanonicalHeaders(HttpServletRequest request,
            List signedHeaders) {
        List headers = new ArrayList<>(
                /*initialCapacity=*/ signedHeaders.size());
        for (String header : signedHeaders) {
            headers.add(header.toLowerCase());
        }
        Collections.sort(headers);
        var headersWithValues = new StringBuilder();
        boolean firstHeader = true;
        for (String header : headers) {
            if (firstHeader) {
                firstHeader = false;
            } else {
                headersWithValues.append('\n');
            }
            headersWithValues.append(header);
            headersWithValues.append(':');
            boolean firstValue = true;
            for (String value : Collections.list(request.getHeaders(header))) {
                if (firstValue) {
                    firstValue = false;
                } else {
                    headersWithValues.append(',');
                }
                value = value.trim();
                // quoted values keep their internal whitespace
                if (!value.startsWith("\"")) {
                    value = REPEATING_WHITESPACE.matcher(value)
                            .replaceAll(" ");
                }
                headersWithValues.append(value);
            }
        }
        return headersWithValues.toString();
    }

    /**
     * Build the SigV4 canonical query string: sorted parameters,
     * percent-escaped in AWS normalized form, excluding X-Amz-Signature.
     */
    private static String buildCanonicalQueryString(
            HttpServletRequest request) {
        // The parameters are required to be sorted
        List parameters = Collections.list(request.getParameterNames());
        Collections.sort(parameters);
        List queryParameters = new ArrayList<>();

        for (String key : parameters) {
            if (key.equals("X-Amz-Signature")) {
                continue;
            }
            // re-encode keys and values in AWS normalized form
            String value = request.getParameter(key);
            queryParameters.add(AWS_URL_PARAMETER_ESCAPER.escape(key) +
                    "=" + AWS_URL_PARAMETER_ESCAPER.escape(value));
        }
        return Joiner.on("&").join(queryParameters);
    }

    /**
     * Assemble and hash the SigV4 canonical request.  Streaming and
     * unsigned-payload markers are passed through verbatim instead of
     * hashing the body.
     */
    private static String createCanonicalRequest(HttpServletRequest request,
            String uri, byte[] payload, String hashAlgorithm)
            throws IOException, NoSuchAlgorithmException {
        String authorizationHeader = request.getHeader("Authorization");
        String xAmzContentSha256 = request.getHeader(
                AwsHttpHeaders.CONTENT_SHA256);
        if (xAmzContentSha256 == null) {
            // NOTE(review): falls back to the X-Amz-SignedHeaders query
            // parameter here -- looks intentional for presigned URLs but
            // verify against upstream history
            xAmzContentSha256 = request.getParameter("X-Amz-SignedHeaders");
        }
        String digest;
        if (authorizationHeader == null) {
            // presigned URL: payload is never signed
            digest = "UNSIGNED-PAYLOAD";
        } else if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(
                xAmzContentSha256)) {
            digest = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
        } else if ("STREAMING-UNSIGNED-PAYLOAD-TRAILER".equals(
                xAmzContentSha256)) {
            digest = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
        } else if ("UNSIGNED-PAYLOAD".equals(xAmzContentSha256)) {
            digest = "UNSIGNED-PAYLOAD";
        } else {
            digest = getMessageDigest(payload, hashAlgorithm);
        }
        List signedHeaders;
        if (authorizationHeader != null) {
            signedHeaders = extractSignedHeaders(authorizationHeader);
        } else {
            signedHeaders = Splitter.on(';').splitToList(request.getParameter(
                    "X-Amz-SignedHeaders"));
        }

        /*
         * CORS Preflight
         *
         * The signature is based on the canonical request, which includes the
         * HTTP Method.
         * For presigned URLs, the method must be replaced for OPTIONS request
         * to match
         */
        String method = request.getMethod();
        if ("OPTIONS".equals(method)) {
            String corsMethod = request.getHeader(
                    HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD);
            if (corsMethod != null) {
                method = corsMethod;
            }
        }

        String canonicalRequest = Joiner.on("\n").join(
                method,
                uri,
                buildCanonicalQueryString(request),
                buildCanonicalHeaders(request, signedHeaders) + "\n",
                Joiner.on(';').join(signedHeaders),
                digest);

        return getMessageDigest(
                canonicalRequest.getBytes(StandardCharsets.UTF_8),
                hashAlgorithm);
    }

    /**
     * Create v4 signature. Reference:
     * http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
     *
     * @return lowercase hex-encoded signature of the string-to-sign
     */
    static String createAuthorizationSignatureV4(
            HttpServletRequest request, S3AuthorizationHeader authHeader,
            byte[] payload, String uri, String credential)
            throws InvalidKeyException, IOException, NoSuchAlgorithmException,
            S3Exception {
        String canonicalRequest = createCanonicalRequest(request, uri, payload,
                authHeader.getHashAlgorithm());
        String algorithm = authHeader.getHmacAlgorithm();
        byte[] signingKey = deriveSigningKeyV4(authHeader, credential);
        String date = request.getHeader(AwsHttpHeaders.DATE);
        if (date == null) {
            date = request.getParameter("X-Amz-Date");
        }
        String signatureString = "AWS4-HMAC-SHA256\n" +
                date + "\n" +
                authHeader.getDate() + "/" + authHeader.getRegion() +
                "/s3/aws4_request\n" +
                canonicalRequest;
        byte[] signature = signMessage(
                signatureString.getBytes(StandardCharsets.UTF_8),
                signingKey, algorithm);
        return BaseEncoding.base16().lowerCase().encode(signature);
    }
}

================================================ FILE: src/main/java/org/gaul/s3proxy/BlobStoreLocator.java ================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.util.Map; import org.jclouds.blobstore.BlobStore; public interface BlobStoreLocator { Map.Entry locateBlobStore(String identity, String container, String blob); } ================================================ FILE: src/main/java/org/gaul/s3proxy/CaseInsensitiveImmutableMultimap.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import java.util.Collection; import com.google.common.collect.ForwardingMultimap; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.Multimap; final class CaseInsensitiveImmutableMultimap extends ForwardingMultimap { private final Multimap inner; CaseInsensitiveImmutableMultimap(Multimap map) { var builder = ImmutableMultimap.builder(); for (var entry : map.entries()) { builder.put(lower(entry.getKey()), entry.getValue()); } this.inner = builder.build(); } @Override protected Multimap delegate() { return inner; } @Override public Collection get(String key) { return inner.get(lower(key)); } private static String lower(String key) { return key == null ? null : key.toLowerCase(); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/ChunkedInputStream.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package org.gaul.s3proxy;

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.io.BaseEncoding;
import com.google.common.io.ByteStreams;

import org.jspecify.annotations.Nullable;

/**
 * Parse an AWS v4 signature chunked stream.  Reference:
 * https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
 */
final class ChunkedInputStream extends FilterInputStream {
    // upper bound on a chunk-header line, guards against unbounded reads
    private static final int MAX_LINE_LENGTH = 4096;
    // SHA-256 of the empty string; the chunk string-to-sign uses it in
    // place of the (empty) trailing-header hash
    private static final String EMPTY_SHA256 =
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";

    private byte[] chunk;            // current decoded chunk body
    private int currentIndex;        // read cursor into chunk
    private int currentLength;       // length of the current chunk
    private String currentSignature; // chunk-signature from the header line
    private final int maxChunkSize;
    private final Hasher hasher;     // non-null only in trailer mode
    private final byte @Nullable [] signingKey;  // non-null only when
                                                 // verifying signatures
    @Nullable private final String hmacAlgorithm;
    @Nullable private final String timestamp;
    @Nullable private final String scope;
    @Nullable private String previousSignature;  // signature chain state

    /** Plain chunked decoding: no trailer checksum, no signature checks. */
    ChunkedInputStream(InputStream is, int maxChunkSize) {
        super(is);
        this.maxChunkSize = maxChunkSize;
        hasher = null;
        signingKey = null;
        hmacAlgorithm = null;
        timestamp = null;
        scope = null;
    }

    /**
     * Chunked decoding that also verifies the x-amz-checksum-* trailer
     * named by {@code trailer}; unknown trailers are ignored.
     */
    @SuppressWarnings("deprecation")
    ChunkedInputStream(InputStream is, int maxChunkSize,
            @Nullable String trailer) {
        super(is);
        this.maxChunkSize = maxChunkSize;
        if ("x-amz-checksum-crc32".equals(trailer)) {
            hasher = Hashing.crc32().newHasher();
        } else if ("x-amz-checksum-crc32c".equals(trailer)) {
            hasher = Hashing.crc32c().newHasher();
        } else if ("x-amz-checksum-sha1".equals(trailer)) {
            hasher = Hashing.sha1().newHasher();
        } else if ("x-amz-checksum-sha256".equals(trailer)) {
            hasher = Hashing.sha256().newHasher();
        } else {
            // TODO: Guava does not support x-amz-checksum-crc64nvme
            hasher = null;
        }
        signingKey = null;
        hmacAlgorithm = null;
        timestamp = null;
        scope = null;
    }

    /**
     * Construct a chunked stream that verifies the per-chunk signature chain
     * used by STREAMING-AWS4-HMAC-SHA256-PAYLOAD.
     *
     * @param seedSignature the Authorization header signature (hex-encoded)
     * @param signingKey the AWS SigV4 signing key
     * @param hmacAlgorithm HMAC algorithm name (e.g. "HmacSHA256")
     * @param timestamp full ISO8601 request timestamp (x-amz-date)
     * @param scope credential scope (date/region/service/aws4_request)
     */
    ChunkedInputStream(InputStream is, int maxChunkSize,
            String seedSignature, byte[] signingKey, String hmacAlgorithm,
            String timestamp, String scope) {
        super(is);
        this.maxChunkSize = maxChunkSize;
        this.hasher = null;
        this.signingKey = signingKey.clone();
        this.hmacAlgorithm = hmacAlgorithm;
        this.timestamp = timestamp;
        this.scope = scope;
        this.previousSignature = seedSignature;
    }

    /**
     * Read one payload byte, decoding the next chunk header (and possibly a
     * checksum trailer) when the current chunk is exhausted.
     */
    @Override
    public int read() throws IOException {
        while (currentIndex == currentLength) {
            String line = readLine(in);
            if (line.equals("")) {
                return -1;
            }
            String[] parts = line.split(";", 2);
            if (parts[0].startsWith("x-amz-checksum-")) {
                // trailer line: verify the accumulated payload checksum
                String[] checksumParts = parts[0].split(":", 2);
                var expectedHash = checksumParts[1];
                var actualHash = switch (checksumParts[0]) {
                case "x-amz-checksum-crc32", "x-amz-checksum-crc32c" ->
                    // Use big-endian to match AWS
                    ByteBuffer.allocate(4).putInt(hasher.hash().asInt())
                            .array();
                case "x-amz-checksum-sha1", "x-amz-checksum-sha256" ->
                    hasher.hash().asBytes();
                default -> throw new IllegalArgumentException(
                        "Unknown value: " + checksumParts[0]);
                };
                if (!expectedHash.equals(
                        Base64.getEncoder().encodeToString(actualHash))) {
                    throw new IOException(
                            new S3Exception(S3ErrorCode.BAD_DIGEST));
                }
                currentLength = 0;
            } else {
                currentLength = Integer.parseInt(parts[0], 16);
                if (currentLength < 0 || currentLength > maxChunkSize) {
                    throw new IOException(
                            "chunk size exceeds maximum: " + currentLength);
                }
            }
            if (parts.length > 1) {
                // header form: SIZE;chunk-signature=HEX
                String sigPart = parts[1];
                int eq = sigPart.indexOf('=');
                currentSignature =
                        eq >= 0 ? sigPart.substring(eq + 1) : sigPart;
            } else {
                currentSignature = null;
            }
            chunk = new byte[currentLength];
            currentIndex = 0;
            ByteStreams.readFully(in, chunk);
            if (hasher != null) {
                hasher.putBytes(chunk);
            }
            if (signingKey != null) {
                verifyChunkSignature(chunk, currentSignature);
            }
            if (currentLength == 0) {
                // final zero-length chunk terminates the stream
                return -1;
            }
            // consume trailing \r\n
            readLine(in);
        }
        return chunk[currentIndex++] & 0xFF;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int i;
        for (i = 0; i < len; ++i) {
            int ch = read();
            if (ch == -1) {
                break;
            }
            b[off + i] = (byte) ch;
        }
        if (i == 0) {
            return -1;
        }
        return i;
    }

    /**
     * Verify one chunk against the SigV4 signature chain and advance
     * previousSignature.  Throws a wrapped SIGNATURE_DOES_NOT_MATCH
     * S3Exception on any mismatch or missing signature.
     */
    private void verifyChunkSignature(byte[] data,
            @Nullable String signature) throws IOException {
        if (signature == null) {
            throw new IOException(new S3Exception(
                    S3ErrorCode.SIGNATURE_DOES_NOT_MATCH));
        }
        String chunkHash;
        try {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            chunkHash = BaseEncoding.base16().lowerCase()
                    .encode(md.digest(data));
        } catch (NoSuchAlgorithmException e) {
            throw new IOException(e);
        }
        String stringToSign = "AWS4-HMAC-SHA256-PAYLOAD\n" +
                timestamp + "\n" +
                scope + "\n" +
                previousSignature + "\n" +
                EMPTY_SHA256 + "\n" +
                chunkHash;
        String expected;
        try {
            Mac mac = Mac.getInstance(hmacAlgorithm);
            mac.init(new SecretKeySpec(signingKey, hmacAlgorithm));
            expected = BaseEncoding.base16().lowerCase().encode(
                    mac.doFinal(stringToSign.getBytes(
                            StandardCharsets.UTF_8)));
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new IOException(e);
        }
        if (!constantTimeEquals(expected, signature)) {
            throw new IOException(new S3Exception(
                    S3ErrorCode.SIGNATURE_DOES_NOT_MATCH));
        }
        previousSignature = signature;
    }

    /** Compare strings without early exit to avoid timing side channels. */
    private static boolean constantTimeEquals(String a, String b) {
        if (a.length() != b.length()) {
            return false;
        }
        int diff = 0;
        for (int i = 0; i < a.length(); i++) {
            diff |= a.charAt(i) ^ b.charAt(i);
        }
        return diff == 0;
    }

    /**
     * Read a \r\n terminated line from an InputStream.
     *
     * @return line without the newline or empty String if InputStream is
     *         empty
     */
    private static String readLine(InputStream is) throws IOException {
        var builder = new StringBuilder();
        while (true) {
            int ch = is.read();
            if (ch == '\r') {
                ch = is.read();
                if (ch == '\n') {
                    break;
                } else {
                    throw new IOException("unexpected char after \\r: " + ch);
                }
            } else if (ch == -1) {
                if (builder.length() > 0) {
                    throw new IOException("unexpected end of stream");
                }
                break;
            }
            if (builder.length() >= MAX_LINE_LENGTH) {
                throw new IOException("chunk header too long");
            }
            builder.append((char) ch);
        }
        return builder.toString();
    }
}

================================================ FILE: src/main/java/org/gaul/s3proxy/CompleteMultipartUploadRequest.java ================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import java.util.Collection; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; // CHECKSTYLE:OFF final class CompleteMultipartUploadRequest { @JacksonXmlProperty(localName = "Part") @JacksonXmlElementWrapper(useWrapping = false) Collection parts; static final class Part { @JacksonXmlProperty(localName = "PartNumber") int partNumber; @JacksonXmlProperty(localName = "ETag") String eTag; // TODO: unsupported checksums @JacksonXmlProperty(localName = "ChecksumCRC32") String checksumCRC32; @JacksonXmlProperty(localName = "ChecksumCRC32C") String checksumCRC32C; @JacksonXmlProperty(localName = "ChecksumSHA1") String checksumSHA1; @JacksonXmlProperty(localName = "ChecksumSHA256") String checksumSHA256; } } // CHECKSTYLE:ON ================================================ FILE: src/main/java/org/gaul/s3proxy/CreateBucketRequest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package org.gaul.s3proxy;

import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;

// CHECKSTYLE:OFF
/**
 * Jackson binding for the S3 CreateBucket request body
 * (CreateBucketConfiguration).
 */
final class CreateBucketRequest {
    // requested bucket region, e.g. "us-east-1"; may be null when the
    // client sends an empty configuration
    @JacksonXmlProperty(localName = "LocationConstraint")
    String locationConstraint;
}
// CHECKSTYLE:ON

================================================ FILE: src/main/java/org/gaul/s3proxy/CrossOriginResourceSharing.java ================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package org.gaul.s3proxy;

import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.base.Strings;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Holds the proxy's CORS configuration and answers origin/method/header
 * allow-list queries.  Origins are matched as case-insensitive regular
 * expressions; methods and headers are matched literally.
 * NOTE(review): generic type parameters (e.g. List&lt;String&gt;) appear to
 * have been stripped by the source extraction -- confirm against upstream.
 */
public final class CrossOriginResourceSharing {
    protected static final List SUPPORTED_METHODS =
            List.of("GET", "HEAD", "PUT", "POST", "DELETE");

    private static final String HEADER_VALUE_SEPARATOR = ", ";
    private static final String ALLOW_ANY_ORIGIN = "*";
    private static final String ALLOW_ANY_HEADER = "*";
    private static final String EXPOSE_ALL_HEADERS = "*";
    private static final String ALLOW_CREDENTIALS = "true";

    private static final Logger logger = LoggerFactory.getLogger(
            CrossOriginResourceSharing.class);

    private final String allowedMethodsRaw;
    private final String allowedHeadersRaw;
    private final String exposedHeadersRaw;
    private final boolean anyOriginAllowed;
    // Enforce ordering of values
    private final List allowedOrigins;
    private final List allowedMethods;
    private final List allowedHeaders;
    private final List exposedHeaders;
    private final String allowCredentials;

    public CrossOriginResourceSharing() {
        // CORS Allow all
        this(List.of(ALLOW_ANY_ORIGIN), SUPPORTED_METHODS,
                List.of(ALLOW_ANY_HEADER), List.of(EXPOSE_ALL_HEADERS),
                "");
    }

    /**
     * @param allowedOrigins regex patterns for permitted Origin values, or
     *        a list containing "*" to allow any origin; null means none
     * @param allowedMethods permitted HTTP methods; null means none
     * @param allowedHeaders permitted request headers, or "*"; null = none
     * @param exposedHeaders headers exposed to the browser; null means none
     * @param allowCredentials "true" to emit Allow-Credentials
     */
    public CrossOriginResourceSharing(List allowedOrigins,
            List allowedMethods, List allowedHeaders,
            List exposedHeaders, String allowCredentials) {
        Set allowedPattern = new HashSet();
        boolean anyOriginAllowed = false;
        if (allowedOrigins != null) {
            if (allowedOrigins.contains(ALLOW_ANY_ORIGIN)) {
                anyOriginAllowed = true;
            } else {
                for (String origin : allowedOrigins) {
                    allowedPattern.add(Pattern.compile(
                            origin, Pattern.CASE_INSENSITIVE));
                }
            }
        }
        this.anyOriginAllowed = anyOriginAllowed;
        // NOTE(review): these are compiled Pattern instances; Pattern uses
        // identity equals, so equals()/hashCode() below effectively compare
        // by instance -- confirm whether that is intended
        this.allowedOrigins = List.copyOf(allowedPattern);

        if (allowedMethods == null) {
            this.allowedMethods = List.of();
        } else {
            this.allowedMethods = List.copyOf(allowedMethods);
        }
        this.allowedMethodsRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(
                this.allowedMethods);

        if (allowedHeaders == null) {
            this.allowedHeaders = List.of();
        } else {
            this.allowedHeaders = List.copyOf(allowedHeaders);
        }
        this.allowedHeadersRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(
                this.allowedHeaders);

        if (exposedHeaders == null) {
            this.exposedHeaders = List.of();
        } else {
            this.exposedHeaders = List.copyOf(exposedHeaders);
        }
        this.exposedHeadersRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(
                this.exposedHeaders);

        this.allowCredentials = allowCredentials;

        logger.info("CORS allowed origins: {}", allowedOrigins);
        logger.info("CORS allowed methods: {}", allowedMethods);
        logger.info("CORS allowed headers: {}", allowedHeaders);
        logger.info("CORS exposed headers: {}", exposedHeaders);
        logger.info("CORS allow credentials: {}", allowCredentials);
    }

    public String getAllowedMethods() {
        return this.allowedMethodsRaw;
    }

    public String getExposedHeaders() {
        return this.exposedHeadersRaw;
    }

    /** Value to echo back in Access-Control-Allow-Origin. */
    public String getAllowedOrigin(String origin) {
        if (this.anyOriginAllowed) {
            return ALLOW_ANY_ORIGIN;
        } else {
            return origin;
        }
    }

    /** True when origin matches any configured origin pattern. */
    public boolean isOriginAllowed(String origin) {
        if (!Strings.isNullOrEmpty(origin)) {
            if (this.anyOriginAllowed) {
                logger.debug("CORS origin allowed: {}", origin);
                return true;
            } else {
                for (Pattern pattern : this.allowedOrigins) {
                    Matcher matcher = pattern.matcher(origin);
                    if (matcher.matches()) {
                        logger.debug("CORS origin allowed: {}", origin);
                        return true;
                    }
                }
            }
        }
        logger.debug("CORS origin not allowed: {}", origin);
        return false;
    }

    /** True when method is in the configured allow-list (exact match). */
    public boolean isMethodAllowed(String method) {
        if (!Strings.isNullOrEmpty(method)) {
            if (this.allowedMethods.contains(method)) {
                logger.debug("CORS method allowed: {}", method);
                return true;
            }
        }
        logger.debug("CORS method not allowed: {}", method);
        return false;
    }

    /**
     * True when every header in the ", "-separated list is allowed.
     * NOTE(review): splits on ", " (comma-space) only; a client sending
     * comma-separated values without a space would not match -- verify
     * against the callers that build this string.
     */
    public boolean isEveryHeaderAllowed(String headers) {
        boolean result = false;

        if (!Strings.isNullOrEmpty(headers)) {
            if (this.allowedHeadersRaw.equals(ALLOW_ANY_HEADER)) {
                result = true;
            } else {
                for (String header : Splitter.on(HEADER_VALUE_SEPARATOR)
                        .split(headers)) {
                    result = this.allowedHeaders.contains(header);
                    if (!result) {
                        // First not matching header breaks
                        break;
                    }
                }
            }
        }

        if (result) {
            logger.debug("CORS headers allowed: {}", headers);
        } else {
            logger.debug("CORS headers not allowed: {}", headers);
        }
        return result;
    }

    public boolean isAllowCredentials() {
        return ALLOW_CREDENTIALS.equals(allowCredentials);
    }

    @Override
    public boolean equals(Object object) {
        if (this == object) {
            return true;
        }
        if (!(object instanceof CrossOriginResourceSharing that)) {
            return false;
        }
        return this.allowedOrigins.equals(that.allowedOrigins) &&
                this.allowedMethodsRaw.equals(that.allowedMethodsRaw) &&
                this.allowedHeadersRaw.equals(that.allowedHeadersRaw);
    }

    @Override
    public int hashCode() {
        return Objects.hash(this.allowedOrigins, this.allowedMethodsRaw,
                this.allowedHeadersRaw);
    }
}

================================================ FILE: src/main/java/org/gaul/s3proxy/DeleteMultipleObjectsRequest.java ================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import java.util.Collection; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper; import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty; // CHECKSTYLE:OFF final class DeleteMultipleObjectsRequest { @JacksonXmlProperty(localName = "Quiet") boolean quiet; @JacksonXmlProperty(localName = "Object") @JacksonXmlElementWrapper(useWrapping = false) Collection objects; static final class S3Object { @JacksonXmlProperty(localName = "Key") String key; @JacksonXmlProperty(localName = "VersionID") String versionId; } } // CHECKSTYLE:ON ================================================ FILE: src/main/java/org/gaul/s3proxy/EncryptedBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package org.gaul.s3proxy;

import static com.google.common.base.Preconditions.checkArgument;

import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.spec.KeySpec;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;

import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;

import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.net.HttpHeaders;

import org.gaul.s3proxy.crypto.Constants;
import org.gaul.s3proxy.crypto.Decryption;
import org.gaul.s3proxy.crypto.Encryption;
import org.gaul.s3proxy.crypto.PartPadding;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobAccess;
import org.jclouds.blobstore.domain.BlobBuilder;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.domain.MultipartPart;
import org.jclouds.blobstore.domain.MultipartUpload;
import org.jclouds.blobstore.domain.MutableBlobMetadata;
import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;
import org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;
import org.jclouds.blobstore.domain.internal.PageSetImpl;
import org.jclouds.blobstore.options.CopyOptions;
import org.jclouds.blobstore.options.GetOptions;
import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.blobstore.util.ForwardingBlobStore;
import org.jclouds.io.ContentMetadata;
import org.jclouds.io.MutableContentMetadata;
import org.jclouds.io.Payload;
import org.jclouds.io.Payloads;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A BlobStore decorator that transparently encrypts payloads with AES
 * before storing them and decrypts on retrieval.  Encrypted blobs are
 * stored under the original name plus {@code Constants.S3_ENC_SUFFIX};
 * each (part) payload grows by {@code Constants.PADDING_BLOCK_SIZE} bytes.
 */
@SuppressWarnings("UnstableApiUsage")
public final class EncryptedBlobStore extends ForwardingBlobStore {
    private final Logger logger =
        LoggerFactory.getLogger(EncryptedBlobStore.class);
    private SecretKeySpec secretKey;

    private EncryptedBlobStore(BlobStore blobStore, Properties properties)
        throws IllegalArgumentException {
        super(blobStore);
        String password = properties.getProperty(
            S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD);
        checkArgument(!Strings.isNullOrEmpty(password),
            "Password for encrypted blobstore is not set");
        String salt = properties.getProperty(
            S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_SALT);
        checkArgument(!Strings.isNullOrEmpty(salt),
            "Salt for encrypted blobstore is not set");
        initStore(password, salt);
    }

    /** Factory wrapping the given store with encryption. */
    static BlobStore newEncryptedBlobStore(BlobStore blobStore,
        Properties properties) throws IOException {
        return new EncryptedBlobStore(blobStore, properties);
    }

    /** Derive the AES key from password and salt via PBKDF2-HMAC-SHA256. */
    private void initStore(String password, String salt)
        throws IllegalArgumentException {
        try {
            SecretKeyFactory factory =
                SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256");
            KeySpec spec =
                new PBEKeySpec(password.toCharArray(), salt.getBytes(),
                    65536, 128);
            SecretKey tmp = factory.generateSecret(spec);
            secretKey = new SecretKeySpec(tmp.getEncoded(), "AES");
        } catch (GeneralSecurityException e) {
            throw new IllegalArgumentException(e);
        }
    }

    /**
     * Clone a blob's metadata around a replacement payload stream, adding
     * or removing the encryption name suffix as requested.
     */
    private Blob cipheredBlob(String container, Blob blob,
        InputStream payload, long contentLength,
        boolean addEncryptedMetadata) {

        // make a copy of the blob with the new payload stream
        BlobMetadata blobMeta = blob.getMetadata();
        ContentMetadata contentMeta = blob.getMetadata().getContentMetadata();
        Map userMetadata = blobMeta.getUserMetadata();
        String contentType = contentMeta.getContentType();

        // suffix the content type with -s3enc if we need to encrypt
        if (addEncryptedMetadata) {
            blobMeta = setEncryptedSuffix(blobMeta);
        } else {
            // remove the -s3enc suffix while decrypting
            // but not if it contains a multipart meta
            if (!blobMeta.getUserMetadata()
                .containsKey(Constants.METADATA_IS_ENCRYPTED_MULTIPART)) {
                blobMeta = removeEncryptedSuffix(blobMeta);
            }
        }

        // we do not set contentMD5 as it will not match due to the encryption
        Blob cipheredBlob = blobBuilder(container)
            .name(blobMeta.getName())
            .type(blobMeta.getType())
            .tier(blobMeta.getTier())
            .userMetadata(userMetadata)
            .payload(payload)
            .cacheControl(contentMeta.getCacheControl())
            .contentDisposition(contentMeta.getContentDisposition())
            .contentEncoding(contentMeta.getContentEncoding())
            .contentLanguage(contentMeta.getContentLanguage())
            .contentLength(contentLength)
            .contentType(contentType)
            .build();

        cipheredBlob.getMetadata().setUri(blobMeta.getUri());
        cipheredBlob.getMetadata().setETag(blobMeta.getETag());
        cipheredBlob.getMetadata().setLastModified(blobMeta.getLastModified());
        cipheredBlob.getMetadata().setSize(blobMeta.getSize());
        cipheredBlob.getMetadata().setPublicUri(blobMeta.getPublicUri());
        cipheredBlob.getMetadata().setContainer(blobMeta.getContainer());

        return cipheredBlob;
    }

    /** Wrap a blob's payload in an encrypting stream (single part). */
    private Blob encryptBlob(String container, Blob blob) {
        try {
            // open the streams and pass them through the encryption
            InputStream isRaw = blob.getPayload().openStream();
            Encryption encryption = new Encryption(secretKey, isRaw, 1);
            InputStream is = encryption.openStream();

            // adjust the encrypted content length by
            // adding the padding block size
            long contentLength =
                blob.getMetadata().getContentMetadata().getContentLength() +
                    Constants.PADDING_BLOCK_SIZE;

            return cipheredBlob(container, blob, is, contentLength, true);
        } catch (IOException | GeneralSecurityException e) {
            throw new RuntimeException(e);
        }
    }

    /** Wrap a multipart part payload in an encrypting stream. */
    private Payload encryptPayload(Payload payload, int partNumber) {
        try {
            // open the streams and pass them through the encryption
            InputStream isRaw = payload.openStream();
            Encryption encryption =
                new Encryption(secretKey, isRaw, partNumber);
            InputStream is = encryption.openStream();

            Payload cipheredPayload = Payloads.newInputStreamPayload(is);
            MutableContentMetadata contentMetadata =
                payload.getContentMetadata();
            // clear the MD5: it cannot match the encrypted bytes
            HashCode md5 = null;
            contentMetadata.setContentMD5(md5);
            cipheredPayload.setContentMetadata(payload.getContentMetadata());
            cipheredPayload.setSensitive(payload.isSensitive());

            // adjust the encrypted content length by
            // adding the padding block size
            long contentLength =
                payload.getContentMetadata().getContentLength() +
                    Constants.PADDING_BLOCK_SIZE;
            cipheredPayload.getContentMetadata()
                .setContentLength(contentLength);

            return cipheredPayload;
        } catch (IOException | GeneralSecurityException e) {
            throw new RuntimeException(e);
        }
    }

    /** Wrap a blob's payload in a decrypting stream; null-safe. */
    private Blob decryptBlob(Decryption decryption, String container,
        Blob blob) {
        try {
            // handle blob does not exist
            if (blob == null) {
                return null;
            }

            // open the streams and pass them through the decryption
            InputStream isRaw = blob.getPayload().openStream();
            InputStream is = decryption.openStream(isRaw);

            // adjust the content length if the blob is encrypted
            long contentLength =
                blob.getMetadata().getContentMetadata().getContentLength();
            if (decryption.isEncrypted()) {
                contentLength = decryption.getContentLength();
            }

            return cipheredBlob(container, blob, is, contentLength, false);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    // filter the list by showing the unencrypted blob size
    private PageSet filteredList(
        PageSet pageSet) {
        var builder = ImmutableSet.builder();
        for (StorageMetadata sm : pageSet) {
            if (sm instanceof BlobMetadata bm) {
                MutableBlobMetadata mbm = new MutableBlobMetadataImpl(bm);

                // if blob is encrypted remove the -s3enc suffix
                // from content type
                if (isEncrypted(mbm)) {
                    mbm = removeEncryptedSuffix(bm);
                    mbm = calculateBlobSize(mbm);
                }

                builder.add(mbm);
            } else if (sm.getName() != null && isEncrypted(sm.getName())) {
                // non-BlobMetadata list entries (e.g. from S3 list backends)
                // still need the .s3enc suffix stripped from the name
                var msm = new MutableStorageMetadataImpl(sm);
                msm.setName(removeEncryptedSuffix(sm.getName()));
                builder.add(msm);
            } else {
                builder.add(sm);
            }
        }

        // make sure the marker do not show blob with .s3enc suffix
        String marker = pageSet.getNextMarker();
        if (marker != null && isEncrypted(marker)) {
            marker = removeEncryptedSuffix(marker);
        }
        return new PageSetImpl<>(builder.build(), marker);
    }

    private boolean isEncrypted(BlobMetadata blobMeta) {
        return isEncrypted(blobMeta.getName());
    }

    private boolean isEncrypted(String blobName) {
        return blobName.endsWith(Constants.S3_ENC_SUFFIX);
    }

    /** Copy metadata, appending the encryption suffix to the name. */
    private MutableBlobMetadata setEncryptedSuffix(BlobMetadata blobMeta) {
        var bm = new MutableBlobMetadataImpl(blobMeta);
        if (blobMeta.getName() != null && !isEncrypted(blobMeta.getName())) {
            bm.setName(blobNameWithSuffix(blobMeta.getName()));
        }
        return bm;
    }

    private String removeEncryptedSuffix(String blobName) {
        return blobName.substring(0,
            blobName.length() - Constants.S3_ENC_SUFFIX.length());
    }

    /** Copy metadata, stripping the encryption suffix from the name. */
    private MutableBlobMetadata removeEncryptedSuffix(BlobMetadata blobMeta) {
        var bm = new MutableBlobMetadataImpl(blobMeta);
        if (isEncrypted(bm.getName())) {
            String blobName = bm.getName();
            bm.setName(removeEncryptedSuffix(blobName));
        }
        return bm;
    }

    /**
     * Report the plaintext size of an encrypted blob by subtracting one
     * padding block per part.  Part count is taken, in order of
     * preference, from a user-metadata key, the MPU ETag suffix, or the
     * last part's padding block read from the backend.
     */
    private MutableBlobMetadata calculateBlobSize(BlobMetadata blobMeta) {
        MutableBlobMetadata mbm = removeEncryptedSuffix(blobMeta);

        // we are using on non-s3 backends like azure or gcp a metadata key to
        // calculate the part padding sizes that needs to be removed
        if (mbm.getUserMetadata()
            .containsKey(Constants.METADATA_ENCRYPTION_PARTS)) {
            int parts = Integer.parseInt(
                mbm.getUserMetadata()
                    .get(Constants.METADATA_ENCRYPTION_PARTS));
            int partPaddingSizes = Constants.PADDING_BLOCK_SIZE * parts;
            long size = blobMeta.getSize() - partPaddingSizes;
            mbm.setSize(size);
            mbm.getContentMetadata().setContentLength(size);
        } else {
            // on s3 backends like aws or minio we rely on the eTag suffix
            Matcher matcher =
                Constants.MPU_ETAG_SUFFIX_PATTERN.matcher(blobMeta.getETag());
            if (matcher.find()) {
                int parts = Integer.parseInt(matcher.group(1));
                int partPaddingSizes = Constants.PADDING_BLOCK_SIZE * parts;
                long size = blobMeta.getSize() - partPaddingSizes;
                mbm.setSize(size);
                mbm.getContentMetadata().setContentLength(size);
            } else {
                // if there is also no eTag suffix then get the number of
                // parts from last padding
                var options = new GetOptions()
                    .range(blobMeta.getSize() - Constants.PADDING_BLOCK_SIZE,
                        blobMeta.getSize());
                var name = blobNameWithSuffix(blobMeta.getName());
                var blob =
                    delegate().getBlob(blobMeta.getContainer(), name,
                        options);
                try {
                    PartPadding lastPartPadding =
                        PartPadding.readPartPaddingFromBlob(blob);
                    int parts = lastPartPadding.getPart();
                    int partPaddingSizes =
                        Constants.PADDING_BLOCK_SIZE * parts;
                    long size = blobMeta.getSize() - partPaddingSizes;
                    mbm.setSize(size);
                    mbm.getContentMetadata().setContentLength(size);
                } catch (IOException e) {
                    throw new UncheckedIOException(
                        "Failed to read part-padding from encrypted blob", e);
                }
            }
        }

        return mbm;
    }

    private boolean multipartRequiresStub() {
        String blobStoreType = getBlobStoreType();
        return Quirks.MULTIPART_REQUIRES_STUB.contains(blobStoreType);
    }

    /** Prefer the suffixed name when the encrypted variant exists. */
    private String blobNameWithSuffix(String container, String name) {
        String nameWithSuffix = blobNameWithSuffix(name);
        if (delegate().blobExists(container, nameWithSuffix)) {
            name = nameWithSuffix;
        }
        return name;
    }

    private String blobNameWithSuffix(String name) {
        return name + Constants.S3_ENC_SUFFIX;
    }

    private String getBlobStoreType() {
        return delegate().getContext().unwrap().getProviderMetadata().getId();
    }

    /** Deterministic upload id derived from container/blob path (MD5). */
    private String generateUploadId(String container, String blobName) {
        String path = container + "/" + blobName;
        @SuppressWarnings("deprecation")
        var hash = Hashing.md5();
        return hash.hashBytes(path.getBytes(StandardCharsets.UTF_8))
            .toString();
    }

    @Override
    public Blob getBlob(String containerName, String blobName) {
        return getBlob(containerName,
blobName, new GetOptions()); } @Override public Blob getBlob(String containerName, String blobName, GetOptions getOptions) { // adjust the blob name blobName = blobNameWithSuffix(blobName); // get the metadata to determine the blob size BlobMetadata meta = delegate().blobMetadata(containerName, blobName); try { // we have a blob that ends with .s3enc if (meta != null) { // init defaults long offset = 0; long end = 0; long length = -1; if (getOptions.getRanges().size() > 0) { // S3 doesn't allow multiple ranges String range = getOptions.getRanges().get(0); String[] ranges = range.split("-", 2); if (ranges[0].isEmpty()) { // handle to read from the end end = Long.parseLong(ranges[1]); length = end; } else if (ranges[1].isEmpty()) { // handle to read from an offset till the end offset = Long.parseLong(ranges[0]); } else { // handle to read from an offset offset = Long.parseLong(ranges[0]); end = Long.parseLong(ranges[1]); length = end - offset + 1; } } // init decryption Decryption decryption = new Decryption(secretKey, delegate(), meta, offset, length); if (decryption.isEncrypted() && getOptions.getRanges().size() > 0) { // clear current ranges to avoid multiple ranges getOptions.getRanges().clear(); long startAt = decryption.getStartAt(); long endAt = decryption.getEncryptedSize(); if (offset == 0 && end > 0 && length == end) { // handle to read from the end startAt = decryption.calculateTail(); } else if (offset > 0 && end > 0) { // handle to read from an offset endAt = decryption.calculateEndAt(end); } getOptions.range(startAt, endAt); } Blob blob = delegate().getBlob(containerName, blobName, getOptions); Blob decryptedBlob = decryptBlob(decryption, containerName, blob); if (!getOptions.getRanges().isEmpty()) { long decryptedSize = decryption.getUnencryptedSize(); long endRange = (offset != 0 && end == 0) ? 
decryptedSize : end; decryptedBlob.getAllHeaders() .put(HttpHeaders.CONTENT_RANGE, "bytes " + offset + "-" + endRange + "/" + decryptedSize); } return decryptedBlob; } else { // we suppose to return a unencrypted blob // since no metadata was found blobName = removeEncryptedSuffix(blobName); return delegate().getBlob(containerName, blobName, getOptions); } } catch (IOException e) { throw new UncheckedIOException(e); } } @Override public String putBlob(String containerName, Blob blob) { return delegate().putBlob(containerName, encryptBlob(containerName, blob)); } @Override public String putBlob(String containerName, Blob blob, PutOptions putOptions) { return delegate().putBlob(containerName, encryptBlob(containerName, blob), putOptions); } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { // if we copy an encrypted blob // make sure to add suffix to the destination blob name String blobName = blobNameWithSuffix(fromName); if (delegate().blobExists(fromContainer, blobName)) { fromName = blobName; toName = blobNameWithSuffix(toName); } return delegate().copyBlob(fromContainer, fromName, toContainer, toName, options); } @Override public void removeBlob(String container, String name) { name = blobNameWithSuffix(container, name); delegate().removeBlob(container, name); } @Override public void removeBlobs(String container, Iterable names) { List filteredNames = new ArrayList<>(); // filter the list of blobs to determine // if we need to delete encrypted blobs for (String name : names) { name = blobNameWithSuffix(container, name); filteredNames.add(name); } delegate().removeBlobs(container, filteredNames); } @Override public BlobAccess getBlobAccess(String container, String name) { name = blobNameWithSuffix(container, name); return delegate().getBlobAccess(container, name); } @Override public boolean blobExists(String container, String name) { name = blobNameWithSuffix(container, name); return 
delegate().blobExists(container, name); } @Override public void setBlobAccess(String container, String name, BlobAccess access) { name = blobNameWithSuffix(container, name); delegate().setBlobAccess(container, name, access); } @Override public PageSet list() { PageSet pageSet = delegate().list(); return filteredList(pageSet); } @Override public PageSet list(String container) { PageSet pageSet = delegate().list(container); return filteredList(pageSet); } @Override public PageSet list(String container, ListContainerOptions options) { PageSet pageSet = delegate().list(container, options); return filteredList(pageSet); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { MutableBlobMetadata mbm = new MutableBlobMetadataImpl(blobMetadata); mbm = setEncryptedSuffix(mbm); MultipartUpload mpu = delegate().initiateMultipartUpload(container, mbm, options); // handle non-s3 backends // by setting a metadata key for multipart stubs if (multipartRequiresStub()) { mbm.getUserMetadata() .put(Constants.METADATA_IS_ENCRYPTED_MULTIPART, "true"); if (getBlobStoreType().equals("azureblob")) { // use part 0 as a placeholder delegate().uploadMultipartPart(mpu, 0, Payloads.newStringPayload("dummy")); // since azure does not have a uploadId // we use the sha256 of the path String uploadId = generateUploadId(container, mbm.getName()); mpu = MultipartUpload.create(mpu.containerName(), mpu.blobName(), uploadId, mpu.blobMetadata(), options); } else if (getBlobStoreType().equals("google-cloud-storage")) { mbm.getUserMetadata() .put(Constants.METADATA_MULTIPART_KEY, mbm.getName()); // since gcp does not have a uploadId // we use the sha256 of the path String uploadId = generateUploadId(container, mbm.getName()); // to emulate later the list of multipart uploads // we create a placeholder BlobBuilder builder = blobBuilder(Constants.MPU_FOLDER + uploadId) .payload("") .userMetadata(mbm.getUserMetadata()); 
delegate().putBlob(container, builder.build(), options); // final mpu on gcp mpu = MultipartUpload.create(mpu.containerName(), mpu.blobName(), uploadId, mpu.blobMetadata(), options); } } return mpu; } @Override public List listMultipartUploads(String container) { List mpus = new ArrayList<>(); // emulate list of multipart uploads on gcp if (getBlobStoreType().equals("google-cloud-storage")) { var options = new ListContainerOptions(); PageSet mpuList = delegate().list(container, options.prefix(Constants.MPU_FOLDER)); // find all blobs in .mpu folder and build the list for (StorageMetadata blob : mpuList) { Map meta = blob.getUserMetadata(); if (meta.containsKey(Constants.METADATA_MULTIPART_KEY)) { String blobName = meta.get(Constants.METADATA_MULTIPART_KEY); String uploadId = blob.getName() .substring(blob.getName().lastIndexOf("/") + 1); MultipartUpload mpu = MultipartUpload.create(container, blobName, uploadId, null, null); mpus.add(mpu); } } } else { mpus = delegate().listMultipartUploads(container); } List filtered = new ArrayList<>(); // filter the list uploads by removing the .s3enc suffix for (MultipartUpload mpu : mpus) { String blobName = mpu.blobName(); if (isEncrypted(blobName)) { blobName = removeEncryptedSuffix(mpu.blobName()); String uploadId = mpu.id(); // since azure not have a uploadId // we use the sha256 of the path if (getBlobStoreType().equals("azureblob")) { uploadId = generateUploadId(container, mpu.blobName()); } MultipartUpload mpuWithoutSuffix = MultipartUpload.create(mpu.containerName(), blobName, uploadId, mpu.blobMetadata(), mpu.putOptions()); filtered.add(mpuWithoutSuffix); } else { filtered.add(mpu); } } return filtered; } @Override public List listMultipartUpload(MultipartUpload mpu) { mpu = filterMultipartUpload(mpu); List parts = delegate().listMultipartUpload(mpu); List filteredParts = new ArrayList<>(); // fix wrong multipart size due to the part padding for (MultipartPart part : parts) { // we use part 0 as a placeholder and hide 
it on azure if (getBlobStoreType().equals("azureblob") && part.partNumber() == 0) { continue; } MultipartPart newPart = MultipartPart.create( part.partNumber(), part.partSize() - Constants.PADDING_BLOCK_SIZE, part.partETag(), part.lastModified() ); filteredParts.add(newPart); } return filteredParts; } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { mpu = filterMultipartUpload(mpu); return delegate().uploadMultipartPart(mpu, partNumber, encryptPayload(payload, partNumber)); } private MultipartUpload filterMultipartUpload(MultipartUpload mpu) { MutableBlobMetadata mbm = null; if (mpu.blobMetadata() != null) { mbm = new MutableBlobMetadataImpl(mpu.blobMetadata()); mbm = setEncryptedSuffix(mbm); } String blobName = mpu.blobName(); if (!isEncrypted(blobName)) { blobName = blobNameWithSuffix(blobName); } return MultipartUpload.create(mpu.containerName(), blobName, mpu.id(), mbm, mpu.putOptions()); } @Override public String completeMultipartUpload(MultipartUpload mpu, List parts) { MutableBlobMetadata mbm = new MutableBlobMetadataImpl(mpu.blobMetadata()); String blobName = mpu.blobName(); // always set .s3enc suffix except on gcp // and blob name starts with multipart upload id if (getBlobStoreType().equals("google-cloud-storage") && mpu.blobName().startsWith(mpu.id())) { logger.debug("skip suffix on gcp"); } else { mbm = setEncryptedSuffix(mbm); if (!isEncrypted(mpu.blobName())) { blobName = blobNameWithSuffix(blobName); } } MultipartUpload mpuWithSuffix = MultipartUpload.create(mpu.containerName(), blobName, mpu.id(), mbm, mpu.putOptions()); // this will only work for non s3 backends like azure and gcp if (multipartRequiresStub()) { long partCount = parts.size(); // special handling for GCP to sum up all parts if (getBlobStoreType().equals("google-cloud-storage")) { partCount = 0; for (MultipartPart part : parts) { blobName = "%s_%08d".formatted( mpu.id(), part.partNumber()); BlobMetadata metadata = 
delegate().blobMetadata(mpu.containerName(), blobName); if (metadata != null && metadata.getUserMetadata() .containsKey(Constants.METADATA_ENCRYPTION_PARTS)) { String partMetaCount = metadata.getUserMetadata() .get(Constants.METADATA_ENCRYPTION_PARTS); partCount = partCount + Long.parseLong(partMetaCount); } else { partCount++; } } } mpuWithSuffix.blobMetadata().getUserMetadata() .put(Constants.METADATA_ENCRYPTION_PARTS, String.valueOf(partCount)); mpuWithSuffix.blobMetadata().getUserMetadata() .remove(Constants.METADATA_IS_ENCRYPTED_MULTIPART); } String eTag = delegate().completeMultipartUpload(mpuWithSuffix, parts); // cleanup mpu placeholder on gcp if (getBlobStoreType().equals("google-cloud-storage")) { delegate().removeBlob(mpu.containerName(), Constants.MPU_FOLDER + mpu.id()); } return eTag; } @Override public BlobMetadata blobMetadata(String container, String name) { name = blobNameWithSuffix(container, name); BlobMetadata blobMetadata = delegate().blobMetadata(container, name); if (blobMetadata != null) { // only remove the -s3enc suffix // if the blob is encrypted and not a multipart stub if (isEncrypted(blobMetadata) && !blobMetadata.getUserMetadata() .containsKey(Constants.METADATA_IS_ENCRYPTED_MULTIPART)) { blobMetadata = removeEncryptedSuffix(blobMetadata); blobMetadata = calculateBlobSize(blobMetadata); } } return blobMetadata; } @Override public long getMaximumMultipartPartSize() { long max = delegate().getMaximumMultipartPartSize(); return max - Constants.PADDING_BLOCK_SIZE; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/EventualBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static java.util.Objects.requireNonNull; import static com.google.common.base.Preconditions.checkArgument; import java.util.Deque; import java.util.List; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.domain.Location; import org.jclouds.io.Payload; /** * This class is a BlobStore wrapper which emulates eventual consistency * using two blobstores. It writes objects to one store and reads objects * from another. An asynchronous process copies objects between stores. Note * that container operations are not eventually consistent. 
*/ final class EventualBlobStore extends ForwardingBlobStore { private final BlobStore writeStore; // read from delegate private final ScheduledExecutorService executorService; private final Deque> deque = new ConcurrentLinkedDeque<>(); private final int delay; private final TimeUnit delayUnit; private final double probability; private final Random random = new Random(); private EventualBlobStore(BlobStore writeStore, BlobStore readStore, ScheduledExecutorService executorService, int delay, TimeUnit delayUnit, double probability) { super(readStore); this.writeStore = requireNonNull(writeStore); this.executorService = requireNonNull(executorService); checkArgument(delay >= 0, "Delay must be at least zero, was: %s", delay); this.delay = delay; this.delayUnit = requireNonNull(delayUnit); checkArgument(probability >= 0.0 && probability <= 1.0, "Probability must be between 0.0 and 1.0, was: %s", probability); this.probability = probability; } static BlobStore newEventualBlobStore(BlobStore writeStore, BlobStore readStore, ScheduledExecutorService executorService, int delay, TimeUnit delayUnit, double probability) { return new EventualBlobStore(writeStore, readStore, executorService, delay, delayUnit, probability); } @Override public boolean createContainerInLocation(Location location, String container, CreateContainerOptions options) { return delegate().createContainerInLocation( location, container, options) && writeStore.createContainerInLocation( location, container, options); } @Override public void deleteContainer(String container) { delegate().deleteContainer(container); writeStore.deleteContainer(container); } @Override public boolean deleteContainerIfEmpty(String container) { return delegate().deleteContainerIfEmpty(container) && writeStore.deleteContainerIfEmpty(container); } @Override public String putBlob(String containerName, Blob blob) { return putBlob(containerName, blob, PutOptions.NONE); } @Override public String putBlob(final String containerName, Blob 
blob, final PutOptions options) { final String nearName = blob.getMetadata().getName(); String nearETag = writeStore.putBlob(containerName, blob, options); schedule(new Callable() { @Override public String call() { Blob nearBlob = writeStore.getBlob(containerName, nearName); String farETag = delegate().putBlob(containerName, nearBlob, options); return farETag; } }); return nearETag; } @Override public void removeBlob(final String containerName, final String blobName) { writeStore.removeBlob(containerName, blobName); schedule(new Callable() { @Override public Void call() { delegate().removeBlob(containerName, blobName); return null; } }); } @Override public void removeBlobs(final String containerName, final Iterable blobNames) { writeStore.removeBlobs(containerName, blobNames); schedule(new Callable() { @Override public Void call() { delegate().removeBlobs(containerName, blobNames); return null; } }); } @Override public String copyBlob(final String fromContainer, final String fromName, final String toContainer, final String toName, final CopyOptions options) { String nearETag = writeStore.copyBlob(fromContainer, fromName, toContainer, toName, options); schedule(new Callable() { @Override public String call() { return delegate().copyBlob(fromContainer, fromName, toContainer, toName, options); } }); return nearETag; } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { MultipartUpload mpu = delegate().initiateMultipartUpload(container, blobMetadata, options); return mpu; } @Override public void abortMultipartUpload(MultipartUpload mpu) { delegate().abortMultipartUpload(mpu); } @Override public String completeMultipartUpload(final MultipartUpload mpu, final List parts) { schedule(new Callable() { @Override public String call() { String farETag = delegate().completeMultipartUpload(mpu, parts); return farETag; } }); return ""; // TODO: fake ETag } @Override public MultipartPart 
uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { MultipartPart part = delegate().uploadMultipartPart(mpu, partNumber, payload); return part; } @SuppressWarnings("FutureReturnValueIgnored") private void schedule(Callable callable) { if (random.nextDouble() < probability) { deque.add(callable); executorService.schedule(new DequeCallable(), delay, delayUnit); } } private final class DequeCallable implements Callable { @Override public Void call() throws Exception { deque.poll().call(); return null; } } } ================================================ FILE: src/main/java/org/gaul/s3proxy/GlobBlobStoreLocator.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import java.nio.file.FileSystems; import java.nio.file.PathMatcher; import java.util.Map; import com.google.common.collect.Maps; import org.jclouds.blobstore.BlobStore; import org.jspecify.annotations.Nullable; public final class GlobBlobStoreLocator implements BlobStoreLocator { private final Map> locator; private final Map> globLocator; public GlobBlobStoreLocator( Map> locator, Map> globLocator) { this.locator = locator; this.globLocator = globLocator; } @Override public Map.Entry locateBlobStore( @Nullable String identity, String container, String blob) { Map.Entry locatorEntry = locator.get(identity); Map.Entry globEntry = null; if (container != null) { for (var entry : globLocator.entrySet()) { if (entry.getKey().matches(FileSystems.getDefault() .getPath(container))) { globEntry = entry.getValue(); } } } if (globEntry == null) { if (identity == null) { if (!locator.isEmpty()) { return locator.entrySet().iterator().next() .getValue(); } return Maps.immutableEntry(null, globLocator.entrySet().iterator().next().getValue() .getValue()); } return locatorEntry; } if (identity == null) { return Maps.immutableEntry(null, globEntry.getValue()); } if (!globEntry.getKey().equals(identity)) { return null; } if (locatorEntry == null) { return null; } return Map.entry(locatorEntry.getKey(), globEntry.getValue()); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/LatencyBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.google.common.collect.ImmutableMap; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.domain.Location; import org.jclouds.io.ContentMetadata; import org.jclouds.io.Payload; import org.jclouds.io.payloads.InputStreamPayload; public final class LatencyBlobStore extends ForwardingBlobStore { private static final Pattern PROPERTIES_LATENCY_RE = Pattern.compile( "^" + S3ProxyConstants.PROPERTY_LATENCY + "\\.(?.*)\\.latency$"); private static final Pattern PROPERTIES_SPEED_RE = Pattern.compile( "^" + S3ProxyConstants.PROPERTY_LATENCY + "\\.(?.*)\\.speed$"); private static final String OP_ALL = "*"; private static final String OP_CONTAINER_EXISTS = "container-exists"; private static final String OP_CREATE_CONTAINER = 
"create-container"; private static final String OP_CONTAINER_ACCESS = "container-access"; private static final String OP_LIST = "list"; private static final String OP_CLEAR_CONTAINER = "clear-container"; private static final String OP_DELETE_CONTAINER = "delete-container"; private static final String OP_DIRECTORY_EXISTS = "directory-exists"; private static final String OP_CREATE_DIRECTORY = "create-directory"; private static final String OP_DELETE_DIRECTORY = "delete-directory"; private static final String OP_BLOB_EXISTS = "blob-exists"; private static final String OP_PUT_BLOB = "put"; private static final String OP_COPY_BLOB = "copy"; private static final String OP_BLOB_METADATA = "metadata"; private static final String OP_GET_BLOB = "get"; private static final String OP_REMOVE_BLOB = "remove"; private static final String OP_BLOB_ACCESS = "blob-access"; private static final String OP_COUNT_BLOBS = "count"; private static final String OP_MULTIPART_MESSAGE = "multipart-message"; private static final String OP_UPLOAD_PART = "upload-part"; private static final String OP_LIST_MULTIPART = "list-multipart"; private static final String OP_MULTIPART_PARAM = "multipart-param"; private static final String OP_DOWNLOAD_BLOB = "download"; private static final String OP_STREAM_BLOB = "stream"; private final Map latencies; private final Map speeds; private LatencyBlobStore(BlobStore blobStore, Map latencies, Map speeds) { super(blobStore); this.latencies = requireNonNull(latencies); for (String op : latencies.keySet()) { checkArgument(latencies.get(op) >= 0, "Latency must be non negative for %s", op); } this.speeds = requireNonNull(speeds); for (String op : speeds.keySet()) { checkArgument(speeds.get(op) > 0, "Speed must be positive for %s", op); } } public static Map parseLatencies(Properties properties) { var latencies = new ImmutableMap.Builder(); for (String key : properties.stringPropertyNames()) { Matcher matcher = PROPERTIES_LATENCY_RE.matcher(key); if (!matcher.matches()) 
{ continue; } String op = matcher.group("op"); long latency = Long.parseLong(properties.getProperty(key)); checkArgument(latency >= 0, "Latency must be non negative for %s", op); latencies.put(op, latency); } return latencies.build(); } public static Map parseSpeeds(Properties properties) { var speeds = new ImmutableMap.Builder(); for (String key : properties.stringPropertyNames()) { Matcher matcher = PROPERTIES_SPEED_RE.matcher(key); if (!matcher.matches()) { continue; } String op = matcher.group("op"); long speed = Long.parseLong(properties.getProperty(key)); checkArgument(speed > 0, "Speed must be positive for %s", op); speeds.put(op, speed); } return speeds.build(); } static BlobStore newLatencyBlobStore(BlobStore delegate, Map latencies, Map speeds) { return new LatencyBlobStore(delegate, latencies, speeds); } @Override public Set listAssignableLocations() { simulateLatency(OP_LIST); return super.listAssignableLocations(); } @Override public PageSet list() { simulateLatency(OP_LIST); return super.list(); } @Override public PageSet list(String container) { simulateLatency(OP_LIST); return super.list(container); } @Override public PageSet list(String container, ListContainerOptions options) { simulateLatency(OP_LIST); return super.list(container, options); } @Override public boolean containerExists(String container) { simulateLatency(OP_CONTAINER_EXISTS); return super.containerExists(container); } @Override public boolean createContainerInLocation(Location location, String container) { simulateLatency(OP_CREATE_CONTAINER); return super.createContainerInLocation(location, container); } @Override public boolean createContainerInLocation(Location location, String container, CreateContainerOptions createContainerOptions) { simulateLatency(OP_CREATE_CONTAINER); return super.createContainerInLocation(location, container, createContainerOptions); } @Override public ContainerAccess getContainerAccess(String container) { simulateLatency(OP_CONTAINER_ACCESS); return 
super.getContainerAccess(container); } @Override public void setContainerAccess(String container, ContainerAccess containerAccess) { simulateLatency(OP_CONTAINER_ACCESS); super.setContainerAccess(container, containerAccess); } @Override public void clearContainer(String container) { simulateLatency(OP_CLEAR_CONTAINER); super.clearContainer(container); } @Override public void clearContainer(String container, ListContainerOptions options) { simulateLatency(OP_CLEAR_CONTAINER); super.clearContainer(container, options); } @Override public void deleteContainer(String container) { simulateLatency(OP_DELETE_CONTAINER); super.deleteContainer(container); } @Override public boolean deleteContainerIfEmpty(String container) { simulateLatency(OP_DELETE_CONTAINER); return super.deleteContainerIfEmpty(container); } @Override public boolean directoryExists(String container, String directory) { simulateLatency(OP_DIRECTORY_EXISTS); return super.directoryExists(container, directory); } @Override public void createDirectory(String container, String directory) { simulateLatency(OP_CREATE_DIRECTORY); super.createDirectory(container, directory); } @Override public void deleteDirectory(String container, String directory) { simulateLatency(OP_DELETE_DIRECTORY); super.deleteDirectory(container, directory); } @Override public boolean blobExists(String container, String name) { simulateLatency(OP_BLOB_EXISTS); return super.blobExists(container, name); } @Override public String putBlob(String containerName, Blob blob) { simulateLatency(OP_PUT_BLOB); try { InputStream is = blob.getPayload().openStream(); Blob newBlob = replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_PUT_BLOB))); return super.putBlob(containerName, newBlob); } catch (IOException e) { throw new RuntimeException(e); } } @Override public String putBlob(String containerName, Blob blob, PutOptions putOptions) { simulateLatency(OP_PUT_BLOB); try { InputStream is = blob.getPayload().openStream(); Blob newBlob = 
replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_PUT_BLOB))); return super.putBlob(containerName, newBlob); } catch (IOException e) { throw new RuntimeException(e); } } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { simulateLatency(OP_COPY_BLOB); return super.copyBlob(fromContainer, fromName, toContainer, toName, options); } @Override public BlobMetadata blobMetadata(String container, String name) { simulateLatency(OP_BLOB_METADATA); return super.blobMetadata(container, name); } @Override public Blob getBlob(String containerName, String blobName) { simulateLatency(OP_GET_BLOB); Blob blob = super.getBlob(containerName, blobName); try { InputStream is = blob.getPayload().openStream(); return replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_GET_BLOB))); } catch (IOException e) { throw new RuntimeException(e); } } @Override public Blob getBlob(String containerName, String blobName, GetOptions getOptions) { simulateLatency(OP_GET_BLOB); Blob blob = super.getBlob(containerName, blobName, getOptions); try { InputStream is = blob.getPayload().openStream(); return replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_GET_BLOB))); } catch (IOException e) { throw new RuntimeException(e); } } @Override public void removeBlob(String container, String name) { simulateLatency(OP_REMOVE_BLOB); super.removeBlob(container, name); } @Override public void removeBlobs(String container, Iterable iterable) { simulateLatency(OP_REMOVE_BLOB); super.removeBlobs(container, iterable); } @Override public BlobAccess getBlobAccess(String container, String name) { simulateLatency(OP_BLOB_ACCESS); return super.getBlobAccess(container, name); } @Override public void setBlobAccess(String container, String name, BlobAccess access) { simulateLatency(OP_BLOB_ACCESS); super.setBlobAccess(container, name, access); } @Override public long countBlobs(String container) { 
simulateLatency(OP_COUNT_BLOBS); return super.countBlobs(container); } @Override public long countBlobs(String container, ListContainerOptions options) { simulateLatency(OP_COUNT_BLOBS); return super.countBlobs(container, options); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { simulateLatency(OP_MULTIPART_MESSAGE); return super.initiateMultipartUpload(container, blobMetadata, options); } @Override public void abortMultipartUpload(MultipartUpload mpu) { simulateLatency(OP_MULTIPART_MESSAGE); super.abortMultipartUpload(mpu); } @Override public String completeMultipartUpload(MultipartUpload mpu, List parts) { simulateLatency(OP_MULTIPART_MESSAGE); return super.completeMultipartUpload(mpu, parts); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { simulateLatency(OP_UPLOAD_PART); try { InputStream is = payload.openStream(); payload = new InputStreamPayload(new ThrottledInputStream(is, getSpeed(OP_UPLOAD_PART))); } catch (IOException e) { throw new RuntimeException(e); } return super.uploadMultipartPart(mpu, partNumber, payload); } @Override public List listMultipartUpload(MultipartUpload mpu) { simulateLatency(OP_LIST_MULTIPART); return super.listMultipartUpload(mpu); } @Override public List listMultipartUploads(String container) { simulateLatency(OP_LIST_MULTIPART); return super.listMultipartUploads(container); } @Override public long getMinimumMultipartPartSize() { simulateLatency(OP_MULTIPART_PARAM); return super.getMinimumMultipartPartSize(); } @Override public long getMaximumMultipartPartSize() { simulateLatency(OP_MULTIPART_PARAM); return super.getMaximumMultipartPartSize(); } @Override public int getMaximumNumberOfParts() { simulateLatency(OP_MULTIPART_PARAM); return super.getMaximumNumberOfParts(); } @Override public void downloadBlob(String container, String name, File destination) { simulateLatency(OP_DOWNLOAD_BLOB); 
super.downloadBlob(container, name, destination); } @Override public void downloadBlob(String container, String name, File destination, ExecutorService executor) { simulateLatency(OP_DOWNLOAD_BLOB); super.downloadBlob(container, name, destination, executor); } @Override public InputStream streamBlob(String container, String name) { simulateLatency(OP_STREAM_BLOB); InputStream is = super.streamBlob(container, name); return new ThrottledInputStream(is, getSpeed(OP_STREAM_BLOB)); } @Override public InputStream streamBlob(String container, String name, ExecutorService executor) { simulateLatency(OP_STREAM_BLOB); InputStream is = super.streamBlob(container, name, executor); return new ThrottledInputStream(is, getSpeed(OP_STREAM_BLOB)); } private long getLatency(String op) { return latencies.getOrDefault(op, latencies.getOrDefault(OP_ALL, 0L)); } private Long getSpeed(String op) { return speeds.getOrDefault(op, speeds.getOrDefault(OP_ALL, null)); } private void simulateLatency(String op) { long latency = getLatency(op); if (latency > 0) { try { Thread.sleep(latency); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } private Blob replaceStream(Blob blob, InputStream is) { BlobMetadata blobMeta = blob.getMetadata(); ContentMetadata contentMeta = blobMeta.getContentMetadata(); Map userMetadata = blobMeta.getUserMetadata(); Blob newBlob = blobBuilder(blobMeta.getName()) .type(blobMeta.getType()) .tier(blobMeta.getTier()) .userMetadata(userMetadata) .payload(is) .cacheControl(contentMeta.getCacheControl()) .contentDisposition(contentMeta.getContentDisposition()) .contentEncoding(contentMeta.getContentEncoding()) .contentLanguage(contentMeta.getContentLanguage()) .contentLength(contentMeta.getContentLength()) .contentType(contentMeta.getContentType()) .build(); newBlob.getMetadata().setUri(blobMeta.getUri()); newBlob.getMetadata().setETag(blobMeta.getETag()); newBlob.getMetadata().setLastModified(blobMeta.getLastModified()); 
newBlob.getMetadata().setSize(blobMeta.getSize()); newBlob.getMetadata().setPublicUri(blobMeta.getPublicUri()); newBlob.getMetadata().setContainer(blobMeta.getContainer()); return newBlob; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/Main.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.io.Console; import java.io.IOException; import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.PathMatcher; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSSessionCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.google.common.base.Strings; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableBiMap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.io.MoreFiles; import 
com.google.common.util.concurrent.ThreadFactoryBuilder; import org.gaul.modernizer_maven_annotations.SuppressModernizer; import org.jclouds.Constants; import org.jclouds.ContextBuilder; import org.jclouds.JcloudsVersion; import org.jclouds.aws.domain.SessionCredentials; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.concurrent.DynamicExecutors; import org.jclouds.concurrent.config.ExecutorServiceModule; import org.jclouds.domain.Credentials; import org.jclouds.location.reference.LocationConstants; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext; import org.jclouds.s3.domain.ObjectMetadata.StorageClass; import org.kohsuke.args4j.CmdLineException; import org.kohsuke.args4j.CmdLineParser; import org.kohsuke.args4j.Option; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public final class Main { private static final Logger logger = LoggerFactory.getLogger(Main.class); private Main() { throw new AssertionError("intentionally not implemented"); } private static final class Options { @Option(name = "--properties", usage = "S3Proxy configuration (required, multiple allowed)") private List properties = new ArrayList<>(); @Option(name = "--version", usage = "display version") private boolean version; } @SuppressWarnings("EqualsIncompatibleType") public static void main(String[] args) throws Exception { Console console = System.console(); if (console == null) { System.setErr(createLoggerErrorPrintStream()); } var options = new Options(); var parser = new CmdLineParser(options); try { parser.parseArgument(args); } catch (CmdLineException cle) { usage(parser); } if (options.version) { System.err.println( Main.class.getPackage().getImplementationVersion()); System.exit(0); } else if (options.properties.isEmpty()) { usage(parser); } S3Proxy.Builder s3ProxyBuilder = null; var factory = new ThreadFactoryBuilder() 
.setNameFormat("user thread %d") .setThreadFactory(Executors.defaultThreadFactory()) .build(); ExecutorService executorService = DynamicExecutors.newScalingThreadPool( 1, 20, 60 * 1000, factory); var locators = ImmutableMap .>builder(); var globLocators = ImmutableMap .>builder(); Set locatorGlobs = new HashSet<>(); Set parsedIdentities = new HashSet<>(); for (var path : options.properties) { var properties = new Properties(); try (var is = Files.newInputStream(path)) { properties.load(is); } properties.putAll(System.getProperties()); BlobStore blobStore = createBlobStore(properties, executorService); var blobStoreType = blobStore.getContext().unwrap().getProviderMetadata().getId(); if (blobStoreType.equals("aws-s3")) { System.err.println("WARNING: aws-s3 storage backend deprecated -- please use aws-s3-sdk instead"); } else if (blobStoreType.equals("azureblob")) { System.err.println("WARNING: azureblob storage backend deprecated -- please use azureblob-sdk instead"); } else if (blobStoreType.equals("filesystem")) { System.err.println("WARNING: filesystem storage backend deprecated -- please use filesystem-nio2 instead"); } else if (blobStoreType.equals("google-cloud-storage")) { System.err.println("WARNING: google-cloud-storage storage backend deprecated -- please use google-cloud-storage-sdk instead"); } else if (blobStoreType.equals("s3")) { System.err.println("WARNING: s3 storage backend deprecated -- please use aws-s3-sdk instead"); } else if (blobStoreType.equals("transient")) { System.err.println("WARNING: transient storage backend deprecated -- please use transient-nio2 instead"); } blobStore = parseMiddlewareProperties(blobStore, executorService, properties); String s3ProxyAuthorizationString = properties.getProperty( S3ProxyConstants.PROPERTY_AUTHORIZATION); String localIdentity = null; if (AuthenticationType.fromString(s3ProxyAuthorizationString) != AuthenticationType.NONE) { localIdentity = properties.getProperty( S3ProxyConstants.PROPERTY_IDENTITY); 
String localCredential = properties.getProperty( S3ProxyConstants.PROPERTY_CREDENTIAL); if (parsedIdentities.add(localIdentity)) { locators.put(localIdentity, Map.entry(localCredential, blobStore)); } } for (String key : properties.stringPropertyNames()) { if (key.startsWith(S3ProxyConstants.PROPERTY_BUCKET_LOCATOR)) { String bucketLocator = properties.getProperty(key); if (locatorGlobs.add(bucketLocator)) { globLocators.put( FileSystems.getDefault().getPathMatcher( "glob:" + bucketLocator), Maps.immutableEntry(localIdentity, blobStore)); } else { System.err.println("Multiple definitions of the " + "bucket locator: " + bucketLocator); System.exit(1); } } } S3Proxy.Builder s3ProxyBuilder2 = S3Proxy.Builder .fromProperties(properties) .blobStore(blobStore); if (s3ProxyBuilder != null && !s3ProxyBuilder.equals(s3ProxyBuilder2)) { System.err.println("Multiple configurations require" + " identical s3proxy properties"); System.exit(1); } s3ProxyBuilder = s3ProxyBuilder2; } S3Proxy s3Proxy; try { s3Proxy = s3ProxyBuilder.build(); } catch (IllegalArgumentException | IllegalStateException e) { System.err.println(e.getMessage()); System.exit(1); throw e; } var locator = locators.build(); var globLocator = globLocators.build(); if (!locator.isEmpty() || !globLocator.isEmpty()) { s3Proxy.setBlobStoreLocator( new GlobBlobStoreLocator(locator, globLocator)); } try { s3Proxy.start(); } catch (Exception e) { System.err.println(e.getMessage()); System.exit(1); } } private static BlobStore parseMiddlewareProperties(BlobStore blobStore, ExecutorService executorService, Properties properties) throws IOException { var altProperties = new Properties(); for (var entry : properties.entrySet()) { String key = (String) entry.getKey(); if (key.startsWith(S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX)) { key = key.substring( S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX.length()); altProperties.put(key, (String) entry.getValue()); } } String eventualConsistency = properties.getProperty( 
S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY); if ("true".equalsIgnoreCase(eventualConsistency)) { BlobStore altBlobStore = createBlobStore(altProperties, executorService); int delay = Integer.parseInt(properties.getProperty( S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_DELAY, "5")); double probability = Double.parseDouble(properties.getProperty( S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_PROBABILITY, "1.0")); System.err.println("Emulating eventual consistency with delay " + delay + " seconds and probability " + (probability * 100) + "%"); blobStore = EventualBlobStore.newEventualBlobStore( blobStore, altBlobStore, Executors.newScheduledThreadPool(1), delay, TimeUnit.SECONDS, probability); } String nullBlobStore = properties.getProperty( S3ProxyConstants.PROPERTY_NULL_BLOBSTORE); if ("true".equalsIgnoreCase(nullBlobStore)) { System.err.println("Using null storage backend"); blobStore = NullBlobStore.newNullBlobStore(blobStore); } String readOnlyBlobStore = properties.getProperty( S3ProxyConstants.PROPERTY_READ_ONLY_BLOBSTORE); if ("true".equalsIgnoreCase(readOnlyBlobStore)) { System.err.println("Using read-only storage backend"); blobStore = ReadOnlyBlobStore.newReadOnlyBlobStore(blobStore); } ImmutableBiMap aliases = AliasBlobStore.parseAliases( properties); if (!aliases.isEmpty()) { System.err.println("Using alias backend"); blobStore = AliasBlobStore.newAliasBlobStore(blobStore, aliases); } Map prefixMap = PrefixBlobStore.parsePrefixes(properties); if (!prefixMap.isEmpty()) { System.err.println("Using prefix backend"); blobStore = PrefixBlobStore.newPrefixBlobStore(blobStore, prefixMap); } List> regexs = RegexBlobStore.parseRegexs(properties); if (!regexs.isEmpty()) { System.err.println("Using regex backend"); blobStore = RegexBlobStore.newRegexBlobStore(blobStore, regexs); } Map shards = ShardedBlobStore.parseBucketShards(properties); Map prefixes = ShardedBlobStore.parsePrefixes(properties); if (!shards.isEmpty()) { System.err.println("Using sharded 
buckets backend"); blobStore = ShardedBlobStore.newShardedBlobStore(blobStore, shards, prefixes); } String encryptedBlobStore = properties.getProperty( S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE); if ("true".equalsIgnoreCase(encryptedBlobStore)) { System.err.println("Using encrypted storage backend"); blobStore = EncryptedBlobStore.newEncryptedBlobStore(blobStore, properties); } var storageClass = properties.getProperty( S3ProxyConstants.PROPERTY_STORAGE_CLASS_BLOBSTORE); if (!Strings.isNullOrEmpty(storageClass)) { System.err.println("Using storage class override backend"); var storageClassBlobStore = StorageClassBlobStore.newStorageClassBlobStore( blobStore, storageClass); blobStore = storageClassBlobStore; System.err.println("Configuration storage class: " + storageClass); // TODO: This only makes sense for S3 backends. System.err.println("Mapping storage storage class to: " + StorageClass.fromTier(storageClassBlobStore.getTier())); } String userMetadataReplacerBlobStore = properties.getProperty( S3ProxyConstants.PROPERTY_USER_METADATA_REPLACER); if ("true".equalsIgnoreCase(userMetadataReplacerBlobStore)) { System.err.println("Using user metadata replacers storage backend"); String fromChars = properties.getProperty(S3ProxyConstants .PROPERTY_USER_METADATA_REPLACER_FROM_CHARS); String toChars = properties.getProperty(S3ProxyConstants .PROPERTY_USER_METADATA_REPLACER_TO_CHARS); blobStore = UserMetadataReplacerBlobStore .newUserMetadataReplacerBlobStore( blobStore, fromChars, toChars); } Map latencies = LatencyBlobStore.parseLatencies(properties); Map speeds = LatencyBlobStore.parseSpeeds(properties); if (!latencies.isEmpty() || !speeds.isEmpty()) { System.err.println("Using latency storage backend"); blobStore = LatencyBlobStore.newLatencyBlobStore(blobStore, latencies, speeds); } String noCacheBlobStore = properties.getProperty( S3ProxyConstants.PROPERTY_NO_CACHE_BLOBSTORE); if ("true".equalsIgnoreCase(noCacheBlobStore)) { System.err.println("Using no-cache 
storage backend middleware"); blobStore = NoCacheBlobStore .newNoCacheBlobStore(blobStore); } return blobStore; } private static PrintStream createLoggerErrorPrintStream() { return new PrintStream(System.err) { private final StringBuilder builder = new StringBuilder(); @Override @edu.umd.cs.findbugs.annotations.SuppressFBWarnings( "SLF4J_SIGN_ONLY_FORMAT") public void print(final String string) { logger.error("{}", string); } @Override public void write(byte[] buf, int off, int len) { for (int i = off; i < len; ++i) { char ch = (char) buf[i]; if (ch == '\n') { if (builder.length() != 0) { print(builder.toString()); builder.setLength(0); } } else { builder.append(ch); } } } }; } private static BlobStore createBlobStore(Properties properties, ExecutorService executorService) throws IOException { String provider = properties.getProperty(Constants.PROPERTY_PROVIDER); String identity = properties.getProperty(Constants.PROPERTY_IDENTITY); String credential = properties.getProperty( Constants.PROPERTY_CREDENTIAL); String endpoint = properties.getProperty(Constants.PROPERTY_ENDPOINT); properties.remove(Constants.PROPERTY_ENDPOINT); String region = properties.getProperty( LocationConstants.PROPERTY_REGION); if (provider == null) { System.err.println( "Properties file must contain: " + Constants.PROPERTY_PROVIDER); System.exit(1); } if (provider.equals("filesystem") || provider.equals("filesystem-nio2") || provider.equals("transient") || provider.equals("transient-nio2")) { // local blobstores do not require credentials identity = Strings.nullToEmpty(identity); credential = Strings.nullToEmpty(credential); } else if (provider.equals("google-cloud-storage") || provider.equals("google-cloud-storage-sdk")) { if (credential != null && !credential.isEmpty()) { var path = FileSystems.getDefault().getPath(credential); if (Files.exists(path)) { credential = MoreFiles.asCharSource(path, StandardCharsets.UTF_8).read(); } } identity = Strings.nullToEmpty(identity); credential = 
Strings.nullToEmpty(credential); properties.remove(Constants.PROPERTY_CREDENTIAL); // We also need to clear the system property, otherwise the // credential will be overridden by the system property. System.clearProperty(Constants.PROPERTY_CREDENTIAL); } if (identity == null || credential == null) { System.err.println( "Properties file must contain: " + Constants.PROPERTY_IDENTITY + " and " + Constants.PROPERTY_CREDENTIAL); System.exit(1); } properties.setProperty(Constants.PROPERTY_USER_AGENT, "s3proxy/%s jclouds/%s java/%s".formatted( Main.class.getPackage().getImplementationVersion(), JcloudsVersion.get(), System.getProperty("java.version"))); ContextBuilder builder = ContextBuilder .newBuilder(provider) .modules(List.of( new SLF4JLoggingModule(), new ExecutorServiceModule(executorService))) .overrides(properties); if (!Strings.isNullOrEmpty(endpoint)) { builder = builder.endpoint(endpoint); } if ((identity.isEmpty() || credential.isEmpty()) && provider.equals("aws-s3")) { @SuppressModernizer Supplier credentialsSupplier = new Supplier() { @Override public Credentials get() { AWSCredentialsProvider authChain = DefaultAWSCredentialsProviderChain.getInstance(); AWSCredentials newCreds = authChain.getCredentials(); Credentials jcloudsCred = null; if (newCreds instanceof AWSSessionCredentials sessionCreds) { jcloudsCred = SessionCredentials.builder() .accessKeyId(newCreds.getAWSAccessKeyId()) .secretAccessKey(newCreds.getAWSSecretKey()) .sessionToken(sessionCreds.getSessionToken()) .build(); } else { jcloudsCred = new Credentials( newCreds.getAWSAccessKeyId(), newCreds.getAWSSecretKey() ); } return jcloudsCred; } }; builder = builder.credentialsSupplier(credentialsSupplier); } else { builder = builder.credentials(identity, credential); } BlobStoreContext context = builder.build(BlobStoreContext.class); BlobStore blobStore; if (context instanceof RegionScopedBlobStoreContext regionContext && region != null) { blobStore = regionContext.getBlobStore(region); } else { 
blobStore = context.getBlobStore(); } return blobStore; } private static void usage(CmdLineParser parser) { System.err.println("Usage: s3proxy [options...]"); parser.printUsage(System.err); System.exit(1); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/MetricsHandler.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.io.IOException; import jakarta.servlet.http.HttpServlet; import jakarta.servlet.http.HttpServletRequest; import jakarta.servlet.http.HttpServletResponse; /** Servlet that serves Prometheus metrics at /metrics endpoint. */ public final class MetricsHandler extends HttpServlet { private final S3ProxyMetrics metrics; public MetricsHandler(S3ProxyMetrics metrics) { this.metrics = metrics; } @Override protected void service(HttpServletRequest request, HttpServletResponse response) throws IOException { response.setContentType("text/plain; version=0.0.4; charset=utf-8"); response.setStatus(HttpServletResponse.SC_OK); response.getWriter().write(metrics.scrape()); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/NoCacheBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; /** * BlobStore which drops ETag or date-based cache options from object requests. * This is useful as jclouds does not fully support the proxying of HTTP 304 responses. */ final class NoCacheBlobStore extends ForwardingBlobStore { private NoCacheBlobStore(BlobStore blobStore) { super(blobStore); } public static BlobStore newNoCacheBlobStore(BlobStore blobStore) { return new NoCacheBlobStore(blobStore); } @Override public Blob getBlob(String containerName, String name) { return getBlob(containerName, name, new GetOptions()); } @Override public Blob getBlob(String containerName, String name, GetOptions getOptions) { return super.getBlob(containerName, name, resetCacheHeaders(getOptions)); } static GetOptions resetCacheHeaders(GetOptions options) { if (options.getIfMatch() != null || options.getIfNoneMatch() != null || options.getIfModifiedSince() != null || options.getIfUnmodifiedSince() != null) { // as there is no exposed method to reset just the cache headers, a copy is used GetOptions optionsNoCache = new GetOptions(); for (String range : options.getRanges()) { String[] ranges = range.split("-", 2); if (ranges[0].isEmpty()) { optionsNoCache.tail(Long.parseLong(ranges[1])); } else if (ranges[1].isEmpty()) { optionsNoCache.startAt(Long.parseLong(ranges[0])); } else { optionsNoCache.range(Long.parseLong(ranges[0]), Long.parseLong(ranges[1])); } } return 
optionsNoCache; } return options; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/NullBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.Arrays; import java.util.List; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashCode; import com.google.common.io.ByteSource; import com.google.common.primitives.Longs; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.io.Payload; import org.jclouds.io.payloads.ByteSourcePayload; import org.jspecify.annotations.Nullable; final class NullBlobStore extends ForwardingBlobStore { private NullBlobStore(BlobStore blobStore) { 
super(blobStore); } static BlobStore newNullBlobStore(BlobStore blobStore) { return new NullBlobStore(blobStore); } @Override @Nullable public BlobMetadata blobMetadata(String container, String name) { Blob blob = getBlob(container, name); if (blob == null) { return null; } return blob.getMetadata(); } @Override @Nullable public Blob getBlob(String container, String name) { return getBlob(container, name, GetOptions.NONE); } @Override @Nullable public Blob getBlob(String container, String name, GetOptions options) { Blob blob = super.getBlob(container, name, options); if (blob == null) { return null; } byte[] array; try (InputStream is = blob.getPayload().openStream()) { array = is.readAllBytes(); } catch (IOException ioe) { throw new RuntimeException(ioe); } long length = Longs.fromByteArray(array); var payload = new ByteSourcePayload( new NullByteSource().slice(0, length)); payload.setContentMetadata(blob.getPayload().getContentMetadata()); payload.getContentMetadata().setContentLength(length); payload.getContentMetadata().setContentMD5((HashCode) null); blob.setPayload(payload); blob.getMetadata().setSize(length); return blob; } @Override public PageSet list(String container) { var builder = ImmutableSet.builder(); PageSet pageSet = super.list(container); for (StorageMetadata sm : pageSet) { var msm = new MutableStorageMetadataImpl(sm); msm.setSize(0L); builder.add(msm); } return new PageSetImpl<>(builder.build(), pageSet.getNextMarker()); } @Override public String putBlob(String containerName, Blob blob) { return putBlob(containerName, blob, PutOptions.NONE); } @Override public String putBlob(String containerName, Blob blob, PutOptions options) { long length; try (InputStream is = blob.getPayload().openStream()) { length = is.transferTo(OutputStream.nullOutputStream()); } catch (IOException ioe) { throw new RuntimeException(ioe); } byte[] array = Longs.toByteArray(length); var payload = new ByteSourcePayload( ByteSource.wrap(array)); 
payload.setContentMetadata(blob.getPayload().getContentMetadata()); payload.getContentMetadata().setContentLength((long) array.length); payload.getContentMetadata().setContentMD5((HashCode) null); blob.setPayload(payload); return super.putBlob(containerName, blob, options); } @Override public String completeMultipartUpload(final MultipartUpload mpu, final List parts) { long length = 0; for (MultipartPart part : parts) { length += part.partSize(); super.removeBlob(mpu.containerName(), mpu.id() + "-" + part.partNumber()); } byte[] array = Longs.toByteArray(length); var payload = new ByteSourcePayload( ByteSource.wrap(array)); payload.getContentMetadata().setContentLength((long) array.length); super.abortMultipartUpload(mpu); MultipartUpload mpu2 = super.initiateMultipartUpload( mpu.containerName(), mpu.blobMetadata(), mpu.putOptions()); MultipartPart part = super.uploadMultipartPart(mpu2, 1, payload); return super.completeMultipartUpload(mpu2, List.of(part)); } @Override public void abortMultipartUpload(MultipartUpload mpu) { for (MultipartPart part : super.listMultipartUpload(mpu)) { super.removeBlob(mpu.containerName(), mpu.id() + "-" + part.partNumber()); } super.abortMultipartUpload(mpu); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { long length; try (InputStream is = payload.openStream()) { length = is.transferTo(OutputStream.nullOutputStream()); } catch (IOException ioe) { throw new RuntimeException(ioe); } byte[] array = Longs.toByteArray(length); var newPayload = new ByteSourcePayload( ByteSource.wrap(array)); newPayload.setContentMetadata(payload.getContentMetadata()); newPayload.getContentMetadata().setContentLength((long) array.length); newPayload.getContentMetadata().setContentMD5((HashCode) null); // create a single-part object which contains the logical length which // list and complete will read later Blob blob = blobBuilder(mpu.id() + "-" + partNumber) .payload(newPayload) .build(); 
super.putBlob(mpu.containerName(), blob); MultipartPart part = super.uploadMultipartPart(mpu, partNumber, newPayload); return MultipartPart.create(part.partNumber(), length, part.partETag(), part.lastModified()); } @Override public List listMultipartUpload(MultipartUpload mpu) { var builder = ImmutableList.builder(); for (MultipartPart part : super.listMultipartUpload(mpu)) { // get real blob size from stub blob Blob blob = getBlob(mpu.containerName(), mpu.id() + "-" + part.partNumber()); long length = blob.getPayload().getContentMetadata() .getContentLength(); builder.add(MultipartPart.create(part.partNumber(), length, part.partETag(), part.lastModified())); } return builder.build(); } private static final class NullByteSource extends ByteSource { @Override public InputStream openStream() throws IOException { return new NullInputStream(); } } private static final class NullInputStream extends InputStream { private boolean closed; @Override public int read() throws IOException { if (closed) { throw new IOException("Stream already closed"); } return 0; } @Override public int read(byte[] b, int off, int len) throws IOException { if (closed) { throw new IOException("Stream already closed"); } Arrays.fill(b, off, off + len, (byte) 0); return len; } @Override public void close() throws IOException { super.close(); closed = true; } } } ================================================ FILE: src/main/java/org/gaul/s3proxy/PrefixBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl; import org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.io.Payload; /** * Middleware that scopes a virtual bucket to a fixed backend prefix. 
*/ public final class PrefixBlobStore extends ForwardingBlobStore { private final Map prefixes; private PrefixBlobStore(BlobStore delegate, Map prefixes) { super(delegate); this.prefixes = ImmutableMap.copyOf(requireNonNull(prefixes)); } static BlobStore newPrefixBlobStore(BlobStore delegate, Map prefixes) { String blobStoreType = delegate.getContext().unwrap() .getProviderMetadata().getId(); if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) { throw new UnsupportedOperationException( "Only supports opaque markers"); } return new PrefixBlobStore(delegate, prefixes); } public static Map parsePrefixes(Properties properties) { Map prefixMap = new HashMap<>(); for (String key : properties.stringPropertyNames()) { if (!key.startsWith(S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE + ".")) { continue; } String bucket = key.substring( S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE.length() + 1); String prefix = properties.getProperty(key); checkArgument(!Strings.isNullOrEmpty(bucket), "Prefix property %s must specify a bucket", key); checkArgument(!Strings.isNullOrEmpty(prefix), "Prefix for bucket %s must not be empty", bucket); checkArgument(prefixMap.put(bucket, prefix) == null, "Multiple prefixes configured for bucket %s", bucket); } return ImmutableMap.copyOf(prefixMap); } private boolean hasPrefix(String container) { return this.prefixes.containsKey(container); } private String getPrefix(String container) { return this.prefixes.get(container); } private String addPrefix(String container, String name) { if (!hasPrefix(container) || Strings.isNullOrEmpty(name)) { return name; } String prefix = getPrefix(container); if (name.startsWith(prefix)) { return name; } if (prefix.endsWith("/") && name.startsWith("/")) { return prefix + name.substring(1); } return prefix + name; } private String trimPrefix(String container, String name) { if (!hasPrefix(container) || Strings.isNullOrEmpty(name)) { return name; } String prefix = getPrefix(container); if (name.startsWith(prefix)) { return 
name.substring(prefix.length()); } return name; } private BlobMetadata trimBlobMetadata(String container, BlobMetadata metadata) { if (metadata == null || !hasPrefix(container)) { return metadata; } var mutable = new MutableBlobMetadataImpl(metadata); mutable.setName(trimPrefix(container, metadata.getName())); return mutable; } private Blob trimBlob(String container, Blob blob) { if (blob == null || !hasPrefix(container)) { return blob; } blob.getMetadata().setName( trimPrefix(container, blob.getMetadata().getName())); return blob; } private MultipartUpload toDelegateMultipartUpload(MultipartUpload upload) { if (upload == null || !hasPrefix(upload.containerName())) { return upload; } var metadata = upload.blobMetadata() == null ? null : new MutableBlobMetadataImpl(upload.blobMetadata()); if (metadata != null) { metadata.setName( addPrefix(upload.containerName(), metadata.getName())); } return MultipartUpload.create(upload.containerName(), addPrefix(upload.containerName(), upload.blobName()), upload.id(), metadata, upload.putOptions()); } private MultipartUpload toClientMultipartUpload(MultipartUpload upload) { if (upload == null || !hasPrefix(upload.containerName())) { return upload; } var metadata = upload.blobMetadata() == null ? null : new MutableBlobMetadataImpl(upload.blobMetadata()); if (metadata != null) { metadata.setName( trimPrefix(upload.containerName(), metadata.getName())); } return MultipartUpload.create(upload.containerName(), trimPrefix(upload.containerName(), upload.blobName()), upload.id(), metadata, upload.putOptions()); } private ListContainerOptions applyPrefix(String container, ListContainerOptions options) { if (!hasPrefix(container)) { return options; } ListContainerOptions effective = options == null ? 
new ListContainerOptions() : options.clone(); String basePrefix = getPrefix(container); String requestedPrefix = effective.getPrefix(); String requestedMarker = effective.getMarker(); String requestedDir = effective.getDir(); if (Strings.isNullOrEmpty(requestedPrefix)) { effective.prefix(basePrefix); } else { effective.prefix(addPrefix(container, requestedPrefix)); } if (!Strings.isNullOrEmpty(requestedMarker)) { effective.afterMarker(addPrefix(container, requestedMarker)); } if (!Strings.isNullOrEmpty(requestedDir)) { effective.inDirectory(addPrefix(container, requestedDir)); } return effective; } private PageSet trimListing(String container, PageSet listing) { if (!hasPrefix(container)) { return listing; } var builder = ImmutableList.builder(); for (StorageMetadata metadata : listing) { if (metadata instanceof BlobMetadata blobMetadata) { var mutable = new MutableBlobMetadataImpl(blobMetadata); mutable.setName(trimPrefix(container, blobMetadata.getName())); builder.add(mutable); } else { var mutable = new MutableStorageMetadataImpl(metadata); mutable.setName(trimPrefix(container, metadata.getName())); builder.add(mutable); } } String nextMarker = listing.getNextMarker(); if (nextMarker != null) { nextMarker = trimPrefix(container, nextMarker); } return new PageSetImpl<>(builder.build(), nextMarker); } @Override public boolean directoryExists(String container, String directory) { return super.directoryExists(container, addPrefix(container, directory)); } @Override public void createDirectory(String container, String directory) { super.createDirectory(container, addPrefix(container, directory)); } @Override public void deleteDirectory(String container, String directory) { super.deleteDirectory(container, addPrefix(container, directory)); } @Override public boolean blobExists(String container, String name) { return super.blobExists(container, addPrefix(container, name)); } @Override public BlobMetadata blobMetadata(String container, String name) { return 
trimBlobMetadata(container, super.blobMetadata(container, addPrefix(container, name))); } @Override public Blob getBlob(String containerName, String blobName) { return trimBlob(containerName, super.getBlob(containerName, addPrefix(containerName, blobName))); } @Override public Blob getBlob(String containerName, String blobName, GetOptions getOptions) { return trimBlob(containerName, super.getBlob(containerName, addPrefix(containerName, blobName), getOptions)); } @Override public String putBlob(String containerName, Blob blob) { String originalName = blob.getMetadata().getName(); blob.getMetadata().setName(addPrefix(containerName, originalName)); try { return super.putBlob(containerName, blob); } finally { blob.getMetadata().setName(originalName); } } @Override public String putBlob(String containerName, Blob blob, PutOptions options) { String originalName = blob.getMetadata().getName(); blob.getMetadata().setName(addPrefix(containerName, originalName)); try { return super.putBlob(containerName, blob, options); } finally { blob.getMetadata().setName(originalName); } } @Override public void removeBlob(String container, String name) { super.removeBlob(container, addPrefix(container, name)); } @Override public void removeBlobs(String container, Iterable names) { if (!hasPrefix(container)) { super.removeBlobs(container, names); return; } var builder = ImmutableList.builder(); for (String name : names) { builder.add(addPrefix(container, name)); } super.removeBlobs(container, builder.build()); } @Override public BlobAccess getBlobAccess(String container, String name) { return super.getBlobAccess(container, addPrefix(container, name)); } @Override public void setBlobAccess(String container, String name, BlobAccess access) { super.setBlobAccess(container, addPrefix(container, name), access); } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { return super.copyBlob(fromContainer, 
addPrefix(fromContainer, fromName), toContainer, addPrefix(toContainer, toName), options); } @Override public PageSet list(String container) { if (!hasPrefix(container)) { return super.list(container); } return list(container, new ListContainerOptions()); } @Override public PageSet list(String container, ListContainerOptions options) { if (!hasPrefix(container)) { return super.list(container, options); } var effective = applyPrefix(container, options); return trimListing(container, super.list(container, effective)); } @Override public void clearContainer(String container) { if (!hasPrefix(container)) { super.clearContainer(container); return; } var options = new ListContainerOptions() .prefix(getPrefix(container)) .recursive(); super.clearContainer(container, options); } @Override public void clearContainer(String container, ListContainerOptions options) { if (!hasPrefix(container)) { super.clearContainer(container, options); return; } super.clearContainer(container, applyPrefix(container, options)); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { var mutable = new MutableBlobMetadataImpl(blobMetadata); mutable.setName(addPrefix(container, blobMetadata.getName())); MultipartUpload upload = super.initiateMultipartUpload(container, mutable, options); return toClientMultipartUpload(upload); } @Override public void abortMultipartUpload(MultipartUpload mpu) { super.abortMultipartUpload(toDelegateMultipartUpload(mpu)); } @Override public String completeMultipartUpload(MultipartUpload mpu, List parts) { return super.completeMultipartUpload( toDelegateMultipartUpload(mpu), parts); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { return super.uploadMultipartPart( toDelegateMultipartUpload(mpu), partNumber, payload); } @Override public List listMultipartUpload(MultipartUpload mpu) { return 
super.listMultipartUpload(toDelegateMultipartUpload(mpu)); } @Override public List listMultipartUploads(String container) { List uploads = super.listMultipartUploads(container); if (!hasPrefix(container)) { return uploads; } var builder = ImmutableList.builder(); for (MultipartUpload upload : uploads) { builder.add(toClientMultipartUpload(upload)); } return builder.build(); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/PutOptions2.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import com.google.common.util.concurrent.ListeningExecutorService; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.options.PutOptions; import org.jspecify.annotations.Nullable; /** * This class extends jclouds' PutOptions to support conditional put operations via * the If-Match and If-None-Match headers. 
*/ public final class PutOptions2 extends PutOptions { @Nullable private String ifMatch; @Nullable private String ifNoneMatch; public PutOptions2() { super(); } public PutOptions2(PutOptions options) { super(options.isMultipart(), options.getUseCustomExecutor(), options.getCustomExecutor()); this.setBlobAccess(options.getBlobAccess()); if (options instanceof PutOptions2 other) { this.ifMatch = other.ifMatch; this.ifNoneMatch = other.ifNoneMatch; } } @Nullable public String getIfMatch() { return ifMatch; } public PutOptions2 setIfMatch(@Nullable String etag) { this.ifMatch = etag; return this; } @Nullable public String getIfNoneMatch() { return ifNoneMatch; } public PutOptions2 setIfNoneMatch(@Nullable String etag) { this.ifNoneMatch = etag; return this; } @Override public PutOptions2 setBlobAccess(BlobAccess blobAccess) { super.setBlobAccess(blobAccess); return this; } @Override public PutOptions2 multipart() { super.multipart(); return this; } @Override public PutOptions2 multipart(boolean val) { super.multipart(val); return this; } @Override public PutOptions2 multipart(ListeningExecutorService customExecutor) { super.multipart(customExecutor); return this; } @Override public PutOptions2 setCustomExecutor(ListeningExecutorService customExecutor) { super.setCustomExecutor(customExecutor); return this; } @Override public String toString() { String s = super.toString(); return s.substring(0, s.length() - 1) + ", ifMatch=" + ifMatch + ", ifNoneMatch=" + ifNoneMatch + "]"; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/Quirks.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import java.util.Set;

/**
 * Per-backend capability quirks, keyed by jclouds provider id.  Middlewares
 * and the request handler consult these sets to emulate or reject features a
 * given backend lacks.
 */
final class Quirks {
    /** Blobstores which do not support blob-level access control. */
    static final Set NO_BLOB_ACCESS_CONTROL = Set.of(
            "azureblob",
            "azureblob-sdk",
            "b2",
            "google-cloud-storage-sdk",
            "rackspace-cloudfiles-uk",
            "rackspace-cloudfiles-us",
            "openstack-swift"
    );

    /** Blobstores which do not support the Cache-Control header. */
    static final Set NO_CACHE_CONTROL_SUPPORT = Set.of(
            "atmos",
            "b2",
            "google-cloud-storage",
            "google-cloud-storage-sdk",
            "rackspace-cloudfiles-uk",
            "rackspace-cloudfiles-us",
            "openstack-swift"
    );

    /** Blobstores which do not support the Content-Disposition header. */
    static final Set NO_CONTENT_DISPOSITION = Set.of(
            "b2"
    );

    /** Blobstores which do not support the Content-Encoding header. */
    static final Set NO_CONTENT_ENCODING = Set.of(
            "b2",
            "google-cloud-storage",
            "google-cloud-storage-sdk"
    );

    /** Blobstores which do not support the Content-Language header. */
    static final Set NO_CONTENT_LANGUAGE = Set.of(
            "b2",
            "rackspace-cloudfiles-uk",
            "rackspace-cloudfiles-us",
            "openstack-swift"
    );

    /** Blobstores which do not support the If-None-Match header during copy. */
    static final Set NO_COPY_IF_NONE_MATCH = Set.of(
            "openstack-swift",
            "rackspace-cloudfiles-uk",
            "rackspace-cloudfiles-us"
    );

    /** Blobstores which do not support the Expires header. */
    static final Set NO_EXPIRES = Set.of(
            "azureblob",
            "azureblob-sdk"
    );

    /** Blobstores which cannot list in-progress multipart uploads. */
    static final Set NO_LIST_MULTIPART_UPLOADS = Set.of(
            "atmos",
            "filesystem",
            "google-cloud-storage",
            "openstack-swift",
            "rackspace-cloudfiles-uk",
            "rackspace-cloudfiles-us",
            "transient"
    );

    /** Blobstores which do not allow listing zero keys. */
    static final Set NO_LIST_ZERO_KEYS = Set.of(
            "atmos",
            "azureblob",
            "azureblob-sdk"
    );

    /**
     * S3 stores object metadata during initiate multipart while others
     * require it during complete multipart.  Emulate the former in the latter
     * by storing and retrieving a stub object.
     *
     * Note: azureblob-sdk also uses stubs for multipart uploads but handles
     * this internally in AzureBlobStore rather than in S3ProxyHandler.
     */
    static final Set MULTIPART_REQUIRES_STUB = Set.of(
            "azureblob",
            "filesystem",
            "filesystem-nio2",
            "google-cloud-storage",
            "openstack-swift",
            "transient",
            "transient-nio2"
    );

    /** Blobstores with opaque ETags. */
    static final Set OPAQUE_ETAG = Set.of(
            "azureblob",
            "azureblob-sdk",
            "b2",
            "google-cloud-storage",
            "google-cloud-storage-sdk"
    );

    /** Blobstores with opaque markers. */
    static final Set OPAQUE_MARKERS = Set.of(
            "azureblob",
            "azureblob-sdk",
            // S3 marker means one past this token while B2 means this token
            "b2",
            "google-cloud-storage",
            "google-cloud-storage-sdk"
    );

    // Static-only holder; never instantiated.
    private Quirks() {
        throw new AssertionError("Intentionally unimplemented");
    }
}


================================================
FILE: src/main/java/org/gaul/s3proxy/ReadOnlyBlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import java.util.List; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.domain.Location; import org.jclouds.io.Payload; /** This class is a BlobStore wrapper which prevents mutating operations. */ final class ReadOnlyBlobStore extends ForwardingBlobStore { private ReadOnlyBlobStore(BlobStore blobStore) { super(blobStore); } static BlobStore newReadOnlyBlobStore(BlobStore blobStore) { return new ReadOnlyBlobStore(blobStore); } @Override public boolean createContainerInLocation(Location location, String container, CreateContainerOptions options) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public void deleteContainer(String container) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public boolean deleteContainerIfEmpty(String container) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public String putBlob(String containerName, Blob blob) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public String putBlob(final String containerName, Blob blob, final PutOptions options) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public void removeBlob(final String containerName, final String blobName) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public void removeBlobs(final String containerName, final Iterable blobNames) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public String copyBlob(final String fromContainer, final 
String fromName, final String toContainer, final String toName, final CopyOptions options) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public void abortMultipartUpload(MultipartUpload mpu) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public String completeMultipartUpload(final MultipartUpload mpu, final List parts) { throw new UnsupportedOperationException("read-only BlobStore"); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { throw new UnsupportedOperationException("read-only BlobStore"); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/RegexBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; import java.io.File; import java.io.InputStream; import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.concurrent.ExecutorService; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class implements a middleware to apply regex to blob names. * The regex are configured as: * s3proxy.regex-blobstore.match.<regex name> = <regex match * expression> * s3proxy.regex-blobstore.replace.<regex name> = <regex replace * expression> * * You can add multiple regex, they will be applied from the beginning to the * end, * stopping as soon as the first regex matches. 
*/ public final class RegexBlobStore extends ForwardingBlobStore { private static final Logger logger = LoggerFactory.getLogger( RegexBlobStore.class); private final List> regexs; private RegexBlobStore(BlobStore blobStore, List> regexs) { super(blobStore); this.regexs = requireNonNull(regexs); } static BlobStore newRegexBlobStore(BlobStore delegate, List> regexs) { return new RegexBlobStore(delegate, regexs); } public static List> parseRegexs( Properties properties) { List> configRegex = new ArrayList<>(); List> regexs = new ArrayList<>(); for (String key : properties.stringPropertyNames()) { if (key.startsWith(S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE)) { String propKey = key.substring( S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE.length() + 1); String value = properties.getProperty(key); configRegex.add(new SimpleEntry<>(propKey, value)); } } for (Entry entry : configRegex) { String key = entry.getKey(); if (key.startsWith( S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH)) { String regexName = key.substring(S3ProxyConstants .PROPERTY_REGEX_BLOBSTORE_MATCH.length() + 1); String regex = entry.getValue(); Pattern pattern = Pattern.compile(regex); String replace = properties.getProperty(String.join( ".", S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE, S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_REPLACE, regexName)); checkArgument( replace != null, "Regex %s has no replace property associated", regexName); logger.info( "Adding new regex with name {} replaces with {} to {}", regexName, regex, replace); regexs.add(new SimpleEntry<>(pattern, replace)); } } return List.copyOf(regexs); } @Override public boolean directoryExists(String container, String directory) { return super.directoryExists(container, replaceBlobName(directory)); } @Override public void createDirectory(String container, String directory) { super.createDirectory(container, replaceBlobName(directory)); } @Override public void deleteDirectory(String container, String directory) { super.deleteDirectory(container, 
replaceBlobName(directory)); } @Override public boolean blobExists(String container, String name) { return super.blobExists(container, replaceBlobName(name)); } @Override public String putBlob(String containerName, Blob blob) { String name = blob.getMetadata().getName(); String newName = replaceBlobName(name); blob.getMetadata().setName(newName); logger.debug("Renaming blob name from {} to {}", name, newName); return super.putBlob(containerName, blob); } @Override public String putBlob(String containerName, Blob blob, PutOptions putOptions) { String name = blob.getMetadata().getName(); String newName = replaceBlobName(name); blob.getMetadata().setName(newName); logger.debug("Renaming blob name from {} to {}", name, newName); return super.putBlob(containerName, blob, putOptions); } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { return super.copyBlob(fromContainer, replaceBlobName(fromName), toContainer, replaceBlobName(toName), options); } @Override public BlobMetadata blobMetadata(String container, String name) { return super.blobMetadata(container, replaceBlobName(name)); } @Override public Blob getBlob(String containerName, String name) { return super.getBlob(containerName, replaceBlobName(name)); } @Override public void removeBlob(String container, String name) { super.removeBlob(container, replaceBlobName(name)); } @Override public void removeBlobs(String container, Iterable iterable) { List blobs = new ArrayList<>(); for (String name : iterable) { blobs.add(replaceBlobName(name)); } super.removeBlobs(container, blobs); } @Override public BlobAccess getBlobAccess(String container, String name) { return super.getBlobAccess(container, replaceBlobName(name)); } @Override public void setBlobAccess(String container, String name, BlobAccess access) { super.setBlobAccess(container, replaceBlobName(name), access); } @Override public void downloadBlob(String container, String name, File 
destination) { super.downloadBlob(container, replaceBlobName(name), destination); } @Override public void downloadBlob(String container, String name, File destination, ExecutorService executor) { super.downloadBlob(container, replaceBlobName(name), destination, executor); } @Override public InputStream streamBlob(String container, String name) { return super.streamBlob(container, replaceBlobName(name)); } @Override public InputStream streamBlob(String container, String name, ExecutorService executor) { return super.streamBlob(container, replaceBlobName(name), executor); } private String replaceBlobName(String name) { String newName = name; for (var entry : this.regexs) { Pattern pattern = entry.getKey(); Matcher match = pattern.matcher(name); if (match.find()) { return match.replaceAll(entry.getValue()); } } return newName; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3AuthorizationHeader.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import java.util.List; import java.util.Map; import com.google.common.base.Splitter; import org.jspecify.annotations.Nullable; final class S3AuthorizationHeader { private static final Map DIGEST_MAP = Map.of( "SHA256", "SHA-256", "SHA1", "SHA-1", "MD5", "MD5"); private static final String SIGNATURE_FIELD = "Signature="; private static final String CREDENTIAL_FIELD = "Credential="; private final AuthenticationType authenticationType; @Nullable private final String hmacAlgorithm; @Nullable private final String hashAlgorithm; @Nullable private final String region; @Nullable private final String date; @Nullable private final String service; private final String identity; private final String signature; S3AuthorizationHeader(String header) { if (header.startsWith("AWS ")) { authenticationType = AuthenticationType.AWS_V2; hmacAlgorithm = null; hashAlgorithm = null; region = null; date = null; service = null; List fields = Splitter.on(' ').splitToList(header); if (fields.size() != 2) { throw new IllegalArgumentException("Invalid header"); } List identityTuple = Splitter.on(':').splitToList( fields.get(1)); if (identityTuple.size() != 2) { throw new IllegalArgumentException("Invalid header"); } identity = identityTuple.get(0); signature = identityTuple.get(1); } else if (header.startsWith("AWS4-HMAC")) { authenticationType = AuthenticationType.AWS_V4; signature = extractSignature(header); int credentialIndex = header.indexOf(CREDENTIAL_FIELD); if (credentialIndex < 0) { throw new IllegalArgumentException("Invalid header"); } int credentialEnd = header.indexOf(',', credentialIndex); if (credentialEnd < 0) { throw new IllegalArgumentException("Invalid header"); } String credential = header.substring(credentialIndex + CREDENTIAL_FIELD.length(), credentialEnd); List fields = Splitter.on('/').splitToList(credential); if (fields.size() != 5) { throw new IllegalArgumentException( "Invalid Credential: " + credential); } identity = fields.get(0); date = 
fields.get(1); region = fields.get(2); service = fields.get(3); String awsSignatureVersion = header.substring( 0, header.indexOf(' ')); hashAlgorithm = DIGEST_MAP.get(Splitter.on('-').splitToList( awsSignatureVersion).get(2)); hmacAlgorithm = "Hmac" + Splitter.on('-').splitToList( awsSignatureVersion).get(2); } else { throw new IllegalArgumentException("Invalid header"); } } @Override public String toString() { return "Identity: " + identity + "; Signature: " + signature + "; HMAC algorithm: " + hmacAlgorithm + "; Hash algorithm: " + hashAlgorithm + "; region: " + region + "; date: " + date + "; service " + service; } private static String extractSignature(String header) { int signatureIndex = header.indexOf(SIGNATURE_FIELD); if (signatureIndex < 0) { throw new IllegalArgumentException("Invalid signature"); } signatureIndex += SIGNATURE_FIELD.length(); int signatureEnd = header.indexOf(',', signatureIndex); if (signatureEnd < 0) { return header.substring(signatureIndex); } else { return header.substring(signatureIndex, signatureEnd); } } public AuthenticationType getAuthenticationType() { return authenticationType; } public String getHmacAlgorithm() { return hmacAlgorithm; } public String getHashAlgorithm() { return hashAlgorithm; } public String getRegion() { return region; } public String getDate() { return date; } public String getService() { return service; } public String getIdentity() { return identity; } public String getSignature() { return signature; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3ErrorCode.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import static java.util.Objects.requireNonNull;

import com.google.common.base.CaseFormat;

import jakarta.servlet.http.HttpServletResponse;

/**
 * List of S3 error codes. Reference:
 * http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
 *
 * Each constant pairs the HTTP status code returned to the client with the
 * human-readable message body; the wire-format error code (e.g., "NoSuchKey")
 * is derived from the constant's name in the constructor.
 */
public enum S3ErrorCode {
    ACCESS_DENIED(HttpServletResponse.SC_FORBIDDEN, "Forbidden"),
    BAD_DIGEST(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"),
    BUCKET_ALREADY_EXISTS(HttpServletResponse.SC_FORBIDDEN,
            "The requested bucket name is not available." +
            " The bucket namespace is shared by all users of the system." +
            " Please select a different name and try again."),
    BUCKET_ALREADY_OWNED_BY_YOU(HttpServletResponse.SC_CONFLICT,
            "Your previous request to create the named bucket" +
            " succeeded and you already own it."),
    BUCKET_NOT_EMPTY(HttpServletResponse.SC_CONFLICT,
            "The bucket you tried to delete is not empty"),
    ENTITY_TOO_LARGE(HttpServletResponse.SC_BAD_REQUEST,
            "Your proposed upload exceeds the maximum allowed object size."),
    ENTITY_TOO_SMALL(HttpServletResponse.SC_BAD_REQUEST,
            "Your proposed upload is smaller than the minimum allowed object" +
            " size. Each part must be at least 5 MB in size, except the last" +
            " part."),
    INTERNAL_ERROR(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
            "An internal error occurred. Try again."),
    INVALID_ACCESS_KEY_ID(HttpServletResponse.SC_FORBIDDEN, "Forbidden"),
    INVALID_ARGUMENT(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"),
    INVALID_BUCKET_NAME(HttpServletResponse.SC_BAD_REQUEST,
            "The specified bucket is not valid."),
    INVALID_CORS_ORIGIN(HttpServletResponse.SC_BAD_REQUEST,
            "Insufficient information. Origin request header needed."),
    INVALID_CORS_METHOD(HttpServletResponse.SC_BAD_REQUEST,
            "The specified Access-Control-Request-Method is not valid."),
    INVALID_DIGEST(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"),
    INVALID_LOCATION_CONSTRAINT(HttpServletResponse.SC_BAD_REQUEST,
            "The specified location constraint is not valid. For" +
            " more information about Regions, see How to Select" +
            " a Region for Your Buckets."),
    INVALID_RANGE(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE,
            "The requested range is not satisfiable"),
    INVALID_PART(HttpServletResponse.SC_BAD_REQUEST,
            "One or more of the specified parts could not be found." +
            " The part may not have been uploaded, or the specified entity" +
            " tag may not match the part's entity tag."),
    INVALID_PART_ORDER(HttpServletResponse.SC_BAD_REQUEST,
            "The list of parts must be specified in ascending" +
            " PartNumber order."),
    INVALID_REQUEST(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"),
    // Constant name deliberately chosen so CaseFormat yields "MalformedXML".
    MALFORMED_X_M_L(HttpServletResponse.SC_BAD_REQUEST,
            "The XML you provided was not well-formed or did not validate" +
            " against our published schema."),
    MAX_MESSAGE_LENGTH_EXCEEDED(HttpServletResponse.SC_BAD_REQUEST,
            "Your request was too big."),
    METHOD_NOT_ALLOWED(HttpServletResponse.SC_METHOD_NOT_ALLOWED,
            "Method Not Allowed"),
    MISSING_CONTENT_LENGTH(HttpServletResponse.SC_LENGTH_REQUIRED,
            "Length Required"),
    NO_SUCH_BUCKET(HttpServletResponse.SC_NOT_FOUND,
            "The specified bucket does not exist"),
    NO_SUCH_KEY(HttpServletResponse.SC_NOT_FOUND,
            "The specified key does not exist."),
    NO_SUCH_POLICY(HttpServletResponse.SC_NOT_FOUND,
            "The specified bucket does not have a bucket policy."),
    NO_SUCH_UPLOAD(HttpServletResponse.SC_NOT_FOUND, "Not Found"),
    NOT_IMPLEMENTED(HttpServletResponse.SC_NOT_IMPLEMENTED,
            "A header you provided implies functionality that is not" +
            " implemented."),
    PRECONDITION_FAILED(HttpServletResponse.SC_PRECONDITION_FAILED,
            "At least one of the preconditions you specified did not hold."),
    REQUEST_TIME_TOO_SKEWED(HttpServletResponse.SC_FORBIDDEN, "Forbidden"),
    REQUEST_TIMEOUT(HttpServletResponse.SC_BAD_REQUEST, "Bad Request"),
    SIGNATURE_DOES_NOT_MATCH(HttpServletResponse.SC_FORBIDDEN, "Forbidden"),
    // Name yields wire code "XAmzContentSHA256Mismatch" via CaseFormat.
    X_AMZ_CONTENT_S_H_A_256_MISMATCH(HttpServletResponse.SC_BAD_REQUEST,
            "The provided 'x-amz-content-sha256' header does not match what" +
            " was computed.");

    // Wire-format error code, e.g., "NoSuchKey", derived from name().
    private final String errorCode;
    private final int httpStatusCode;
    private final String message;

    S3ErrorCode(int httpStatusCode, String message) {
        // Convert NO_SUCH_KEY -> NoSuchKey for the XML <Code> element.
        this.errorCode = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL,
                name());
        this.httpStatusCode = httpStatusCode;
        this.message = requireNonNull(message);
    }

    /** Returns the wire-format error code, e.g., "NoSuchKey". */
    String getErrorCode() {
        return errorCode;
    }

    /** Returns the HTTP status code sent with this error. */
    int getHttpStatusCode() {
        return httpStatusCode;
    }

    /** Returns the human-readable message body for this error. */
    String getMessage() {
        return message;
    }

    @Override
    public String toString() {
        return getHttpStatusCode() + " " + getErrorCode() + " " + getMessage();
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/S3Exception.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import static java.util.Objects.requireNonNull; import java.util.Map; @SuppressWarnings("serial") public final class S3Exception extends Exception { private final S3ErrorCode error; private final Map elements; S3Exception(S3ErrorCode error) { this(error, error.getMessage(), (Throwable) null, Map.of()); } S3Exception(S3ErrorCode error, String message) { this(error, message, (Throwable) null, Map.of()); } S3Exception(S3ErrorCode error, Throwable cause) { this(error, error.getMessage(), cause, Map.of()); } S3Exception(S3ErrorCode error, String message, Throwable cause) { this(error, message, cause, Map.of()); } S3Exception(S3ErrorCode error, String message, Throwable cause, Map elements) { super(requireNonNull(message), cause); this.error = requireNonNull(error); this.elements = Map.copyOf(elements); } S3ErrorCode getError() { return error; } Map getElements() { return elements; } @Override public String getMessage() { var builder = new StringBuilder().append(super.getMessage()); if (!elements.isEmpty()) { builder.append(" ").append(elements); } return builder.toString(); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3Operation.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; /** Enumeration of S3 operations for metrics tracking. 
*/
public enum S3Operation {
    LIST_BUCKETS("ListBuckets"),
    LIST_OBJECTS_V2("ListObjectsV2"),
    GET_OBJECT("GetObject"),
    PUT_OBJECT("PutObject"),
    DELETE_OBJECT("DeleteObject"),
    DELETE_OBJECTS("DeleteObjects"),
    CREATE_BUCKET("CreateBucket"),
    DELETE_BUCKET("DeleteBucket"),
    HEAD_BUCKET("HeadBucket"),
    HEAD_OBJECT("HeadObject"),
    COPY_OBJECT("CopyObject"),
    CREATE_MULTIPART_UPLOAD("CreateMultipartUpload"),
    UPLOAD_PART("UploadPart"),
    UPLOAD_PART_COPY("UploadPartCopy"),
    COMPLETE_MULTIPART_UPLOAD("CompleteMultipartUpload"),
    ABORT_MULTIPART_UPLOAD("AbortMultipartUpload"),
    LIST_MULTIPART_UPLOADS("ListMultipartUploads"),
    LIST_PARTS("ListParts"),
    GET_OBJECT_ACL("GetObjectAcl"),
    PUT_OBJECT_ACL("PutObjectAcl"),
    GET_BUCKET_ACL("GetBucketAcl"),
    PUT_BUCKET_ACL("PutBucketAcl"),
    GET_BUCKET_LOCATION("GetBucketLocation"),
    GET_BUCKET_POLICY("GetBucketPolicy"),
    OPTIONS_OBJECT("OptionsObject"),
    UNKNOWN("Unknown");

    // Operation name as it appears in metrics labels, e.g., "GetObject".
    private final String apiName;

    S3Operation(String apiName) {
        this.apiName = apiName;
    }

    /** Returns the S3 API name of this operation, e.g., "GetObject". */
    public String getValue() {
        return apiName;
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/S3Proxy.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import static java.util.Objects.requireNonNull; import static com.google.common.base.Preconditions.checkArgument; import java.net.URI; import java.net.URISyntaxException; import java.util.Collection; import java.util.Objects; import java.util.Properties; import javax.net.ssl.SSLContext; import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.collect.Lists; import org.eclipse.jetty.ee10.servlet.ServletContextHandler; import org.eclipse.jetty.ee10.servlet.ServletHolder; import org.eclipse.jetty.http.HttpCompliance; import org.eclipse.jetty.http.UriCompliance; import org.eclipse.jetty.server.HttpConfiguration; import org.eclipse.jetty.server.HttpConnectionFactory; import org.eclipse.jetty.server.SecureRequestCustomizer; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.util.ssl.SslContextFactory; import org.eclipse.jetty.util.thread.QueuedThreadPool; import org.jclouds.blobstore.BlobStore; /** * S3Proxy translates S3 HTTP operations into jclouds provider-agnostic * operations. This allows applications using the S3 API to interface with any * provider that jclouds supports, e.g., EMC Atmos, Microsoft Azure, * OpenStack Swift. 
*/ public final class S3Proxy { private final Server server; private final S3ProxyHandlerJetty handler; private final S3ProxyMetrics metrics; private final boolean listenHTTP; private final boolean listenHTTPS; S3Proxy(Builder builder) { checkArgument(builder.endpoint != null || builder.secureEndpoint != null, "Must provide endpoint or secure-endpoint"); if (builder.endpoint != null) { checkArgument(builder.endpoint.getPath().isEmpty(), "endpoint path must be empty, was: %s", builder.endpoint.getPath()); } if (builder.secureEndpoint != null) { checkArgument(builder.secureEndpoint.getPath().isEmpty(), "secure-endpoint path must be empty, was: %s", builder.secureEndpoint.getPath()); if (builder.sslContext == null) { requireNonNull(builder.keyStorePath, "Must provide keyStorePath with HTTPS endpoint"); requireNonNull(builder.keyStorePassword, "Must provide keyStorePassword with HTTPS endpoint"); } } checkArgument(Strings.isNullOrEmpty(builder.identity) ^ !Strings.isNullOrEmpty(builder.credential), "Must provide both identity and credential"); var pool = new QueuedThreadPool(builder.jettyMaxThreads); pool.setName("S3Proxy-Jetty"); server = new Server(pool); var httpConfiguration = new HttpConfiguration(); httpConfiguration.setHttpCompliance(HttpCompliance.LEGACY); httpConfiguration.setUriCompliance(UriCompliance.LEGACY); var src = new SecureRequestCustomizer(); src.setSniHostCheck(false); httpConfiguration.addCustomizer(src); HttpConnectionFactory httpConnectionFactory = new HttpConnectionFactory(httpConfiguration); ServerConnector connector; if (builder.endpoint != null) { connector = new ServerConnector(server, httpConnectionFactory); connector.setHost(builder.endpoint.getHost()); connector.setPort(builder.endpoint.getPort()); server.addConnector(connector); listenHTTP = true; } else { listenHTTP = false; } if (builder.secureEndpoint != null) { SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); if (builder.sslContext != null) { 
sslContextFactory.setSslContext(builder.sslContext); } else { sslContextFactory.setKeyStorePath(builder.keyStorePath); sslContextFactory.setKeyStorePassword(builder.keyStorePassword); } connector = new ServerConnector(server, sslContextFactory, httpConnectionFactory); connector.setHost(builder.secureEndpoint.getHost()); connector.setPort(builder.secureEndpoint.getPort()); server.addConnector(connector); listenHTTPS = true; } else { listenHTTPS = false; } if (builder.metricsEnabled) { this.metrics = new S3ProxyMetrics( builder.metricsHost, builder.metricsPort); } else { this.metrics = null; } handler = new S3ProxyHandlerJetty(builder.blobStore, builder.authenticationType, builder.identity, builder.credential, builder.virtualHost, builder.maxSinglePartObjectSize, builder.v4MaxNonChunkedRequestSize, builder.v4MaxChunkSize, builder.ignoreUnknownHeaders, builder.corsRules, builder.servicePath, builder.maximumTimeSkew, metrics); var context = new ServletContextHandler(); if (builder.servicePath != null && !builder.servicePath.isEmpty()) { context.setContextPath(builder.servicePath); } if (metrics != null) { context.addServlet(new ServletHolder( new MetricsHandler(metrics)), "/metrics"); } context.addServlet(new ServletHolder(handler), "/*"); server.setHandler(context); } public static final class Builder { private BlobStore blobStore; private URI endpoint; private URI secureEndpoint; private String servicePath; private AuthenticationType authenticationType = AuthenticationType.NONE; private String identity; private String credential; private SSLContext sslContext; private String keyStorePath; private String keyStorePassword; private String virtualHost; private long maxSinglePartObjectSize = 5L * 1024 * 1024 * 1024; private long v4MaxNonChunkedRequestSize = 128 * 1024 * 1024; private int v4MaxChunkSize = 16 * 1024 * 1024; private boolean ignoreUnknownHeaders; private CrossOriginResourceSharing corsRules; private int jettyMaxThreads = 200; // sourced from 
QueuedThreadPool() private int maximumTimeSkew = 15 * 60; private boolean metricsEnabled; private int metricsPort = S3ProxyMetrics.DEFAULT_METRICS_PORT; private String metricsHost = S3ProxyMetrics.DEFAULT_METRICS_HOST; Builder() { } public S3Proxy build() { return new S3Proxy(this); } public static Builder fromProperties(Properties properties) throws URISyntaxException { var builder = new Builder(); String endpoint = properties.getProperty( S3ProxyConstants.PROPERTY_ENDPOINT); String secureEndpoint = properties.getProperty( S3ProxyConstants.PROPERTY_SECURE_ENDPOINT); boolean hasEndpoint = !Strings.isNullOrEmpty(endpoint); boolean hasSecureEndpoint = !Strings.isNullOrEmpty(secureEndpoint); if (!hasEndpoint && !hasSecureEndpoint) { throw new IllegalArgumentException( "Properties file must contain: " + S3ProxyConstants.PROPERTY_ENDPOINT + " or " + S3ProxyConstants.PROPERTY_SECURE_ENDPOINT); } if (hasEndpoint) { builder.endpoint(new URI(endpoint)); } if (hasSecureEndpoint) { builder.secureEndpoint(new URI(secureEndpoint)); } String authorizationString = properties.getProperty( S3ProxyConstants.PROPERTY_AUTHORIZATION); if (authorizationString == null) { throw new IllegalArgumentException( "Properties file must contain: " + S3ProxyConstants.PROPERTY_AUTHORIZATION); } AuthenticationType authorization = AuthenticationType.fromString(authorizationString); String localIdentity = null; String localCredential = null; switch (authorization) { case AWS_V2: case AWS_V4: case AWS_V2_OR_V4: localIdentity = properties.getProperty( S3ProxyConstants.PROPERTY_IDENTITY); localCredential = properties.getProperty( S3ProxyConstants.PROPERTY_CREDENTIAL); if (localIdentity == null || localCredential == null) { throw new IllegalArgumentException("Must specify both " + S3ProxyConstants.PROPERTY_IDENTITY + " and " + S3ProxyConstants.PROPERTY_CREDENTIAL + " when using authentication"); } break; case NONE: break; default: throw new IllegalArgumentException( S3ProxyConstants.PROPERTY_AUTHORIZATION 
+ " invalid value, was: " + authorization); } if (localIdentity != null || localCredential != null) { builder.awsAuthentication(authorization, localIdentity, localCredential); } String servicePath = Strings.nullToEmpty(properties.getProperty( S3ProxyConstants.PROPERTY_SERVICE_PATH)); if (servicePath != null) { builder.servicePath(servicePath); } String keyStorePath = properties.getProperty( S3ProxyConstants.PROPERTY_KEYSTORE_PATH); String keyStorePassword = properties.getProperty( S3ProxyConstants.PROPERTY_KEYSTORE_PASSWORD); if (keyStorePath != null || keyStorePassword != null) { builder.keyStore(keyStorePath, keyStorePassword); } String virtualHost = properties.getProperty( S3ProxyConstants.PROPERTY_VIRTUAL_HOST); if (!Strings.isNullOrEmpty(virtualHost)) { builder.virtualHost(virtualHost); } String maxSinglePartObjectSize = properties.getProperty( S3ProxyConstants.PROPERTY_MAX_SINGLE_PART_OBJECT_SIZE); if (maxSinglePartObjectSize != null) { builder.maxSinglePartObjectSize(Long.parseLong( maxSinglePartObjectSize)); } String v4MaxNonChunkedRequestSize = properties.getProperty( S3ProxyConstants.PROPERTY_V4_MAX_NON_CHUNKED_REQUEST_SIZE); if (v4MaxNonChunkedRequestSize != null) { builder.v4MaxNonChunkedRequestSize(Long.parseLong( v4MaxNonChunkedRequestSize)); } String v4MaxChunkSize = properties.getProperty( S3ProxyConstants.PROPERTY_V4_MAX_CHUNK_SIZE); if (v4MaxChunkSize != null) { builder.v4MaxChunkSize(Integer.parseInt(v4MaxChunkSize)); } String ignoreUnknownHeaders = properties.getProperty( S3ProxyConstants.PROPERTY_IGNORE_UNKNOWN_HEADERS); if (!Strings.isNullOrEmpty(ignoreUnknownHeaders)) { builder.ignoreUnknownHeaders(Boolean.parseBoolean( ignoreUnknownHeaders)); } String corsAllowAll = properties.getProperty( S3ProxyConstants.PROPERTY_CORS_ALLOW_ALL); if (!Strings.isNullOrEmpty(corsAllowAll) && Boolean.parseBoolean( corsAllowAll)) { builder.corsRules(new CrossOriginResourceSharing()); } else { String corsAllowOrigins = properties.getProperty( 
S3ProxyConstants.PROPERTY_CORS_ALLOW_ORIGINS, ""); String corsAllowMethods = properties.getProperty( S3ProxyConstants.PROPERTY_CORS_ALLOW_METHODS, ""); String corsAllowHeaders = properties.getProperty( S3ProxyConstants.PROPERTY_CORS_ALLOW_HEADERS, ""); String corsExposedHeaders = properties.getProperty( S3ProxyConstants.PROPERTY_CORS_EXPOSED_HEADERS, ""); String allowCredentials = properties.getProperty( S3ProxyConstants.PROPERTY_CORS_ALLOW_CREDENTIAL, ""); Splitter splitter = Splitter.on(" ").trimResults() .omitEmptyStrings(); //Validate configured methods Collection allowedMethods = Lists.newArrayList( splitter.split(corsAllowMethods)); allowedMethods.removeAll( CrossOriginResourceSharing.SUPPORTED_METHODS); if (!allowedMethods.isEmpty()) { throw new IllegalArgumentException( S3ProxyConstants.PROPERTY_CORS_ALLOW_METHODS + " contains not supported values: " + Joiner.on(" ") .join(allowedMethods)); } builder.corsRules(new CrossOriginResourceSharing( splitter.splitToList(corsAllowOrigins), splitter.splitToList(corsAllowMethods), splitter.splitToList(corsAllowHeaders), splitter.splitToList(corsExposedHeaders), allowCredentials)); } String jettyMaxThreads = properties.getProperty( S3ProxyConstants.PROPERTY_JETTY_MAX_THREADS); if (jettyMaxThreads != null) { builder.jettyMaxThreads(Integer.parseInt(jettyMaxThreads)); } String maximumTimeSkew = properties.getProperty( S3ProxyConstants.PROPERTY_MAXIMUM_TIME_SKEW); if (maximumTimeSkew != null && !maximumTimeSkew.isBlank()) { builder.maximumTimeSkew(Integer.parseInt(maximumTimeSkew)); } String metricsEnabled = properties.getProperty( S3ProxyConstants.PROPERTY_METRICS_ENABLED); if (!Strings.isNullOrEmpty(metricsEnabled)) { builder.metricsEnabled(Boolean.parseBoolean(metricsEnabled)); } String metricsPort = properties.getProperty( S3ProxyConstants.PROPERTY_METRICS_PORT); if (!Strings.isNullOrEmpty(metricsPort)) { builder.metricsPort(Integer.parseInt(metricsPort)); } String metricsHost = properties.getProperty( 
S3ProxyConstants.PROPERTY_METRICS_HOST); if (!Strings.isNullOrEmpty(metricsHost)) { builder.metricsHost(metricsHost); } return builder; } public Builder blobStore(BlobStore blobStore) { this.blobStore = requireNonNull(blobStore); return this; } public Builder endpoint(URI endpoint) { this.endpoint = requireNonNull(endpoint); return this; } public Builder secureEndpoint(URI secureEndpoint) { this.secureEndpoint = requireNonNull(secureEndpoint); return this; } public Builder awsAuthentication(AuthenticationType authenticationType, String identity, String credential) { this.authenticationType = authenticationType; if (!AuthenticationType.NONE.equals(authenticationType)) { this.identity = requireNonNull(identity); this.credential = requireNonNull(credential); } return this; } public Builder sslContext(SSLContext sslContext) { this.sslContext = requireNonNull(sslContext); this.keyStorePath = null; this.keyStorePassword = null; return this; } public Builder keyStore(String keyStorePath, String keyStorePassword) { this.keyStorePath = requireNonNull(keyStorePath); this.keyStorePassword = requireNonNull(keyStorePassword); this.sslContext = null; return this; } public Builder virtualHost(String virtualHost) { this.virtualHost = requireNonNull(virtualHost); return this; } public Builder maxSinglePartObjectSize(long maxSinglePartObjectSize) { if (maxSinglePartObjectSize <= 0) { throw new IllegalArgumentException( "must be greater than zero, was: " + maxSinglePartObjectSize); } this.maxSinglePartObjectSize = maxSinglePartObjectSize; return this; } public Builder v4MaxNonChunkedRequestSize( long v4MaxNonChunkedRequestSize) { if (v4MaxNonChunkedRequestSize <= 0) { throw new IllegalArgumentException( "must be greater than zero, was: " + v4MaxNonChunkedRequestSize); } this.v4MaxNonChunkedRequestSize = v4MaxNonChunkedRequestSize; return this; } public Builder v4MaxChunkSize(int v4MaxChunkSize) { if (v4MaxChunkSize <= 0) { throw new IllegalArgumentException( "must be greater than 
zero, was: " + v4MaxChunkSize); } this.v4MaxChunkSize = v4MaxChunkSize; return this; } public Builder ignoreUnknownHeaders(boolean ignoreUnknownHeaders) { this.ignoreUnknownHeaders = ignoreUnknownHeaders; return this; } public Builder corsRules(CrossOriginResourceSharing corsRules) { this.corsRules = corsRules; return this; } public Builder jettyMaxThreads(int jettyMaxThreads) { this.jettyMaxThreads = jettyMaxThreads; return this; } public Builder maximumTimeSkew(int maximumTimeSkew) { this.maximumTimeSkew = maximumTimeSkew; return this; } public Builder metricsEnabled(boolean metricsEnabled) { this.metricsEnabled = metricsEnabled; return this; } public Builder metricsPort(int metricsPort) { this.metricsPort = metricsPort; return this; } public Builder metricsHost(String metricsHost) { this.metricsHost = requireNonNull(metricsHost); return this; } public Builder servicePath(String s3ProxyServicePath) { String path = Strings.nullToEmpty(s3ProxyServicePath); if (!path.isEmpty()) { if (!path.startsWith("/")) { path = "/" + path; } } this.servicePath = path; return this; } public URI getEndpoint() { return endpoint; } public URI getSecureEndpoint() { return secureEndpoint; } public String getServicePath() { return servicePath; } public String getIdentity() { return identity; } public String getCredential() { return credential; } @Override public boolean equals(Object object) { if (this == object) { return true; } else if (!(object instanceof S3Proxy.Builder)) { return false; } S3Proxy.Builder that = (S3Proxy.Builder) object; // do not check credentials or storage backend fields return Objects.equals(this.endpoint, that.endpoint) && Objects.equals(this.secureEndpoint, that.secureEndpoint) && Objects.equals(this.sslContext, that.sslContext) && Objects.equals(this.keyStorePath, that.keyStorePath) && Objects.equals(this.keyStorePassword, that.keyStorePassword) && Objects.equals(this.virtualHost, that.virtualHost) && Objects.equals(this.servicePath, that.servicePath) && 
this.maxSinglePartObjectSize == that.maxSinglePartObjectSize && this.v4MaxNonChunkedRequestSize == that.v4MaxNonChunkedRequestSize && this.v4MaxChunkSize == that.v4MaxChunkSize && this.ignoreUnknownHeaders == that.ignoreUnknownHeaders && this.corsRules.equals(that.corsRules); } @Override public int hashCode() { return Objects.hash(endpoint, secureEndpoint, sslContext, keyStorePath, keyStorePassword, virtualHost, servicePath, maxSinglePartObjectSize, v4MaxNonChunkedRequestSize, v4MaxChunkSize, ignoreUnknownHeaders, corsRules); } } public static Builder builder() { return new Builder(); } public void start() throws Exception { server.start(); } public void stop() throws Exception { server.stop(); if (metrics != null) { metrics.close(); } } public int getPort() { if (listenHTTP) { return ((ServerConnector) server.getConnectors()[0]).getLocalPort(); } else { return -1; } } public int getSecurePort() { if (listenHTTPS) { ServerConnector connector; if (listenHTTP) { connector = (ServerConnector) server.getConnectors()[1]; } else { connector = (ServerConnector) server.getConnectors()[0]; } return connector.getLocalPort(); } return -1; } public String getState() { return server.getState(); } public void setBlobStoreLocator(BlobStoreLocator lookup) { handler.getHandler().setBlobStoreLocator(lookup); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3ProxyConstants.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

/** Property names which configure S3Proxy and its optional middlewares. */
public final class S3ProxyConstants {
    public static final String PROPERTY_ENDPOINT =
            "s3proxy.endpoint";
    public static final String PROPERTY_SECURE_ENDPOINT =
            "s3proxy.secure-endpoint";
    public static final String PROPERTY_AUTHORIZATION =
            "s3proxy.authorization";
    public static final String PROPERTY_IDENTITY =
            "s3proxy.identity";
    /**
     * Path to prepend to all requests, e.g.,
     * https://endpoint/service-path/object.
     */
    public static final String PROPERTY_SERVICE_PATH =
            "s3proxy.service-path";
    /** When true, include "Access-Control-Allow-Origin: *" in all responses. */
    public static final String PROPERTY_CORS_ALLOW_ALL =
            "s3proxy.cors-allow-all";
    public static final String PROPERTY_CORS_ALLOW_ORIGINS =
            "s3proxy.cors-allow-origins";
    public static final String PROPERTY_CORS_ALLOW_METHODS =
            "s3proxy.cors-allow-methods";
    public static final String PROPERTY_CORS_ALLOW_HEADERS =
            "s3proxy.cors-allow-headers";
    public static final String PROPERTY_CORS_EXPOSED_HEADERS =
            "s3proxy.cors-exposed-headers";
    public static final String PROPERTY_CORS_ALLOW_CREDENTIAL =
            "s3proxy.cors-allow-credential";
    public static final String PROPERTY_CREDENTIAL =
            "s3proxy.credential";
    public static final String PROPERTY_IGNORE_UNKNOWN_HEADERS =
            "s3proxy.ignore-unknown-headers";
    public static final String PROPERTY_KEYSTORE_PATH =
            "s3proxy.keystore-path";
    public static final String PROPERTY_KEYSTORE_PASSWORD =
            "s3proxy.keystore-password";
    public static final String PROPERTY_JETTY_MAX_THREADS =
            "s3proxy.jetty.max-threads";

    /** Request attributes. */
    public static final String ATTRIBUTE_QUERY_ENCODING =
            "queryEncoding";

    /**
     * Configure servicing of virtual host buckets. Setting to localhost:8080
     * allows bucket-in-hostname requests, e.g., bucketname.localhost:8080.
     * This mode requires configuring DNS to forward all requests to the
     * S3Proxy host.
     */
    public static final String PROPERTY_VIRTUAL_HOST =
            "s3proxy.virtual-host";
    public static final String PROPERTY_MAX_SINGLE_PART_OBJECT_SIZE =
            "s3proxy.max-single-part-object-size";
    public static final String PROPERTY_V4_MAX_NON_CHUNKED_REQUEST_SIZE =
            "s3proxy.v4-max-non-chunked-request-size";
    /** Maximum size of a single chunk in an aws-chunked transfer encoding. */
    public static final String PROPERTY_V4_MAX_CHUNK_SIZE =
            "s3proxy.v4-max-chunk-size";
    /** Used to locate blobstores by specified bucket names. Each property
     * file should contain a list of buckets associated with it, e.g.
     * s3proxy.bucket-locator.1 = data
     * s3proxy.bucket-locator.2 = metadata
     * s3proxy.bucket-locator.3 = other
     * When a request is made for the specified bucket, the backend defined
     * in that properties file is used. This allows using the same
     * credentials in multiple properties file and select the backend based
     * on the bucket names.
     */
    public static final String PROPERTY_BUCKET_LOCATOR =
            "s3proxy.bucket-locator";
    /** When true, model eventual consistency using two storage backends. */
    public static final String PROPERTY_EVENTUAL_CONSISTENCY =
            "s3proxy.eventual-consistency";
    /**
     * Minimum delay, in seconds, when propagating modifications from the
     * write backend to the read backend.
     */
    public static final String PROPERTY_EVENTUAL_CONSISTENCY_DELAY =
            "s3proxy.eventual-consistency.delay";
    /** Probability of eventual consistency, between 0.0 and 1.0. */
    public static final String PROPERTY_EVENTUAL_CONSISTENCY_PROBABILITY =
            "s3proxy.eventual-consistency.probability";
    /** Alias a backend bucket to an alternate name. */
    public static final String PROPERTY_ALIAS_BLOBSTORE =
            "s3proxy.alias-blobstore";
    /** Scope bucket operations to a specific object prefix. */
    public static final String PROPERTY_PREFIX_BLOBSTORE =
            "s3proxy.prefix-blobstore";
    /** Rewrite object keys via regular expressions. */
    public static final String PROPERTY_REGEX_BLOBSTORE =
            "s3proxy.regex-blobstore";
    public static final String PROPERTY_REGEX_BLOBSTORE_MATCH =
            "match";
    public static final String PROPERTY_REGEX_BLOBSTORE_REPLACE =
            "replace";
    /** Discard object data. */
    public static final String PROPERTY_NULL_BLOBSTORE =
            "s3proxy.null-blobstore";
    /** Prevent mutations. */
    public static final String PROPERTY_READ_ONLY_BLOBSTORE =
            "s3proxy.read-only-blobstore";
    /** Shard objects across a specified number of buckets. */
    public static final String PROPERTY_SHARDED_BLOBSTORE =
            "s3proxy.sharded-blobstore";
    /** Override tier when creating blobs. */
    public static final String PROPERTY_STORAGE_CLASS_BLOBSTORE =
            "s3proxy.storage-class-blobstore";
    /** Maximum time skew allowed in signed requests. */
    public static final String PROPERTY_MAXIMUM_TIME_SKEW =
            "s3proxy.maximum-timeskew";
    public static final String PROPERTY_ENCRYPTED_BLOBSTORE =
            "s3proxy.encrypted-blobstore";
    public static final String PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD =
            "s3proxy.encrypted-blobstore-password";
    public static final String PROPERTY_ENCRYPTED_BLOBSTORE_SALT =
            "s3proxy.encrypted-blobstore-salt";
    public static final String PROPERTY_USER_METADATA_REPLACER =
            "s3proxy.user-metadata-replacer-blobstore";
    public static final String PROPERTY_USER_METADATA_REPLACER_FROM_CHARS =
            "s3proxy.user-metadata-replacer-blobstore.from-chars";
    public static final String PROPERTY_USER_METADATA_REPLACER_TO_CHARS =
            "s3proxy.user-metadata-replacer-blobstore.to-chars";
    public static final String PROPERTY_LATENCY =
            "s3proxy.latency-blobstore";
    public static final String PROPERTY_NO_CACHE_BLOBSTORE =
            "s3proxy.no-cache-blobstore";
    /** Enable Prometheus metrics endpoint at /metrics. */
    public static final String PROPERTY_METRICS_ENABLED =
            "s3proxy.metrics.enabled";
    public static final String PROPERTY_METRICS_PORT =
            "s3proxy.metrics.port";
    public static final String PROPERTY_METRICS_HOST =
            "s3proxy.metrics.host";

    // Prefix for an alternate set of jclouds properties, e.g., alt.jclouds.*.
    static final String PROPERTY_ALT_JCLOUDS_PREFIX = "alt.";

    private S3ProxyConstants() {
        throw new AssertionError("Cannot instantiate utility constructor");
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/S3ProxyHandler.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import java.io.ByteArrayInputStream; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PrintWriter; import java.io.PushbackInputStream; import java.io.Writer; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; import java.security.InvalidKeyException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.SortedMap; import java.util.TimeZone; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; import javax.xml.stream.XMLOutputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamWriter; import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.xml.XmlMapper; import com.google.common.base.CharMatcher; import com.google.common.base.Splitter; import com.google.common.base.Strings; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.collect.Streams; import com.google.common.escape.Escaper; import com.google.common.hash.HashCode; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import 
com.google.common.hash.HashingInputStream; import com.google.common.io.BaseEncoding; import com.google.common.io.ByteSource; import com.google.common.io.ByteStreams; import com.google.common.net.HostAndPort; import com.google.common.net.HttpHeaders; import com.google.common.net.PercentEscaper; import jakarta.servlet.http.HttpServletRequest; import jakarta.servlet.http.HttpServletResponse; import org.eclipse.jetty.http.MultiPartFormData; import org.eclipse.jetty.io.content.InputStreamContentSource; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobBuilder; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.domain.Location; import org.jclouds.io.ContentMetadata; import org.jclouds.io.ContentMetadataBuilder; import org.jclouds.io.Payload; import org.jclouds.io.Payloads; import org.jclouds.rest.AuthorizationException; import org.jclouds.s3.domain.ObjectMetadata.StorageClass; import org.jspecify.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** HTTP server-independent handler for S3 requests. 
 */
public class S3ProxyHandler {
    private static final Logger logger = LoggerFactory.getLogger(
            S3ProxyHandler.class);

    /** Mutable per-request state: which S3 operation ran and on which
     * bucket.  Populated by doHandle/doHandleAnonymous for callers that
     * pass a context in. */
    public static final class RequestContext {
        private S3Operation operation;
        private String bucket;

        public S3Operation getOperation() {
            return operation;
        }

        public void setOperation(S3Operation operation) {
            this.operation = operation;
        }

        public String getBucket() {
            return bucket;
        }

        public void setBucket(String bucket) {
            this.bucket = bucket;
        }
    }

    // Default namespace emitted on all S3 XML responses.
    private static final String AWS_XMLNS =
            "http://s3.amazonaws.com/doc/2006-03-01/";
    // TODO: support configurable metadata prefix
    private static final String USER_METADATA_PREFIX = "x-amz-meta-";
    // TODO: fake owner
    private static final String FAKE_OWNER_ID =
            "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a";
    private static final String FAKE_OWNER_DISPLAY_NAME =
            "CustomersName@amazon.com";
    private static final String FAKE_INITIATOR_ID =
            "arn:aws:iam::111122223333:" +
            "user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx";
    private static final String FAKE_INITIATOR_DISPLAY_NAME =
            "umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx";
    // Bucket names must start with an ASCII letter or digit...
    private static final CharMatcher VALID_BUCKET_FIRST_CHAR =
            CharMatcher.inRange('a', 'z')
            .or(CharMatcher.inRange('A', 'Z'))
            .or(CharMatcher.inRange('0', '9'));
    // ...and may additionally contain '.', '_', and '-' afterwards.
    private static final CharMatcher VALID_BUCKET = VALID_BUCKET_FIRST_CHAR
            .or(CharMatcher.is('.'))
            .or(CharMatcher.is('_'))
            .or(CharMatcher.is('-'));
    // 5 GB
    private static final long MAX_MULTIPART_COPY_SIZE =
            5L * 1024L * 1024L * 1024L;
    // NOTE(review): generic type parameters (e.g. Set<String>) appear to
    // have been stripped from several declarations in this extract --
    // confirm against the upstream source before compiling.
    // Query parameters for which S3Proxy emits NotImplemented.
    private static final Set UNSUPPORTED_PARAMETERS = Set.of(
            "accelerate",
            "analytics",
            "cors",
            "inventory",
            "lifecycle",
            "logging",
            "metrics",
            "notification",
            "replication",
            "requestPayment",
            "restore",
            "tagging",
            "torrent",
            "versioning",
            "versions",
            "website"
    );
    /** All supported x-amz- headers, except for x-amz-meta- user metadata.
 */
    private static final Set SUPPORTED_X_AMZ_HEADERS = Set.of(
            AwsHttpHeaders.ACL,
            AwsHttpHeaders.API_VERSION,
            AwsHttpHeaders.CHECKSUM_ALGORITHM,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_CRC32,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_CRC32C,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_CRC64NVME,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_MODE,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_SHA1,  // TODO: ignoring header
            AwsHttpHeaders.CHECKSUM_SHA256,  // TODO: ignoring header
            AwsHttpHeaders.CONTENT_SHA256,
            AwsHttpHeaders.COPY_SOURCE,
            AwsHttpHeaders.COPY_SOURCE_IF_MATCH,
            AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE,
            AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH,
            AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE,
            AwsHttpHeaders.COPY_SOURCE_RANGE,
            AwsHttpHeaders.DATE,
            AwsHttpHeaders.DECODED_CONTENT_LENGTH,
            AwsHttpHeaders.METADATA_DIRECTIVE,
            AwsHttpHeaders.SDK_CHECKSUM_ALGORITHM,  // TODO: ignoring header
            AwsHttpHeaders.STORAGE_CLASS,
            AwsHttpHeaders.TRAILER,
            AwsHttpHeaders.TRANSFER_ENCODING,  // TODO: ignoring header
            AwsHttpHeaders.USER_AGENT
    );
    // Recognized values of the x-amz-acl header.
    private static final Set CANNED_ACLS = Set.of(
            "private",
            "public-read",
            "public-read-write",
            "authenticated-read",
            "bucket-owner-read",
            "bucket-owner-full-control",
            "log-delivery-write"
    );
    private static final String XML_CONTENT_TYPE = "application/xml";
    private static final String UTF_8 = "UTF-8";
    /** URLEncoder escapes / which we do not want.
 */
    private static final Escaper urlEscaper = new PercentEscaper(
            "*-./_", /*plusForSpace=*/ false);
    @SuppressWarnings("deprecation")
    private static final HashFunction MD5 = Hashing.md5();
    private static final ObjectMapper JSON_MAPPER = new ObjectMapper();
    private static final Instant LAUNCH_TIME = Instant.now();
    private static final String GIT_HASH = loadGitHash();

    private final boolean anonymousIdentity;
    private final AuthenticationType authenticationType;
    private final Optional virtualHost;
    private final long maxSinglePartObjectSize;
    private final long v4MaxNonChunkedRequestSize;
    private final int v4MaxChunkSize;
    private final boolean ignoreUnknownHeaders;
    private final CrossOriginResourceSharing corsRules;
    private final String servicePath;
    private final int maximumTimeSkew;
    private final XmlMapper mapper = new XmlMapper();
    private final XMLOutputFactory xmlOutputFactory =
            XMLOutputFactory.newInstance();
    private BlobStoreLocator blobStoreLocator;
    // TODO: hack to allow per-request anonymous access
    private final BlobStore defaultBlobStore;
    /**
     * S3 supports arbitrary keys for the marker while some blobstores only
     * support opaque markers.  Emulate the common case for these by mapping
     * the last key from a listing to the corresponding previously returned
     * marker.
 */
    private final Cache, String> lastKeyToMarker =
            CacheBuilder.newBuilder()
            .maximumSize(10000)
            .expireAfterWrite(10, TimeUnit.MINUTES)
            .build();

    /**
     * Create a handler backed by a single blobstore.
     *
     * When authenticationType is not NONE, a locator is installed that only
     * resolves the given static identity/credential pair; otherwise every
     * request resolves to the same anonymous blobstore entry.  A null
     * corsRules falls back to a default CrossOriginResourceSharing.
     */
    public S3ProxyHandler(final BlobStore blobStore,
            AuthenticationType authenticationType, final String identity,
            final String credential, @Nullable String virtualHost,
            long maxSinglePartObjectSize, long v4MaxNonChunkedRequestSize,
            int v4MaxChunkSize, boolean ignoreUnknownHeaders,
            @Nullable CrossOriginResourceSharing corsRules,
            final String servicePath, int maximumTimeSkew) {
        if (corsRules != null) {
            this.corsRules = corsRules;
        } else {
            this.corsRules = new CrossOriginResourceSharing();
        }
        if (authenticationType != AuthenticationType.NONE) {
            anonymousIdentity = false;
            blobStoreLocator = new BlobStoreLocator() {
                @Override
                public Map.@Nullable Entry locateBlobStore(
                        String identityArg, String container, String blob) {
                    // Reject unknown identities; the caller maps null to
                    // InvalidAccessKeyId.
                    if (!identity.equals(identityArg)) {
                        return null;
                    }
                    return Map.entry(credential, blobStore);
                }
            };
        } else {
            anonymousIdentity = true;
            // Credential is null for anonymous access, hence
            // Maps.immutableEntry rather than Map.entry.
            final Map.Entry anonymousBlobStore =
                    Maps.immutableEntry(null, blobStore);
            blobStoreLocator = new BlobStoreLocator() {
                @Override
                public Map.Entry locateBlobStore(
                        String identityArg, String container, String blob) {
                    return anonymousBlobStore;
                }
            };
        }
        this.authenticationType = authenticationType;
        this.virtualHost = Optional.ofNullable(virtualHost);
        this.maxSinglePartObjectSize = maxSinglePartObjectSize;
        this.v4MaxNonChunkedRequestSize = v4MaxNonChunkedRequestSize;
        this.v4MaxChunkSize = v4MaxChunkSize;
        this.ignoreUnknownHeaders = ignoreUnknownHeaders;
        this.defaultBlobStore = blobStore;
        xmlOutputFactory.setProperty("javax.xml.stream.isRepairingNamespaces",
                false);
        this.servicePath = Strings.nullToEmpty(servicePath);
        this.maximumTimeSkew = maximumTimeSkew;
    }

    /** Return the jclouds provider id backing the given blobstore. */
    private static String getBlobStoreType(BlobStore blobStore) {
        return blobStore.getContext().unwrap().getProviderMetadata().getId();
    }

    // Enforces S3 bucket naming rules: length 3-255, no leading/trailing
    // '.', not an IP address, and only characters from VALID_BUCKET.
    private static boolean isValidContainer(String containerName) {
        if (containerName == null ||
                containerName.length() < 3 ||
                containerName.length() > 255 ||
                containerName.startsWith(".") ||
                containerName.endsWith(".") ||
                validateIpAddress(containerName) ||
                !VALID_BUCKET_FIRST_CHAR.matches(containerName.charAt(0)) ||
                !VALID_BUCKET.matchesAllOf(containerName)) {
            return false;
        }
        return true;
    }

    /**
     * Entry point for every S3 request: answers /healthz, strips the
     * service path, rewrites virtual-host-style URIs into path-style,
     * authenticates the request (v2/v4, Authorization header or presigned
     * query parameters), verifies the payload signature, then dispatches
     * on HTTP method and path to the appropriate handleXxx method.
     */
    public final void doHandle(HttpServletRequest baseRequest,
            HttpServletRequest request, HttpServletResponse response,
            InputStream is, @Nullable RequestContext ctx)
            throws IOException, S3Exception {
        String method = request.getMethod();
        String uri = request.getRequestURI();
        // Preserved for v4 signing, which uses the unstripped URI.
        String originalUri = request.getRequestURI();

        // Unauthenticated health check, honoring the service path.
        String healthzUri = servicePath.isEmpty() ?
                "/healthz" : servicePath + "/healthz";
        if (healthzUri.equals(uri) && "GET".equalsIgnoreCase(method)) {
            handleStatuszRequest(response);
            return;
        }

        if (!this.servicePath.isEmpty()) {
            if (uri.length() > this.servicePath.length()) {
                uri = uri.substring(this.servicePath.length());
            }
        }

        logger.debug("request: {}", request);
        // Virtual host-style request: move the bucket from the Host header
        // into the URI path.
        String hostHeader = request.getHeader(HttpHeaders.HOST);
        if (hostHeader != null && virtualHost.isPresent()) {
            hostHeader = HostAndPort.fromString(hostHeader).getHost();
            String virtualHostSuffix = "." + virtualHost.orElseThrow();
            if (!hostHeader.equals(virtualHost.orElseThrow())) {
                if (hostHeader.endsWith(virtualHostSuffix)) {
                    String bucket = hostHeader.substring(0,
                            hostHeader.length() - virtualHostSuffix.length());
                    uri = "/" + bucket + uri;
                } else {
                    String bucket = hostHeader.toLowerCase();
                    uri = "/" + bucket + uri;
                }
            }
        }

        response.addHeader(AwsHttpHeaders.REQUEST_ID, generateRequestId());

        boolean hasDateHeader = false;
        boolean hasXAmzDateHeader = false;
        for (String headerName : Collections.list(request.getHeaderNames())) {
            for (String headerValue : Collections.list(request.getHeaders(
                    headerName))) {
                logger.trace("header: {}: {}", headerName,
                        Strings.nullToEmpty(headerValue));
            }
            if (headerName.equalsIgnoreCase(HttpHeaders.DATE)) {
                hasDateHeader = true;
            } else if (headerName.equalsIgnoreCase(AwsHttpHeaders.DATE)) {
                if (!Strings.isNullOrEmpty(request.getHeader(
                        AwsHttpHeaders.DATE))) {
                    hasXAmzDateHeader = true;
                }
            }
        }
        boolean haveBothDateHeader = false;
        if (hasDateHeader && hasXAmzDateHeader) {
            haveBothDateHeader = true;
        }

        // when access information is not provided in request header,
        // treat it as anonymous, return all public accessible information
        if (!anonymousIdentity &&
                (method.equals("GET") || method.equals("HEAD") ||
                method.equals("POST") || method.equals("OPTIONS")) &&
                request.getHeader(HttpHeaders.AUTHORIZATION) == null &&
                // v2 or /v4
                request.getParameter("X-Amz-Algorithm") == null &&  // v4 query
                request.getParameter("AWSAccessKeyId") == null &&  // v2 query
                defaultBlobStore != null) {
            doHandleAnonymous(request, response, is, uri, defaultBlobStore,
                    ctx);
            return;
        }

        // should according the AWSAccessKeyId= Signature or auth header nil
        if (!anonymousIdentity && !hasDateHeader && !hasXAmzDateHeader &&
                request.getParameter("X-Amz-Date") == null &&
                request.getParameter("Expires") == null) {
            throw new S3Exception(S3ErrorCode.ACCESS_DENIED,
                    "AWS authentication requires a valid Date or" +
                    " x-amz-date header");
        }

        BlobStore blobStore;
        String requestIdentity = null;
        String headerAuthorization = request.getHeader(
                HttpHeaders.AUTHORIZATION);
        S3AuthorizationHeader authHeader = null;
        boolean presignedUrl = false;

        if (!anonymousIdentity) {
            if (Strings.isNullOrEmpty(headerAuthorization)) {
                // No Authorization header: reconstruct one from presigned
                // query parameters so the same parsing path applies.
                String algorithm = request.getParameter("X-Amz-Algorithm");
                if (algorithm == null) {  //v2 query
                    String identity = request.getParameter("AWSAccessKeyId");
                    String signature = request.getParameter("Signature");
                    if (identity == null || signature == null) {
                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                    }
                    headerAuthorization = "AWS " + identity + ":" + signature;
                    presignedUrl = true;
                } else if (algorithm.equals("AWS4-HMAC-SHA256")) {  //v4 query
                    String credential = request.getParameter(
                            "X-Amz-Credential");
                    String signedHeaders = request.getParameter(
                            "X-Amz-SignedHeaders");
                    String signature = request.getParameter(
                            "X-Amz-Signature");
                    if (credential == null || signedHeaders == null ||
                            signature == null) {
                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                    }
                    headerAuthorization = "AWS4-HMAC-SHA256" +
                            " Credential=" + credential +
                            ", requestSignedHeaders=" + signedHeaders +
                            ", Signature=" + signature;
                    presignedUrl = true;
                } else {
                    throw new IllegalArgumentException("unknown algorithm: " +
                            algorithm);
                }
            }

            try {
                authHeader = new S3AuthorizationHeader(headerAuthorization);
                //whether v2 or v4 (normal header and query)
            } catch (IllegalArgumentException iae) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, iae);
            }
            requestIdentity = authHeader.getIdentity();
        }

        long dateSkew = 0;  //date for timeskew check

        //v2 GET /s3proxy-1080747708/foo?AWSAccessKeyId=local-identity&Expires=
        //1510322602&Signature=UTyfHY1b1Wgr5BFEn9dpPlWdtFE%3D)
        //have no date
        if (!anonymousIdentity) {
            boolean haveDate = true;

            AuthenticationType finalAuthType = null;
            if (authHeader.getAuthenticationType() ==
                    AuthenticationType.AWS_V2 &&
                    (authenticationType == AuthenticationType.AWS_V2 ||
                    authenticationType == AuthenticationType.AWS_V2_OR_V4)) {
                finalAuthType = AuthenticationType.AWS_V2;
            } else if (
                    authHeader.getAuthenticationType() ==
                    AuthenticationType.AWS_V4 &&
                    (authenticationType == AuthenticationType.AWS_V4 ||
                    authenticationType == AuthenticationType.AWS_V2_OR_V4)) {
                finalAuthType = AuthenticationType.AWS_V4;
            } else if (authenticationType != AuthenticationType.NONE) {
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            }

            if (hasXAmzDateHeader) {  //format diff between v2 and v4
                if (finalAuthType == AuthenticationType.AWS_V2) {
                    dateSkew = request.getDateHeader(AwsHttpHeaders.DATE);
                    dateSkew /= 1000;
                    //case sensitive?
                } else if (finalAuthType == AuthenticationType.AWS_V4) {
                    dateSkew = parseIso8601(request.getHeader(
                            AwsHttpHeaders.DATE));
                }
            } else if (hasDateHeader) {
                try {
                    dateSkew = request.getDateHeader(HttpHeaders.DATE);
                    dateSkew /= 1000;
                } catch (IllegalArgumentException iae) {
                    // Some clients send an ISO 8601 Date header instead of
                    // the RFC 1123 format getDateHeader expects.
                    try {
                        dateSkew = parseIso8601(request.getHeader(
                                HttpHeaders.DATE));
                    } catch (IllegalArgumentException iae2) {
                        // NOTE(review): wraps the first failure (iae), not
                        // iae2 -- possibly intentional, confirm.
                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED, iae);
                    }
                }
            } else {
                haveDate = false;
            }
            if (haveDate) {
                isTimeSkewed(dateSkew, presignedUrl);
            }
        }

        // path[0] is empty, path[1] is the bucket, path[2] is the key.
        String[] path = uri.split("/", 3);
        for (int i = 0; i < path.length; i++) {
            path[i] = URLDecoder.decode(path[i], StandardCharsets.UTF_8);
        }

        for (String parameter : Collections.list(
                request.getParameterNames())) {
            if (UNSUPPORTED_PARAMETERS.contains(parameter)) {
                logger.error("Unknown parameters {} with URI {}",
                        parameter, request.getRequestURI());
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }

        // emit NotImplemented for unknown x-amz- headers
        for (String headerName : Collections.list(request.getHeaderNames())) {
            headerName = headerName.toLowerCase();
            if (ignoreUnknownHeaders) {
                continue;
            }
            if (!headerName.startsWith("x-amz-")) {
                continue;
            }
            if (headerName.startsWith(USER_METADATA_PREFIX)) {
                continue;
            }
            if (!SUPPORTED_X_AMZ_HEADERS.contains(headerName)) {
                logger.error("Unknown header {} with URI {}",
                        headerName, request.getRequestURI());
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }

        Map.Entry
                provider = blobStoreLocator.locateBlobStore(
                requestIdentity, path.length > 1 ? path[1] : null,
                path.length > 2 ? path[2] : null);
        if (anonymousIdentity) {
            blobStore = provider.getValue();
            // Anonymous requests skip signature checks but may still use
            // aws-chunked encoding; unwrap it here.
            String contentSha256 = request.getHeader(
                    AwsHttpHeaders.CONTENT_SHA256);
            if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(contentSha256)) {
                is = new ChunkedInputStream(is, v4MaxChunkSize);
            } else if ("STREAMING-UNSIGNED-PAYLOAD-TRAILER".equals(
                    contentSha256)) {
                is = new ChunkedInputStream(is, v4MaxChunkSize,
                        request.getHeader(AwsHttpHeaders.TRAILER));
            }
        } else if (requestIdentity == null) {
            throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
        } else {
            if (provider == null) {
                throw new S3Exception(S3ErrorCode.INVALID_ACCESS_KEY_ID);
            }
            String credential = provider.getKey();
            blobStore = provider.getValue();

            // Presigned URL expiry checks.
            String expiresString = request.getParameter("Expires");
            if (expiresString != null) {  // v2 query
                long expires = Long.parseLong(expiresString);
                long nowSeconds = System.currentTimeMillis() / 1000;
                if (nowSeconds >= expires) {
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED,
                            "Request has expired");
                }
                if (expires - nowSeconds > TimeUnit.DAYS.toSeconds(365)) {
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
            }

            String dateString = request.getParameter("X-Amz-Date");
            //from para v4 query
            expiresString = request.getParameter("X-Amz-Expires");
            if (dateString != null && expiresString != null) {  //v4 query
                long date = parseIso8601(dateString);
                long expires = Long.parseLong(expiresString);
                long nowSeconds = System.currentTimeMillis() / 1000;
                if (nowSeconds >= date + expires) {
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED,
                            "Request has expired");
                }
                // AWS limits v4 presigned URLs to 7 days.
                if (expires > TimeUnit.DAYS.toSeconds(7)) {
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
            }

            // The aim ?
            // Reject signature versions the server is not configured for.
            switch (authHeader.getAuthenticationType()) {
            case AWS_V2:
                switch (authenticationType) {
                case AWS_V2:
                case AWS_V2_OR_V4:
                case NONE:
                    break;
                default:
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                break;
            case AWS_V4:
                switch (authenticationType) {
                case AWS_V4:
                case AWS_V2_OR_V4:
                case NONE:
                    break;
                default:
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                break;
            case NONE:
                break;
            default:
                throw new IllegalArgumentException("Unhandled type: " +
                        authHeader.getAuthenticationType());
            }

            String expectedSignature = null;

            if (authHeader.getHmacAlgorithm() == null) {  //v2
                // When presigned url is generated, it doesn't consider
                // service path
                String uriForSigning = presignedUrl ? uri :
                        this.servicePath + uri;
                expectedSignature = AwsSignature.createAuthorizationSignature(
                        request, uriForSigning, credential, presignedUrl,
                        haveBothDateHeader);
            } else {
                String contentSha256 = request.getHeader(
                        AwsHttpHeaders.CONTENT_SHA256);
                try {
                    byte[] payload;
                    if (request.getParameter("X-Amz-Algorithm") != null) {
                        // Presigned v4: the payload is not signed.
                        payload = new byte[0];
                    } else if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(
                            contentSha256)) {
                        payload = new byte[0];
                        // ChunkedInputStream constructed below after deriving
                        // the signing key so per-chunk signatures can be
                        // verified.
                    } else if ("STREAMING-UNSIGNED-PAYLOAD-TRAILER".equals(
                            contentSha256)) {
                        payload = new byte[0];
                        is = new ChunkedInputStream(is, v4MaxChunkSize,
                                request.getHeader(AwsHttpHeaders.TRAILER));
                    } else if ("UNSIGNED-PAYLOAD".equals(contentSha256)) {
                        payload = new byte[0];
                    } else {
                        // buffer the entire stream to calculate digest
                        // why input stream read contentlength of header?
                        payload = ByteStreams.limit(is,
                                v4MaxNonChunkedRequestSize + 1)
                                .readAllBytes();
                        if (payload.length == v4MaxNonChunkedRequestSize + 1) {
                            throw new S3Exception(
                                    S3ErrorCode.MAX_MESSAGE_LENGTH_EXCEEDED);
                        }

                        // maybe we should check this when signing,
                        // a lot of dup code with aws sign code.
                        MessageDigest md = MessageDigest.getInstance(
                                authHeader.getHashAlgorithm());
                        byte[] hash = md.digest(payload);
                        if (!contentSha256.equals(
                                BaseEncoding.base16().lowerCase()
                                .encode(hash))) {
                            throw new S3Exception(
                                    S3ErrorCode
                                    .X_AMZ_CONTENT_S_H_A_256_MISMATCH);
                        }
                        // Replay the buffered payload for the handler.
                        is = new ByteArrayInputStream(payload);
                    }

                    String uriForSigning = presignedUrl ? originalUri :
                            this.servicePath + originalUri;
                    expectedSignature = AwsSignature
                            .createAuthorizationSignatureV4(  // v4 sign
                            baseRequest, authHeader, payload, uriForSigning,
                            credential);
                    if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(
                            contentSha256)) {
                        byte[] signingKey = AwsSignature.deriveSigningKeyV4(
                                authHeader, credential);
                        String scope = authHeader.getDate() + "/" +
                                authHeader.getRegion() + "/" +
                                authHeader.getService() + "/aws4_request";
                        String timestamp = request.getHeader(
                                AwsHttpHeaders.DATE);
                        if (timestamp == null) {
                            timestamp = request.getParameter("X-Amz-Date");
                        }
                        is = new ChunkedInputStream(is, v4MaxChunkSize,
                                expectedSignature, signingKey,
                                authHeader.getHmacAlgorithm(), timestamp,
                                scope);
                    }
                } catch (InvalidKeyException | NoSuchAlgorithmException e) {
                    throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, e);
                }
            }

            // AWS does not check signatures with OPTIONS verb
            if (!method.equals("OPTIONS") && !constantTimeEquals(
                    expectedSignature, authHeader.getSignature())) {
                throw new S3Exception(S3ErrorCode.SIGNATURE_DOES_NOT_MATCH);
            }
        }

        // Validate container name
        if (!uri.equals("/") && !isValidContainer(path[1])) {
            if (method.equals("PUT") &&
                    (path.length <= 2 || path[2].isEmpty()) &&
                    !"".equals(request.getParameter("acl"))) {
                throw new S3Exception(S3ErrorCode.INVALID_BUCKET_NAME);
            } else {
                throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
            }
        }

        String uploadId = request.getParameter("uploadId");
        if (ctx != null && path.length > 1 && !path[1].isEmpty()) {
            ctx.setBucket(path[1]);
        }

        // Dispatch on method and path shape; every branch returns.
        switch (method) {
        case "DELETE":
            if (path.length <= 2 || path[2].isEmpty()) {
                setOperation(ctx, S3Operation.DELETE_BUCKET);
                handleContainerDelete(request, response, blobStore, path[1]);
                return;
            } else if (uploadId != null) {
                setOperation(ctx, S3Operation.ABORT_MULTIPART_UPLOAD);
                handleAbortMultipartUpload(request, response, blobStore,
                        path[1], path[2], uploadId);
                return;
            } else {
                setOperation(ctx, S3Operation.DELETE_OBJECT);
                handleBlobRemove(request, response, blobStore, path[1],
                        path[2]);
                return;
            }
        case "GET":
            if (uri.equals("/")) {
                setOperation(ctx, S3Operation.LIST_BUCKETS);
                handleContainerList(request, response, blobStore);
                return;
            } else if (path.length <= 2 || path[2].isEmpty()) {
                // Bucket-level GET sub-resources.
                if (request.getParameter("acl") != null) {
                    setOperation(ctx, S3Operation.GET_BUCKET_ACL);
                    handleGetContainerAcl(request, response, blobStore,
                            path[1]);
                    return;
                } else if (request.getParameter("location") != null) {
                    setOperation(ctx, S3Operation.GET_BUCKET_LOCATION);
                    handleContainerLocation(request, response);
                    return;
                } else if (request.getParameter("policy") != null) {
                    setOperation(ctx, S3Operation.GET_BUCKET_POLICY);
                    handleBucketPolicy(blobStore, path[1]);
                    return;
                } else if (request.getParameter("uploads") != null) {
                    setOperation(ctx, S3Operation.LIST_MULTIPART_UPLOADS);
                    handleListMultipartUploads(request, response, blobStore,
                            path[1]);
                    return;
                }
                setOperation(ctx, S3Operation.LIST_OBJECTS_V2);
                handleBlobList(request, response, blobStore, path[1]);
                return;
            } else {
                // Object-level GET sub-resources.
                if (request.getParameter("acl") != null) {
                    setOperation(ctx, S3Operation.GET_OBJECT_ACL);
                    handleGetBlobAcl(request, response, blobStore, path[1],
                            path[2]);
                    return;
                } else if (uploadId != null) {
                    setOperation(ctx, S3Operation.LIST_PARTS);
                    handleListParts(request, response, blobStore, path[1],
                            path[2], uploadId);
                    return;
                }
                setOperation(ctx, S3Operation.GET_OBJECT);
                handleGetBlob(request, response, blobStore, path[1],
                        path[2]);
                return;
            }
        case "HEAD":
            if (path.length <= 2 || path[2].isEmpty()) {
                setOperation(ctx, S3Operation.HEAD_BUCKET);
                handleContainerExists(request, response, blobStore, path[1]);
                return;
            } else {
                setOperation(ctx, S3Operation.HEAD_OBJECT);
                handleBlobMetadata(request, response, blobStore, path[1],
                        path[2]);
                return;
            }
        case "POST":
            if (request.getParameter("delete") != null) {
                setOperation(ctx, S3Operation.DELETE_OBJECTS);
                handleMultiBlobRemove(request, response, is, blobStore,
                        path[1]);
                return;
            } else if (request.getParameter("uploads") != null) {
                setOperation(ctx, S3Operation.CREATE_MULTIPART_UPLOAD);
                handleInitiateMultipartUpload(request, response, blobStore,
                        path[1], path[2]);
                return;
            } else if (uploadId != null &&
                    request.getParameter("partNumber") == null) {
                setOperation(ctx, S3Operation.COMPLETE_MULTIPART_UPLOAD);
                handleCompleteMultipartUpload(request, response, is,
                        blobStore, path[1], path[2], uploadId);
                return;
            }
            break;
        case "PUT":
            if (path.length <= 2 || path[2].isEmpty()) {
                if (request.getParameter("acl") != null) {
                    setOperation(ctx, S3Operation.PUT_BUCKET_ACL);
                    handleSetContainerAcl(request, response, is, blobStore,
                            path[1]);
                    return;
                }
                setOperation(ctx, S3Operation.CREATE_BUCKET);
                handleContainerCreate(request, response, is, blobStore,
                        path[1]);
                return;
            } else if (uploadId != null) {
                if (request.getHeader(AwsHttpHeaders.COPY_SOURCE) != null) {
                    setOperation(ctx, S3Operation.UPLOAD_PART_COPY);
                    handleCopyPart(request, response, blobStore, path[1],
                            path[2], uploadId);
                } else {
                    setOperation(ctx, S3Operation.UPLOAD_PART);
                    handleUploadPart(request, response, is, blobStore,
                            path[1], path[2], uploadId);
                }
                return;
            } else if (request.getHeader(AwsHttpHeaders.COPY_SOURCE) !=
                    null) {
                setOperation(ctx, S3Operation.COPY_OBJECT);
                handleCopyBlob(request, response, is, blobStore, path[1],
                        path[2]);
                return;
            } else {
                if (request.getParameter("acl") != null) {
                    setOperation(ctx, S3Operation.PUT_OBJECT_ACL);
                    handleSetBlobAcl(request, response, is, blobStore,
                            path[1], path[2]);
                    return;
                }
                setOperation(ctx, S3Operation.PUT_OBJECT);
                handlePutBlob(request, response, is, blobStore, path[1],
                        path[2]);
                return;
            }
        case "OPTIONS":
            setOperation(ctx, S3Operation.OPTIONS_OBJECT);
            handleOptionsBlob(request, response, blobStore, path[1]);
            return;
        default:
            break;
        }
        setOperation(ctx, S3Operation.UNKNOWN);
        logger.error("Unknown method {} with URI {}",
                method, request.getRequestURI());
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    }

    /** Record the operation on the context when one was supplied. */
    private static void setOperation(@Nullable RequestContext ctx,
            S3Operation operation) {
        if (ctx != null) {
            ctx.setOperation(operation);
        }
    }

    /**
     * Return whether the blob (or, for backends without per-blob ACLs, its
     * container) is publicly readable.  Translates jclouds not-found
     * exceptions into the corresponding S3 errors.
     */
    private static boolean checkPublicAccess(BlobStore blobStore,
            String containerName, String blobName) throws S3Exception {
        String blobStoreType = getBlobStoreType(blobStore);
        try {
            if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                return access == ContainerAccess.PUBLIC_READ;
            }
            BlobAccess access = blobStore.getBlobAccess(containerName,
                    blobName);
            return access == BlobAccess.PUBLIC_READ;
        } catch (ContainerNotFoundException e) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET, e);
        } catch (KeyNotFoundException e) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY, e);
        }
    }

    /**
     * Handle an unauthenticated request: only GET/HEAD/POST/OPTIONS on
     * publicly readable containers and blobs are allowed; everything else
     * is AccessDenied or NotImplemented.
     */
    private void doHandleAnonymous(HttpServletRequest request,
            HttpServletResponse response, InputStream is, String uri,
            BlobStore blobStore, @Nullable RequestContext ctx)
            throws IOException, S3Exception {
        String method = request.getMethod();
        String[] path = uri.split("/", 3);
        if (ctx != null && path.length > 1 && !path[1].isEmpty()) {
            ctx.setBucket(path[1]);
        }
        switch (method) {
        case "GET":
            if (uri.equals("/")) {
                setOperation(ctx, S3Operation.LIST_BUCKETS);
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            } else if (path.length <= 2 || path[2].isEmpty()) {
                String containerName = path[1];
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                if (access == ContainerAccess.PRIVATE) {
                    setOperation(ctx, S3Operation.LIST_OBJECTS_V2);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.LIST_OBJECTS_V2);
                handleBlobList(request, response, blobStore, containerName);
                return;
            } else {
                String
                        containerName = path[1];
                String blobName = path[2];
                if (!checkPublicAccess(blobStore, containerName, blobName)) {
                    setOperation(ctx, S3Operation.GET_OBJECT);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.GET_OBJECT);
                handleGetBlob(request, response, blobStore, containerName,
                        blobName);
                return;
            }
        case "HEAD":
            if (path.length <= 2 || path[2].isEmpty()) {
                String containerName = path[1];
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                if (access == ContainerAccess.PRIVATE) {
                    setOperation(ctx, S3Operation.HEAD_BUCKET);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.HEAD_BUCKET);
                if (!blobStore.containerExists(containerName)) {
                    throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
                }
            } else {
                String containerName = path[1];
                String blobName = path[2];
                if (!checkPublicAccess(blobStore, containerName, blobName)) {
                    setOperation(ctx, S3Operation.HEAD_OBJECT);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.HEAD_OBJECT);
                handleBlobMetadata(request, response, blobStore,
                        containerName, blobName);
            }
            return;
        case "POST":
            // Anonymous browser-style form upload.
            if (path.length <= 2 || path[2].isEmpty()) {
                setOperation(ctx, S3Operation.PUT_OBJECT);
                handlePostBlob(request, response, is, blobStore, path[1]);
                return;
            }
            break;
        case "OPTIONS":
            if (uri.equals("/")) {
                setOperation(ctx, S3Operation.OPTIONS_OBJECT);
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            } else {
                String containerName = path[1];
                setOperation(ctx, S3Operation.OPTIONS_OBJECT);
                handleOptionsBlob(request, response, blobStore,
                        containerName);
                return;
            }
        default:
            break;
        }
        setOperation(ctx, S3Operation.UNKNOWN);
        logger.error("Unknown method {} with URI {}",
                method, request.getRequestURI());
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    }

    /**
     * GET ?acl on a bucket: emit an AccessControlPolicy document granting
     * the fake owner FULL_CONTROL, plus AllUsers READ when the container
     * is publicly readable.
     */
    private void handleGetContainerAcl(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        if
(!blobStore.containerExists(containerName)) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
        }
        ContainerAccess access = blobStore.getContainerAccess(containerName);
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("AccessControlPolicy");
            xml.writeDefaultNamespace(AWS_XMLNS);
            writeOwnerStanza(xml);
            xml.writeStartElement("AccessControlList");
            // owner always holds FULL_CONTROL
            xml.writeStartElement("Grant");
            xml.writeStartElement("Grantee");
            xml.writeNamespace("xsi",
                    "http://www.w3.org/2001/XMLSchema-instance");
            xml.writeAttribute("xsi:type", "CanonicalUser");
            writeSimpleElement(xml, "ID", FAKE_OWNER_ID);
            writeSimpleElement(xml, "DisplayName",
                    FAKE_OWNER_DISPLAY_NAME);
            xml.writeEndElement();
            writeSimpleElement(xml, "Permission", "FULL_CONTROL");
            xml.writeEndElement();
            if (access == ContainerAccess.PUBLIC_READ) {
                // public buckets additionally grant READ to AllUsers
                xml.writeStartElement("Grant");
                xml.writeStartElement("Grantee");
                xml.writeNamespace("xsi",
                        "http://www.w3.org/2001/XMLSchema-instance");
                xml.writeAttribute("xsi:type", "Group");
                writeSimpleElement(xml, "URI",
                        "http://acs.amazonaws.com/groups/global/AllUsers");
                xml.writeEndElement();
                writeSimpleElement(xml, "Permission", "READ");
                xml.writeEndElement();
            }
            xml.writeEndElement();
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * Set the bucket ACL from the x-amz-acl header or, if the request has
     * a body, from an AccessControlPolicy XML document.  Only private and
     * public-read are supported; other canned ACLs map to NotImplemented.
     */
    private void handleSetContainerAcl(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        ContainerAccess access;
        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
            access = ContainerAccess.PRIVATE;
        } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
            access = ContainerAccess.PUBLIC_READ;
        } else if (CANNED_ACLS.contains(cannedAcl)) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        } else {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
        // a non-empty body overrides the header; peek one byte to detect it
        var pis = new PushbackInputStream(is);
        int ch = pis.read();
        if (ch != -1) {
            pis.unread(ch);
            AccessControlPolicy policy = mapper.readValue(
                    pis, AccessControlPolicy.class);
            String accessString = mapXmlAclsToCannedPolicy(policy);
            if (accessString.equals("private")) {
                access = ContainerAccess.PRIVATE;
            } else if (accessString.equals("public-read")) {
                access = ContainerAccess.PUBLIC_READ;
            } else {
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }
        blobStore.setContainerAccess(containerName, access);
        addCorsResponseHeader(request, response);
    }

    /** Emit the blob ACL as an AccessControlPolicy XML document. */
    private void handleGetBlobAcl(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName) throws IOException {
        BlobAccess access = blobStore.getBlobAccess(containerName, blobName);
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("AccessControlPolicy");
            xml.writeDefaultNamespace(AWS_XMLNS);
            writeOwnerStanza(xml);
            xml.writeStartElement("AccessControlList");
            xml.writeStartElement("Grant");
            xml.writeStartElement("Grantee");
            xml.writeNamespace("xsi",
                    "http://www.w3.org/2001/XMLSchema-instance");
            xml.writeAttribute("xsi:type", "CanonicalUser");
            writeSimpleElement(xml, "ID", FAKE_OWNER_ID);
            writeSimpleElement(xml, "DisplayName",
                    FAKE_OWNER_DISPLAY_NAME);
            xml.writeEndElement();
            writeSimpleElement(xml, "Permission", "FULL_CONTROL");
            xml.writeEndElement();
            if (access == BlobAccess.PUBLIC_READ) {
                xml.writeStartElement("Grant");
                xml.writeStartElement("Grantee");
                xml.writeNamespace("xsi",
                        "http://www.w3.org/2001/XMLSchema-instance");
                xml.writeAttribute("xsi:type", "Group");
writeSimpleElement(xml, "URI",
                        "http://acs.amazonaws.com/groups/global/AllUsers");
                xml.writeEndElement();
                writeSimpleElement(xml, "Permission", "READ");
                xml.writeEndElement();
            }
            xml.writeEndElement();
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * Set the blob ACL from the x-amz-acl header or, if the request has a
     * body, from an AccessControlPolicy XML document.  Only private and
     * public-read are supported; other canned ACLs map to NotImplemented.
     */
    private void handleSetBlobAcl(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName, String blobName)
            throws IOException, S3Exception {
        BlobAccess access;
        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
            access = BlobAccess.PRIVATE;
        } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
            access = BlobAccess.PUBLIC_READ;
        } else if (CANNED_ACLS.contains(cannedAcl)) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        } else {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
        // a non-empty body overrides the header; peek one byte to detect it
        var pis = new PushbackInputStream(is);
        int ch = pis.read();
        if (ch != -1) {
            pis.unread(ch);
            AccessControlPolicy policy = mapper.readValue(
                    pis, AccessControlPolicy.class);
            String accessString = mapXmlAclsToCannedPolicy(policy);
            if (accessString.equals("private")) {
                access = BlobAccess.PRIVATE;
            } else if (accessString.equals("public-read")) {
                access = BlobAccess.PUBLIC_READ;
            } else {
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }
        blobStore.setBlobAccess(containerName, blobName, access);
        addCorsResponseHeader(request, response);
    }

    /**
     * Map XML ACLs to a canned policy if an exact transformation exists.
     * Any grant other than owner FULL_CONTROL or AllUsers READ yields
     * NotImplemented.
     */
    private static String mapXmlAclsToCannedPolicy(
            AccessControlPolicy policy) throws S3Exception {
        if (!policy.owner.id.equals(FAKE_OWNER_ID)) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
        boolean ownerFullControl = false;
        boolean allUsersRead = false;
        if (policy.aclList != null) {
            for (AccessControlPolicy.AccessControlList.Grant grant :
                    policy.aclList.grants) {
                if (grant.grantee.type.equals("CanonicalUser") &&
                        grant.grantee.id.equals(FAKE_OWNER_ID) &&
                        grant.permission.equals("FULL_CONTROL")) {
                    ownerFullControl = true;
                } else if (grant.grantee.type.equals("Group") &&
                        grant.grantee.uri.equals("http://acs.amazonaws.com/" +
                                "groups/global/AllUsers") &&
                        grant.permission.equals("READ")) {
                    allUsersRead = true;
                } else {
                    throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
                }
            }
        }
        if (ownerFullControl) {
            if (allUsersRead) {
                return "public-read";
            }
            return "private";
        } else {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
    }

    /** Emit ListAllMyBucketsResult XML for all containers. */
    private void handleContainerList(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore)
            throws IOException {
        PageSet buckets = blobStore.list();
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("ListAllMyBucketsResult");
            xml.writeDefaultNamespace(AWS_XMLNS);
            writeOwnerStanza(xml);
            xml.writeStartElement("Buckets");
            for (StorageMetadata metadata : buckets) {
                xml.writeStartElement("Bucket");
                writeSimpleElement(xml, "Name", metadata.getName());
                Date creationDate = metadata.getCreationDate();
                if (creationDate == null) {
                    // Some providers, e.g., Swift, do not provide container
                    // creation date.  Emit a bogus one to satisfy clients like
                    // s3cmd which require one.
creationDate = new Date(0);
                }
                writeSimpleElement(xml, "CreationDate",
                        blobStore.getContext().utils().date()
                                .iso8601DateFormat(creationDate).trim());
                xml.writeEndElement();
            }
            xml.writeEndElement();
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /** Emit a LocationConstraint document for GET ?location. */
    private void handleContainerLocation(HttpServletRequest request,
            HttpServletResponse response) throws IOException {
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            // TODO: using us-standard semantics but could emit actual location
            xml.writeStartElement("LocationConstraint");
            xml.writeDefaultNamespace(AWS_XMLNS);
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /** Bucket policies are unsupported: NoSuchBucket or NoSuchPolicy. */
    private static void handleBucketPolicy(BlobStore blobStore,
            String containerName) throws S3Exception {
        if (!blobStore.containerExists(containerName)) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
        }
        throw new S3Exception(S3ErrorCode.NO_SUCH_POLICY);
    }

    /**
     * Emit ListMultipartUploadsResult XML.  Pagination parameters are
     * unsupported and rejected; prefix filtering is done locally.
     */
    private void handleListMultipartUploads(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String container) throws IOException, S3Exception {
        if (request.getParameter("delimiter") != null ||
                request.getParameter("max-uploads") != null ||
                request.getParameter("key-marker") != null ||
                request.getParameter("upload-id-marker") != null) {
            throw new UnsupportedOperationException();
        }
        String encodingType = request.getParameter("encoding-type");
        String prefix = request.getParameter("prefix");
        List uploads = blobStore.listMultipartUploads(
                container);
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("ListMultipartUploadsResult");
            xml.writeDefaultNamespace(AWS_XMLNS);
            writeSimpleElement(xml, "Bucket", container);
            // TODO: bogus values
            xml.writeEmptyElement("KeyMarker");
            xml.writeEmptyElement("UploadIdMarker");
            xml.writeEmptyElement("NextKeyMarker");
            xml.writeEmptyElement("NextUploadIdMarker");
            xml.writeEmptyElement("Delimiter");
            if (Strings.isNullOrEmpty(prefix)) {
                xml.writeEmptyElement("Prefix");
            } else {
                writeSimpleElement(xml, "Prefix", encodeBlob(
                        encodingType, prefix));
            }
            writeSimpleElement(xml, "MaxUploads", "1000");
            writeSimpleElement(xml, "IsTruncated", "false");
            for (MultipartUpload upload : uploads) {
                if (prefix != null &&
                        !upload.blobName().startsWith(prefix)) {
                    continue;
                }
                xml.writeStartElement("Upload");
                writeSimpleElement(xml, "Key", upload.blobName());
                writeSimpleElement(xml, "UploadId", upload.id());
                writeInitiatorStanza(xml);
                writeOwnerStanza(xml);
                // TODO: bogus value
                writeSimpleElement(xml, "StorageClass", "STANDARD");
                // TODO: bogus value
                writeSimpleElement(xml, "Initiated",
                        blobStore.getContext().utils().date()
                                .iso8601DateFormat(new Date()));
                xml.writeEndElement();
            }
            // TODO: CommonPrefixes
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /** HEAD bucket: 200 if it exists, NoSuchBucket otherwise. */
    private void handleContainerExists(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        if (!blobStore.containerExists(containerName)) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
        }
        addCorsResponseHeader(request, response);
    }

    /**
     * Create a bucket, honoring an optional CreateBucketConfiguration
     * body (location constraint) and the x-amz-acl header.
     */
    private void handleContainerCreate(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        if (containerName.isEmpty()) {
            throw new S3Exception(S3ErrorCode.METHOD_NOT_ALLOWED);
        }
        String contentLengthString = request.getHeader(
HttpHeaders.CONTENT_LENGTH);
        if (contentLengthString != null) {
            long contentLength;
            try {
                contentLength = Long.parseLong(contentLengthString);
            } catch (NumberFormatException nfe) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);
            }
            if (contentLength < 0) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
            }
        }
        String locationString;
        try (PushbackInputStream pis = new PushbackInputStream(is)) {
            int ch = pis.read();
            if (ch == -1) {
                // handle empty bodies
                locationString = null;
            } else {
                pis.unread(ch);
                CreateBucketRequest cbr = mapper.readValue(
                        pis, CreateBucketRequest.class);
                locationString = cbr.locationConstraint;
            }
        }
        Location location = null;
        if (locationString != null) {
            // match the requested constraint against assignable locations
            for (Location loc : blobStore.listAssignableLocations()) {
                if (loc.getId().equalsIgnoreCase(locationString)) {
                    location = loc;
                    break;
                }
            }
            if (location == null) {
                throw new S3Exception(S3ErrorCode.INVALID_LOCATION_CONSTRAINT);
            }
        }
        logger.debug("Creating bucket with location: {}", location);
        var options = new CreateContainerOptions();
        String acl = request.getHeader(AwsHttpHeaders.ACL);
        if ("public-read".equalsIgnoreCase(acl)) {
            options.publicRead();
        }
        boolean created;
        try {
            created = blobStore.createContainerInLocation(location,
                    containerName, options);
        } catch (AuthorizationException ae) {
            if (ae.getCause() instanceof AccessDeniedException) {
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED,
                        "Could not create bucket", ae);
            }
            // another account owns this bucket name
            throw new S3Exception(S3ErrorCode.BUCKET_ALREADY_EXISTS, ae);
        }
        if (!created) {
            throw new S3Exception(S3ErrorCode.BUCKET_ALREADY_OWNED_BY_YOU,
                    S3ErrorCode.BUCKET_ALREADY_OWNED_BY_YOU.getMessage(),
                    null, Map.of("BucketName", containerName));
        }
        response.addHeader(HttpHeaders.LOCATION, "/" + containerName);
        addCorsResponseHeader(request, response);
    }

    /** Delete a bucket if empty, emitting 204 on success. */
    private void handleContainerDelete(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        if (!blobStore.containerExists(containerName)) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
        }
        String blobStoreType = getBlobStoreType(blobStore);
        if (blobStoreType.equals("b2")) {
            // S3 allows deleting a container with in-progress MPU while B2 does
            // not.  Explicitly cancel uploads for B2.
            for (MultipartUpload mpu : blobStore.listMultipartUploads(
                    containerName)) {
                blobStore.abortMultipartUpload(mpu);
            }
        }
        if (!blobStore.deleteContainerIfEmpty(containerName)) {
            throw new S3Exception(S3ErrorCode.BUCKET_NOT_EMPTY);
        }
        addCorsResponseHeader(request, response);
        response.setStatus(HttpServletResponse.SC_NO_CONTENT);
    }

    /**
     * List objects (V1 and V2) and emit ListBucketResult XML.  V2
     * continuation tokens are mapped back through lastKeyToMarker for
     * backends with opaque markers.
     */
    private void handleBlobList(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        String blobStoreType = getBlobStoreType(blobStore);
        var options = new ListContainerOptions();
        String encodingType = request.getParameter("encoding-type");
        String delimiter = request.getParameter("delimiter");
        if (delimiter != null) {
            options.delimiter(delimiter);
        } else {
            options.recursive();
        }
        String prefix = request.getParameter("prefix");
        if (prefix != null && !prefix.isEmpty()) {
            options.prefix(prefix);
        }
        boolean isListV2 = false;
        String marker;
        String listType = request.getParameter("list-type");
        String continuationToken = request.getParameter("continuation-token");
        String startAfter = request.getParameter("start-after");
        if (listType == null) {
            marker = request.getParameter("marker");
        } else if (listType.equals("2")) {
            isListV2 = true;
            // continuation-token and start-after are mutually exclusive
            if (continuationToken != null && startAfter != null) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
            }
            if (continuationToken != null) {
                marker = continuationToken;
            } else {
                marker = startAfter;
            }
        } else {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
        if (marker != null) {
            if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) {
                // translate the client-visible key back to the backend marker
                String realMarker = lastKeyToMarker.getIfPresent(
                        Map.entry(containerName, marker));
                if (realMarker != null) {
                    marker = realMarker;
                }
            }
            options.afterMarker(marker);
}
        boolean fetchOwner = !isListV2 ||
                "true".equals(request.getParameter("fetch-owner"));
        int maxKeys = 1000;
        String maxKeysString = request.getParameter("max-keys");
        if (maxKeysString != null) {
            try {
                maxKeys = Integer.parseInt(maxKeysString);
            } catch (NumberFormatException nfe) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);
            }
            // clamp to the S3 maximum page size
            if (maxKeys > 1000) {
                maxKeys = 1000;
            }
        }
        options.maxResults(maxKeys);
        PageSet set = blobStore.list(containerName, options);
        addCorsResponseHeader(request, response);
        response.setCharacterEncoding(UTF_8);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("ListBucketResult");
            xml.writeDefaultNamespace(AWS_XMLNS);
            writeSimpleElement(xml, "Name", containerName);
            if (prefix == null) {
                xml.writeEmptyElement("Prefix");
            } else {
                writeSimpleElement(xml, "Prefix", encodeBlob(
                        encodingType, prefix));
            }
            if (isListV2) {
                writeSimpleElement(xml, "KeyCount",
                        String.valueOf(set.size()));
            }
            writeSimpleElement(xml, "MaxKeys", String.valueOf(maxKeys));
            if (!isListV2) {
                if (marker == null) {
                    xml.writeEmptyElement("Marker");
                } else {
                    writeSimpleElement(xml, "Marker", encodeBlob(
                            encodingType, marker));
                }
            } else {
                if (continuationToken == null) {
                    xml.writeEmptyElement("ContinuationToken");
                } else {
                    writeSimpleElement(xml, "ContinuationToken", encodeBlob(
                            encodingType, continuationToken));
                }
                if (startAfter == null) {
                    xml.writeEmptyElement("StartAfter");
                } else {
                    writeSimpleElement(xml, "StartAfter", encodeBlob(
                            encodingType, startAfter));
                }
            }
            if (!Strings.isNullOrEmpty(delimiter)) {
                writeSimpleElement(xml, "Delimiter", encodeBlob(
                        encodingType, delimiter));
            }
            if (encodingType != null && encodingType.equals("url")) {
                writeSimpleElement(xml, "EncodingType", encodingType);
            }
            String nextMarker = set.getNextMarker();
            if (nextMarker != null) {
                writeSimpleElement(xml, "IsTruncated", "true");
                writeSimpleElement(xml,
                        isListV2 ? "NextContinuationToken" : "NextMarker",
                        encodeBlob(encodingType, nextMarker));
                if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) {
                    // remember the opaque backend marker so a follow-up
                    // request can be translated back in handleBlobList
                    StorageMetadata sm = Streams.findLast(
                            set.stream()).orElse(null);
                    if (sm != null) {
                        lastKeyToMarker.put(Map.entry(
                                containerName,
                                encodeBlob(encodingType, nextMarker)),
                                nextMarker);
                    }
                }
            } else {
                writeSimpleElement(xml, "IsTruncated", "false");
            }
            Set commonPrefixes = new TreeSet<>();
            for (StorageMetadata metadata : set) {
                switch (metadata.getType()) {
                case FOLDER:
                    // fallthrough
                case RELATIVE_PATH:
                    // with a delimiter, directory-like entries are collected
                    // into CommonPrefixes rather than emitted as Contents
                    if (delimiter != null) {
                        commonPrefixes.add(metadata.getName());
                        continue;
                    }
                    break;
                default:
                    break;
                }
                xml.writeStartElement("Contents");
                writeSimpleElement(xml, "Key", encodeBlob(encodingType,
                        metadata.getName()));
                Date lastModified = metadata.getLastModified();
                if (lastModified != null) {
                    writeSimpleElement(xml, "LastModified",
                            formatDate(lastModified));
                }
                String eTag = metadata.getETag();
                if (eTag != null) {
                    writeSimpleElement(xml, "ETag", maybeQuoteETag(eTag));
                }
                Long size = metadata.getSize();
                if (size != null) {
                    writeSimpleElement(xml, "Size", String.valueOf(size));
                }
                Tier tier = metadata.getTier();
                if (tier != null) {
                    writeSimpleElement(xml, "StorageClass",
                            StorageClass.fromTier(tier).toString());
                }
                if (fetchOwner) {
                    writeOwnerStanza(xml);
                }
                xml.writeEndElement();
            }
            for (String commonPrefix : commonPrefixes) {
                xml.writeStartElement("CommonPrefixes");
                writeSimpleElement(xml, "Prefix", encodeBlob(encodingType,
                        commonPrefix));
                xml.writeEndElement();
            }
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * Delete a single object and return 204 No Content.  S3 reports
     * success even when the key does not exist.
     */
    private void handleBlobRemove(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName)
            throws IOException, S3Exception {
        blobStore.removeBlob(containerName, blobName);
        addCorsResponseHeader(request, response);
        // Use setStatus rather than sendError: 204 is a success status and
        // sendError is specified for error dispatch (it may emit an error
        // page body, which a 204 response must not carry).  This also
        // matches handleContainerDelete.
        response.setStatus(HttpServletResponse.SC_NO_CONTENT);
    }

    private void
handleMultiBlobRemove(HttpServletRequest request, HttpServletResponse response, InputStream is, BlobStore blobStore, String containerName) throws IOException, S3Exception { String contentMD5String = request.getHeader(HttpHeaders.CONTENT_MD5); if (contentMD5String == null) { throw new S3Exception(S3ErrorCode.INVALID_REQUEST, "Missing required header for this request: Content-Md5"); } HashCode expected; try { expected = HashCode.fromBytes( Base64.getDecoder().decode(contentMD5String)); } catch (IllegalArgumentException iae) { throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae); } if (expected.bits() != MD5.bits()) { throw new S3Exception(S3ErrorCode.INVALID_DIGEST); } byte[] body = is.readAllBytes(); HashCode actual = MD5.hashBytes(body); if (!expected.equals(actual)) { throw new S3Exception(S3ErrorCode.BAD_DIGEST); } DeleteMultipleObjectsRequest dmor = mapper.readValue( body, DeleteMultipleObjectsRequest.class); if (dmor.objects == null) { throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L); } if (dmor.objects.size() > 1_000) { throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT); } Collection blobNames = new ArrayList<>(); for (DeleteMultipleObjectsRequest.S3Object s3Object : dmor.objects) { blobNames.add(s3Object.key); } blobStore.removeBlobs(containerName, blobNames); response.setCharacterEncoding(UTF_8); addCorsResponseHeader(request, response); try (Writer writer = response.getWriter()) { response.setContentType(XML_CONTENT_TYPE); XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter( writer); xml.writeStartDocument(); xml.writeStartElement("DeleteResult"); xml.writeDefaultNamespace(AWS_XMLNS); if (!dmor.quiet) { for (String blobName : blobNames) { xml.writeStartElement("Deleted"); writeSimpleElement(xml, "Key", blobName); xml.writeEndElement(); } } // TODO: emit error stanza xml.writeEndElement(); xml.flush(); } catch (XMLStreamException xse) { throw new IOException(xse); } } private void handleBlobMetadata(HttpServletRequest request, 
HttpServletResponse response, BlobStore blobStore, String containerName, String blobName) throws IOException, S3Exception { BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName); if (metadata == null) { throw new S3Exception(S3ErrorCode.NO_SUCH_KEY); } // BlobStore.blobMetadata does not support GetOptions so we emulate // conditional requests. String ifMatch = request.getHeader(HttpHeaders.IF_MATCH); String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH); long ifModifiedSince = request.getDateHeader( HttpHeaders.IF_MODIFIED_SINCE); long ifUnmodifiedSince = request.getDateHeader( HttpHeaders.IF_UNMODIFIED_SINCE); String eTag = metadata.getETag(); if (eTag != null) { eTag = maybeQuoteETag(eTag); if (ifMatch != null && !ifMatch.equals(eTag)) { throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED); } if (ifNoneMatch != null && ifNoneMatch.equals(eTag)) { response.setStatus(HttpServletResponse.SC_NOT_MODIFIED); return; } } Date lastModified = metadata.getLastModified(); if (lastModified != null) { if (ifModifiedSince != -1 && lastModified.compareTo( new Date(ifModifiedSince)) <= 0) { throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED); } if (ifUnmodifiedSince != -1 && lastModified.compareTo( new Date(ifUnmodifiedSince)) >= 0) { response.setStatus(HttpServletResponse.SC_NOT_MODIFIED); return; } } response.setStatus(HttpServletResponse.SC_OK); addMetadataToResponse(request, response, metadata); addCorsResponseHeader(request, response); } private void handleOptionsBlob(HttpServletRequest request, HttpServletResponse response, BlobStore blobStore, String containerName) throws IOException, S3Exception { if (!blobStore.containerExists(containerName)) { // Don't leak internal information, although authenticated throw new S3Exception(S3ErrorCode.ACCESS_DENIED); } String corsOrigin = request.getHeader(HttpHeaders.ORIGIN); if (Strings.isNullOrEmpty(corsOrigin)) { throw new S3Exception(S3ErrorCode.INVALID_CORS_ORIGIN); } if 
(!corsRules.isOriginAllowed(corsOrigin)) { throw new S3Exception(S3ErrorCode.ACCESS_DENIED); } String corsMethod = request.getHeader( HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD); if (!corsRules.isMethodAllowed(corsMethod)) { throw new S3Exception(S3ErrorCode.INVALID_CORS_METHOD); } String corsHeaders = request.getHeader( HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS); if (!Strings.isNullOrEmpty(corsHeaders)) { if (corsRules.isEveryHeaderAllowed(corsHeaders)) { response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS, corsHeaders); } else { throw new S3Exception(S3ErrorCode.ACCESS_DENIED); } } response.addHeader(HttpHeaders.VARY, HttpHeaders.ORIGIN); response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, corsRules.getAllowedOrigin(corsOrigin)); response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, corsRules.getAllowedMethods()); String exposedHeaders = corsRules.getExposedHeaders(); if (!Strings.isNullOrEmpty(exposedHeaders)) { response.addHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, exposedHeaders); } response.setStatus(HttpServletResponse.SC_OK); } private void handleGetBlob(HttpServletRequest request, HttpServletResponse response, BlobStore blobStore, String containerName, String blobName) throws IOException, S3Exception { int status = HttpServletResponse.SC_OK; var options = new GetOptions(); String ifMatch = request.getHeader(HttpHeaders.IF_MATCH); if (ifMatch != null) { options.ifETagMatches(ifMatch); } String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH); if (ifNoneMatch != null) { options.ifETagDoesntMatch(ifNoneMatch); } long ifModifiedSince = request.getDateHeader( HttpHeaders.IF_MODIFIED_SINCE); if (ifModifiedSince != -1) { options.ifModifiedSince(new Date(ifModifiedSince)); } long ifUnmodifiedSince = request.getDateHeader( HttpHeaders.IF_UNMODIFIED_SINCE); if (ifUnmodifiedSince != -1) { options.ifUnmodifiedSince(new Date(ifUnmodifiedSince)); } String range = request.getHeader(HttpHeaders.RANGE); if (range != null && 
range.startsWith("bytes=") && // ignore multiple ranges range.indexOf(',') == -1) { range = range.substring("bytes=".length()); String[] ranges = range.split("-", 2); if (ranges[0].isEmpty()) { options.tail(Long.parseLong(ranges[1])); } else if (ranges[1].isEmpty()) { options.startAt(Long.parseLong(ranges[0])); } else { options.range(Long.parseLong(ranges[0]), Long.parseLong(ranges[1])); } status = HttpServletResponse.SC_PARTIAL_CONTENT; } Blob blob = blobStore.getBlob(containerName, blobName, options); if (blob == null) { throw new S3Exception(S3ErrorCode.NO_SUCH_KEY); } response.setStatus(status); addCorsResponseHeader(request, response); addMetadataToResponse(request, response, blob.getMetadata()); // TODO: handles only a single range due to jclouds limitations var headers = new CaseInsensitiveImmutableMultimap( blob.getAllHeaders()); Collection contentRanges = headers.get(HttpHeaders.CONTENT_RANGE); if (!contentRanges.isEmpty()) { response.addHeader(HttpHeaders.CONTENT_RANGE, contentRanges.iterator().next()); response.addHeader(HttpHeaders.ACCEPT_RANGES, "bytes"); } try (InputStream is = blob.getPayload().openStream(); OutputStream os = response.getOutputStream()) { is.transferTo(os); os.flush(); } } private void handleCopyBlob(HttpServletRequest request, HttpServletResponse response, InputStream is, BlobStore blobStore, String destContainerName, String destBlobName) throws IOException, S3Exception { String copySourceHeader = request.getHeader(AwsHttpHeaders.COPY_SOURCE); copySourceHeader = URLDecoder.decode( copySourceHeader, StandardCharsets.UTF_8); if (copySourceHeader.startsWith("/")) { // Some clients like boto do not include the leading slash copySourceHeader = copySourceHeader.substring(1); } String[] path = copySourceHeader.split("/", 2); if (path.length != 2) { throw new S3Exception(S3ErrorCode.INVALID_REQUEST); } String sourceContainerName = path[0]; String sourceBlobName = path[1]; boolean replaceMetadata = 
"REPLACE".equalsIgnoreCase(request.getHeader(
                AwsHttpHeaders.METADATA_DIRECTIVE));
        // copying onto itself is only meaningful when replacing metadata
        if (sourceContainerName.equals(destContainerName) &&
                sourceBlobName.equals(destBlobName) && !replaceMetadata) {
            throw new S3Exception(S3ErrorCode.INVALID_REQUEST);
        }
        CopyOptions.Builder options = CopyOptions.builder();
        String ifMatch = request.getHeader(AwsHttpHeaders.COPY_SOURCE_IF_MATCH);
        if (ifMatch != null) {
            options.ifMatch(ifMatch);
        }
        String ifNoneMatch = request.getHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH);
        if (ifNoneMatch != null) {
            options.ifNoneMatch(ifNoneMatch);
        }
        long ifModifiedSince = request.getDateHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE);
        if (ifModifiedSince != -1) {
            options.ifModifiedSince(new Date(ifModifiedSince));
        }
        long ifUnmodifiedSince = request.getDateHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE);
        if (ifUnmodifiedSince != -1) {
            options.ifUnmodifiedSince(new Date(ifUnmodifiedSince));
        }
        if (replaceMetadata) {
            // rebuild content metadata and user metadata from the request
            // headers instead of copying them from the source object
            ContentMetadataBuilder contentMetadata =
                    ContentMetadataBuilder.create();
            var userMetadata = ImmutableMap.builder();
            for (String headerName : Collections.list(
                    request.getHeaderNames())) {
                String headerValue = Strings.nullToEmpty(request.getHeader(
                        headerName));
                if (headerName.equalsIgnoreCase(
                        HttpHeaders.CACHE_CONTROL)) {
                    contentMetadata.cacheControl(headerValue);
                } else if (headerName.equalsIgnoreCase(
                        HttpHeaders.CONTENT_DISPOSITION)) {
                    contentMetadata.contentDisposition(headerValue);
                } else if (headerName.equalsIgnoreCase(
                        HttpHeaders.CONTENT_ENCODING)) {
                    contentMetadata.contentEncoding(headerValue);
                } else if (headerName.equalsIgnoreCase(
                        HttpHeaders.CONTENT_LANGUAGE)) {
                    contentMetadata.contentLanguage(headerValue);
                } else if (headerName.equalsIgnoreCase(
                        HttpHeaders.CONTENT_TYPE)) {
                    contentMetadata.contentType(headerValue);
                } else if (startsWithIgnoreCase(headerName,
                        USER_METADATA_PREFIX)) {
                    userMetadata.put(
                            headerName.substring(USER_METADATA_PREFIX.length()),
                            headerValue);
                }
                // TODO: Expires
            }
            options.contentMetadata(contentMetadata.build());
            options.userMetadata(userMetadata.build());
        }
        String eTag;
        try {
            eTag = blobStore.copyBlob(
                    sourceContainerName, sourceBlobName,
                    destContainerName, destBlobName, options.build());
        } catch (KeyNotFoundException knfe) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY, knfe);
        }
        // TODO: jclouds should include this in CopyOptions
        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl != null && !cannedAcl.equalsIgnoreCase("private")) {
            handleSetBlobAcl(request, response, is, blobStore,
                    destContainerName, destBlobName);
        }
        BlobMetadata blobMetadata = blobStore.blobMetadata(destContainerName,
                destBlobName);
        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("CopyObjectResult");
            xml.writeDefaultNamespace(AWS_XMLNS);
            var lastModified = blobMetadata.getLastModified();
            if (lastModified != null) {
                writeSimpleElement(xml, "LastModified",
                        formatDate(lastModified));
            }
            writeSimpleElement(xml, "ETag", maybeQuoteETag(eTag));
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * PUT object: validate lengths and Content-MD5, handle canned ACLs,
     * conditional writes, and storage class, then store the payload.
     */
    private void handlePutBlob(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName, String blobName)
            throws IOException, S3Exception {
        // Flag headers present since HttpServletResponse.getHeader returns
        // null for empty headers values.
String contentLengthString = null;
        String decodedContentLengthString = null;
        String contentMD5String = null;
        for (String headerName : Collections.list(request.getHeaderNames())) {
            String headerValue = Strings.nullToEmpty(request.getHeader(
                    headerName));
            if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH)) {
                contentLengthString = headerValue;
            } else if (headerName.equalsIgnoreCase(
                    AwsHttpHeaders.DECODED_CONTENT_LENGTH)) {
                decodedContentLengthString = headerValue;
            } else if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_MD5)) {
                contentMD5String = headerValue;
            }
        }
        if (decodedContentLengthString != null) {
            // aws-chunked uploads carry the payload length here, not in
            // Content-Length
            contentLengthString = decodedContentLengthString;
        }
        HashCode contentMD5 = null;
        if (contentMD5String != null) {
            try {
                contentMD5 = HashCode.fromBytes(
                        Base64.getDecoder().decode(contentMD5String));
            } catch (IllegalArgumentException iae) {
                throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae);
            }
            if (contentMD5.bits() != MD5.bits()) {
                throw new S3Exception(S3ErrorCode.INVALID_DIGEST);
            }
        }
        if (contentLengthString == null) {
            throw new S3Exception(S3ErrorCode.MISSING_CONTENT_LENGTH);
        }
        long contentLength;
        try {
            contentLength = Long.parseLong(contentLengthString);
        } catch (NumberFormatException nfe) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);
        }
        if (contentLength < 0) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
        }
        if (contentLength > maxSinglePartObjectSize) {
            throw new S3Exception(S3ErrorCode.ENTITY_TOO_LARGE);
        }
        if (decodedContentLengthString != null) {
            is = ByteStreams.limit(is, contentLength);
        }
        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);
        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);
        String blobStoreType = getBlobStoreType(blobStore);
        // Azure only supports If-None-Match: *, not If-Match: *
        // Handle If-Match: * manually for the azureblob-sdk provider.
        // Note: this is a non-atomic operation (HEAD then PUT).
        if (ifMatch != null && ifMatch.equals("*") &&
                blobStoreType.equals("azureblob-sdk")) {
            BlobMetadata metadata = blobStore.blobMetadata(containerName,
                    blobName);
            if (metadata == null) {
                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
            }
            // condition satisfied; clear it so the backend does not see it
            ifMatch = null;
        }
        // Providers that support native conditional writes
        boolean supportsNativeConditionalWrites =
                blobStoreType.equals("azureblob-sdk") ||
                blobStoreType.equals("aws-s3-sdk") ||
                blobStoreType.equals("google-cloud-storage-sdk");
        // Emulate conditional put for backends without native support.
        // Note: this is a non-atomic operation (HEAD then PUT).
        if ((ifMatch != null || ifNoneMatch != null) &&
                !supportsNativeConditionalWrites) {
            BlobMetadata metadata = blobStore.blobMetadata(containerName,
                    blobName);
            if (ifMatch != null) {
                if (ifMatch.equals("*")) {
                    if (metadata == null) {
                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
                    }
                } else {
                    if (metadata == null) {
                        throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);
                    }
                    String eTag = metadata.getETag();
                    if (eTag != null) {
                        eTag = maybeQuoteETag(eTag);
                        if (!equalsIgnoringSurroundingQuotes(ifMatch, eTag)) {
                            throw new S3Exception(
                                    S3ErrorCode.PRECONDITION_FAILED);
                        }
                    } else {
                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
                    }
                }
            }
            if (ifNoneMatch != null) {
                if (ifNoneMatch.equals("*")) {
                    // If-None-Match: * requires that the key not exist
                    if (metadata != null) {
                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
                    }
                } else if (metadata != null) {
                    String eTag = metadata.getETag();
                    if (eTag != null) {
                        eTag = maybeQuoteETag(eTag);
                        if (equalsIgnoringSurroundingQuotes(ifNoneMatch,
                                eTag)) {
                            throw new S3Exception(
                                    S3ErrorCode.PRECONDITION_FAILED);
                        }
                    }
                }
            }
        }
        BlobAccess access;
        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl == null || cannedAcl.equalsIgnoreCase("private")) {
            access = BlobAccess.PRIVATE;
        } else if (cannedAcl.equalsIgnoreCase("public-read")) {
            access = BlobAccess.PUBLIC_READ;
        } else if (CANNED_ACLS.contains(cannedAcl)) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        } else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST); return; } var options = new PutOptions2() .setBlobAccess(access) .setIfMatch(ifMatch) .setIfNoneMatch(ifNoneMatch); if (blobStoreType.equals("azureblob") && contentLength > 256 * 1024 * 1024) { options.multipart(true); } String eTag; BlobBuilder.PayloadBlobBuilder builder = blobStore .blobBuilder(blobName) .payload(is) .contentLength(contentLength); String storageClass = request.getHeader(AwsHttpHeaders.STORAGE_CLASS); if (storageClass == null || storageClass.equalsIgnoreCase("STANDARD")) { // defaults to STANDARD } else { builder.tier(StorageClass.valueOf(storageClass).toTier()); } addContentMetadataFromHttpRequest(builder, request); if (contentMD5 != null) { builder = builder.contentMD5(contentMD5); } eTag = blobStore.putBlob(containerName, builder.build(), options); addCorsResponseHeader(request, response); response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(eTag)); } private void handleStatuszRequest(HttpServletResponse response) throws IOException { response.setStatus(HttpServletResponse.SC_OK); response.setContentType("application/json"); response.setCharacterEncoding(UTF_8); Map body = ImmutableMap.of( "status", "OK", "gitHash", GIT_HASH, "launchTime", LAUNCH_TIME.toString(), "currentTime", Instant.now().toString()); try (PrintWriter writer = response.getWriter()) { JSON_MAPPER.writeValue(writer, body); } } private static String loadGitHash() { try (InputStream stream = S3ProxyHandler.class.getClassLoader() .getResourceAsStream("git.properties")) { if (stream == null) { return "unknown"; } Properties properties = new Properties(); properties.load(stream); String hash = properties.getProperty("git.commit.id.abbrev"); if (hash == null) { hash = properties.getProperty("git.commit.id", "unknown"); } return hash; } catch (IOException ioe) { logger.debug("Unable to load git.properties", ioe); return "unknown"; } } private void handlePostBlob(HttpServletRequest request, HttpServletResponse response, 
InputStream is, BlobStore blobStore, String containerName)
            throws IOException, S3Exception {
        String boundaryHeader = request.getHeader(HttpHeaders.CONTENT_TYPE);
        if (boundaryHeader == null ||
                !boundaryHeader.startsWith("multipart/form-data; boundary=")) {
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
        String boundary =
                boundaryHeader.substring(boundaryHeader.indexOf('=') + 1);

        String blobName = null;
        String contentType = null;
        String identity = null;
        // TODO: handle policy
        byte[] policy = null;
        String signature = null;
        String algorithm = null;
        byte[] payload = null;
        var parser = new MultiPartFormData.Parser(boundary);
        parser.setFilesDirectory(java.nio.file.Path.of(
                System.getProperty("java.io.tmpdir")));
        MultiPartFormData.Parts parts = parser.parse(
                new InputStreamContentSource(is)).join();
        try {
            for (var part : parts) {
                var header = part.getName();
                if (header.equalsIgnoreCase("acl")) {
                    // TODO: acl
                } else if (header.equalsIgnoreCase("AWSAccessKeyId") ||
                        header.equalsIgnoreCase("X-Amz-Credential")) {
                    identity = part.getContentAsString(
                            StandardCharsets.UTF_8);
                } else if (header.equalsIgnoreCase("Content-Type")) {
                    contentType = part.getContentAsString(
                            StandardCharsets.UTF_8);
                } else if (header.equalsIgnoreCase("file")) {
                    // TODO: buffers entire payload
                    // ISO_8859_1 round-trips arbitrary bytes losslessly
                    payload = part.getContentAsString(
                            StandardCharsets.ISO_8859_1)
                            .getBytes(StandardCharsets.ISO_8859_1);
                } else if (header.equalsIgnoreCase("key")) {
                    blobName = part.getContentAsString(
                            StandardCharsets.UTF_8);
                } else if (header.equalsIgnoreCase("policy")) {
                    policy = part.getContentAsString(
                            StandardCharsets.ISO_8859_1)
                            .getBytes(StandardCharsets.ISO_8859_1);
                } else if (header.equalsIgnoreCase("signature") ||
                        header.equalsIgnoreCase("X-Amz-Signature")) {
                    signature = part.getContentAsString(
                            StandardCharsets.UTF_8);
                } else if (header.equalsIgnoreCase("X-Amz-Algorithm")) {
                    algorithm = part.getContentAsString(
                            StandardCharsets.UTF_8);
                }
            }
        } finally {
            parts.close();
        }

        if (blobName == null || policy == null) {
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }

        // Reconstruct an Authorization-style header so the V2/V4 parsing
        // and the configured authenticationType checks can be reused.
        String headerAuthorization = null;
        S3AuthorizationHeader authHeader = null;
        boolean signatureVersion4;
        if (algorithm == null) {
            if (identity == null || signature == null) {
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            }
            signatureVersion4 = false;
            headerAuthorization = "AWS " + identity + ":" + signature;
        } else if (algorithm.equals("AWS4-HMAC-SHA256")) {
            if (identity == null || signature == null) {
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            }
            signatureVersion4 = true;
            headerAuthorization = "AWS4-HMAC-SHA256" +
                    " Credential=" + identity +
                    ", Signature=" + signature;
        } else {
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }

        try {
            authHeader = new S3AuthorizationHeader(headerAuthorization);
        } catch (IllegalArgumentException iae) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, iae);
        }

        // Reject signature versions the proxy is not configured to accept.
        switch (authHeader.getAuthenticationType()) {
        case AWS_V2:
            switch (authenticationType) {
            case AWS_V2:
            case AWS_V2_OR_V4:
            case NONE:
                break;
            default:
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            }
            break;
        case AWS_V4:
            switch (authenticationType) {
            case AWS_V4:
            case AWS_V2_OR_V4:
            case NONE:
                break;
            default:
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            }
            break;
        case NONE:
            break;
        default:
            throw new IllegalArgumentException("Unhandled type: " +
                    authHeader.getAuthenticationType());
        }

        // NOTE(review): raw Map.Entry -- generics appear stripped; likely
        // Map.Entry<String, BlobStore> upstream.
        Map.Entry provider =
                blobStoreLocator.locateBlobStore(authHeader.getIdentity(),
                        null, null);
        if (provider == null) {
            response.setStatus(HttpServletResponse.SC_FORBIDDEN);
            return;
        }
        String credential = provider.getKey();

        if (signatureVersion4) {
            // AWS SigV4 key derivation: date -> region -> service -> signing
            byte[] kSecret = ("AWS4" + credential).getBytes(
                    StandardCharsets.UTF_8);
            byte[] kDate = hmac("HmacSHA256",
                    authHeader.getDate().getBytes(StandardCharsets.UTF_8),
                    kSecret);
            byte[] kRegion = hmac("HmacSHA256",
                    authHeader.getRegion().getBytes(StandardCharsets.UTF_8),
                    kDate);
            byte[] kService = hmac("HmacSHA256",
                    authHeader.getService().getBytes(StandardCharsets.UTF_8),
                    kRegion);
            byte[] kSigning = hmac("HmacSHA256",
                    "aws4_request".getBytes(StandardCharsets.UTF_8),
                    kService);
            String expectedSignature =
                    BaseEncoding.base16().lowerCase().encode(
                            hmac("HmacSHA256", policy, kSigning));
            if (!constantTimeEquals(signature, expectedSignature)) {
                response.setStatus(HttpServletResponse.SC_FORBIDDEN);
                return;
            }
        } else {
            // V2: base64(HMAC-SHA1(policy, secret))
            String expectedSignature = Base64.getEncoder().encodeToString(
                    hmac("HmacSHA1", policy,
                            credential.getBytes(StandardCharsets.UTF_8)));
            if (!constantTimeEquals(signature, expectedSignature)) {
                response.setStatus(HttpServletResponse.SC_FORBIDDEN);
                return;
            }
        }

        BlobBuilder.PayloadBlobBuilder builder = blobStore
                .blobBuilder(blobName)
                .payload(payload);
        if (contentType != null) {
            builder.contentType(contentType);
        }
        Blob blob = builder.build();
        blobStore.putBlob(containerName, blob);
        response.setStatus(HttpServletResponse.SC_NO_CONTENT);
        addCorsResponseHeader(request, response);
    }

    /**
     * Handle S3 CreateMultipartUpload: capture content metadata, storage
     * class, ACL and conditional headers, start the upload and emit the
     * InitiateMultipartUploadResult XML.
     */
    private void handleInitiateMultipartUpload(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName)
            throws IOException, S3Exception {
        ByteSource payload = ByteSource.empty();
        BlobBuilder.PayloadBlobBuilder builder = blobStore
                .blobBuilder(blobName)
                .payload(payload);
        addContentMetadataFromHttpRequest(builder, request);
        builder.contentLength(payload.size());

        String storageClass = request.getHeader(AwsHttpHeaders.STORAGE_CLASS);
        if (storageClass == null || storageClass.equalsIgnoreCase("STANDARD")) {
            // defaults to STANDARD
        } else {
            builder.tier(StorageClass.valueOf(storageClass).toTier());
        }

        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);
        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);
        String blobStoreType = getBlobStoreType(blobStore);
        // Azure only supports If-None-Match: *, not If-Match: *
        // Handle If-Match: * manually for the azureblob-sdk provider.
        // Note: this is a non-atomic operation (HEAD then PUT).
if (ifMatch != null && ifMatch.equals("*") && blobStoreType.equals("azureblob-sdk")) { BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName); if (metadata == null) { throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED); } ifMatch = null; } BlobAccess access; String cannedAcl = request.getHeader(AwsHttpHeaders.ACL); if (cannedAcl == null || cannedAcl.equalsIgnoreCase("private")) { access = BlobAccess.PRIVATE; } else if (cannedAcl.equalsIgnoreCase("public-read")) { access = BlobAccess.PUBLIC_READ; } else if (CANNED_ACLS.contains(cannedAcl)) { throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED); } else { response.sendError(HttpServletResponse.SC_BAD_REQUEST); return; } var options = new PutOptions2() .setBlobAccess(access) .setIfMatch(ifMatch) .setIfNoneMatch(ifNoneMatch); MultipartUpload mpu = blobStore.initiateMultipartUpload(containerName, builder.build().getMetadata(), options); if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType( blobStore))) { blobStore.putBlob(containerName, builder.name(mpu.id()).build(), options); } response.setCharacterEncoding(UTF_8); addCorsResponseHeader(request, response); try (Writer writer = response.getWriter()) { response.setContentType(XML_CONTENT_TYPE); XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter( writer); xml.writeStartDocument(); xml.writeStartElement("InitiateMultipartUploadResult"); xml.writeDefaultNamespace(AWS_XMLNS); writeSimpleElement(xml, "Bucket", containerName); writeSimpleElement(xml, "Key", blobName); writeSimpleElement(xml, "UploadId", mpu.id()); xml.writeEndElement(); xml.flush(); } catch (XMLStreamException xse) { throw new IOException(xse); } } private void handleCompleteMultipartUpload(HttpServletRequest request, HttpServletResponse response, InputStream is, final BlobStore blobStore, String containerName, String blobName, String uploadId) throws IOException, S3Exception { BlobMetadata metadata; PutOptions options; if 
(Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(
                blobStore))) {
            // recover metadata and access from the stub blob keyed by
            // upload id
            metadata = blobStore.blobMetadata(containerName, uploadId);
            BlobAccess access = blobStore.getBlobAccess(containerName,
                    uploadId);
            options = new PutOptions().setBlobAccess(access);
        } else {
            metadata = new MutableBlobMetadataImpl();
            options = new PutOptions();
        }
        final MultipartUpload mpu = MultipartUpload.create(containerName,
                blobName, uploadId, metadata, options);

        // NOTE(review): raw List -- generics appear stripped; likely
        // List<MultipartPart> upstream.
        final List parts = new ArrayList<>();
        String blobStoreType = getBlobStoreType(blobStore);
        if (blobStoreType.equals("azureblob")) {
            // use all listed subparts as-is
            for (MultipartPart part : blobStore.listMultipartUpload(mpu)) {
                parts.add(part);
            }
        } else if (blobStoreType.equals("azureblob-sdk") ||
                blobStoreType.equals("google-cloud-storage-sdk")) {
            var partsByListing =
                    blobStore.listMultipartUpload(mpu).stream().collect(
                            Collectors.toMap(
                                    part -> part.partNumber(), part -> part));
            CompleteMultipartUploadRequest cmu;
            try {
                cmu = mapper.readValue(
                        is, CompleteMultipartUploadRequest.class);
            } catch (JsonParseException jpe) {
                throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L, jpe);
            }
            if (cmu.parts != null) {
                // sort by part number and deduplicate (last occurrence wins)
                SortedMap partsMap = new TreeMap<>();
                for (CompleteMultipartUploadRequest.Part part : cmu.parts) {
                    if (part.partNumber <= 0) {
                        throw new S3Exception(S3ErrorCode.INVALID_PART_ORDER,
                                "Part numbers must be positive integers.");
                    }
                    MultipartPart uploadedPart =
                            partsByListing.get(part.partNumber);
                    if (uploadedPart == null) {
                        throw new S3Exception(S3ErrorCode.INVALID_PART);
                    }
                    partsMap.put(part.partNumber, uploadedPart);
                }
                parts.addAll(partsMap.values());
            }
        } else if (blobStoreType.equals("google-cloud-storage")) {
            // GCS only supports 32 parts but we can support up to 1024 by
            // recursively combining objects.
            for (int partNumber = 1;; ++partNumber) {
                MultipartUpload mpu2 = MultipartUpload.create(
                        containerName,
                        "%s_%08d".formatted(mpu.id(), partNumber),
                        "%s_%08d".formatted(mpu.id(), partNumber),
                        metadata, options);
                List subParts = blobStore.listMultipartUpload(
                        mpu2);
                if (subParts.isEmpty()) {
                    break;
                }
                long partSize = 0;
                for (MultipartPart part : subParts) {
                    partSize += part.partSize();
                }
                String eTag = blobStore.completeMultipartUpload(mpu2,
                        subParts);
                parts.add(MultipartPart.create(
                        partNumber, partSize, eTag, /*lastModified=*/ null));
            }
        } else {
            // List parts to get part sizes and to map multiple Azure parts
            // into single parts.
            var partsByListing =
                    blobStore.listMultipartUpload(mpu).stream().collect(
                            Collectors.toMap(
                                    part -> part.partNumber(), part -> part));
            CompleteMultipartUploadRequest cmu;
            try {
                cmu = mapper.readValue(
                        is, CompleteMultipartUploadRequest.class);
            } catch (JsonParseException jpe) {
                throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L, jpe);
            }
            // use TreeMap to sort by part number and deduplicate (last wins)
            SortedMap requestParts = new TreeMap<>();
            if (cmu.parts != null) {
                for (CompleteMultipartUploadRequest.Part part : cmu.parts) {
                    if (part.partNumber <= 0) {
                        throw new S3Exception(S3ErrorCode.INVALID_PART_ORDER,
                                "Part numbers must be positive integers.");
                    }
                    requestParts.put(part.partNumber, part.eTag);
                }
            }
            for (var it = requestParts.entrySet().iterator(); it.hasNext();) {
                var entry = it.next();
                MultipartPart part = partsByListing.get(entry.getKey());
                if (part == null) {
                    throw new S3Exception(S3ErrorCode.INVALID_PART);
                }
                long partSize = part.partSize();
                // only the last part may be smaller than the minimum
                if (it.hasNext() && partSize != -1 &&
                        (partSize < 5 * 1024 * 1024 || partSize <
                                blobStore.getMinimumMultipartPartSize())) {
                    throw new S3Exception(S3ErrorCode.ENTITY_TOO_SMALL);
                }
                if (part.partETag() != null &&
                        !equalsIgnoringSurroundingQuotes(part.partETag(),
                                entry.getValue())) {
                    throw new S3Exception(S3ErrorCode.INVALID_PART);
                }
                parts.add(MultipartPart.create(entry.getKey(), partSize,
                        part.partETag(), part.lastModified()));
            }
        }

        if (parts.isEmpty()) {
            // Amazon requires at least one part
            throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L);
        }

        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (PrintWriter writer = response.getWriter()) {
            response.setStatus(HttpServletResponse.SC_OK);
            response.setContentType(XML_CONTENT_TYPE);

            // Launch async thread to allow main thread to emit newlines to
            // the client while completeMultipartUpload processes.
            // NOTE(review): raw AtomicReference -- generics appear stripped.
            final AtomicReference eTag = new AtomicReference<>();
            final AtomicReference exception = new AtomicReference<>();
            var thread = new Thread() {
                @Override
                public void run() {
                    try {
                        eTag.set(blobStore.completeMultipartUpload(mpu,
                                parts));
                    } catch (RuntimeException re) {
                        exception.set(re);
                    }
                }
            };
            thread.start();

            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("CompleteMultipartUploadResult");
            xml.writeDefaultNamespace(AWS_XMLNS);
            xml.flush();

            // keep the connection alive with whitespace until the backend
            // finishes
            while (thread.isAlive()) {
                try {
                    thread.join(1000);
                } catch (InterruptedException ie) {
                    // ignore
                }
                writer.write("\n");
                writer.flush();
            }
            if (exception.get() != null) {
                throw exception.get();
            }

            // remove the stub blob created at initiate time
            if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(
                    blobStore))) {
                blobStore.removeBlob(containerName, uploadId);
            }

            // TODO: bogus value
            writeSimpleElement(xml, "Location",
                    "http://Example-Bucket.s3.amazonaws.com/" + blobName);
            writeSimpleElement(xml, "Bucket", containerName);
            writeSimpleElement(xml, "Key", blobName);
            if (eTag.get() != null) {
                writeSimpleElement(xml, "ETag", maybeQuoteETag(eTag.get()));
            }
            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /** Handle S3 AbortMultipartUpload: drop the stub blob if any and abort
     * the backend upload. */
    private void handleAbortMultipartUpload(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName, String uploadId)
            throws IOException, S3Exception {
        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(
blobStore))) {
            if (!blobStore.blobExists(containerName, uploadId)) {
                throw new S3Exception(S3ErrorCode.NO_SUCH_UPLOAD);
            }
            blobStore.removeBlob(containerName, uploadId);
        }
        addCorsResponseHeader(request, response);
        // TODO: how to reconstruct original mpu?
        MultipartUpload mpu = MultipartUpload.create(containerName,
                blobName, uploadId, createFakeBlobMetadata(blobStore),
                new PutOptions());
        try {
            blobStore.abortMultipartUpload(mpu);
        } catch (KeyNotFoundException knfe) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_UPLOAD, knfe);
        }
        response.sendError(HttpServletResponse.SC_NO_CONTENT);
    }

    /**
     * Handle S3 ListParts: list uploaded parts for an in-progress multipart
     * upload, mapping Azure subparts back into S3 part numbers.
     */
    private void handleListParts(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName, String uploadId)
            throws IOException, S3Exception {
        // support only the no-op zero case
        String partNumberMarker = request.getParameter("part-number-marker");
        if (partNumberMarker != null && !partNumberMarker.equals("0")) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }

        // TODO: how to reconstruct original mpu?
        MultipartUpload mpu = MultipartUpload.create(containerName,
                blobName, uploadId, createFakeBlobMetadata(blobStore),
                new PutOptions());

        List parts;
        var blobStoreType = getBlobStoreType(blobStore);
        if (blobStoreType.equals("azureblob")) {
            // map Azure subparts back into S3 parts
            // NOTE(review): raw SortedMap -- generics appear stripped.
            SortedMap map = new TreeMap<>();
            for (MultipartPart part : blobStore.listMultipartUpload(mpu)) {
                // subparts use partNumber * 10_000 + subPartNumber; sum the
                // sizes per virtual S3 part
                int virtualPartNumber = part.partNumber() / 10_000;
                Long size = map.get(virtualPartNumber);
                map.put(virtualPartNumber, (size == null ?
                        0L : (long) size) + part.partSize());
            }
            parts = new ArrayList<>();
            for (var entry : map.entrySet()) {
                String eTag = "";  // TODO: bogus value
                Date lastModified = null;  // TODO: bogus value
                parts.add(MultipartPart.create(entry.getKey(),
                        entry.getValue(), eTag, lastModified));
            }
        } else {
            parts = blobStore.listMultipartUpload(mpu);
        }

        String encodingType = request.getParameter("encoding-type");

        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("ListPartsResult");
            xml.writeDefaultNamespace(AWS_XMLNS);

            if (encodingType != null && encodingType.equals("url")) {
                writeSimpleElement(xml, "EncodingType", encodingType);
            }

            writeSimpleElement(xml, "Bucket", containerName);
            writeSimpleElement(xml, "Key", encodeBlob(
                    encodingType, blobName));
            writeSimpleElement(xml, "UploadId", uploadId);
            writeInitiatorStanza(xml);
            writeOwnerStanza(xml);
            // TODO: bogus value
            writeSimpleElement(xml, "StorageClass", "STANDARD");

            // TODO: pagination
/*
            writeSimpleElement(xml, "PartNumberMarker", "1");
            writeSimpleElement(xml, "NextPartNumberMarker", "3");
            writeSimpleElement(xml, "MaxParts", "2");
            writeSimpleElement(xml, "IsTruncated", "true");
*/

            for (MultipartPart part : parts) {
                xml.writeStartElement("Part");

                writeSimpleElement(xml, "PartNumber", String.valueOf(
                        part.partNumber()));
                Date lastModified = part.lastModified();
                if (lastModified != null) {
                    writeSimpleElement(xml, "LastModified",
                            formatDate(lastModified));
                }
                String eTag = part.partETag();
                if (eTag != null) {
                    writeSimpleElement(xml, "ETag", maybeQuoteETag(eTag));
                }
                writeSimpleElement(xml, "Size", String.valueOf(
                        part.partSize()));

                xml.writeEndElement();
            }

            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * Handle S3 UploadPartCopy: copy a byte range of an existing blob into
     * a part of an in-progress multipart upload, honoring copy-source
     * conditional headers.
     */
    private void handleCopyPart(HttpServletRequest
request, HttpServletResponse response,
            BlobStore blobStore, String containerName, String blobName,
            String uploadId) throws IOException, S3Exception {
        // TODO: duplicated from handlePutBlob
        String copySourceHeader = request.getHeader(AwsHttpHeaders.COPY_SOURCE);
        copySourceHeader = URLDecoder.decode(
                copySourceHeader, StandardCharsets.UTF_8);
        if (copySourceHeader.startsWith("/")) {
            // Some clients like boto do not include the leading slash
            copySourceHeader = copySourceHeader.substring(1);
        }
        String[] path = copySourceHeader.split("/", 2);
        if (path.length != 2) {
            throw new S3Exception(S3ErrorCode.INVALID_REQUEST);
        }
        String sourceContainerName = path[0];
        String sourceBlobName = path[1];

        var options = new GetOptions();
        String range = request.getHeader(AwsHttpHeaders.COPY_SOURCE_RANGE);
        long expectedSize = -1;
        if (range != null) {
            if (!range.startsWith("bytes=") || range.indexOf(',') != -1 ||
                    range.indexOf('-') == -1) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                        "The x-amz-copy-source-range value must be of the form " +
                        "bytes=first-last where first and last are the " +
                        "zero-based offsets of the first and last bytes to copy");
            }
            try {
                range = range.substring("bytes=".length());
                String[] ranges = range.split("-", 2);
                if (ranges[0].isEmpty()) {
                    // suffix range: bytes=-N
                    options.tail(Long.parseLong(ranges[1]));
                } else if (ranges[1].isEmpty()) {
                    // open-ended range: bytes=N-
                    options.startAt(Long.parseLong(ranges[0]));
                } else {
                    long start = Long.parseLong(ranges[0]);
                    long end = Long.parseLong(ranges[1]);
                    if (end < start) {
                        throw new S3Exception(S3ErrorCode.INVALID_RANGE);
                    }
                    expectedSize = end - start + 1;
                    if (expectedSize > MAX_MULTIPART_COPY_SIZE) {
                        throw new S3Exception(S3ErrorCode.INVALID_REQUEST,
                                "The specified copy source is larger than" +
                                " the maximum allowable size for a copy" +
                                " source: " + MAX_MULTIPART_COPY_SIZE);
                    }
                    options.range(start, end);
                }
            } catch (NumberFormatException nfe) {
                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                        "The x-amz-copy-source-range value must be of the form " +
                        "bytes=first-last where first and last are the " +
                        "zero-based offsets of the first and last bytes to copy",
                        nfe);
            }
        }

        String partNumberString = request.getParameter("partNumber");
        if (partNumberString == null) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
        }
        int partNumber;
        try {
            partNumber = Integer.parseInt(partNumberString);
        } catch (NumberFormatException nfe) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                    "Part number must be an integer between 1 and 10000" +
                    ", inclusive", nfe, Map.of(
                            "ArgumentName", "partNumber",
                            "ArgumentValue", partNumberString));
        }
        if (partNumber < 1 || partNumber > 10_000) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                    "Part number must be an integer between 1 and 10000" +
                    ", inclusive", (Throwable) null, Map.of(
                            "ArgumentName", "partNumber",
                            "ArgumentValue", partNumberString));
        }

        // GCS only supports 32 parts so partition MPU into 32-part chunks.
        String blobStoreType = getBlobStoreType(blobStore);
        if (blobStoreType.equals("google-cloud-storage")) {
            // fix up 1-based part numbers
            uploadId = "%s_%08d".formatted(
                    uploadId, ((partNumber - 1) / 32) + 1);
            partNumber = ((partNumber - 1) % 32) + 1;
        }

        // TODO: how to reconstruct original mpu?
        MultipartUpload mpu = MultipartUpload.create(containerName,
                blobName, uploadId, createFakeBlobMetadata(blobStore),
                new PutOptions());

        // TODO: Blob can leak on precondition failures.
        Blob blob = blobStore.getBlob(sourceContainerName, sourceBlobName,
                options);
        if (blob == null) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);
        }

        BlobMetadata blobMetadata = blob.getMetadata();
        // HTTP GET allows overlong ranges but S3 CopyPart does not
        if (expectedSize != -1 && blobMetadata.getSize() < expectedSize) {
            throw new S3Exception(S3ErrorCode.INVALID_RANGE);
        }

        String ifMatch = request.getHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_MATCH);
        String ifNoneMatch = request.getHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH);
        long ifModifiedSince = request.getDateHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE);
        long ifUnmodifiedSince = request.getDateHeader(
                AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE);
        String eTag = blobMetadata.getETag();
        if (eTag != null) {
            eTag = maybeQuoteETag(eTag);
            if (ifMatch != null && !ifMatch.equals(eTag)) {
                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
            }
            if (ifNoneMatch != null && ifNoneMatch.equals(eTag)) {
                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
            }
        }

        Date lastModified = blobMetadata.getLastModified();
        if (lastModified != null) {
            if (ifModifiedSince != -1 && lastModified.compareTo(
                    new Date(ifModifiedSince)) <= 0) {
                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
            }
            if (ifUnmodifiedSince != -1 && lastModified.compareTo(
                    new Date(ifUnmodifiedSince)) >= 0) {
                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);
            }
        }

        long contentLength =
                blobMetadata.getContentMetadata().getContentLength();

        try (InputStream is = blob.getPayload().openStream()) {
            if (blobStoreType.equals("azureblob")) {
                // Azure has a smaller maximum part size than S3.  Split a
                // single S3 part into multiple Azure parts numbered
                // 10_000 * partNumber + subPartNumber.
                long azureMaximumMultipartPartSize =
                        blobStore.getMaximumMultipartPartSize();
                var his = new HashingInputStream(MD5, is);
                int subPartNumber = 0;
                for (long offset = 0; offset < contentLength;
                        offset += azureMaximumMultipartPartSize,
                        ++subPartNumber) {
                    Payload payload = Payloads.newInputStreamPayload(
                            new UncloseableInputStream(ByteStreams.limit(his,
                                    azureMaximumMultipartPartSize)));
                    payload.getContentMetadata().setContentLength(
                            Math.min(azureMaximumMultipartPartSize,
                                    contentLength - offset));
                    blobStore.uploadMultipartPart(mpu,
                            10_000 * partNumber + subPartNumber, payload);
                }
                // synthesize an ETag from the MD5 of the streamed bytes
                eTag = BaseEncoding.base16().lowerCase().encode(
                        his.hash().asBytes());
            } else {
                Payload payload = Payloads.newInputStreamPayload(is);
                payload.getContentMetadata().setContentLength(contentLength);

                MultipartPart part = blobStore.uploadMultipartPart(mpu,
                        partNumber, payload);
                eTag = part.partETag();
            }
        }

        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("CopyObjectResult");
            xml.writeDefaultNamespace(AWS_XMLNS);

            if (lastModified != null) {
                writeSimpleElement(xml, "LastModified",
                        formatDate(lastModified));
            }
            if (eTag != null) {
                writeSimpleElement(xml, "ETag", maybeQuoteETag(eTag));
            }

            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * Handle S3 UploadPart: validate Content-Length, Content-MD5 and part
     * number, then upload the part, splitting into Azure subparts when
     * needed.
     */
    private void handleUploadPart(HttpServletRequest request,
            HttpServletResponse response, InputStream is,
            BlobStore blobStore, String containerName, String blobName,
            String uploadId) throws IOException, S3Exception {
        // TODO: duplicated from handlePutBlob
        String contentLengthString = null;
        String decodedContentLengthString = null;
        String contentMD5String = null;
        for (String headerName : Collections.list(request.getHeaderNames())) {
            String headerValue =
Strings.nullToEmpty(request.getHeader(
                    headerName));
            if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH)) {
                contentLengthString = headerValue;
            } else if (headerName.equalsIgnoreCase(
                    AwsHttpHeaders.DECODED_CONTENT_LENGTH)) {
                decodedContentLengthString = headerValue;
            } else if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_MD5)) {
                contentMD5String = headerValue;
            }
        }
        // for aws-chunked uploads the part size comes from
        // x-amz-decoded-content-length
        if (decodedContentLengthString != null) {
            contentLengthString = decodedContentLengthString;
        }

        HashCode contentMD5 = null;
        if (contentMD5String != null) {
            try {
                contentMD5 = HashCode.fromBytes(
                        Base64.getDecoder().decode(contentMD5String));
            } catch (IllegalArgumentException iae) {
                throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae);
            }
            if (contentMD5.bits() != MD5.bits()) {
                throw new S3Exception(S3ErrorCode.INVALID_DIGEST);
            }
        }

        if (contentLengthString == null) {
            throw new S3Exception(S3ErrorCode.MISSING_CONTENT_LENGTH);
        }
        long contentLength;
        try {
            contentLength = Long.parseLong(contentLengthString);
        } catch (NumberFormatException nfe) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);
        }
        if (contentLength < 0) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
        }

        if (decodedContentLengthString != null) {
            is = ByteStreams.limit(is, contentLength);
        }

        String partNumberString = request.getParameter("partNumber");
        if (partNumberString == null) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);
        }
        int partNumber;
        try {
            partNumber = Integer.parseInt(partNumberString);
        } catch (NumberFormatException nfe) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                    "Part number must be an integer between 1 and 10000" +
                    ", inclusive", nfe, Map.of(
                            "ArgumentName", "partNumber",
                            "ArgumentValue", partNumberString));
        }
        if (partNumber < 1 || partNumber > 10_000) {
            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,
                    "Part number must be an integer between 1 and 10000" +
                    ", inclusive", (Throwable) null, Map.of(
                            "ArgumentName", "partNumber",
                            "ArgumentValue", partNumberString));
        }

        // GCS only supports 32 parts so partition MPU into 32-part chunks.
        String blobStoreType = getBlobStoreType(blobStore);
        if (blobStoreType.equals("google-cloud-storage")) {
            // fix up 1-based part numbers
            uploadId = "%s_%08d".formatted(
                    uploadId, ((partNumber - 1) / 32) + 1);
            partNumber = ((partNumber - 1) % 32) + 1;
        }

        // TODO: how to reconstruct original mpu?
        BlobMetadata blobMetadata;
        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(
                blobStore))) {
            // recover metadata from the stub blob keyed by upload id
            blobMetadata = blobStore.blobMetadata(containerName, uploadId);
        } else {
            blobMetadata = createFakeBlobMetadata(blobStore);
        }
        MultipartUpload mpu = MultipartUpload.create(containerName,
                blobName, uploadId, blobMetadata, new PutOptions());

        if (getBlobStoreType(blobStore).equals("azureblob")) {
            // Azure has a smaller maximum part size than S3.  Split a single
            // S3 part into multiple Azure parts numbered
            // 10_000 * partNumber + subPartNumber.
            long azureMaximumMultipartPartSize =
                    blobStore.getMaximumMultipartPartSize();
            var his = new HashingInputStream(MD5, is);
            int subPartNumber = 0;
            for (long offset = 0; offset < contentLength;
                    offset += azureMaximumMultipartPartSize,
                    ++subPartNumber) {
                Payload payload = Payloads.newInputStreamPayload(
                        ByteStreams.limit(his,
                                azureMaximumMultipartPartSize));
                payload.getContentMetadata().setContentLength(
                        Math.min(azureMaximumMultipartPartSize,
                                contentLength - offset));
                blobStore.uploadMultipartPart(mpu,
                        10_000 * partNumber + subPartNumber, payload);
            }
            // synthesize an ETag from the MD5 of the streamed bytes
            response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(
                    BaseEncoding.base16().lowerCase().encode(
                            his.hash().asBytes())));
        } else {
            MultipartPart part;
            Payload payload = Payloads.newInputStreamPayload(is);
            payload.getContentMetadata().setContentLength(contentLength);
            if (contentMD5 != null) {
                payload.getContentMetadata().setContentMD5(contentMD5);
            }
            part = blobStore.uploadMultipartPart(mpu, partNumber, payload);
            if (part.partETag() != null) {
                response.addHeader(HttpHeaders.ETAG,
                        maybeQuoteETag(part.partETag()));
            }
        }

        addCorsResponseHeader(request, response);
    }

    /**
     * Add a response header whose value can be overridden by an S3
     * response-* query parameter; the header is omitted when both the
     * override and the stored value are null.
     */
    private static void addResponseHeaderWithOverride(
HttpServletRequest request, HttpServletResponse response, String headerName, String overrideHeaderName, String value) { String override = request.getParameter(overrideHeaderName); // NPE in if value is null override = (override != null) ? override : value; if (override != null) { response.addHeader(headerName, override); } } private static void addMetadataToResponse(HttpServletRequest request, HttpServletResponse response, BlobMetadata metadata) { ContentMetadata contentMetadata = metadata.getContentMetadata(); addResponseHeaderWithOverride(request, response, HttpHeaders.CACHE_CONTROL, "response-cache-control", contentMetadata.getCacheControl()); addResponseHeaderWithOverride(request, response, HttpHeaders.CONTENT_ENCODING, "response-content-encoding", contentMetadata.getContentEncoding()); addResponseHeaderWithOverride(request, response, HttpHeaders.CONTENT_LANGUAGE, "response-content-language", contentMetadata.getContentLanguage()); addResponseHeaderWithOverride(request, response, HttpHeaders.CONTENT_DISPOSITION, "response-content-disposition", contentMetadata.getContentDisposition()); Long contentLength = contentMetadata.getContentLength(); if (contentLength != null) { response.addHeader(HttpHeaders.CONTENT_LENGTH, contentLength.toString()); } String overrideContentType = request.getParameter( "response-content-type"); response.setContentType(overrideContentType != null ? 
overrideContentType : contentMetadata.getContentType()); String eTag = metadata.getETag(); if (eTag != null) { response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(eTag)); } String overrideExpires = request.getParameter("response-expires"); if (overrideExpires != null) { response.addHeader(HttpHeaders.EXPIRES, overrideExpires); } else { Date expires = contentMetadata.getExpires(); if (expires != null) { response.addDateHeader(HttpHeaders.EXPIRES, expires.getTime()); } } Date lastModified = metadata.getLastModified(); if (lastModified != null) { response.addDateHeader(HttpHeaders.LAST_MODIFIED, lastModified.getTime()); } Tier tier = metadata.getTier(); if (tier != null) { response.addHeader(AwsHttpHeaders.STORAGE_CLASS, StorageClass.fromTier(tier).toString()); } for (var entry : metadata.getUserMetadata().entrySet()) { response.addHeader(USER_METADATA_PREFIX + entry.getKey(), entry.getValue()); } } /** Parse ISO 8601 timestamp into seconds since 1970. */ private static long parseIso8601(String date) { var formatter = new SimpleDateFormat( "yyyyMMdd'T'HHmmss'Z'"); formatter.setTimeZone(TimeZone.getTimeZone("UTC")); try { return formatter.parse(date).getTime() / 1000; } catch (ParseException pe) { throw new IllegalArgumentException(pe); } } private void isTimeSkewed( long date, boolean isPresigned) throws S3Exception { if (date < 0) { throw new S3Exception(S3ErrorCode.ACCESS_DENIED); } long now = System.currentTimeMillis() / 1000; if (isPresigned) { if (now + maximumTimeSkew < date) { logger.debug("request is not valid yet {} {}", date, now); throw new S3Exception(S3ErrorCode.ACCESS_DENIED); } } else { if (now + maximumTimeSkew < date || now - maximumTimeSkew > date) { logger.debug("time skewed {} {}", date, now); throw new S3Exception(S3ErrorCode.REQUEST_TIME_TOO_SKEWED); } } } // cannot call BlobStore.getContext().utils().date().iso8601DateFormat since // it has unwanted millisecond precision private static String generateRequestId() { return String.format("%016X", 
ThreadLocalRandom.current().nextLong()); } private static String formatDate(Date date) { var formatter = new SimpleDateFormat( "yyyy-MM-dd'T'HH:mm:ss'Z'"); formatter.setTimeZone(TimeZone.getTimeZone("GMT")); return formatter.format(date); } protected final void sendSimpleErrorResponse( HttpServletRequest request, HttpServletResponse response, S3ErrorCode code, String message, Map elements) throws IOException { logger.debug("sendSimpleErrorResponse: {} {}", code, elements); if (response.isCommitted()) { // Another handler already opened and closed the writer. return; } response.setStatus(code.getHttpStatusCode()); if (request.getMethod().equals("HEAD")) { // The HEAD method is identical to GET except that the server MUST // NOT return a message-body in the response. return; } response.setCharacterEncoding(UTF_8); try (Writer writer = response.getWriter()) { response.setContentType(XML_CONTENT_TYPE); XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter( writer); xml.writeStartDocument(); xml.writeStartElement("Error"); writeSimpleElement(xml, "Code", code.getErrorCode()); writeSimpleElement(xml, "Message", message); for (var entry : elements.entrySet()) { writeSimpleElement(xml, entry.getKey(), entry.getValue()); } String requestId = response.getHeader(AwsHttpHeaders.REQUEST_ID); if (requestId == null) { requestId = generateRequestId(); } writeSimpleElement(xml, "RequestId", requestId); xml.writeEndElement(); xml.flush(); } catch (XMLStreamException xse) { throw new IOException(xse); } } private void addCorsResponseHeader(HttpServletRequest request, HttpServletResponse response) { String corsOrigin = request.getHeader(HttpHeaders.ORIGIN); if (!Strings.isNullOrEmpty(corsOrigin) && corsRules.isOriginAllowed(corsOrigin)) { response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, corsRules.getAllowedOrigin(corsOrigin)); response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, corsRules.getAllowedMethods()); 
response.addHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS, corsRules.getExposedHeaders()); if (corsRules.isAllowCredentials()) { response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); } } } private static void addContentMetadataFromHttpRequest( BlobBuilder.PayloadBlobBuilder builder, HttpServletRequest request) { var userMetadata = ImmutableMap.builder(); for (String headerName : Collections.list(request.getHeaderNames())) { if (startsWithIgnoreCase(headerName, USER_METADATA_PREFIX)) { userMetadata.put( headerName.substring(USER_METADATA_PREFIX.length()), Strings.nullToEmpty(request.getHeader(headerName))); } } builder.cacheControl(request.getHeader( HttpHeaders.CACHE_CONTROL)) .contentDisposition(request.getHeader( HttpHeaders.CONTENT_DISPOSITION)) .contentEncoding(request.getHeader( HttpHeaders.CONTENT_ENCODING)) .contentLanguage(request.getHeader( HttpHeaders.CONTENT_LANGUAGE)) .userMetadata(userMetadata.build()); String contentType = request.getContentType(); if (contentType != null) { builder.contentType(contentType); } long expires = request.getDateHeader(HttpHeaders.EXPIRES); if (expires != -1) { builder.expires(new Date(expires)); } } // TODO: bogus values private static void writeInitiatorStanza(XMLStreamWriter xml) throws XMLStreamException { xml.writeStartElement("Initiator"); writeSimpleElement(xml, "ID", FAKE_INITIATOR_ID); writeSimpleElement(xml, "DisplayName", FAKE_INITIATOR_DISPLAY_NAME); xml.writeEndElement(); } // TODO: bogus values private static void writeOwnerStanza(XMLStreamWriter xml) throws XMLStreamException { xml.writeStartElement("Owner"); writeSimpleElement(xml, "ID", FAKE_OWNER_ID); writeSimpleElement(xml, "DisplayName", FAKE_OWNER_DISPLAY_NAME); xml.writeEndElement(); } private static void writeSimpleElement(XMLStreamWriter xml, String elementName, String characters) throws XMLStreamException { xml.writeStartElement(elementName); xml.writeCharacters(characters); xml.writeEndElement(); } private static BlobMetadata 
createFakeBlobMetadata(BlobStore blobStore) { return blobStore.blobBuilder("fake-name") .build() .getMetadata(); } private static boolean equalsIgnoringSurroundingQuotes(String s1, String s2) { if (s1.length() >= 2 && s1.startsWith("\"") && s1.endsWith("\"")) { s1 = s1.substring(1, s1.length() - 1); } if (s2.length() >= 2 && s2.startsWith("\"") && s2.endsWith("\"")) { s2 = s2.substring(1, s2.length() - 1); } return s1.equals(s2); } private static String maybeQuoteETag(String eTag) { if (!eTag.startsWith("\"") && !eTag.endsWith("\"")) { eTag = "\"" + eTag + "\""; } return eTag; } private static boolean startsWithIgnoreCase(String string, String prefix) { return string.toLowerCase().startsWith(prefix.toLowerCase()); } private static byte[] hmac(String algorithm, byte[] data, byte[] key) { try { Mac mac = Mac.getInstance(algorithm); mac.init(new SecretKeySpec(key, algorithm)); return mac.doFinal(data); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new RuntimeException(e); } } // Encode blob name if client requests it. This allows for characters // which XML 1.0 cannot represent. 
private static String encodeBlob(String encodingType, String blobName) { if (encodingType != null && encodingType.equals("url")) { return urlEscaper.escape(blobName); } else { return blobName; } } private static final class UncloseableInputStream extends FilterInputStream { UncloseableInputStream(InputStream is) { super(is); } @Override public void close() throws IOException { } } public final BlobStoreLocator getBlobStoreLocator() { return blobStoreLocator; } public final void setBlobStoreLocator(BlobStoreLocator locator) { this.blobStoreLocator = locator; } private static boolean validateIpAddress(String string) { List parts = Splitter.on('.').splitToList(string); if (parts.size() != 4) { return false; } for (String part : parts) { try { int num = Integer.parseInt(part); if (num < 0 || num > 255) { return false; } } catch (NumberFormatException nfe) { return false; } } return true; } private static boolean constantTimeEquals(String x, String y) { return MessageDigest.isEqual(x.getBytes(StandardCharsets.UTF_8), y.getBytes(StandardCharsets.UTF_8)); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3ProxyHandlerJetty.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import java.io.IOException; import java.io.InputStream; import java.util.Map; import java.util.concurrent.TimeoutException; import com.google.common.net.HttpHeaders; import jakarta.servlet.http.HttpServlet; import jakarta.servlet.http.HttpServletRequest; import jakarta.servlet.http.HttpServletResponse; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.http.HttpResponse; import org.jclouds.http.HttpResponseException; import org.jclouds.rest.AuthorizationException; import org.jclouds.util.Throwables2; import org.jspecify.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** Jetty-specific handler for S3 requests. */ final class S3ProxyHandlerJetty extends HttpServlet { private static final Logger logger = LoggerFactory.getLogger( S3ProxyHandlerJetty.class); private final S3ProxyHandler handler; private final S3ProxyMetrics metrics; S3ProxyHandlerJetty(final BlobStore blobStore, AuthenticationType authenticationType, final String identity, final String credential, @Nullable String virtualHost, long maxSinglePartObjectSize, long v4MaxNonChunkedRequestSize, int v4MaxChunkSize, boolean ignoreUnknownHeaders, CrossOriginResourceSharing corsRules, String servicePath, int maximumTimeSkew, @Nullable S3ProxyMetrics metrics) { handler = new S3ProxyHandler(blobStore, authenticationType, identity, credential, virtualHost, maxSinglePartObjectSize, v4MaxNonChunkedRequestSize, v4MaxChunkSize, ignoreUnknownHeaders, corsRules, servicePath, maximumTimeSkew); this.metrics = metrics; } private void sendS3Exception(HttpServletRequest request, HttpServletResponse response, S3Exception se) throws IOException { handler.sendSimpleErrorResponse(request, response, se.getError(), se.getMessage(), se.getElements()); } @Override protected void service(HttpServletRequest request, HttpServletResponse response) throws 
IOException { long startNanos = System.nanoTime(); var ctx = new S3ProxyHandler.RequestContext(); try (InputStream is = request.getInputStream()) { handler.doHandle(request, request, response, is, ctx); } catch (ContainerNotFoundException cnfe) { S3ErrorCode code = S3ErrorCode.NO_SUCH_BUCKET; handler.sendSimpleErrorResponse(request, response, code, code.getMessage(), Map.of()); return; } catch (HttpResponseException hre) { HttpResponse hr = hre.getResponse(); if (hr == null) { logger.debug("HttpResponseException without HttpResponse:", hre); response.sendError( HttpServletResponse.SC_INTERNAL_SERVER_ERROR, hre.getMessage()); return; } String eTag = hr.getFirstHeaderOrNull(HttpHeaders.ETAG); if (eTag != null) { response.setHeader(HttpHeaders.ETAG, eTag); } int status = hr.getStatusCode(); switch (status) { case 412: sendS3Exception(request, response, new S3Exception(S3ErrorCode.PRECONDITION_FAILED)); break; case 416: sendS3Exception(request, response, new S3Exception(S3ErrorCode.INVALID_RANGE)); break; case HttpServletResponse.SC_BAD_REQUEST: case 422: // Swift returns 422 Unprocessable Entity sendS3Exception(request, response, new S3Exception(S3ErrorCode.BAD_DIGEST)); break; default: logger.debug("HttpResponseException:", hre); response.setStatus(status); break; } return; } catch (IllegalArgumentException iae) { logger.debug("IllegalArgumentException:", iae); response.sendError(HttpServletResponse.SC_BAD_REQUEST, iae.getMessage()); return; } catch (IllegalStateException ise) { // google-cloud-storage uses a different exception if (ise.getMessage().startsWith("PreconditionFailed")) { sendS3Exception(request, response, new S3Exception(S3ErrorCode.PRECONDITION_FAILED)); return; } logger.debug("IllegalStateException:", ise); response.sendError(HttpServletResponse.SC_BAD_REQUEST, ise.getMessage()); return; } catch (IOException ioe) { var cause = Throwables2.getFirstThrowableOfType(ioe, S3Exception.class); if (cause != null) { sendS3Exception(request, response, cause); 
return; } throw ioe; } catch (KeyNotFoundException knfe) { S3ErrorCode code = S3ErrorCode.NO_SUCH_KEY; handler.sendSimpleErrorResponse(request, response, code, code.getMessage(), Map.of()); return; } catch (S3Exception se) { sendS3Exception(request, response, se); return; } catch (UnsupportedOperationException uoe) { logger.debug("UnsupportedOperationException:", uoe); response.sendError(HttpServletResponse.SC_NOT_IMPLEMENTED, uoe.getMessage()); return; } catch (Throwable throwable) { if (Throwables2.getFirstThrowableOfType(throwable, AuthorizationException.class) != null) { S3ErrorCode code = S3ErrorCode.ACCESS_DENIED; handler.sendSimpleErrorResponse(request, response, code, code.getMessage(), Map.of()); return; } else if (Throwables2.getFirstThrowableOfType(throwable, TimeoutException.class) != null) { S3ErrorCode code = S3ErrorCode.REQUEST_TIMEOUT; handler.sendSimpleErrorResponse(request, response, code, code.getMessage(), Map.of()); return; } else { logger.debug("Unknown exception:", throwable); throw throwable; } } finally { recordMetrics(request, response, ctx, startNanos); } } private void recordMetrics(HttpServletRequest request, HttpServletResponse response, S3ProxyHandler.RequestContext ctx, long startNanos) { if (metrics == null || ctx.getOperation() == null) { return; } long durationNanos = System.nanoTime() - startNanos; metrics.recordRequest( request.getMethod(), request.getScheme(), response.getStatus(), ctx.getOperation(), ctx.getBucket(), durationNanos); } public S3ProxyHandler getHandler() { return this.handler; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/S3ProxyMetrics.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import java.util.List;

import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.common.AttributesBuilder;
import io.opentelemetry.api.metrics.DoubleHistogram;
import io.opentelemetry.api.metrics.Meter;
import io.opentelemetry.exporter.prometheus.PrometheusHttpServer;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.semconv.HttpAttributes;
import io.opentelemetry.semconv.UrlAttributes;

import org.jspecify.annotations.Nullable;

/**
 * Records per-request duration metrics via OpenTelemetry and exposes them
 * through an embedded Prometheus HTTP endpoint.
 */
public final class S3ProxyMetrics {
    /** Default metrics port (0 = ephemeral). */
    public static final int DEFAULT_METRICS_PORT = 0;
    public static final String DEFAULT_METRICS_HOST = "0.0.0.0";

    // custom attributes attached to each histogram sample
    private static final AttributeKey S3_OPERATION =
            AttributeKey.stringKey("s3.operation");
    private static final AttributeKey S3_BUCKET =
            AttributeKey.stringKey("s3.bucket");

    // OTel semantic conventions specify these bucket boundaries for
    // http.server.request.duration histogram.
    // See: https://opentelemetry.io/docs/specs/semconv/http/http-metrics/
    private static final List DURATION_BUCKETS = List.of(
            0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0,
            2.5, 5.0, 7.5, 10.0);

    private final SdkMeterProvider meterProvider;
    private final DoubleHistogram requestDuration;
    private final PrometheusHttpServer prometheusServer;

    public S3ProxyMetrics() {
        this(DEFAULT_METRICS_HOST, DEFAULT_METRICS_PORT);
    }

    /**
     * Start a Prometheus scrape server on host:port and register the
     * http.server.request.duration histogram with it.
     */
    public S3ProxyMetrics(String host, int port) {
        prometheusServer = PrometheusHttpServer.builder()
                .setHost(host)
                .setPort(port)
                .build();
        meterProvider = SdkMeterProvider.builder()
                .registerMetricReader(prometheusServer)
                .build();
        Meter meter = meterProvider.get("org.gaul.s3proxy");
        requestDuration =
                meter.histogramBuilder("http.server.request.duration")
                .setDescription("Duration of HTTP server requests")
                .setUnit("s")
                .setExplicitBucketBoundariesAdvice(DURATION_BUCKETS)
                .build();
    }

    /**
     * Record one request's duration with method, scheme, status,
     * operation, and (when present) bucket attributes.  No-op when
     * operation is null.
     */
    public void recordRequest(
            String method,
            String scheme,
            int statusCode,
            @Nullable S3Operation operation,
            @Nullable String bucket,
            long durationNanos) {
        if (operation == null) {
            return;
        }
        double durationSeconds = durationNanos / 1_000_000_000.0;
        AttributesBuilder builder = Attributes.builder()
                .put(HttpAttributes.HTTP_REQUEST_METHOD, method)
                .put(UrlAttributes.URL_SCHEME, scheme)
                .put(HttpAttributes.HTTP_RESPONSE_STATUS_CODE, statusCode)
                .put(S3_OPERATION, operation.getValue());
        if (bucket != null && !bucket.isEmpty()) {
            builder.put(S3_BUCKET, bucket);
        }
        requestDuration.record(durationSeconds, builder.build());
    }

    // NOTE(review): toString() on PrometheusHttpServer likely returns an
    // object description, not the Prometheus exposition text -- confirm
    // this method's intent against its callers.
    public String scrape() {
        return prometheusServer.toString();
    }

    /** Shut down the scrape server and the meter provider. */
    public void close() {
        if (prometheusServer != null) {
            prometheusServer.close();
        }
        if (meterProvider != null) {
            meterProvider.close();
        }
    }
}


================================================
FILE: src/main/java/org/gaul/s3proxy/ShardedBlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the
"License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static com.google.common.base.Preconditions.checkArgument; import java.io.File; import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Properties; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import com.google.common.hash.HashCode; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.MutableStorageMetadata; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import 
org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.domain.Location; import org.jclouds.io.Payload; /** * This class implements the ability to split objects destined for specified * buckets across multiple backend buckets. The sharding is only applied to * the configured buckets. Each sharded bucket must specify the number of * shards in the form: * s3proxy.sharded-blobstore.<bucket name>.shards=<integer>. * The number of shards is limited to 1000. An optional prefix can be * specified to use for shard names, like so: * s3proxy.sharded-blobstore.<bucket name>.prefix=<string>. * The shards are named as follows: <prefix>-<integer>, * corresponding to the shards from 0 to the specified number. If a * <prefix> is not specified, the name of the bucket is used instead. * * Requests for all other buckets are passed through unchanged. Shards must * be pre-created either out of band or by issuing the CreateBucket API with * the sharded bucket name. The sharded bucket itself will not be * instantiated on the backend. 
*/ final class ShardedBlobStore extends ForwardingBlobStore { public static final Pattern PROPERTIES_PREFIX_RE = Pattern.compile( S3ProxyConstants.PROPERTY_SHARDED_BLOBSTORE + "\\.(?.*)\\.prefix$"); private static final Pattern PROPERTIES_SHARDS_RE = Pattern.compile( S3ProxyConstants.PROPERTY_SHARDED_BLOBSTORE + "\\.(?.*)\\.shards$"); private static final Pattern SHARD_RE = Pattern.compile( "(?.*)-(?[0-9]+)$"); private static final HashFunction SHARD_HASH = Hashing.murmur3_128(); private static final int MAX_SHARD_THREADS = 10; private static final String SUPERBLOCK_VERSION = "1.0"; private static final String SUPERBLOCK_BLOB_NAME = ".s3proxy-sharded-superblock"; private static final int MAX_SHARDS = 1000; private final Map buckets; private final Map prefixMap; private static final class ShardedBucket { private final String prefix; private final int shards; private ShardedBucket(String name, int shards) { this.prefix = Objects.requireNonNull(name); this.shards = shards; } } private ShardedBlobStore(BlobStore blobStore, Map shards, Map prefixes) { super(blobStore); Set missingShards = Sets.difference( prefixes.keySet(), shards.keySet()); if (!missingShards.isEmpty()) { String allMissingShards = missingShards.stream().collect( Collectors.joining(", ")); throw new IllegalArgumentException( "Number of shards unset for sharded buckets: %s" .formatted(allMissingShards)); } var bucketsBuilder = new ImmutableMap.Builder(); for (String bucket : shards.keySet()) { String prefix = prefixes.get(bucket); if (prefix == null) { prefix = bucket; } bucketsBuilder.put(bucket, new ShardedBucket(prefix, shards.get(bucket))); } this.buckets = bucketsBuilder.build(); this.prefixMap = buckets.keySet().stream().collect(Collectors.toMap( virtualBucket -> buckets.get(virtualBucket).prefix, virtualBucket -> virtualBucket)); } public static Map parseBucketShards( Properties properties) { var shardsMap = new ImmutableMap.Builder(); for (String key : properties.stringPropertyNames()) { Matcher 
matcher = PROPERTIES_SHARDS_RE.matcher(key); if (!matcher.matches()) { continue; } String bucket = matcher.group("bucket"); int shards = Integer.parseInt(properties.getProperty(key)); checkArgument(shards > 0 && shards < MAX_SHARDS, "number of shards must be between 1 and 1000 for %s", bucket); shardsMap.put(bucket, shards); } return shardsMap.build(); } public static Map parsePrefixes(Properties properties) { var prefixesMap = new ImmutableMap.Builder(); for (String key : properties.stringPropertyNames()) { Matcher matcher = PROPERTIES_PREFIX_RE.matcher(key); if (!matcher.matches()) { continue; } prefixesMap.put(matcher.group("bucket"), properties.getProperty(key)); } return prefixesMap.build(); } static ShardedBlobStore newShardedBlobStore( BlobStore blobStore, Map shards, Map prefixes) { return new ShardedBlobStore(blobStore, shards, prefixes); } private Map createSuperblockMeta(ShardedBucket bucket) { return Map.of( "s3proxy-sharded-superblock-version", SUPERBLOCK_VERSION, "s3proxy-sharded-superblock-prefix", bucket.prefix, "s3proxy-sharded-superblock-shards", Integer.toString(bucket.shards)); } private static String getShardContainer(ShardedBucket bucket, int shard) { return "%s-%d".formatted(bucket.prefix, shard); } private String getShard(String containerName, String blob) { ShardedBucket bucket = buckets.get(containerName); if (bucket == null) { return containerName; } HashCode hash = SHARD_HASH.hashString(blob, StandardCharsets.UTF_8); return ShardedBlobStore.getShardContainer( bucket, Hashing.consistentHash(hash, bucket.shards)); } private void checkSuperBlock(Blob blob, Map expectedMeta, String container) { Map currentSuperblockMeta = blob.getMetadata().getUserMetadata(); for (var entry : expectedMeta.entrySet()) { String current = currentSuperblockMeta.get(entry.getKey()); String expected = entry.getValue(); if (!expected.equalsIgnoreCase(current)) { throw new RuntimeException( "Superblock block for %s does not match: %s, %s".formatted( container, 
expected, current)); } } } private boolean createShards(ShardedBucket bucket, Location location, CreateContainerOptions options) { var futuresBuilder = new ImmutableList.Builder>(); ExecutorService executor = Executors.newFixedThreadPool( Math.min(bucket.shards, MAX_SHARD_THREADS)); BlobStore blobStore = this.delegate(); for (int n = 0; n < bucket.shards; ++n) { String shardContainer = ShardedBlobStore.getShardContainer( bucket, n); futuresBuilder.add(executor.submit( () -> blobStore.createContainerInLocation( location, shardContainer, options))); } var futures = futuresBuilder.build(); executor.shutdown(); boolean ret = true; for (Future future : futures) { try { ret &= future.get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Failed to create some shards", e); } } return ret; } @Override public boolean createContainerInLocation(Location location, String container) { return createContainerInLocation( location, container, CreateContainerOptions.NONE); } @SuppressWarnings("EmptyCatch") @Override public boolean createContainerInLocation( Location location, String container, CreateContainerOptions createContainerOptions) { ShardedBucket bucket = this.buckets.get(container); if (bucket == null) { return this.delegate().createContainerInLocation( location, container, createContainerOptions); } Map superblockMeta = this.createSuperblockMeta(bucket); Blob superblockBlob = null; try { superblockBlob = this.delegate().getBlob( ShardedBlobStore.getShardContainer(bucket, 0), SUPERBLOCK_BLOB_NAME); } catch (ContainerNotFoundException ignored) { } if (superblockBlob != null) { checkSuperBlock(superblockBlob, superblockMeta, container); } boolean ret = createShards(bucket, location, createContainerOptions); // Upload the superblock if (superblockBlob == null) { superblockBlob = this.delegate().blobBuilder(SUPERBLOCK_BLOB_NAME) .payload("") .userMetadata(superblockMeta) .build(); this.delegate().putBlob(ShardedBlobStore.getShardContainer( 
bucket, 0), superblockBlob); } return ret; } @Override public PageSet list() { PageSet upstream = this.delegate().list(); var results = new ImmutableList.Builder(); Set virtualBuckets = new HashSet<>(); for (StorageMetadata sm : upstream) { Matcher matcher = SHARD_RE.matcher(sm.getName()); if (!matcher.matches()) { results.add(sm); continue; } String prefix = matcher.group("prefix"); String virtualBucketName = this.prefixMap.get(prefix); if (virtualBucketName == null) { results.add(sm); continue; } if (!virtualBuckets.contains(prefix)) { virtualBuckets.add(prefix); MutableStorageMetadata virtualBucket = new MutableStorageMetadataImpl(); virtualBucket.setCreationDate(sm.getCreationDate()); virtualBucket.setETag(sm.getETag()); virtualBucket.setId(sm.getProviderId()); virtualBucket.setLastModified(sm.getLastModified()); virtualBucket.setLocation(sm.getLocation()); virtualBucket.setName(virtualBucketName); virtualBucket.setSize(sm.getSize()); virtualBucket.setTier(sm.getTier()); virtualBucket.setType(sm.getType()); virtualBucket.setUri(sm.getUri()); // copy the user metadata from the first shard as part // of the response virtualBucket.setUserMetadata(sm.getUserMetadata()); results.add(virtualBucket); } } return new PageSetImpl<>(results.build(), upstream.getNextMarker()); } @Override public PageSet list(String container) { if (!this.buckets.containsKey(container)) { return this.delegate().list(container); } // TODO: implement listing a sharded container throw new UnsupportedOperationException("sharded bucket"); } @Override public PageSet list( String container, ListContainerOptions options) { if (!this.buckets.containsKey(container)) { return this.delegate().list(container, options); } // TODO: implement listing a sharded container throw new UnsupportedOperationException("sharded bucket"); } @Override public boolean containerExists(String container) { if (!this.buckets.containsKey(container)) { return this.delegate().containerExists(container); } return true; } 
@Override public ContainerAccess getContainerAccess(String container) { if (!this.buckets.containsKey(container)) { return this.delegate().getContainerAccess(container); } throw new UnsupportedOperationException("sharded bucket"); } @Override public void setContainerAccess(String container, ContainerAccess containerAccess) { if (!this.buckets.containsKey(container)) { this.delegate().setContainerAccess(container, containerAccess); } throw new UnsupportedOperationException("sharded bucket"); } @Override public void clearContainer(String container) { clearContainer(container, new ListContainerOptions()); } @Override public void clearContainer(String container, ListContainerOptions options) { throw new UnsupportedOperationException("sharded bucket"); } @Override public void deleteContainer(String container) { throw new UnsupportedOperationException("sharded bucket"); } private boolean deleteShards(ShardedBucket bucket) { var futuresBuilder = new ImmutableList.Builder>(); ExecutorService executor = Executors.newFixedThreadPool( Math.min(bucket.shards, MAX_SHARD_THREADS)); for (int n = 0; n < bucket.shards; ++n) { String shard = ShardedBlobStore.getShardContainer(bucket, n); futuresBuilder.add(executor.submit( () -> this.delegate().deleteContainerIfEmpty(shard))); } executor.shutdown(); var futures = futuresBuilder.build(); boolean ret = true; for (Future future : futures) { try { ret &= future.get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException("Failed to delete shards", e); } } return ret; } @Override public boolean deleteContainerIfEmpty(String container) { ShardedBucket bucket = this.buckets.get(container); if (bucket == null) { return this.delegate().deleteContainerIfEmpty(container); } String zeroShardContainer = ShardedBlobStore.getShardContainer( bucket, 0); PageSet listing = this.delegate().list( zeroShardContainer); if (listing.size() > 1) { return false; } StorageMetadata sm = listing.iterator().next(); if 
(!sm.getName().equals(SUPERBLOCK_BLOB_NAME)) { return false; } // Remove the superblock this.delegate().removeBlob(zeroShardContainer, SUPERBLOCK_BLOB_NAME); return this.deleteShards(bucket); } @Override public boolean directoryExists(String container, String directory) { throw new UnsupportedOperationException("sharded bucket"); } @Override public void createDirectory(String container, String directory) { throw new UnsupportedOperationException("sharded bucket"); } @Override public void deleteDirectory(String container, String directory) { throw new UnsupportedOperationException("sharded bucket"); } @Override public boolean blobExists(String container, String name) { return this.delegate().blobExists(this.getShard(container, name), name); } @Override public String putBlob(String containerName, Blob blob) { return this.delegate().putBlob(this.getShard(containerName, blob.getMetadata().getName()), blob); } @Override public String putBlob(final String containerName, Blob blob, final PutOptions putOptions) { return this.delegate().putBlob( this.getShard(containerName, blob.getMetadata().getName()), blob, putOptions); } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { String srcShard = this.getShard(fromContainer, fromName); String dstShard = this.getShard(toContainer, toName); return this.delegate().copyBlob(srcShard, fromName, dstShard, toName, options); } @Override public BlobMetadata blobMetadata(String container, String name) { return this.delegate().blobMetadata(this.getShard(container, name), name); } @Override public Blob getBlob(String containerName, String blobName) { return this.delegate().getBlob(this.getShard(containerName, blobName), blobName); } @Override public Blob getBlob(String containerName, String blobName, GetOptions getOptions) { return this.delegate() .getBlob(this.getShard(containerName, blobName), blobName, getOptions); } @Override public void removeBlob(String 
container, String name) { this.delegate().removeBlob(this.getShard(container, name), name); } @Override public void removeBlobs(String container, Iterable iterable) { if (!this.buckets.containsKey(container)) { this.delegate().removeBlobs(container, iterable); } Map> shardMap = new HashMap<>(); for (String blob : iterable) { List shardBlobs = shardMap.computeIfAbsent(this.getShard(container, blob), k -> new ArrayList<>()); shardBlobs.add(blob); } for (var entry : shardMap.entrySet()) { this.delegate().removeBlobs(entry.getKey(), entry.getValue()); } } @Override public BlobAccess getBlobAccess(String container, String name) { return this.delegate() .getBlobAccess(this.getShard(container, name), name); } @Override public void setBlobAccess(String container, String name, BlobAccess access) { this.delegate() .setBlobAccess(this.getShard(container, name), name, access); } @Override public long countBlobs(String container) { if (!this.buckets.containsKey(container)) { return this.delegate().countBlobs(container); } throw new UnsupportedOperationException("sharded bucket"); } @Override public long countBlobs(String container, ListContainerOptions options) { if (!this.buckets.containsKey(container)) { return this.delegate().countBlobs(container, options); } throw new UnsupportedOperationException("sharded bucket"); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { if (!this.buckets.containsKey(container)) { return this.delegate() .initiateMultipartUpload(container, blobMetadata, options); } throw new UnsupportedOperationException("sharded bucket"); } @Override public void abortMultipartUpload(MultipartUpload mpu) { if (!this.buckets.containsKey(mpu.containerName())) { this.delegate().abortMultipartUpload(mpu); } throw new UnsupportedOperationException("sharded bucket"); } @Override public String completeMultipartUpload(MultipartUpload mpu, List parts) { if 
(!this.buckets.containsKey(mpu.containerName())) { return this.delegate().completeMultipartUpload(mpu, parts); } throw new UnsupportedOperationException("sharded bucket"); } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { if (!this.buckets.containsKey(mpu.containerName())) { return this.delegate() .uploadMultipartPart(mpu, partNumber, payload); } throw new UnsupportedOperationException("sharded bucket"); } @Override public List listMultipartUpload(MultipartUpload mpu) { if (!this.buckets.containsKey(mpu.containerName())) { return this.delegate().listMultipartUpload(mpu); } throw new UnsupportedOperationException("sharded bucket"); } @Override public List listMultipartUploads(String container) { if (!this.buckets.containsKey(container)) { return this.delegate().listMultipartUploads(container); } throw new UnsupportedOperationException("sharded bucket"); } @Override public void downloadBlob(String container, String name, File destination) { this.delegate().downloadBlob(this.getShard(container, name), name, destination); } @Override public void downloadBlob(String container, String name, File destination, ExecutorService executor) { this.delegate() .downloadBlob(this.getShard(container, name), name, destination, executor); } @Override public InputStream streamBlob(String container, String name) { return this.delegate().streamBlob(this.getShard(container, name), name); } @Override public InputStream streamBlob(String container, String name, ExecutorService executor) { return this.delegate() .streamBlob(this.getShard(container, name), name, executor); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/StorageClassBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.BlobMetadataImpl; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; import org.jclouds.s3.domain.ObjectMetadata.StorageClass; /** * This class implements a middleware to set the storage tier when creating * objects. The class is configured via: * * s3proxy.storage-class-blobstore = VALUE * * VALUE can be anything from org.jclouds.s3.domain.StorageClass, e.g., * STANDARD, STANDARD_IA, GLACIER_IR, DEEP_ARCHIVE. Some values do not * translate exactly due to jclouds limitations, e.g., REDUCED_REDUNDANCY maps * to STANDARD. This mapping is best effort especially for non-S3 object * stores. 
*/ public final class StorageClassBlobStore extends ForwardingBlobStore { private final Tier tier; private StorageClassBlobStore(BlobStore delegate, String storageClassString) { super(delegate); StorageClass storageClass; try { storageClass = StorageClass.valueOf( storageClassString.toUpperCase()); } catch (IllegalArgumentException iae) { storageClass = StorageClass.STANDARD; } this.tier = storageClass.toTier(); } static StorageClassBlobStore newStorageClassBlobStore(BlobStore blobStore, String storageClass) { return new StorageClassBlobStore(blobStore, storageClass); } public Tier getTier() { return tier; } @Override public String putBlob(String containerName, Blob blob) { var newBlob = replaceTier(containerName, blob); return delegate().putBlob(containerName, newBlob); } @Override public String putBlob(String containerName, Blob blob, PutOptions options) { var newBlob = replaceTier(containerName, blob); return delegate().putBlob(containerName, newBlob, options); } @Override public MultipartUpload initiateMultipartUpload( String container, BlobMetadata blobMetadata, PutOptions options) { var newBlobMetadata = replaceTier(blobMetadata); return delegate().initiateMultipartUpload(container, newBlobMetadata, options); } private Blob replaceTier(String containerName, Blob blob) { var blobMeta = blob.getMetadata(); var contentMeta = blob.getMetadata().getContentMetadata(); return blobBuilder(containerName) .name(blobMeta.getName()) .type(blobMeta.getType()) .tier(tier) .userMetadata(blobMeta.getUserMetadata()) .payload(blob.getPayload()) .cacheControl(contentMeta.getCacheControl()) .contentDisposition(contentMeta.getContentDisposition()) .contentEncoding(contentMeta.getContentEncoding()) .contentLanguage(contentMeta.getContentLanguage()) .contentType(contentMeta.getContentType()) .build(); } private BlobMetadata replaceTier(BlobMetadata meta) { return new BlobMetadataImpl(meta.getProviderId(), meta.getName(), meta.getLocation(), meta.getUri(), meta.getETag(), 
meta.getCreationDate(), meta.getLastModified(), meta.getUserMetadata(), meta.getPublicUri(), meta.getContainer(), meta.getContentMetadata(), meta.getSize(), tier); } // TODO: copyBlob } ================================================ FILE: src/main/java/org/gaul/s3proxy/ThrottledInputStream.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; final class ThrottledInputStream extends FilterInputStream { private final Long speed; ThrottledInputStream(InputStream is, Long speed) { super(is); this.speed = speed; } @Override public int read() throws IOException { int b = super.read(); if (b != -1) { simulateLatency(1); } return b; } @Override public int read(byte[] b, int off, int len) throws IOException { int n = super.read(b, off, len); if (n != -1) { simulateLatency(n); } return n; } private void simulateLatency(int size) { if (size == 0 || speed == null) { return; } try { Thread.sleep(size / speed, (int) (size % speed) * 1_000_000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } ================================================ FILE: src/main/java/org/gaul/s3proxy/UserMetadataReplacerBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); 
* you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static com.google.common.base.Preconditions.checkArgument; import com.google.common.collect.ImmutableMap; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.MutableBlobMetadata; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.ForwardingBlobStore; /** * BlobStore which maps user metadata keys and values using character * replacement. This is useful for some object stores like Azure which do not * allow characters like hyphens. This munges keys and values during putBlob * and unmunges them on getBlob. 
*/ final class UserMetadataReplacerBlobStore extends ForwardingBlobStore { private final String fromChars; private final String toChars; private UserMetadataReplacerBlobStore( BlobStore blobStore, String fromChars, String toChars) { super(blobStore); checkArgument(fromChars.length() == toChars.length()); this.fromChars = fromChars; this.toChars = toChars; } public static BlobStore newUserMetadataReplacerBlobStore( BlobStore blobStore, String fromChars, String toChars) { return new UserMetadataReplacerBlobStore(blobStore, fromChars, toChars); } @Override public String putBlob(String containerName, Blob blob) { return putBlob(containerName, blob, new PutOptions()); } @Override public String putBlob(String containerName, Blob blob, PutOptions putOptions) { var metadata = ImmutableMap.builder(); for (var entry : blob.getMetadata().getUserMetadata().entrySet()) { metadata.put(replaceChars(entry.getKey(), fromChars, toChars), replaceChars(entry.getValue(), fromChars, toChars)); } // TODO: should this modify the parameter? 
blob.getMetadata().setUserMetadata(metadata.build()); return super.putBlob(containerName, blob, putOptions); } @Override public BlobMetadata blobMetadata(String container, String name) { var blobMetadata = super.blobMetadata(container, name); if (blobMetadata == null) { return null; } var metadata = ImmutableMap.builder(); // TODO: duplication for (var entry : blobMetadata.getUserMetadata().entrySet()) { metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ toChars, /*toChars=*/ fromChars), replaceChars(entry.getValue(), /*fromChars=*/ toChars, /*toChars=*/ fromChars)); } ((MutableBlobMetadata) blobMetadata).setUserMetadata(metadata.build()); return blobMetadata; } @Override public Blob getBlob(String containerName, String name) { return getBlob(containerName, name, new GetOptions()); } @Override public Blob getBlob(String containerName, String name, GetOptions getOptions) { var blob = super.getBlob(containerName, name, getOptions); if (blob == null) { return null; } var metadata = ImmutableMap.builder(); for (var entry : blob.getMetadata().getUserMetadata().entrySet()) { metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ toChars, /*toChars=*/ fromChars), replaceChars(entry.getValue(), /*fromChars=*/ toChars, /*toChars=*/ fromChars)); } blob.getMetadata().setUserMetadata(metadata.build()); return blob; } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions overrides) { var metadata = ImmutableMap.builder(); for (var entry : blobMetadata.getUserMetadata().entrySet()) { metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ fromChars, /*toChars=*/ toChars), replaceChars(entry.getValue(), /*fromChars=*/ fromChars, /*toChars=*/ toChars)); } ((MutableBlobMetadata) blobMetadata).setUserMetadata(metadata.build()); return super.initiateMultipartUpload(container, blobMetadata, overrides); } private static String replaceChars(String value, String fromChars, String toChars) { var builder = new 
StringBuilder(/*capacity=*/ value.length()); for (int i = 0; i < value.length(); ++i) { for (int j = 0; j < fromChars.length(); ++j) { builder.append(value.charAt(i) == fromChars.charAt(j) ? toChars.charAt(j) : value.charAt(i)); } } return builder.toString(); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkApiMetadata.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.awssdk; import java.net.URI; import java.util.Properties; import java.util.Set; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.reflect.Reflection2; import org.jclouds.rest.internal.BaseHttpApiMetadata; @SuppressWarnings("rawtypes") public final class AwsS3SdkApiMetadata extends BaseHttpApiMetadata { public static final String REGION = "aws-s3-sdk.region"; /** * Property for conditional writes mode. * Values: "native" (default) - use If-Match/If-None-Match headers directly * "emulated" - validate via HEAD request before PUT */ public static final String CONDITIONAL_WRITES = "aws-s3-sdk.conditional-writes"; /** * Property for enabling chunked encoding (default: true). * When false, sends "x-amz-content-sha256: UNSIGNED-PAYLOAD" instead of * streaming signatures. Disable for S3-compatible backends that don't * support aws-chunked encoding (e.g., some Ceph RGW versions). 
public static final String CHUNKED_ENCODING_ENABLED =
            "aws-s3-sdk.chunked-encoding";

    /**
     * Property for stripping quotes from ETag values in conditional headers.
     * Enable for S3-compatible backends with Ceph Reef bug that requires
     * unquoted ETags in If-Match/If-None-Match headers.
     * See: https://tracker.ceph.com/issues/68712
     * TODO: Can be removed after 2027-01-01 - by then every provider should
     * have migrated to a newer Ceph version (including Hetzner).
     */
    public static final String STRIP_ETAG_QUOTES =
            "aws-s3-sdk.strip-etag-quotes";

    public AwsS3SdkApiMetadata() {
        this(builder());
    }

    protected AwsS3SdkApiMetadata(Builder builder) {
        super(builder);
    }

    private static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromApiMetadata(this);
    }

    /**
     * Returns the default values for every property this backend reads:
     * us-east-1 region, native conditional writes, chunked encoding on, and
     * ETag quote stripping off.
     */
    public static Properties defaultProperties() {
        Properties properties = BaseHttpApiMetadata.defaultProperties();
        properties.setProperty(REGION, "us-east-1");
        properties.setProperty(CONDITIONAL_WRITES, "native");
        properties.setProperty(CHUNKED_ENCODING_ENABLED, "true");
        properties.setProperty(STRIP_ETAG_QUOTES, "false");
        return properties;
    }

    // Fake API client - required by jclouds but not actually used
    private interface AwsS3SdkClient {
    }

    public static final class Builder
            extends BaseHttpApiMetadata.Builder {
        protected Builder() {
            super(AwsS3SdkClient.class);
            id("aws-s3-sdk")
                .name("AWS S3 SDK Backend")
                .identityName("Access Key ID")
                .credentialName("Secret Access Key")
                .version("2006-03-01")
                .defaultEndpoint("https://s3.amazonaws.com")
                .documentation(URI.create(
                        "https://docs.aws.amazon.com/AmazonS3/latest/" +
                        "API/Welcome.html"))
                .defaultProperties(AwsS3SdkApiMetadata.defaultProperties())
                .view(Reflection2.typeToken(BlobStoreContext.class))
                .defaultModules(Set.of(AwsS3SdkBlobStoreContextModule.class));
        }

        @Override
        public AwsS3SdkApiMetadata build() {
            return new AwsS3SdkApiMetadata(this);
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}
================================================ FILE: src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkBlobStore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.awssdk; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.time.Instant; import java.util.Base64; import java.util.Comparator; import java.util.Date; import java.util.List; import java.util.Map; import java.util.Set; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Streams; import com.google.common.net.HttpHeaders; import jakarta.inject.Inject; import jakarta.inject.Named; import jakarta.inject.Singleton; import org.gaul.s3proxy.PutOptions2; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.StorageType; import 
org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.BlobBuilderImpl; import org.jclouds.blobstore.domain.internal.BlobMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.domain.internal.StorageMetadataImpl; import org.jclouds.blobstore.internal.BaseBlobStore; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.BlobUtils; import org.jclouds.collect.Memoized; import org.jclouds.domain.Credentials; import org.jclouds.domain.Location; import org.jclouds.http.HttpCommand; import org.jclouds.http.HttpRequest; import org.jclouds.http.HttpResponse; import org.jclouds.http.HttpResponseException; import org.jclouds.io.ContentMetadataBuilder; import org.jclouds.io.Payload; import org.jclouds.io.PayloadSlicer; import org.jclouds.providers.ProviderMetadata; import org.jclouds.rest.AuthorizationException; import org.jspecify.annotations.Nullable; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; import software.amazon.awssdk.core.checksums.ResponseChecksumValidation; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3ClientBuilder; import software.amazon.awssdk.services.s3.S3Configuration; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.Bucket; import software.amazon.awssdk.services.s3.model.BucketCannedACL; import 
software.amazon.awssdk.services.s3.model.CommonPrefix; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CopyObjectRequest; import software.amazon.awssdk.services.s3.model.CreateBucketConfiguration; import software.amazon.awssdk.services.s3.model.CreateBucketRequest; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.DeleteBucketRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; import software.amazon.awssdk.services.s3.model.GetBucketAclRequest; import software.amazon.awssdk.services.s3.model.GetObjectAclRequest; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.Grant; import software.amazon.awssdk.services.s3.model.HeadBucketRequest; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ListPartsRequest; import software.amazon.awssdk.services.s3.model.NoSuchBucketException; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.ObjectCannedACL; import software.amazon.awssdk.services.s3.model.Part; import software.amazon.awssdk.services.s3.model.Permission; import software.amazon.awssdk.services.s3.model.PutBucketAclRequest; import software.amazon.awssdk.services.s3.model.PutObjectAclRequest; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Exception; import 
software.amazon.awssdk.services.s3.model.S3Object; import software.amazon.awssdk.services.s3.model.StorageClass; import software.amazon.awssdk.services.s3.model.Type; import software.amazon.awssdk.services.s3.model.UploadPartRequest; @Singleton public final class AwsS3SdkBlobStore extends BaseBlobStore { private final S3Client s3Client; private final String endpoint; private final boolean useNativeConditionalWrites; private final boolean stripETagQuotes; private final Region awsRegion; @Inject AwsS3SdkBlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds, ProviderMetadata provider, @Named(AwsS3SdkApiMetadata.REGION) String region, @Named(AwsS3SdkApiMetadata.CONDITIONAL_WRITES) String conditionalWrites, @Named(AwsS3SdkApiMetadata.CHUNKED_ENCODING_ENABLED) String chunkedEncodingEnabled, @Named(AwsS3SdkApiMetadata.STRIP_ETAG_QUOTES) String stripETagQuotes) { super(context, blobUtils, defaultLocation, locations, slicer); this.endpoint = provider.getEndpoint(); this.awsRegion = Region.of(region); this.useNativeConditionalWrites = !"emulated".equalsIgnoreCase( conditionalWrites); this.stripETagQuotes = Boolean.parseBoolean(stripETagQuotes); var cred = creds.get(); S3ClientBuilder builder = S3Client.builder(); builder.serviceConfiguration(S3Configuration.builder() .chunkedEncodingEnabled(Boolean.valueOf(chunkedEncodingEnabled)) .build()); // Disable checksum calculation to avoid reading the stream twice. // This allows streaming non-resettable InputStreams to S3-compatible // backends that don't support aws-chunked encoding. 
builder.requestChecksumCalculation(RequestChecksumCalculation.WHEN_REQUIRED); builder.responseChecksumValidation(ResponseChecksumValidation.WHEN_REQUIRED); if (cred.identity != null && !cred.identity.isEmpty() && cred.credential != null && !cred.credential.isEmpty()) { builder.credentialsProvider(StaticCredentialsProvider.create( AwsBasicCredentials.create(cred.identity, cred.credential))); } if (endpoint != null && !endpoint.isEmpty()) { URI endpointUri = URI.create(endpoint); builder.endpointOverride(endpointUri); // Use path-style for non-AWS endpoints (Hetzner, MinIO, etc.) String host = endpointUri.getHost(); if (host != null && !host.endsWith(".amazonaws.com")) { builder.forcePathStyle(true); } } builder.region(this.awsRegion); this.s3Client = builder.build(); } @Override public PageSet list() { try { var set = ImmutableSet.builder(); for (Bucket bucket : s3Client.listBuckets().buckets()) { set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null, bucket.name(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, toDate(bucket.creationDate()), toDate(bucket.creationDate()), Map.of(), /*size=*/ null, Tier.STANDARD)); } return new PageSetImpl(set.build(), null); } catch (S3Exception e) { translateAndRethrowException(e, null, null); throw e; } } @Override public PageSet list(String container, ListContainerOptions options) { var requestBuilder = ListObjectsV2Request.builder() .bucket(container); if (options.getPrefix() != null) { requestBuilder.prefix(options.getPrefix()); } if (options.getDelimiter() != null) { requestBuilder.delimiter(options.getDelimiter()); } if (options.getMarker() != null) { requestBuilder.startAfter(options.getMarker()); } int maxKeys = options.getMaxResults() != null ? 
options.getMaxResults() : 1000;
if (maxKeys == 0) {
    // Zero results requested: return an empty page without calling S3.
    return new PageSetImpl(ImmutableSet.of(), null);
}
requestBuilder.maxKeys(maxKeys);
try {
    var response = s3Client.listObjectsV2(requestBuilder.build());
    var set = ImmutableSet.builder();
    String nextMarker = null;
    for (S3Object obj : response.contents()) {
        set.add(new StorageMetadataImpl(StorageType.BLOB, /*id=*/ null,
                obj.key(), /*location=*/ null, /*uri=*/ null, obj.eTag(),
                toDate(obj.lastModified()), toDate(obj.lastModified()),
                Map.of(), obj.size(), toTier(obj.storageClass())));
    }
    for (CommonPrefix prefix : response.commonPrefixes()) {
        set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH,
                /*id=*/ null, prefix.prefix(), /*location=*/ null,
                /*uri=*/ null, /*eTag=*/ null, /*creationDate=*/ null,
                /*lastModified=*/ null, Map.of(), /*size=*/ 0L,
                Tier.STANDARD));
    }
    if (response.isTruncated()) {
        // NOTE(review): when a truncated page contains both keys and
        // common prefixes, the marker is taken from the last key only,
        // which may sort before the last common prefix -- confirm the
        // next page does not repeat or skip entries.
        if (!response.contents().isEmpty()) {
            nextMarker = Streams.findLast(response.contents().stream())
                    .orElseThrow().key();
        } else if (!response.commonPrefixes().isEmpty()) {
            nextMarker = Streams.findLast(
                    response.commonPrefixes().stream())
                    .orElseThrow().prefix();
        }
    }
    return new PageSetImpl(set.build(), nextMarker);
} catch (NoSuchBucketException e) {
    throw new ContainerNotFoundException(container, e.getMessage());
} catch (S3Exception e) {
    translateAndRethrowException(e, container, null);
    throw e;
}
}

/**
 * Returns whether the bucket exists, via HeadBucket.  Any 404 (including
 * ones not modeled as NoSuchBucketException) maps to false.
 */
@Override
public boolean containerExists(String container) {
    try {
        s3Client.headBucket(HeadBucketRequest.builder()
                .bucket(container)
                .build());
        return true;
    } catch (NoSuchBucketException e) {
        return false;
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            return false;
        }
        throw e;
    }
}

/** Creates a bucket with default options. */
@Override
public boolean createContainerInLocation(Location location,
        String container) {
    return createContainerInLocation(location, container,
            new CreateContainerOptions());
}

/**
 * Creates a bucket, optionally making it public-read.  Returns false when
 * the caller already owns the bucket (idempotent success).
 */
@Override
public boolean createContainerInLocation(Location location,
        String container, CreateContainerOptions options) {
    if (options == null) {
        options = new
CreateContainerOptions();
    }
    try {
        var requestBuilder = CreateBucketRequest.builder()
                .bucket(container);
        // us-east-1 must not send a location constraint.
        if (!Region.US_EAST_1.equals(awsRegion)) {
            requestBuilder.createBucketConfiguration(
                    CreateBucketConfiguration.builder()
                            .locationConstraint(awsRegion.id())
                            .build());
        }
        s3Client.createBucket(requestBuilder.build());
        if (options.isPublicRead()) {
            setContainerAccess(container, ContainerAccess.PUBLIC_READ);
        }
        return true;
    } catch (S3Exception e) {
        if (e.statusCode() == 409) {
            String errorCode = e.awsErrorDetails() != null ?
                    e.awsErrorDetails().errorCode() : null;
            if ("BucketAlreadyOwnedByYou".equals(errorCode)) {
                // Idempotent success - bucket exists and caller owns it
                return false;
            }
            if ("BucketAlreadyExists".equals(errorCode)) {
                // Bucket exists but is owned by someone else
                throw new AuthorizationException(
                        "Bucket already exists: " + container, e);
            }
        }
        translateAndRethrowException(e, container, null);
        throw e;
    }
}

/**
 * Deletes a bucket after first clearing its contents.  A missing bucket
 * is ignored (idempotent).
 */
@Override
public void deleteContainer(String container) {
    try {
        clearContainer(container);
        s3Client.deleteBucket(DeleteBucketRequest.builder()
                .bucket(container)
                .build());
    } catch (NoSuchBucketException e) {
        // Already deleted, ignore
    } catch (S3Exception e) {
        translateAndRethrowException(e, container, null);
        throw e;
    }
}

/**
 * Deletes the bucket only if a 1-key listing comes back empty.  Returns
 * true when the bucket is gone or did not exist; false when non-empty
 * (including a 409 race on the delete itself).
 */
@Override
public boolean deleteContainerIfEmpty(String container) {
    try {
        var response = s3Client.listObjectsV2(ListObjectsV2Request.builder()
                .bucket(container)
                .maxKeys(1)
                .build());
        if (!response.contents().isEmpty()) {
            return false;
        }
        s3Client.deleteBucket(DeleteBucketRequest.builder()
                .bucket(container)
                .build());
        return true;
    } catch (NoSuchBucketException e) {
        return true;
    } catch (S3Exception e) {
        if (e.statusCode() == 409) {
            // Bucket not empty
            return false;
        }
        throw e;
    }
}

/** Returns whether the object exists, via HeadObject; 404 maps to false. */
@Override
public boolean blobExists(String container, String key) {
    try {
        s3Client.headObject(HeadObjectRequest.builder()
                .bucket(container)
                .key(key)
                .build());
        return true;
    } catch (NoSuchKeyException e) {
        return false;
    } catch (S3Exception e) {
        if
(e.statusCode() == 404) {
            return false;
        }
        throw e;
    }
}

/**
 * Gets an object, translating jclouds GetOptions (range and conditional
 * headers) onto GetObjectRequest.  The payload is the SDK response stream,
 * so the caller must consume or close it.
 */
@Override
public Blob getBlob(String container, String key, GetOptions options) {
    var requestBuilder = GetObjectRequest.builder()
            .bucket(container)
            .key(key);
    if (!options.getRanges().isEmpty()) {
        // NOTE(review): only the first requested range is honored; any
        // additional ranges in GetOptions are silently dropped -- confirm
        // callers never pass more than one.
        String rangeSpec = options.getRanges().get(0);
        requestBuilder.range("bytes=" + rangeSpec);
    }
    if (options.getIfMatch() != null) {
        requestBuilder.ifMatch(maybeStripETagQuotes(options.getIfMatch()));
    }
    if (options.getIfNoneMatch() != null) {
        requestBuilder.ifNoneMatch(
                maybeStripETagQuotes(options.getIfNoneMatch()));
    }
    if (options.getIfModifiedSince() != null) {
        requestBuilder.ifModifiedSince(
                options.getIfModifiedSince().toInstant());
    }
    if (options.getIfUnmodifiedSince() != null) {
        requestBuilder.ifUnmodifiedSince(
                options.getIfUnmodifiedSince().toInstant());
    }
    try {
        var responseStream = s3Client.getObject(requestBuilder.build());
        var response = responseStream.response();
        var blob = new BlobBuilderImpl()
                .name(key)
                .userMetadata(response.metadata())
                .payload(responseStream)
                .cacheControl(response.cacheControl())
                .contentDisposition(response.contentDisposition())
                .contentEncoding(response.contentEncoding())
                .contentLanguage(response.contentLanguage())
                .contentLength(response.contentLength())
                .contentType(response.contentType())
                .expires(response.expires() != null ?
Date.from(response.expires()) : null)
                .build();
        if (response.contentRange() != null) {
            blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE,
                    response.contentRange());
        }
        var metadata = blob.getMetadata();
        metadata.setETag(response.eTag());
        if (response.lastModified() != null) {
            metadata.setLastModified(Date.from(response.lastModified()));
        }
        metadata.setSize(response.contentLength());
        return blob;
    } catch (NoSuchKeyException e) {
        throw new KeyNotFoundException(container, key, e.getMessage());
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        if (e.statusCode() == 304) {
            // Not Modified: surface as a jclouds HttpResponseException
            // carrying the ETag header when the backend returned one.
            var request = HttpRequest.builder()
                    .method("GET")
                    .endpoint(endpoint)
                    .build();
            var responseBuilder = HttpResponse.builder()
                    .statusCode(304);
            e.awsErrorDetails().sdkHttpResponse().firstMatchingHeader("ETag")
                    .ifPresent(etag ->
                            responseBuilder.addHeader(HttpHeaders.ETAG,
                                    etag));
            throw new HttpResponseException(
                    new HttpCommand(request), responseBuilder.build(), e);
        }
        translateAndRethrowException(e, container, key);
        throw e;
    }
}

/** Puts a blob with default options. */
@Override
public String putBlob(String container, Blob blob) {
    return putBlob(container, blob, new PutOptions());
}

/**
 * Puts a blob, copying content metadata, user metadata, ACL, storage
 * class, and (via PutOptions2) conditional-write headers onto the
 * PutObjectRequest.  Returns the new object's ETag.
 */
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
    var contentMetadata = blob.getMetadata().getContentMetadata();
    var requestBuilder = PutObjectRequest.builder()
            .bucket(container)
            .key(blob.getMetadata().getName());
    if (contentMetadata.getCacheControl() != null) {
        requestBuilder.cacheControl(contentMetadata.getCacheControl());
    }
    if (contentMetadata.getContentDisposition() != null) {
        requestBuilder.contentDisposition(
                contentMetadata.getContentDisposition());
    }
    if (contentMetadata.getContentEncoding() != null) {
        requestBuilder.contentEncoding(contentMetadata.getContentEncoding());
    }
    if (contentMetadata.getContentLanguage() != null) {
        requestBuilder.contentLanguage(contentMetadata.getContentLanguage());
    }
    if (contentMetadata.getContentMD5() != null) {
requestBuilder.contentMD5(Base64.getEncoder().encodeToString(
            contentMetadata.getContentMD5()));
    }
    if (contentMetadata.getContentType() != null) {
        requestBuilder.contentType(contentMetadata.getContentType());
    }
    if (contentMetadata.getExpires() != null) {
        requestBuilder.expires(contentMetadata.getExpires().toInstant());
    }
    var userMetadata = blob.getMetadata().getUserMetadata();
    if (userMetadata != null && !userMetadata.isEmpty()) {
        requestBuilder.metadata(userMetadata);
    }
    BlobAccess requestedAccess = options != null ?
            options.getBlobAccess() : null;
    if (requestedAccess == BlobAccess.PUBLIC_READ) {
        requestBuilder.acl(ObjectCannedACL.PUBLIC_READ);
    }
    if (blob.getMetadata().getTier() != null &&
            blob.getMetadata().getTier() != Tier.STANDARD) {
        requestBuilder.storageClass(
                toStorageClass(blob.getMetadata().getTier()));
    }
    // Conditional-write headers are only present via the PutOptions2
    // subclass.
    String ifMatch = null;
    String ifNoneMatch = null;
    if (options instanceof PutOptions2) {
        var putOptions2 = (PutOptions2) options;
        ifMatch = putOptions2.getIfMatch();
        ifNoneMatch = putOptions2.getIfNoneMatch();
    }
    boolean hasConditionalHeaders = ifMatch != null || ifNoneMatch != null;
    if (hasConditionalHeaders && !useNativeConditionalWrites) {
        // Emulation path for backends without native If-Match support:
        // check preconditions with a HEAD first, then clear the headers.
        // NOTE(review): this check-then-put is not atomic -- a concurrent
        // writer can slip between the validation and the put.
        validateConditionalPut(container, blob.getMetadata().getName(),
                ifMatch, ifNoneMatch);
        ifMatch = null;
        ifNoneMatch = null;
    }
    if (ifMatch != null) {
        requestBuilder.ifMatch(maybeStripETagQuotes(ifMatch));
    }
    if (ifNoneMatch != null) {
        requestBuilder.ifNoneMatch(maybeStripETagQuotes(ifNoneMatch));
    }
    try (InputStream is = blob.getPayload().openStream()) {
        Long contentLength = contentMetadata.getContentLength();
        if (contentLength == null) {
            // Mimic S3 behavior: Reject unknown length instead of crashing memory
            throw new IllegalArgumentException("Content-Length is required for S3 putBlob");
        } else {
            var response = s3Client.putObject(requestBuilder.build(),
                    RequestBody.fromInputStream(is, contentLength));
            return response.eTag();
        }
    } catch (IOException e) {
        throw new RuntimeException("Failed to read blob payload", e);
    } catch
(S3Exception e) {
        translateAndRethrowException(e, container,
                blob.getMetadata().getName());
        throw e;
    }
}

/**
 * Server-side copy.  When content metadata or user metadata is supplied,
 * the metadata directive is switched to REPLACE so the destination gets
 * the new values instead of the source's.
 */
@Override
public String copyBlob(String fromContainer, String fromName,
        String toContainer, String toName, CopyOptions options) {
    var requestBuilder = CopyObjectRequest.builder()
            .sourceBucket(fromContainer)
            .sourceKey(fromName)
            .destinationBucket(toContainer)
            .destinationKey(toName);
    var contentMetadata = options.contentMetadata();
    if (contentMetadata != null) {
        if (contentMetadata.getCacheControl() != null) {
            requestBuilder.cacheControl(contentMetadata.getCacheControl());
        }
        if (contentMetadata.getContentDisposition() != null) {
            requestBuilder.contentDisposition(
                    contentMetadata.getContentDisposition());
        }
        if (contentMetadata.getContentEncoding() != null) {
            requestBuilder.contentEncoding(
                    contentMetadata.getContentEncoding());
        }
        if (contentMetadata.getContentLanguage() != null) {
            requestBuilder.contentLanguage(
                    contentMetadata.getContentLanguage());
        }
        if (contentMetadata.getContentType() != null) {
            requestBuilder.contentType(contentMetadata.getContentType());
        }
        requestBuilder.metadataDirective("REPLACE");
    }
    var userMetadata = options.userMetadata();
    if (userMetadata != null) {
        requestBuilder.metadata(userMetadata);
        requestBuilder.metadataDirective("REPLACE");
    }
    try {
        var response = s3Client.copyObject(requestBuilder.build());
        return response.copyObjectResult().eTag();
    } catch (NoSuchKeyException e) {
        throw new KeyNotFoundException(fromContainer, fromName,
                e.getMessage());
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(fromContainer, e.getMessage());
    } catch (S3Exception e) {
        translateAndRethrowException(e, fromContainer, fromName);
        throw e;
    }
}

/** Deletes an object; missing key or bucket is ignored (idempotent). */
@Override
public void removeBlob(String container, String key) {
    try {
        s3Client.deleteObject(DeleteObjectRequest.builder()
                .bucket(container)
                .key(key)
                .build());
    } catch (NoSuchKeyException | NoSuchBucketException e) {
        // Ignore - delete is idempotent
    } catch (S3Exception e) {
        if
(e.statusCode() != 404) {
            throw e;
        }
    }
}

/**
 * Returns object metadata via HeadObject, or null when the object does
 * not exist.  A missing bucket throws ContainerNotFoundException.
 */
@Override
public BlobMetadata blobMetadata(String container, String key) {
    try {
        HeadObjectResponse response = s3Client.headObject(
                HeadObjectRequest.builder()
                        .bucket(container)
                        .key(key)
                        .build());
        return new BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null,
                /*uri=*/ null, response.eTag(),
                toDate(response.lastModified()),
                toDate(response.lastModified()), response.metadata(),
                /*publicUri=*/ null, container, toContentMetadata(response),
                response.contentLength(), toTier(response.storageClass()));
    } catch (NoSuchKeyException e) {
        return null;
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            return null;
        }
        translateAndRethrowException(e, container, key);
        throw e;
    }
}

/**
 * Deletes the bucket and treats "already gone" as success.
 * NOTE(review): other S3Exceptions (e.g. 409 not-empty) propagate raw
 * rather than being translated -- confirm this matches the base class
 * contract.
 */
@Override
protected boolean deleteAndVerifyContainerGone(String container) {
    try {
        s3Client.deleteBucket(DeleteBucketRequest.builder()
                .bucket(container)
                .build());
        return true;
    } catch (NoSuchBucketException e) {
        return true;
    }
}

/**
 * Returns PUBLIC_READ when the bucket ACL grants the AllUsers group read
 * (or full control), otherwise PRIVATE.  Non-404 ACL errors (e.g. a
 * backend without ACL support) degrade to PRIVATE.
 */
@Override
public ContainerAccess getContainerAccess(String container) {
    try {
        var response = s3Client.getBucketAcl(GetBucketAclRequest.builder()
                .bucket(container)
                .build());
        boolean isPublic = hasPublicRead(response.grants());
        return isPublic ? ContainerAccess.PUBLIC_READ :
                ContainerAccess.PRIVATE;
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            throw new ContainerNotFoundException(container, e.getMessage());
        }
        return ContainerAccess.PRIVATE;
    }
}

/** Sets the bucket's canned ACL to public-read or private. */
@Override
public void setContainerAccess(String container, ContainerAccess access) {
    BucketCannedACL acl = access == ContainerAccess.PUBLIC_READ ?
BucketCannedACL.PUBLIC_READ : BucketCannedACL.PRIVATE;
    try {
        s3Client.putBucketAcl(PutBucketAclRequest.builder()
                .bucket(container)
                .acl(acl)
                .build());
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        translateAndRethrowException(e, container, null);
        throw e;
    }
}

/**
 * Returns PUBLIC_READ when the object ACL grants the AllUsers group read
 * (or full control), otherwise PRIVATE.
 */
@Override
public BlobAccess getBlobAccess(String container, String key) {
    try {
        var response = s3Client.getObjectAcl(GetObjectAclRequest.builder()
                .bucket(container)
                .key(key)
                .build());
        return hasPublicRead(response.grants()) ?
                BlobAccess.PUBLIC_READ : BlobAccess.PRIVATE;
    } catch (NoSuchKeyException e) {
        throw new KeyNotFoundException(container, key, e.getMessage());
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            throw translateAclNotFound(container, key, e);
        }
        throw e;
    }
}

/**
 * Returns true when any grant gives READ or FULL_CONTROL to the global
 * AllUsers group URI.
 */
private static boolean hasPublicRead(List grants) {
    for (Grant grant : grants) {
        if (grant.permission() == Permission.READ ||
                grant.permission() == Permission.FULL_CONTROL) {
            if (grant.grantee().type() == Type.GROUP &&
                    "http://acs.amazonaws.com/groups/global/AllUsers".equals(grant.grantee().uri())) {
                return true;
            }
        }
    }
    return false;
}

/**
 * Maps a 404 from an ACL call onto the appropriate jclouds not-found
 * exception, using the AWS error code when available and falling back on
 * whether a key was involved.
 */
private RuntimeException translateAclNotFound(String container,
        String key, S3Exception e) {
    AwsErrorDetails details = e.awsErrorDetails();
    String errorCode = details != null ?
details.errorCode() : null;
    if ("NoSuchKey".equals(errorCode) || "NotFound".equals(errorCode)) {
        return new KeyNotFoundException(container, key, e.getMessage());
    }
    if ("NoSuchBucket".equals(errorCode)) {
        return new ContainerNotFoundException(container, e.getMessage());
    }
    // Unknown 404 error code: guess by whether a key was in play.
    if (key != null) {
        return new KeyNotFoundException(container, key, e.getMessage());
    }
    return new ContainerNotFoundException(container, e.getMessage());
}

/**
 * After completing a multipart upload, re-applies PUBLIC_READ when the
 * upload was initiated with that access, since the ACL from initiation
 * is carried in the MultipartUpload's PutOptions.
 */
private void applyMultipartAclIfNeeded(MultipartUpload mpu) {
    if (mpu == null) {
        return;
    }
    PutOptions putOptions = mpu.putOptions();
    if (putOptions != null &&
            putOptions.getBlobAccess() == BlobAccess.PUBLIC_READ) {
        setBlobAccess(mpu.containerName(), mpu.blobName(),
                BlobAccess.PUBLIC_READ);
    }
}

/** Sets the object's canned ACL to public-read or private. */
@Override
public void setBlobAccess(String container, String key,
        BlobAccess access) {
    ObjectCannedACL acl = access == BlobAccess.PUBLIC_READ ?
            ObjectCannedACL.PUBLIC_READ : ObjectCannedACL.PRIVATE;
    try {
        s3Client.putObjectAcl(PutObjectAclRequest.builder()
                .bucket(container)
                .key(key)
                .acl(acl)
                .build());
    } catch (NoSuchKeyException e) {
        throw new KeyNotFoundException(container, key, e.getMessage());
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        translateAndRethrowException(e, container, key);
        throw e;
    }
}

/**
 * Starts a multipart upload, copying content metadata, user metadata,
 * ACL, and storage class onto the CreateMultipartUploadRequest.  The
 * returned MultipartUpload retains the original metadata and options for
 * use at completion time.
 */
@Override
public MultipartUpload initiateMultipartUpload(String container,
        BlobMetadata blobMetadata, PutOptions options) {
    var requestBuilder = CreateMultipartUploadRequest.builder()
            .bucket(container)
            .key(blobMetadata.getName());
    var contentMetadata = blobMetadata.getContentMetadata();
    if (contentMetadata != null) {
        if (contentMetadata.getCacheControl() != null) {
            requestBuilder.cacheControl(contentMetadata.getCacheControl());
        }
        if (contentMetadata.getContentDisposition() != null) {
            requestBuilder.contentDisposition(
                    contentMetadata.getContentDisposition());
        }
        if (contentMetadata.getContentEncoding() != null) {
            requestBuilder.contentEncoding(
contentMetadata.getContentEncoding());
        }
        if (contentMetadata.getContentLanguage() != null) {
            requestBuilder.contentLanguage(
                    contentMetadata.getContentLanguage());
        }
        if (contentMetadata.getContentType() != null) {
            requestBuilder.contentType(contentMetadata.getContentType());
        }
    }
    var userMetadata = blobMetadata.getUserMetadata();
    if (userMetadata != null && !userMetadata.isEmpty()) {
        requestBuilder.metadata(userMetadata);
    }
    if (options != null &&
            options.getBlobAccess() == BlobAccess.PUBLIC_READ) {
        requestBuilder.acl(ObjectCannedACL.PUBLIC_READ);
    }
    if (blobMetadata.getTier() != null &&
            blobMetadata.getTier() != Tier.STANDARD) {
        requestBuilder.storageClass(
                toStorageClass(blobMetadata.getTier()));
    }
    try {
        var response = s3Client.createMultipartUpload(
                requestBuilder.build());
        return MultipartUpload.create(container, blobMetadata.getName(),
                response.uploadId(), blobMetadata, options);
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        translateAndRethrowException(e, container, blobMetadata.getName());
        throw e;
    }
}

/**
 * Aborts a multipart upload.  An unknown upload id (404) maps to
 * KeyNotFoundException.
 */
@Override
public void abortMultipartUpload(MultipartUpload mpu) {
    try {
        s3Client.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                .bucket(mpu.containerName())
                .key(mpu.blobName())
                .uploadId(mpu.id())
                .build());
    } catch (NoSuchKeyException e) {
        throw new KeyNotFoundException(mpu.containerName(), mpu.blobName(),
                "Multipart upload not found: " + mpu.id());
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            throw new KeyNotFoundException(mpu.containerName(),
                    mpu.blobName(),
                    "Multipart upload not found: " + mpu.id());
        }
        throw e;
    }
}

/**
 * Completes a multipart upload from the supplied parts (sorted and
 * validated first) and returns the resulting ETag.
 */
@Override
public String completeMultipartUpload(MultipartUpload mpu, List parts) {
    var sortedParts = sortAndValidateParts(parts);
    var completedParts = sortedParts.stream()
            .map(part -> CompletedPart.builder()
                    .partNumber(part.partNumber())
                    .eTag(part.partETag())
                    .build())
            .toList();
    var requestBuilder = CompleteMultipartUploadRequest.builder()
.bucket(mpu.containerName())
            .key(mpu.blobName())
            .uploadId(mpu.id())
            .multipartUpload(CompletedMultipartUpload.builder()
                    .parts(completedParts)
                    .build());
    try {
        var response = s3Client.completeMultipartUpload(
                requestBuilder.build());
        // Re-apply PUBLIC_READ requested at initiation, if any.
        applyMultipartAclIfNeeded(mpu);
        return response.eTag();
    } catch (S3Exception e) {
        translateAndRethrowException(e, mpu.containerName(),
                mpu.blobName());
        throw e;
    }
}

/**
 * Uploads one part of a multipart upload.  The payload must carry an
 * explicit Content-Length; the part's ETag is returned in the result.
 */
@Override
public MultipartPart uploadMultipartPart(MultipartUpload mpu,
        int partNumber, Payload payload) {
    Long contentLength = payload.getContentMetadata().getContentLength();
    if (contentLength == null) {
        throw new IllegalArgumentException("Content-Length is required");
    }
    try (InputStream is = payload.openStream()) {
        var response = s3Client.uploadPart(UploadPartRequest.builder()
                .bucket(mpu.containerName())
                .key(mpu.blobName())
                .uploadId(mpu.id())
                .partNumber(partNumber)
                .build(),
                RequestBody.fromInputStream(is, contentLength));
        return MultipartPart.create(partNumber, contentLength,
                response.eTag(), null);
    } catch (IOException e) {
        throw new RuntimeException("Failed to upload part", e);
    } catch (S3Exception e) {
        translateAndRethrowException(e, mpu.containerName(),
                mpu.blobName());
        throw e;
    }
}

/**
 * Lists all parts of an in-progress multipart upload, following the
 * part-number-marker pagination until exhausted.  A 404 (unknown upload)
 * yields an empty list.
 */
@Override
public List listMultipartUpload(MultipartUpload mpu) {
    try {
        var parts = ImmutableList.builder();
        Integer partNumberMarker = null;
        do {
            var response = s3Client.listParts(ListPartsRequest.builder()
                    .bucket(mpu.containerName())
                    .key(mpu.blobName())
                    .uploadId(mpu.id())
                    .partNumberMarker(partNumberMarker)
                    .build());
            for (Part part : response.parts()) {
                parts.add(MultipartPart.create(part.partNumber(),
                        part.size(), part.eTag(),
                        toDate(part.lastModified())));
            }
            partNumberMarker = response.isTruncated() ?
response.nextPartNumberMarker() : null;
        } while (partNumberMarker != null);
        return parts.build();
    } catch (S3Exception e) {
        if (e.statusCode() == 404) {
            return ImmutableList.of();
        }
        translateAndRethrowException(e, mpu.containerName(),
                mpu.blobName());
        throw e;
    }
}

/**
 * Lists all in-progress multipart uploads in a bucket, following the
 * key-marker/upload-id-marker pagination until exhausted.
 */
@Override
public List listMultipartUploads(String container) {
    try {
        var builder = ImmutableList.builder();
        String keyMarker = null;
        String uploadIdMarker = null;
        do {
            var response = s3Client.listMultipartUploads(
                    ListMultipartUploadsRequest.builder()
                            .bucket(container)
                            .keyMarker(keyMarker)
                            .uploadIdMarker(uploadIdMarker)
                            .build());
            for (var upload : response.uploads()) {
                builder.add(MultipartUpload.create(container,
                        upload.key(), upload.uploadId(), null, null));
            }
            if (response.isTruncated()) {
                keyMarker = response.nextKeyMarker();
                uploadIdMarker = response.nextUploadIdMarker();
            } else {
                keyMarker = null;
            }
        } while (keyMarker != null);
        return builder.build();
    } catch (NoSuchBucketException e) {
        throw new ContainerNotFoundException(container, e.getMessage());
    } catch (S3Exception e) {
        translateAndRethrowException(e, container, null);
        throw e;
    }
}

@Override
public long getMinimumMultipartPartSize() {
    // S3 minimum part size is 5MB (except for last part)
    return 5L * 1024 * 1024;
}

@Override
public long getMaximumMultipartPartSize() {
    // S3 maximum part size is 5GB
    return 5L * 1024 * 1024 * 1024;
}

@Override
public int getMaximumNumberOfParts() {
    return 10000;
}

@Override
public InputStream streamBlob(String container, String name) {
    throw new UnsupportedOperationException("not yet implemented");
}

/**
 * Sorts parts by part number and rejects null/empty input and
 * non-positive part numbers.
 */
private static List sortAndValidateParts(
        List parts) {
    if (parts == null || parts.isEmpty()) {
        throw new IllegalArgumentException(
                "At least one multipart part is required");
    }
    var sortedParts = parts.stream()
            .sorted(Comparator.comparingInt(MultipartPart::partNumber))
            .toList();
    int previousPartNumber = 0;
    for (MultipartPart part : sortedParts) {
        int partNumber = part.partNumber();
        if (partNumber <= 0) {
            throw
new IllegalArgumentException(
                    "Part numbers must be positive integers");
        }
        // NOTE(review): the list was just sorted ascending, so this
        // ordering check cannot trigger, and duplicate part numbers pass
        // through -- confirm whether duplicates should be rejected.
        if (partNumber < previousPartNumber) {
            throw new IllegalArgumentException(
                    "Parts must be provided in ascending PartNumber order");
        }
        previousPartNumber = partNumber;
    }
    return sortedParts;
}

/** Converts a nullable Instant to a java.util.Date. */
private static Date toDate(@Nullable Instant instant) {
    if (instant == null) {
        return null;
    }
    return Date.from(instant);
}

/** Maps a jclouds Tier to the corresponding S3 storage class. */
private static StorageClass toStorageClass(Tier tier) {
    return switch (tier) {
        case ARCHIVE -> StorageClass.GLACIER;
        case COLD -> StorageClass.GLACIER_IR;
        case COOL, INFREQUENT -> StorageClass.STANDARD_IA;
        case STANDARD -> StorageClass.STANDARD;
    };
}

/**
 * Maps an S3 StorageClass back to a jclouds Tier; null and unknown
 * classes map to STANDARD.
 */
private static Tier toTier(@Nullable StorageClass storageClass) {
    if (storageClass == null) {
        return Tier.STANDARD;
    }
    return switch (storageClass) {
        case GLACIER, DEEP_ARCHIVE -> Tier.ARCHIVE;
        case GLACIER_IR -> Tier.COLD;
        case STANDARD_IA, ONEZONE_IA -> Tier.INFREQUENT;
        default -> Tier.STANDARD;
    };
}

/**
 * Same mapping as above, but for the ObjectStorageClass enum used by
 * listing responses.
 */
private static Tier toTier(
        software.amazon.awssdk.services.s3.model.@Nullable
        ObjectStorageClass storageClass) {
    if (storageClass == null) {
        return Tier.STANDARD;
    }
    return switch (storageClass) {
        case GLACIER, DEEP_ARCHIVE -> Tier.ARCHIVE;
        case GLACIER_IR -> Tier.COLD;
        case STANDARD_IA, ONEZONE_IA -> Tier.INFREQUENT;
        default -> Tier.STANDARD;
    };
}

/** Builds jclouds content metadata from a HeadObject response. */
private static org.jclouds.io.ContentMetadata toContentMetadata(
        HeadObjectResponse response) {
    var builder = ContentMetadataBuilder.create();
    if (response.cacheControl() != null) {
        builder.cacheControl(response.cacheControl());
    }
    if (response.contentDisposition() != null) {
        builder.contentDisposition(response.contentDisposition());
    }
    if (response.contentEncoding() != null) {
        builder.contentEncoding(response.contentEncoding());
    }
    if (response.contentLanguage() != null) {
        builder.contentLanguage(response.contentLanguage());
    }
    if (response.contentLength() != null) {
        builder.contentLength(response.contentLength());
    }
    if (response.contentType() != null) {
        builder.contentType(response.contentType());
}
    if (response.expires() != null) {
        builder.expires(Date.from(response.expires()));
    }
    return builder.build();
}

/**
 * Maps an S3Exception onto the matching jclouds exception: 404s become
 * Container/KeyNotFoundException, everything else becomes an
 * HttpResponseException carrying the original status.
 * NOTE(review): awsErrorDetails() is dereferenced without a null check in
 * the 404 and 304 paths -- confirm the SDK always populates it here.
 */
private void translateAndRethrowException(S3Exception e,
        @Nullable String container, @Nullable String key) {
    if (container != null && e.statusCode() == 404) {
        String errorCode = e.awsErrorDetails().errorCode();
        if ("NoSuchBucket".equals(errorCode)) {
            throw new ContainerNotFoundException(container,
                    e.getMessage());
        } else if ("NoSuchKey".equals(errorCode)) {
            if (key == null) {
                throw new ContainerNotFoundException(container,
                        e.getMessage());
            }
            throw new KeyNotFoundException(container, key,
                    e.getMessage());
        }
        if (key != null) {
            throw new KeyNotFoundException(container, key,
                    e.getMessage());
        } else {
            throw new ContainerNotFoundException(container,
                    e.getMessage());
        }
    }
    var request = HttpRequest.builder()
            .method("GET")
            .endpoint(endpoint)
            .build();
    var responseBuilder = HttpResponse.builder()
            .statusCode(e.statusCode())
            .message(e.getMessage());
    if (e.statusCode() == 304) {
        e.awsErrorDetails().sdkHttpResponse().firstMatchingHeader(HttpHeaders.ETAG)
                .ifPresent(etag ->
                        responseBuilder.addHeader(HttpHeaders.ETAG, etag));
    }
    throw new HttpResponseException(
            new HttpCommand(request), responseBuilder.build(), e);
}

/**
 * Ensures the ETag is surrounded by quotes if not already.
 */
private static String maybeQuoteETag(String eTag) {
    if (!eTag.startsWith("\"") && !eTag.endsWith("\"")) {
        eTag = "\"" + eTag + "\"";
    }
    return eTag;
}

/**
 * Strips surrounding quotes from ETag if stripETagQuotes is enabled.
 * Required for backends with Ceph Reef bug.
 * See: https://tracker.ceph.com/issues/68712
 * TODO: Can be removed after 2027-01-01 - by then every provider should
 * have migrated to a newer Ceph version (including Hetzner).
 */
private String maybeStripETagQuotes(String eTag) {
    if (!stripETagQuotes || eTag == null) {
        return eTag;
    }
    if (eTag.length() >= 2 && eTag.startsWith("\"") &&
            eTag.endsWith("\"")) {
        return eTag.substring(1, eTag.length() - 1);
    }
    return eTag;
}

/**
 * Compares two ETags, ignoring surrounding quotes.
 */
private static boolean equalsIgnoringSurroundingQuotes(
        String s1, String s2) {
    if (s1.length() >= 2 && s1.startsWith("\"") && s1.endsWith("\"")) {
        s1 = s1.substring(1, s1.length() - 1);
    }
    if (s2.length() >= 2 && s2.startsWith("\"") && s2.endsWith("\"")) {
        s2 = s2.substring(1, s2.length() - 1);
    }
    return s1.equals(s2);
}

/** Throws a jclouds HttpResponseException with status 412. */
private void throwPreconditionFailed() {
    var request = HttpRequest.builder()
            .method("PUT")
            .endpoint(endpoint)
            .build();
    var response = HttpResponse.builder()
            .statusCode(412)
            .message("Precondition Failed")
            .build();
    throw new HttpResponseException(new HttpCommand(request), response);
}

/** Throws KeyNotFoundException for a failed If-Match on a missing key. */
private void throwKeyNotFound(String container, String key) {
    throw new KeyNotFoundException(container, key,
            "Object does not exist for If-Match condition");
}

/**
 * For S3-compatible backends that don't support If-Match/If-None-Match
 * headers natively.
 */
private void validateConditionalPut(String container, String blobName,
        @Nullable String ifMatch, @Nullable String ifNoneMatch) {
    // One HEAD fetches current metadata for both checks.
    BlobMetadata metadata = blobMetadata(container, blobName);
    if (ifMatch != null) {
        validateIfMatch(container, blobName, ifMatch, metadata);
    }
    if (ifNoneMatch != null) {
        validateIfNoneMatch(ifNoneMatch, metadata);
    }
}

/**
 * If-Match semantics: "*" requires the object to exist; otherwise the
 * current ETag must equal the supplied one (quote-insensitively).
 */
private void validateIfMatch(String container, String blobName,
        String ifMatch, @Nullable BlobMetadata metadata) {
    if ("*".equals(ifMatch)) {
        if (metadata == null) {
            throwPreconditionFailed();
        }
        return;
    }
    if (metadata == null) {
        throwKeyNotFound(container, blobName);
    }
    String currentETag = metadata.getETag();
    if (currentETag == null ||
            !equalsIgnoringSurroundingQuotes(ifMatch,
                    maybeQuoteETag(currentETag))) {
        throwPreconditionFailed();
    }
}

/**
 * If-None-Match semantics: "*" requires the object to be absent;
 * otherwise the current ETag must differ from the supplied one
 * (quote-insensitively).
 */
private void validateIfNoneMatch(String ifNoneMatch,
        @Nullable BlobMetadata metadata) {
    if ("*".equals(ifNoneMatch)) {
        if (metadata != null) {
            throwPreconditionFailed();
        }
        return;
    }
    if (metadata == null) {
        return;
    }
    String currentETag = metadata.getETag();
    if (currentETag != null &&
            equalsIgnoringSurroundingQuotes(ifNoneMatch,
                    maybeQuoteETag(currentETag))) {
        throwPreconditionFailed();
    }
}
}

================================================
FILE: src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkBlobStoreContextModule.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gaul.s3proxy.awssdk;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.attr.ConsistencyModel;

/**
 * Guice module wiring the AWS SDK backed BlobStore: binds the STRICT
 * consistency model and a singleton AwsS3SdkBlobStore.
 */
public final class AwsS3SdkBlobStoreContextModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);
        bind(BlobStore.class).to(AwsS3SdkBlobStore.class).in(Scopes.SINGLETON);
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkProviderMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.gaul.s3proxy.awssdk;

import java.util.Properties;

import com.google.auto.service.AutoService;

import org.jclouds.providers.ProviderMetadata;
import org.jclouds.providers.internal.BaseProviderMetadata;

/**
 * jclouds ProviderMetadata for the "aws-s3-sdk" backend, registered via
 * AutoService so the provider is discoverable on the classpath.
 */
@AutoService(ProviderMetadata.class)
public final class AwsS3SdkProviderMetadata extends BaseProviderMetadata {
    public AwsS3SdkProviderMetadata() {
        super(builder());
    }

    public AwsS3SdkProviderMetadata(Builder builder) {
        super(builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromProviderMetadata(this);
    }

    // No provider-specific default properties.
    public static Properties defaultProperties() {
        var properties = new Properties();
        return properties;
    }

    public static final class Builder extends BaseProviderMetadata.Builder {
        protected Builder() {
            id("aws-s3-sdk")
                    .name("AWS S3 SDK Backend")
                    .apiMetadata(new AwsS3SdkApiMetadata())
                    .endpoint("https://s3.amazonaws.com")
                    .defaultProperties(
                            AwsS3SdkProviderMetadata.defaultProperties());
        }

        @Override
        public AwsS3SdkProviderMetadata build() {
            return new AwsS3SdkProviderMetadata(this);
        }

        @Override
        public Builder fromProviderMetadata(ProviderMetadata in) {
            super.fromProviderMetadata(in);
            return this;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/azureblob/AzureBlobApiMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.azureblob;

import java.net.URI;
import java.util.Properties;
import java.util.Set;

import org.jclouds.azure.storage.config.AuthType;
import org.jclouds.azure.storage.config.AzureStorageProperties;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.reference.BlobStoreConstants;
import org.jclouds.reflect.Reflection2;
import org.jclouds.rest.internal.BaseHttpApiMetadata;

/**
 * jclouds ApiMetadata for the Azure SDK based blob backend ("azureblob-sdk").
 * The HTTP API interface is a stub -- all I/O goes through the Azure Storage
 * SDK rather than jclouds' HTTP machinery.
 */
// BaseHttpApiMetadata.Builder is used raw below, hence the suppression.
@SuppressWarnings("rawtypes")
public final class AzureBlobApiMetadata extends BaseHttpApiMetadata {
    public AzureBlobApiMetadata() {
        this(builder());
    }

    protected AzureBlobApiMetadata(Builder builder) {
        super(builder);
    }

    private static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromApiMetadata(this);
    }

    /**
     * Returns the default properties: the x-ms-meta- user-metadata prefix and
     * shared-key auth with empty account/tenant placeholders.
     */
    public static Properties defaultProperties() {
        Properties properties = BaseHttpApiMetadata.defaultProperties();
        properties.setProperty(
                BlobStoreConstants.PROPERTY_USER_METADATA_PREFIX,
                "x-ms-meta-");
        properties.setProperty(AzureStorageProperties.AUTH_TYPE,
                AuthType.AZURE_KEY.toString());
        properties.setProperty(AzureStorageProperties.ACCOUNT, "");
        properties.setProperty(AzureStorageProperties.TENANT_ID, "");
        return properties;
    }

    // Fake API client
    private interface AzureBlobClient {
    }

    public static final class Builder
            extends BaseHttpApiMetadata.Builder {
        protected Builder() {
            super(AzureBlobClient.class);
            id("azureblob-sdk")
                    .name("Microsoft Azure Blob Service API")
                    .identityName("Account Name")
                    .credentialName("Access Key")
                    // TODO: update
                    .version("2017-11-09")
                    .defaultEndpoint(
                            "https://${jclouds.identity}.blob.core.windows.net")
                    .documentation(URI.create(
                            "https://learn.microsoft.com/en-us/rest/api/" +
                            "storageservices/Blob-Service-REST-API"))
                    .defaultProperties(
                            AzureBlobApiMetadata.defaultProperties())
                    .view(Reflection2.typeToken(BlobStoreContext.class))
                    .defaultModules(
                            Set.of(AzureBlobStoreContextModule.class));
        }

        @Override
        public AzureBlobApiMetadata build() {
            return new AzureBlobApiMetadata(this);
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}


================================================
FILE: src/main/java/org/gaul/s3proxy/azureblob/AzureBlobProviderMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.azureblob;

import java.net.URI;
import java.util.Properties;

import com.google.auto.service.AutoService;

import org.jclouds.azure.storage.config.AzureStorageProperties;
import org.jclouds.oauth.v2.config.CredentialType;
import org.jclouds.oauth.v2.config.OAuthProperties;
import org.jclouds.providers.ProviderMetadata;
import org.jclouds.providers.internal.BaseProviderMetadata;

/**
 * Implementation of org.jclouds.types.ProviderMetadata for Microsoft Azure
 * Blob Service.
 */
@AutoService(ProviderMetadata.class)
public final class AzureBlobProviderMetadata extends BaseProviderMetadata {
    public AzureBlobProviderMetadata() {
        super(builder());
    }

    public AzureBlobProviderMetadata(Builder builder) {
        super(builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromProviderMetadata(this);
    }

    /**
     * Returns the default properties: an OAuth token endpoint templated on
     * the tenant id, the Azure Storage resource, client-credentials flow, and
     * the account name templated on the jclouds identity.
     */
    public static Properties defaultProperties() {
        var properties = new Properties();
        properties.put("oauth.endpoint", "https://login.microsoft.com/${" +
                AzureStorageProperties.TENANT_ID + "}/oauth2/token");
        properties.put(OAuthProperties.RESOURCE, "https://storage.azure.com");
        properties.put(OAuthProperties.CREDENTIAL_TYPE,
                CredentialType.CLIENT_CREDENTIALS_SECRET.toString());
        properties.put(AzureStorageProperties.ACCOUNT, "${jclouds.identity}");
        return properties;
    }

    public static final class Builder extends BaseProviderMetadata.Builder {
        protected Builder() {
            // Endpoint is templated on the storage account name.
            id("azureblob-sdk")
                    .name("Microsoft Azure Blob Service")
                    .apiMetadata(new AzureBlobApiMetadata())
                    .endpoint("https://${" + AzureStorageProperties.ACCOUNT +
                            "}.blob.core.windows.net")
                    .homepage(URI.create(
                            "http://www.microsoft.com/windowsazure/storage/"))
                    .console(URI.create(
                            "https://windows.azure.com/default.aspx"))
                    .linkedServices("azureblob", "azurequeue", "azuretable")
                    .iso3166Codes("US-TX", "US-IL", "IE-D", "SG", "NL-NH",
                            "HK")
                    .defaultProperties(
                            AzureBlobProviderMetadata.defaultProperties());
        }

        @Override
        public AzureBlobProviderMetadata build() {
            return new AzureBlobProviderMetadata(this);
        }

        @Override
        public Builder fromProviderMetadata(
                ProviderMetadata in) {
            super.fromProviderMetadata(in);
            return this;
        }
    }
}


================================================
FILE: src/main/java/org/gaul/s3proxy/azureblob/AzureBlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance
with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.azureblob; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URLDecoder; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.util.Base64; import java.util.Date; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import com.azure.core.credential.AzureNamedKeyCredential; import com.azure.core.http.rest.PagedResponse; import com.azure.identity.DefaultAzureCredentialBuilder; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.blob.BlobServiceClientBuilder; import com.azure.storage.blob.models.AccessTier; import com.azure.storage.blob.models.BlobErrorCode; import com.azure.storage.blob.models.BlobHttpHeaders; import com.azure.storage.blob.models.BlobItem; import com.azure.storage.blob.models.BlobListDetails; import com.azure.storage.blob.models.BlobProperties; import com.azure.storage.blob.models.BlobRange; import com.azure.storage.blob.models.BlobRequestConditions; import com.azure.storage.blob.models.BlobStorageException; import com.azure.storage.blob.models.BlockList; import com.azure.storage.blob.models.BlockListType; import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.models.PublicAccessType; import com.azure.storage.blob.options.BlobContainerCreateOptions; import com.azure.storage.blob.options.BlobUploadFromUrlOptions; import 
com.azure.storage.blob.options.BlockBlobCommitBlockListOptions; import com.azure.storage.blob.options.BlockBlobOutputStreamOptions; import com.azure.storage.blob.options.BlockBlobSimpleUploadOptions; import com.azure.storage.blob.sas.BlobSasPermission; import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; import com.azure.storage.blob.specialized.BlobInputStream; import com.azure.storage.blob.specialized.BlockBlobAsyncClient; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import com.google.common.hash.HashingInputStream; import com.google.common.io.BaseEncoding; import com.google.common.net.HttpHeaders; import jakarta.inject.Inject; import jakarta.inject.Singleton; import jakarta.ws.rs.core.Response.Status; import org.gaul.s3proxy.PutOptions2; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.StorageType; import org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.BlobBuilderImpl; import org.jclouds.blobstore.domain.internal.BlobMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.domain.internal.StorageMetadataImpl; import org.jclouds.blobstore.internal.BaseBlobStore; 
import org.jclouds.blobstore.options.CopyOptions;
import org.jclouds.blobstore.options.CreateContainerOptions;
import org.jclouds.blobstore.options.GetOptions;
import org.jclouds.blobstore.options.ListContainerOptions;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.blobstore.util.BlobUtils;
import org.jclouds.collect.Memoized;
import org.jclouds.domain.Credentials;
import org.jclouds.domain.Location;
import org.jclouds.http.HttpCommand;
import org.jclouds.http.HttpRequest;
import org.jclouds.http.HttpResponse;
import org.jclouds.http.HttpResponseException;
import org.jclouds.io.ContentMetadata;
import org.jclouds.io.ContentMetadataBuilder;
import org.jclouds.io.Payload;
import org.jclouds.io.PayloadSlicer;
import org.jclouds.providers.ProviderMetadata;
import org.jspecify.annotations.Nullable;

import reactor.core.publisher.Flux;

/**
 * jclouds BlobStore backed by Azure Blob Storage via the official Azure
 * Storage SDK (BlobServiceClient).  Multipart uploads are emulated with
 * block-blob staged blocks plus a zero-length "stub" blob that tracks the
 * in-progress upload.
 */
@Singleton
public final class AzureBlobStore extends BaseBlobStore {
    // Key prefix under which multipart-upload stub blobs are created; the
    // stub's tags record the eventual target blob name.
    private static final String STUB_BLOB_PREFIX = ".s3proxy/stubs/";
    // Tag key on the stub blob holding the target blob name.
    private static final String TARGET_BLOB_NAME_TAG =
            "s3proxy_target_blob_name";
    // Parts are hashed with MD5 to produce S3-style part ETags.
    private static final HashFunction MD5 = Hashing.md5();

    // Disable retries since client should retry on errors.
private static final RequestRetryOptions NO_RETRY_OPTIONS = new RequestRetryOptions( RetryPolicyType.FIXED, /*maxTries=*/ 1, /*tryTimeoutInSeconds=*/ (Integer) null, /*retryDelayInMs=*/ null, /*maxRetryDelayInMs=*/ null, /*secondaryHost=*/ null); private final BlobServiceClient blobServiceClient; private final String endpoint; private final Supplier creds; @Inject AzureBlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds, ProviderMetadata provider) { super(context, blobUtils, defaultLocation, locations, slicer); this.endpoint = provider.getEndpoint(); this.creds = creds; var cred = creds.get(); var blobServiceClientBuilder = new BlobServiceClientBuilder(); if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) { blobServiceClientBuilder.credential( new AzureNamedKeyCredential(cred.identity, cred.credential)); } else { blobServiceClientBuilder.credential( new DefaultAzureCredentialBuilder().build()); } blobServiceClient = blobServiceClientBuilder .endpoint(endpoint) .retryOptions(NO_RETRY_OPTIONS) .buildClient(); } @Override public PageSet list() { var set = ImmutableSet.builder(); for (var container : blobServiceClient.listBlobContainers()) { set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null, container.getName(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, /*creationDate=*/ null, toDate(container.getProperties().getLastModified()), Map.of(), /*size=*/ null, Tier.STANDARD)); } return new PageSetImpl(set.build(), null); } @Override public PageSet list(String container, ListContainerOptions options) { var client = blobServiceClient.getBlobContainerClient(container); var azureOptions = new ListBlobsOptions(); azureOptions.setPrefix(options.getPrefix()); azureOptions.setMaxResultsPerPage(options.getMaxResults()); var marker = options.getMarker() != null ? 
URLDecoder.decode(options.getMarker(), StandardCharsets.UTF_8) : null; var set = ImmutableSet.builder(); PagedResponse page; try { page = client.listBlobsByHierarchy( options.getDelimiter(), azureOptions, /*timeout=*/ null) .iterableByPage(marker).iterator().next(); } catch (BlobStorageException bse) { translateAndRethrowException(bse, container, /*key=*/ null); throw bse; } for (var blob : page.getValue()) { var properties = blob.getProperties(); if (blob.isPrefix()) { set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH, /*id=*/ null, blob.getName(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, /*creationDate=*/ null, /*lastModified=*/ null, Map.of(), /*size=*/ null, Tier.STANDARD)); } else { set.add(new StorageMetadataImpl(StorageType.BLOB, /*id=*/ null, blob.getName(), /*location=*/ null, /*uri=*/ null, properties.getETag(), toDate(properties.getCreationTime()), toDate(properties.getLastModified()), Map.of(), properties.getContentLength(), toTier(properties.getAccessTier()))); } } return new PageSetImpl(set.build(), page.getContinuationToken()); } @Override public boolean containerExists(String container) { var client = blobServiceClient.getBlobContainerClient(container); return client.exists(); } @Override public boolean createContainerInLocation(Location location, String container) { return createContainerInLocation(location, container, new CreateContainerOptions()); } @Override public boolean createContainerInLocation(Location location, String container, CreateContainerOptions options) { var azureOptions = new BlobContainerCreateOptions(); if (options.isPublicRead()) { azureOptions.setPublicAccessType(PublicAccessType.CONTAINER); } try { var response = blobServiceClient .createBlobContainerIfNotExistsWithResponse( container, azureOptions, /*context=*/ null); return switch (response.getStatusCode()) { case 201 -> true; case 409 -> false; default -> false; }; } catch (BlobStorageException bse) { translateAndRethrowException(bse, container, /*key=*/ 
null); throw bse; } } @Override public void deleteContainer(String container) { try { blobServiceClient.deleteBlobContainer(container); } catch (BlobStorageException bse) { if (!bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) { throw bse; } } } @Override public boolean deleteContainerIfEmpty(String container) { var client = blobServiceClient.getBlobContainerClient(container); try { var page = client.listBlobsByHierarchy( /*delimiter=*/ null, /*options=*/ null, /*timeout=*/ null) .iterableByPage().iterator().next(); if (!page.getValue().isEmpty()) { return false; } blobServiceClient.deleteBlobContainer(container); return true; } catch (BlobStorageException bse) { if (bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) { return true; } throw bse; } } @Override public boolean blobExists(String container, String key) { var client = blobServiceClient.getBlobContainerClient(container) .getBlobClient(key); return client.exists(); } @Override public Blob getBlob(String container, String key, GetOptions options) { var client = blobServiceClient.getBlobContainerClient(container) .getBlobClient(key); BlobRange azureRange = null; if (!options.getRanges().isEmpty()) { var ranges = options.getRanges().get(0).split("-", 2); if (ranges[0].isEmpty()) { // handle to read from the end long offset = 0; long end = Long.parseLong(ranges[1]); long length = end; azureRange = new BlobRange(offset, length); throw new UnsupportedOperationException( "trailing ranges unsupported"); } else if (ranges[1].isEmpty()) { // handle to read from an offset till the end long offset = Long.parseLong(ranges[0]); azureRange = new BlobRange(offset); } else { // handle to read from an offset long offset = Long.parseLong(ranges[0]); long end = Long.parseLong(ranges[1]); long length = end - offset + 1; azureRange = new BlobRange(offset, length); } } var conditions = new BlobRequestConditions() .setIfMatch(options.getIfMatch()) .setIfModifiedSince(toOffsetDateTime( 
options.getIfModifiedSince())) .setIfNoneMatch(options.getIfNoneMatch()) .setIfUnmodifiedSince(toOffsetDateTime( options.getIfUnmodifiedSince())); BlobInputStream blobStream; try { blobStream = client.openInputStream(azureRange, conditions); } catch (BlobStorageException bse) { translateAndRethrowException(bse, container, key); if (bse.getStatusCode() == Status.REQUESTED_RANGE_NOT_SATISFIABLE.getStatusCode()) { throw new HttpResponseException( "illegal range: " + azureRange, null, HttpResponse.builder() .statusCode(Status.REQUESTED_RANGE_NOT_SATISFIABLE .getStatusCode()) .build()); } throw bse; } var properties = blobStream.getProperties(); var expires = properties.getExpiresOn(); long contentLength; if (azureRange == null) { contentLength = properties.getBlobSize(); } else { if (azureRange.getCount() == null) { contentLength = properties.getBlobSize() - azureRange.getOffset(); } else { contentLength = azureRange.getCount(); } } var blob = new BlobBuilderImpl() .name(key) .userMetadata(properties.getMetadata()) .payload(blobStream) .cacheControl(properties.getCacheControl()) .contentDisposition(properties.getContentDisposition()) .contentEncoding(properties.getContentEncoding()) .contentLanguage(properties.getContentLanguage()) .contentLength(contentLength) .contentType(properties.getContentType()) .expires(expires != null ? 
toDate(expires) : null) .build(); if (azureRange != null) { blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE, "bytes " + azureRange.getOffset() + "-" + (azureRange.getOffset() + contentLength - 1) + "/" + properties.getBlobSize()); } var metadata = blob.getMetadata(); metadata.setETag(properties.getETag()); metadata.setCreationDate(toDate(properties.getCreationTime())); metadata.setLastModified(toDate(properties.getLastModified())); return blob; } @Override public String putBlob(String container, Blob blob) { return putBlob(container, blob, new PutOptions()); } @Override public String putBlob(String container, Blob blob, PutOptions options) { var client = blobServiceClient.getBlobContainerClient(container) .getBlobClient(blob.getMetadata().getName()) .getBlockBlobClient(); try (var is = blob.getPayload().openStream()) { var azureOptions = new BlockBlobOutputStreamOptions(); azureOptions.setMetadata(blob.getMetadata().getUserMetadata()); // TODO: Expires? var blobHttpHeaders = new BlobHttpHeaders(); var contentMetadata = blob.getMetadata().getContentMetadata(); blobHttpHeaders.setCacheControl(contentMetadata.getCacheControl()); blobHttpHeaders.setContentDisposition( contentMetadata.getContentDisposition()); blobHttpHeaders.setContentEncoding( contentMetadata.getContentEncoding()); blobHttpHeaders.setContentLanguage( contentMetadata.getContentLanguage()); var hash = contentMetadata.getContentMD5AsHashCode(); blobHttpHeaders.setContentMd5(hash != null ? 
hash.asBytes() : null); blobHttpHeaders.setContentType(contentMetadata.getContentType()); azureOptions.setHeaders(blobHttpHeaders); if (blob.getMetadata().getTier() != Tier.STANDARD) { azureOptions.setTier(toAccessTier( blob.getMetadata().getTier())); } if (options instanceof PutOptions2 putOptions2) { String ifMatch = putOptions2.getIfMatch(); String ifNoneMatch = putOptions2.getIfNoneMatch(); if (ifMatch != null || ifNoneMatch != null) { azureOptions.setRequestConditions(new BlobRequestConditions() .setIfMatch(ifMatch) .setIfNoneMatch(ifNoneMatch)); } } try (var os = client.getBlobOutputStream( azureOptions, /*context=*/ null)) { is.transferTo(os); } // TODO: racy return blobServiceClient .getBlobContainerClient(container) .getBlobClient(blob.getMetadata().getName()) .getProperties() .getETag(); } catch (IOException ioe) { var cause = ioe.getCause(); if (cause instanceof BlobStorageException bse) { translateAndRethrowException( bse, container, /*key=*/ null); } throw new RuntimeException(ioe); } } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { var expiryTime = OffsetDateTime.now().plusDays(1); var permission = new BlobSasPermission().setReadPermission(true); var values = new BlobServiceSasSignatureValues(expiryTime, permission) .setStartTime(OffsetDateTime.now()); var fromClient = blobServiceClient .getBlobContainerClient(fromContainer) .getBlobClient(fromName); var url = fromClient.getBlobUrl(); String token; var cred = creds.get(); if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) { token = fromClient.generateSas(values); } else { var userDelegationKey = blobServiceClient.getUserDelegationKey( OffsetDateTime.now().minusMinutes(5), expiryTime); token = fromClient.generateUserDelegationSas(values, userDelegationKey); } // TODO: is this the best way to generate a SAS URL? var azureOptions = new BlobUploadFromUrlOptions(url + "?" 
+ token); var client = blobServiceClient .getBlobContainerClient(toContainer) .getBlobClient(toName) .getBlockBlobClient(); var headers = new BlobHttpHeaders(); var contentMetadata = options.contentMetadata(); if (contentMetadata != null) { var cacheControl = contentMetadata.getCacheControl(); if (cacheControl != null) { headers.setCacheControl(cacheControl); } var contentDisposition = contentMetadata.getContentDisposition(); if (contentDisposition != null) { headers.setContentDisposition(contentDisposition); } var contentEncoding = contentMetadata.getContentEncoding(); if (contentEncoding != null) { headers.setContentEncoding(contentEncoding); } var contentLanguage = contentMetadata.getContentLanguage(); if (contentLanguage != null) { headers.setContentLanguage(contentLanguage); } var contentType = contentMetadata.getContentType(); if (contentType != null) { headers.setContentType(contentType); } } azureOptions.setHeaders(headers); // TODO: setSourceRequestConditions(BlobRequestConditions) var response = client.uploadFromUrlWithResponse( azureOptions, /*timeout=*/ null, /*context=*/ null); // TODO: cannot do this as part of uploadFromUrlWithResponse? 
var userMetadata = options.userMetadata(); if (userMetadata != null) { client.setMetadata(userMetadata); } return response.getValue().getETag(); } @Override public void removeBlob(String container, String key) { var client = blobServiceClient.getBlobContainerClient(container) .getBlobClient(key); try { client.delete(); } catch (BlobStorageException bse) { if (!bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) { throw bse; } } } @Override public BlobMetadata blobMetadata(String container, String key) { var client = blobServiceClient.getBlobContainerClient(container) .getBlobClient(key); BlobProperties properties; try { properties = client.getProperties(); } catch (BlobStorageException bse) { if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { return null; } translateAndRethrowException(bse, container, /*key=*/ null); throw bse; } return new BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null, /*uri=*/ null, properties.getETag(), toDate(properties.getCreationTime()), toDate(properties.getLastModified()), properties.getMetadata(), /*publicUri=*/ null, container, toContentMetadata(properties), properties.getBlobSize(), toTier(properties.getAccessTier())); } @Override protected boolean deleteAndVerifyContainerGone(String container) { blobServiceClient.deleteBlobContainer(container); return true; } @Override public ContainerAccess getContainerAccess(String container) { var client = blobServiceClient.getBlobContainerClient(container); try { var blobAccessType = client.getAccessPolicy().getBlobAccessType(); return blobAccessType != null && blobAccessType.equals( PublicAccessType.CONTAINER) ? 
ContainerAccess.PUBLIC_READ : ContainerAccess.PRIVATE; } catch (BlobStorageException bse) { translateAndRethrowException(bse, container, /*key=*/ null); throw bse; } } @Override public void setContainerAccess(String container, ContainerAccess access) { var client = blobServiceClient.getBlobContainerClient(container); var publicAccess = access == ContainerAccess.PUBLIC_READ ? PublicAccessType.CONTAINER : PublicAccessType.BLOB; client.setAccessPolicy(publicAccess, List.of()); } @Override public BlobAccess getBlobAccess(String container, String key) { return BlobAccess.PRIVATE; } @Override public void setBlobAccess(String container, String key, BlobAccess access) { throw new UnsupportedOperationException("unsupported in Azure"); } @Override public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { var containerClient = blobServiceClient.getBlobContainerClient(container); try { if (!containerClient.exists()) { throw new ContainerNotFoundException(container, ""); } } catch (BlobStorageException bse) { translateAndRethrowException(bse, container, /*key=*/ null); throw bse; } var userMetadata = blobMetadata.getUserMetadata(); if (userMetadata != null && !userMetadata.isEmpty()) { for (var key : userMetadata.keySet()) { if (!isValidMetadataKey(key)) { throw new IllegalArgumentException( "Invalid metadata key: " + key); } } } String uploadKey = STUB_BLOB_PREFIX + UUID.randomUUID().toString(); String targetBlobName = blobMetadata.getName(); var stubBlobClient = containerClient.getBlobClient(uploadKey).getBlockBlobClient(); var contentMetadata = blobMetadata.getContentMetadata(); BlobHttpHeaders headers = new BlobHttpHeaders(); if (contentMetadata != null) { headers.setContentType(contentMetadata.getContentType()); headers.setContentDisposition(contentMetadata.getContentDisposition()); headers.setContentEncoding(contentMetadata.getContentEncoding()); headers.setContentLanguage(contentMetadata.getContentLanguage()); 
headers.setCacheControl(contentMetadata.getCacheControl()); } var uploadOptions = new BlockBlobSimpleUploadOptions( new ByteArrayInputStream(new byte[0]), 0); uploadOptions.setHeaders(headers); if (userMetadata != null && !userMetadata.isEmpty()) { uploadOptions.setMetadata(userMetadata); } if (blobMetadata.getTier() != null && blobMetadata.getTier() != Tier.STANDARD) { uploadOptions.setTier(toAccessTier(blobMetadata.getTier())); } stubBlobClient.uploadWithResponse(uploadOptions, null, null); var tags = new java.util.HashMap(); tags.put(TARGET_BLOB_NAME_TAG, targetBlobName); stubBlobClient.setTags(tags); return MultipartUpload.create(container, targetBlobName, uploadKey, blobMetadata, options); } /** * Validates metadata key according to Azure naming rules. * Keys must be valid C# identifiers (alphanumeric and underscores). */ private static boolean isValidMetadataKey(String key) { if (key == null || key.isEmpty()) { return false; } // Must start with letter or underscore if (!Character.isLetter(key.charAt(0)) && key.charAt(0) != '_') { return false; } // Rest must be alphanumeric or underscore for (int i = 1; i < key.length(); i++) { char c = key.charAt(i); if (!Character.isLetterOrDigit(c) && c != '_') { return false; } } return true; } @Override public void abortMultipartUpload(MultipartUpload mpu) { // Delete the stub blob to remove the upload from listMultipartUploads // Note: Uncommitted blocks are automatically removed by Azure after 7 days try { blobServiceClient .getBlobContainerClient(mpu.containerName()) .getBlobClient(mpu.id()) .delete(); } catch (BlobStorageException bse) { if (bse.getStatusCode() == 404) { throw new KeyNotFoundException(mpu.containerName(), mpu.id(), "Multipart upload not found: " + mpu.id()); } throw bse; } } @Override public String completeMultipartUpload(MultipartUpload mpu, List parts) { String uploadKey = mpu.id(); String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length()); var containerClient = 
blobServiceClient.getBlobContainerClient(mpu.containerName()); var stubBlobClient = containerClient.getBlobClient(uploadKey); BlobProperties stubProperties; java.util.Map stubTags; try { stubProperties = stubBlobClient.getProperties(); stubTags = stubBlobClient.getTags(); } catch (BlobStorageException bse) { if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw new IllegalArgumentException( "Upload not found: uploadId=" + uploadKey); } throw bse; } String targetBlobName = stubTags.get(TARGET_BLOB_NAME_TAG); if (targetBlobName == null) { throw new IllegalArgumentException( "Stub blob missing target name tag: uploadId=" + uploadKey); } var userMetadata = stubProperties.getMetadata(); var contentMetadata = toContentMetadata(stubProperties); var tier = stubProperties.getAccessTier(); if (parts == null || parts.isEmpty()) { throw new IllegalArgumentException("Parts list cannot be empty"); } int previousPartNumber = 0; for (var part : parts) { int partNumber = part.partNumber(); if (partNumber <= previousPartNumber) { throw new IllegalArgumentException( "Parts must be in strictly ascending order"); } previousPartNumber = partNumber; } if (parts.size() > 50_000) { throw new IllegalArgumentException( "Too many parts: " + parts.size() + " (max 50,000)"); } var client = containerClient .getBlobClient(targetBlobName) .getBlockBlobClient(); var blockList = client.listBlocks(BlockListType.UNCOMMITTED); var uncommittedBlocks = blockList.getUncommittedBlocks(); var blockMap = new java.util.HashMap(); for (var block : uncommittedBlocks) { blockMap.put(block.getName(), block.getSizeLong()); } var blockIds = ImmutableList.builder(); for (int i = 0; i < parts.size(); i++) { var part = parts.get(i); int partNumber = part.partNumber(); String blockId = makeBlockId(nonce, partNumber); blockIds.add(blockId); if (!blockMap.containsKey(blockId)) { throw new IllegalArgumentException( "Part " + partNumber + " not found in staged blocks"); } } BlobHttpHeaders blobHttpHeaders = 
new BlobHttpHeaders(); blobHttpHeaders.setContentType(contentMetadata.getContentType()); blobHttpHeaders.setContentDisposition(contentMetadata.getContentDisposition()); blobHttpHeaders.setContentEncoding(contentMetadata.getContentEncoding()); blobHttpHeaders.setContentLanguage(contentMetadata.getContentLanguage()); blobHttpHeaders.setCacheControl(contentMetadata.getCacheControl()); var options = new BlockBlobCommitBlockListOptions( blockIds.build()); options.setHeaders(blobHttpHeaders); if (userMetadata != null && !userMetadata.isEmpty()) { options.setMetadata(userMetadata); } if (tier != null) { options.setTier(tier); } // Support conditional writes (If-Match/If-None-Match) if (mpu.putOptions() instanceof PutOptions2) { var putOptions2 = (PutOptions2) mpu.putOptions(); String ifMatch = putOptions2.getIfMatch(); String ifNoneMatch = putOptions2.getIfNoneMatch(); if (ifMatch != null || ifNoneMatch != null) { options.setRequestConditions(new BlobRequestConditions() .setIfMatch(ifMatch) .setIfNoneMatch(ifNoneMatch)); } } try { var response = client.commitBlockListWithResponse( options, /*timeout=*/ null, /*context=*/ null); stubBlobClient.delete(); String finalETag = response.getValue().getETag(); return finalETag; } catch (BlobStorageException bse) { var errorCode = bse.getErrorCode(); if (errorCode.equals(BlobErrorCode.BLOB_NOT_FOUND) || errorCode.equals(BlobErrorCode.CONTAINER_NOT_FOUND)) { throw new IllegalArgumentException( "Upload not found: container=" + mpu.containerName() + ", key=" + targetBlobName); } else if (bse.getStatusCode() == 409) { throw new IllegalArgumentException( "Conflict during commit: " + bse.getMessage(), bse); } else if (bse.getStatusCode() == 412) { translateAndRethrowException(bse, mpu.containerName(), targetBlobName); } throw bse; } } @Override public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { if (partNumber < 1 || partNumber > 10_000) { throw new IllegalArgumentException( "Part number must 
be between 1 and 10,000, got: " + partNumber); } Long contentLength = payload.getContentMetadata().getContentLength(); if (contentLength == null) { throw new IllegalArgumentException("Content-Length is required"); } if (contentLength < 0) { throw new IllegalArgumentException( "Content-Length must be non-negative, got: " + contentLength); } if (contentLength > getMaximumMultipartPartSize()) { throw new IllegalArgumentException( "Part size exceeds maximum of " + getMaximumMultipartPartSize() + " bytes: " + contentLength); } String uploadKey = mpu.id(); String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length()); String blockId = makeBlockId(nonce, partNumber); var asyncClient = createNonRetryingBlockBlobAsyncClient( mpu.containerName(), mpu.blobName()); byte[] md5Hash; try (var is = payload.openStream(); var his = new HashingInputStream(MD5, is)) { var providedMd5 = payload.getContentMetadata().getContentMD5AsHashCode(); final int maxChunkSize = 4 * 1024 * 1024; Flux body = Flux.generate( () -> 0L, (position, sink) -> { try { if (position >= contentLength) { sink.complete(); return position; } int chunkSize = (int) Math.min(maxChunkSize, contentLength - position); ByteBuffer buffer = ByteBuffer.allocate(chunkSize); byte[] array = buffer.array(); int totalRead = 0; while (totalRead < chunkSize) { int read = his.read(array, totalRead, chunkSize - totalRead); if (read == -1) { if (position + totalRead < contentLength) { sink.error(new IOException( "Stream ended at %d bytes, expected %d".formatted( position + totalRead, contentLength))); return position + totalRead; } break; } totalRead += read; } if (totalRead == 0) { sink.error(new IOException( "Stream ended at %d bytes, expected %d".formatted( position, contentLength))); return position; } buffer.position(totalRead); buffer.flip(); sink.next(buffer.asReadOnlyBuffer()); long nextPosition = position + totalRead; if (nextPosition >= contentLength) { sink.complete(); } return nextPosition; } catch (IOException e) { 
sink.error(e); return position; } }, position -> { // Stream is closed by try-with-resources } ); asyncClient.stageBlock(blockId, body, contentLength).block(); md5Hash = his.hash().asBytes(); if (providedMd5 != null) { if (!MessageDigest.isEqual(md5Hash, providedMd5.asBytes())) { throw new IllegalArgumentException("Content-MD5 mismatch"); } } } catch (BlobStorageException bse) { translateAndRethrowException(bse, mpu.containerName(), mpu.blobName()); throw new RuntimeException( "Failed to upload part %d for blob '%s' in container '%s': %s".formatted( partNumber, mpu.blobName(), mpu.containerName(), bse.getMessage()), bse); } catch (IOException ioe) { throw new RuntimeException( "Failed to upload part %d for blob '%s' in container '%s': %s".formatted( partNumber, mpu.blobName(), mpu.containerName(), ioe.getMessage()), ioe); } String eTag = BaseEncoding.base16() .lowerCase().encode(md5Hash); Date lastModified = null; return MultipartPart.create(partNumber, contentLength, eTag, lastModified); } /** * Creates a BlockBlobAsyncClient with retries disabled for streaming uploads. * This allows us to stream directly from non-markable InputStreams without * needing temp files or buffering. The S3 client can retry the entire part * upload if needed. 
*/ private BlockBlobAsyncClient createNonRetryingBlockBlobAsyncClient( String container, String blobName) { var cred = creds.get(); var clientBuilder = new BlobServiceClientBuilder() .endpoint(endpoint) .retryOptions(NO_RETRY_OPTIONS); if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) { clientBuilder.credential( new AzureNamedKeyCredential(cred.identity, cred.credential)); } else { clientBuilder.credential(new DefaultAzureCredentialBuilder().build()); } return clientBuilder.buildAsyncClient() .getBlobContainerAsyncClient(container) .getBlobAsyncClient(blobName) .getBlockBlobAsyncClient(); } @Override public List listMultipartUpload(MultipartUpload mpu) { String uploadKey = mpu.id(); String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length()); var containerClient = blobServiceClient.getBlobContainerClient(mpu.containerName()); var stubBlobClient = containerClient.getBlobClient(uploadKey); String targetBlobName; try { var stubTags = stubBlobClient.getTags(); targetBlobName = stubTags.get(TARGET_BLOB_NAME_TAG); } catch (BlobStorageException bse) { if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw new IllegalArgumentException( "Upload not found: uploadId=" + uploadKey); } throw bse; } var client = containerClient .getBlobClient(targetBlobName) .getBlockBlobClient(); BlockList blockList; try { blockList = client.listBlocks(BlockListType.ALL); } catch (BlobStorageException bse) { if (bse.getStatusCode() == 404) { return ImmutableList.of(); } throw bse; } var parts = ImmutableList.builder(); String noncePrefix = nonce + ":"; for (var properties : blockList.getUncommittedBlocks()) { String encodedBlockId = properties.getName(); String blockId; try { blockId = new String(Base64.getDecoder().decode(encodedBlockId), StandardCharsets.UTF_8); } catch (IllegalArgumentException e) { continue; } if (!blockId.startsWith(noncePrefix)) { continue; } int partNumber; try { String partNumberStr = blockId.substring(noncePrefix.length()); partNumber = 
Integer.parseInt(partNumberStr);
            } catch (NumberFormatException e) {
                // Suffix is not a number: block id was not created by us.
                continue;
            }
            String eTag = ""; // listBlocks does not return ETag
            Date lastModified = null; // listBlocks does not return LastModified
            parts.add(MultipartPart.create(partNumber,
                    properties.getSizeLong(), eTag, lastModified));
        }
        return parts.build();
    }

    /**
     * Lists in-progress multipart uploads by enumerating the stub blobs
     * under STUB_BLOB_PREFIX and reading each stub's target blob name tag.
     */
    @Override
    public List listMultipartUploads(String container) {
        var containerClient =
                blobServiceClient.getBlobContainerClient(container);
        var builder = ImmutableList.builder();
        var options = new ListBlobsOptions();
        // e.g., ".s3proxy/stubs/"
        options.setPrefix(STUB_BLOB_PREFIX);
        // Tags must be requested explicitly or getTags() below returns null.
        var details = new BlobListDetails();
        details.setRetrieveTags(true);
        options.setDetails(details);
        for (var blobItem : containerClient.listBlobs(options, null, null)) {
            String uploadKey = blobItem.getName();
            var tags = blobItem.getTags();
            // Skip stubs without a target name tag: not a valid upload stub.
            if (tags == null || tags.get(TARGET_BLOB_NAME_TAG) == null) {
                continue;
            }
            String targetBlobName = tags.get(TARGET_BLOB_NAME_TAG);
            builder.add(MultipartUpload.create(container, targetBlobName,
                    uploadKey, null, null));
        }
        return builder.build();
    }

    @Override
    public long getMinimumMultipartPartSize() {
        return 1;
    }

    @Override
    public long getMaximumMultipartPartSize() {
        // 4000 MiB per part
        return 4000L * 1024 * 1024;
    }

    @Override
    public int getMaximumNumberOfParts() {
        return 50 * 1000;
    }

    @Override
    public InputStream streamBlob(String container, String name) {
        throw new UnsupportedOperationException("not yet implemented");
    }

    /** Converts a legacy Date to an OffsetDateTime in UTC; null passes through. */
    private static OffsetDateTime toOffsetDateTime(@Nullable Date date) {
        if (date == null) {
            return null;
        }
        return date.toInstant().atOffset(ZoneOffset.UTC);
    }

    private static Date toDate(OffsetDateTime time) {
        return new Date(time.toInstant().toEpochMilli());
    }

    /** Maps a jclouds Tier onto an Azure AccessTier. */
    private static AccessTier toAccessTier(Tier tier) {
        return switch (tier) {
        case ARCHIVE -> AccessTier.ARCHIVE;
        case COOL -> AccessTier.COOL;
        // INFREQUENT has no direct Azure equivalent here; mapped to COOL
        case INFREQUENT -> AccessTier.COOL;
        case COLD -> AccessTier.COLD;
        case STANDARD -> AccessTier.HOT;
        };
    }

    /** Maps an Azure AccessTier back onto a jclouds Tier. */
    private static Tier toTier(AccessTier tier) {
        if (tier
== null) {
            return Tier.STANDARD;
        } else if (tier.equals(AccessTier.ARCHIVE)) {
            return Tier.ARCHIVE;
        } else if (tier.equals(AccessTier.COLD)) {
            return Tier.COLD;
        } else if (tier.equals(AccessTier.COOL)) {
            return Tier.COOL;
        } else {
            // HOT and any unrecognized tier fall back to STANDARD
            return Tier.STANDARD;
        }
    }

    /** Copies Azure blob properties into jclouds ContentMetadata. */
    private static ContentMetadata toContentMetadata(
            BlobProperties properties) {
        var expires = properties.getExpiresOn();
        return ContentMetadataBuilder.create()
                .cacheControl(properties.getCacheControl())
                .contentDisposition(properties.getContentDisposition())
                .contentEncoding(properties.getContentEncoding())
                .contentLanguage(properties.getContentLanguage())
                .contentLength(properties.getBlobSize())
                .contentType(properties.getContentType())
                .expires(expires != null ? toDate(expires) : null)
                .build();
    }

    /**
     * Creates a deterministic Base64-encoded block ID using the upload nonce
     * and padded part number.
     *
     * "Block IDs are strings of equal length within a blob. Block client
     * code usually uses base-64 encoding to normalize strings into equal
     * lengths."
     * Source: https://learn.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs
     *
     * Format: nonce + ":" + 5-digit padded part number (e.g., "nonce:00001")
     *
     * @param nonce The upload session nonce from the uploadId context
     * @param partNumber The part number (1-10,000)
     * @return Base64-encoded block ID
     */
    private static String makeBlockId(String nonce, int partNumber) {
        String rawId = "%s:%05d".formatted(nonce, partNumber);
        return Base64.getEncoder().encodeToString(
                rawId.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Translate BlobStorageException to a jclouds exception. Throws if
     * translated otherwise returns.
     */
    private void translateAndRethrowException(BlobStorageException bse,
            String container, @Nullable String key) {
        var code = bse.getErrorCode();
        if (code.equals(BlobErrorCode.BLOB_NOT_FOUND)) {
            var exception = new KeyNotFoundException(container, key, "");
            exception.initCause(bse);
            throw exception;
        } else if (code.equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {
            var exception = new ContainerNotFoundException(container, "");
            exception.initCause(bse);
            throw exception;
        } else if (code.equals(BlobErrorCode.CONDITION_NOT_MET)) {
            // Surface as a 412 Precondition Failed HttpResponseException.
            var request = HttpRequest.builder()
                    .method("GET")
                    .endpoint(endpoint)
                    .build();
            var response = HttpResponse.builder()
                    .statusCode(Status.PRECONDITION_FAILED.getStatusCode())
                    .build();
            throw new HttpResponseException(
                    new HttpCommand(request), response, bse);
        } else if (code.equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
            // Also mapped to 412, but with a PUT request context.
            var request = HttpRequest.builder()
                    .method("PUT")
                    .endpoint(endpoint)
                    .build();
            var response = HttpResponse.builder()
                    .statusCode(Status.PRECONDITION_FAILED.getStatusCode())
                    .build();
            throw new HttpResponseException(
                    new HttpCommand(request), response, bse);
        } else if (code.equals(BlobErrorCode.INVALID_OPERATION)) {
            // Mapped to 400 Bad Request.
            var request = HttpRequest.builder()
                    .method("GET")
                    .endpoint(endpoint)
                    .build();
            var response = HttpResponse.builder()
                    .statusCode(Status.BAD_REQUEST.getStatusCode())
                    .build();
            throw new HttpResponseException(
                    new HttpCommand(request), response, bse);
        } else if (bse.getErrorCode().equals(
                BlobErrorCode.INVALID_RESOURCE_NAME)) {
            throw new IllegalArgumentException(
                    "Invalid container name", bse);
        }
        // Unrecognized codes fall through: the caller decides what to do.
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/azureblob/AzureBlobStoreContextModule.java
================================================

/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.azureblob;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.attr.ConsistencyModel;

/**
 * Guice module wiring AzureBlobStore into the jclouds BlobStore context.
 */
public final class AzureBlobStoreContextModule extends AbstractModule {
    @Override
    protected void configure() {
        // Advertise the STRICT consistency model to jclouds consumers.
        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);
        // One AzureBlobStore instance serves the whole context.
        bind(BlobStore.class).to(AzureBlobStore.class).in(Scopes.SINGLETON);
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/crypto/Constants.java
================================================

/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.crypto;

import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

/**
 * Shared constants for the S3Proxy encryption layer.
 */
public final class Constants {
    // Format version stamped into every part padding trailer.
    public static final short VERSION = 1;
    public static final String AES_CIPHER = "AES/CFB/NoPadding";
    public static final String S3_ENC_SUFFIX = ".s3enc";
    public static final String MPU_FOLDER = ".mpu/";
    // Matches the "-N" part-count suffix of a multipart ETag.
    public static final Pattern MPU_ETAG_SUFFIX_PATTERN =
            Pattern.compile("-([0-9]+)$");
    public static final String METADATA_ENCRYPTION_PARTS =
            "s3proxy_encryption_parts";
    public static final String METADATA_IS_ENCRYPTED_MULTIPART =
            "s3proxy_encryption_multipart";
    public static final String METADATA_MULTIPART_KEY = "s3proxy_mpu_key";
    public static final int AES_BLOCK_SIZE = 16;
    // Total size of the padding trailer appended after each encrypted part.
    public static final int PADDING_BLOCK_SIZE = 64;
    // Marker written at the start of the trailer to detect encrypted blobs.
    public static final byte[] DELIMITER =
            "-S3-ENC-".getBytes(StandardCharsets.UTF_8);
    // Trailer layout (see EncryptionInputStream.padding()):
    // delimiter (8) + IV (16) + part (4) + size (8) + version (2) + reserved.
    public static final int PADDING_DELIMITER_LENGTH = DELIMITER.length;
    public static final int PADDING_IV_LENGTH = 16;
    public static final int PADDING_PART_LENGTH = 4;
    public static final int PADDING_SIZE_LENGTH = 8;
    public static final int PADDING_VERSION_LENGTH = 2;

    private Constants() {
        throw new AssertionError("Cannot instantiate utility constructor");
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/crypto/Decryption.java
================================================

/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.crypto;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.TreeMap;

import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;

import com.google.common.io.ByteStreams;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.options.GetOptions;

/**
 * Plans and performs decryption of a (possibly multipart) encrypted blob.
 * The constructor reads the padding trailer(s) from the end of the blob to
 * build a part list, then computes which encrypted byte range must be
 * fetched to satisfy a plaintext offset/length request.
 */
public class Decryption {
    private final SecretKey encryptionKey;
    // part number -> padding trailer; key 1 is the trailer at the blob's end
    private TreeMap partList;
    // plaintext bytes to discard from the start of the decrypted stream
    private long outputOffset;
    // plaintext bytes to return; <= 0 means "to the end"
    private long outputLength;
    // whether an extra leading AES block must be skipped (CFB seek)
    private boolean skipFirstBlock;
    private long unencryptedSize;
    private long encryptedSize;
    // encrypted byte position the caller should start reading from
    private long startAt;
    // number of whole parts skipped before the requested offset
    private int skipParts;
    // encrypted bytes skipped inside the first non-skipped part
    private long skipPartBytes;
    private boolean isEncrypted;

    public Decryption(SecretKeySpec key, BlobStore blobStore,
            BlobMetadata meta, long offset, long length) throws IOException {
        encryptionKey = key;
        outputLength = length;
        isEncrypted = true;

        // if blob does not exist or size is smaller than the part padding
        // then the file is considered not encrypted
        if (meta == null || meta.getSize() <= 64) {
            blobIsNotEncrypted(offset);
            return;
        }

        // get the 64 byte of part padding from the end of the blob
        var options = new GetOptions();
        options.range(meta.getSize() - Constants.PADDING_BLOCK_SIZE,
                meta.getSize());
        Blob blob = blobStore.getBlob(meta.getContainer(), meta.getName(),
                options);

        // read the padding structure
        PartPadding lastPartPadding = PartPadding.readPartPaddingFromBlob(blob);

        // no delimiter marker at the end means the blob is plaintext
        if (!Arrays.equals(
                lastPartPadding.getDelimiter()
                        .getBytes(StandardCharsets.UTF_8),
                Constants.DELIMITER)) {
            blobIsNotEncrypted(offset);
            return;
        }

        partList = new TreeMap<>();

        // detect multipart
        if (lastPartPadding.getPart() > 1 &&
                meta.getSize() >
                        (lastPartPadding.getSize() +
                                Constants.PADDING_BLOCK_SIZE)) {
            unencryptedSize = lastPartPadding.getSize();
            encryptedSize = lastPartPadding.getSize() +
                    Constants.PADDING_BLOCK_SIZE;

            // note that parts are in reversed order
            int part = 1;

            // add the last part to the list
            partList.put(part, lastPartPadding);

            // loop part by part from end to the beginning
            // to build a list of all blocks
            while (encryptedSize < meta.getSize()) {
                // get the next block
                // rewind by the current encrypted block size
                // minus the encryption padding
                options = new GetOptions();
                long startAt = (meta.getSize() - encryptedSize) -
                        Constants.PADDING_BLOCK_SIZE;
                long endAt = meta.getSize() - encryptedSize - 1;
                options.range(startAt, endAt);
                blob = blobStore.getBlob(meta.getContainer(), meta.getName(),
                        options);
                part++;

                // read the padding structure
                PartPadding partPadding =
                        PartPadding.readPartPaddingFromBlob(blob);

                // add the part to the list
                this.partList.put(part, partPadding);

                // update the encrypted size
                encryptedSize = encryptedSize +
                        (partPadding.getSize() + Constants.PADDING_BLOCK_SIZE);
                unencryptedSize = this.unencryptedSize + partPadding.getSize();
            }
        } else {
            // add the single part to the list
            partList.put(1, lastPartPadding);

            // update the unencrypted size
            unencryptedSize = meta.getSize() - Constants.PADDING_BLOCK_SIZE;

            // update the encrypted size
            encryptedSize = meta.getSize();
        }

        // calculate the offset
        calculateOffset(offset);

        // if there is a offset and no length set the output length
        if (offset > 0 && length <= 0) {
            outputLength = unencryptedSize - offset;
        }
    }

    // Mark the blob as plaintext: pass the requested offset straight through.
    private void blobIsNotEncrypted(long offset) {
        isEncrypted = false;
        startAt = offset;
    }

    // calculate the tail bytes we need to read
    // because we know the unencryptedSize we can return startAt offset
    public final long calculateTail() {
        long offset = unencryptedSize - outputLength;
        calculateOffset(offset);
        return startAt;
    }

    public final long getEncryptedSize() {
        return encryptedSize;
    }

    public final long getUnencryptedSize() {
        return unencryptedSize;
    }

    /**
     * Converts an inclusive plaintext end position into the encrypted end
     * position that must be fetched (accounting for part paddings, or for
     * the extra AES block CFB needs in the single-part case).
     */
    public final long calculateEndAt(long endAt) {
        // need to have always one more
        endAt++;

        // handle multipart
        if (partList.size() > 1) {
            long plaintextSize = 0;

            // always skip 1 part at the end
            int partCounter = 1;

            // we need the map in reversed order
            for (var part : partList.descendingMap().entrySet()) {
                // check the parts that are between offset and end
                plaintextSize = plaintextSize + part.getValue().getSize();
                if (endAt > plaintextSize) {
                    partCounter++;
                } else {
                    break;
                }
            }

            // add the paddings of all parts
            endAt = endAt +
                    ((long) Constants.PADDING_BLOCK_SIZE * partCounter);
        } else {
            // we need to read one AES block more in AES CFB mode
            long rest = endAt % Constants.AES_BLOCK_SIZE;
            if (rest > 0) {
                endAt = endAt + Constants.AES_BLOCK_SIZE;
            }
        }

        return endAt;
    }

    // open the streams and pipes
    public final InputStream openStream(InputStream is) throws IOException {
        // if the blob is not encrypted return the unencrypted stream
        if (!isEncrypted) {
            return is;
        }

        // pass input stream through decryption
        InputStream dis = new DecryptionInputStream(is, encryptionKey,
                partList, skipParts, skipPartBytes);

        // skip some bytes if necessary
        long offset = outputOffset;
        if (this.skipFirstBlock) {
            offset = offset + Constants.AES_BLOCK_SIZE;
        }
        dis.skipNBytes(offset);

        // trim the stream to a specific length if needed
        return outputLength >= 0 ?
                ByteStreams.limit(dis, outputLength) : dis;
    }

    /**
     * Computes startAt/skipParts/skipPartBytes/outputOffset for a plaintext
     * offset, walking parts from the start of the blob (map in descending
     * key order) until the part containing the offset is found.
     */
    private void calculateOffset(long offset) {
        startAt = 0;
        skipParts = 0;

        // handle multipart
        if (partList.size() > 1) {
            // init counters
            long plaintextSize = 0;
            long encryptedSize = 0;
            long partOffset;
            long partStartAt = 0;

            // we need the map in reversed order
            for (var part : partList.descendingMap().entrySet()) {
                // compute the plaintext size of the current part
                plaintextSize = plaintextSize + part.getValue().getSize();

                // check if the offset is located in another part
                if (offset > plaintextSize) {
                    // compute the encrypted size of the skipped part
                    encryptedSize = encryptedSize +
                            part.getValue().getSize() +
                            Constants.PADDING_BLOCK_SIZE;

                    // compute offset in this part
                    partOffset = offset - plaintextSize;

                    // skip the first block in CFB mode
                    skipFirstBlock = partOffset >= 16;

                    // compute the offset of the output
                    outputOffset = partOffset % Constants.AES_BLOCK_SIZE;

                    // skip this part
                    skipParts++;

                    // we always need to read one previous AES block in CFB mode
                    // if we read from offset
                    if (partOffset > Constants.AES_BLOCK_SIZE) {
                        long rest = partOffset % Constants.AES_BLOCK_SIZE;
                        partStartAt =
                                (partOffset - Constants.AES_BLOCK_SIZE) - rest;
                    } else {
                        partStartAt = 0;
                    }
                } else {
                    // start at a specific byte position
                    // while respecting other parts
                    startAt = encryptedSize + partStartAt;

                    // skip part bytes if we are not starting
                    // from the beginning of a part
                    skipPartBytes = partStartAt;
                    break;
                }
            }
        }

        // handle single part
        if (skipParts == 0) {
            // skip the first block in CFB mode
            skipFirstBlock = offset >= 16;

            // compute the offset of the output
            outputOffset = offset % Constants.AES_BLOCK_SIZE;

            // we always need to read one previous AES block in CFB mode
            // if we read from offset
            if (offset > Constants.AES_BLOCK_SIZE) {
                long rest = offset % Constants.AES_BLOCK_SIZE;
                startAt = (offset - Constants.AES_BLOCK_SIZE) - rest;
            }

            // skip part bytes if we are not starting
            // from the beginning of a part
            skipPartBytes = startAt;
        }
    }

    public final
long getStartAt() {
        return startAt;
    }

    public final boolean isEncrypted() {
        return isEncrypted;
    }

    /** Returns the plaintext length the caller should expect to receive. */
    public final long getContentLength() {
        if (outputLength > 0) {
            return outputLength;
        } else {
            return unencryptedSize;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/crypto/DecryptionInputStream.java
================================================

/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.crypto;

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.SortedMap;

import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.ShortBufferException;

/**
 * Decrypting stream patterned after javax.crypto.CipherInputStream that
 * additionally understands the part structure of multipart-encrypted blobs:
 * it re-initializes the cipher with each part's IV and skips the padding
 * trailer between parts.
 */
public class DecryptionInputStream extends FilterInputStream {

    // the cipher engine to use to process stream data
    private final Cipher cipher;

    // the secret key
    private final SecretKey key;

    // the list of parts we expect in the stream
    private final SortedMap parts;

    /* the buffer holding data that have been read in from the
       underlying stream, but have not been processed by the cipher
       engine.
*/ private final byte[] ibuffer = new byte[4096]; // having reached the end of the underlying input stream private boolean done; /* the buffer holding data that have been processed by the cipher engine, but have not been read out */ private byte[] obuffer; // the offset pointing to the next "new" byte private int ostart; // the offset pointing to the last "new" byte private int ofinish; // stream status private boolean closed; // the current part private int part; // the remaining bytes of the current part private long partBytesRemain; /** * Constructs a CipherInputStream from an InputStream and a * Cipher. *
Note: if the specified input stream or cipher is * null, a NullPointerException may be thrown later when * they are used. * * @param is the to-be-processed input stream * @param key the decryption key * @param parts the list of parts * @param skipParts the amount of parts to skip * @param skipPartBytes the amount of part bytes to skip * @throws IOException if cipher fails */ public DecryptionInputStream(InputStream is, SecretKey key, SortedMap parts, int skipParts, long skipPartBytes) throws IOException { super(is); in = is; this.parts = parts; this.key = key; PartPadding partPadding = parts.get(parts.size() - skipParts); try { // init the cipher cipher = Cipher.getInstance(Constants.AES_CIPHER); cipher.init(Cipher.DECRYPT_MODE, key, partPadding.getIv()); } catch (Exception e) { throw new IOException(e); } // set the part to begin with part = parts.size() - skipParts; // adjust part size due to offset partBytesRemain = parts.get(part).getSize() - skipPartBytes; } /** * Ensure obuffer is big enough for the next update or doFinal * operation, given the input length inLen (in bytes) * The ostart and ofinish indices are reset to 0. * * @param inLen the input length (in bytes) */ private void ensureCapacity(int inLen) { int minLen = cipher.getOutputSize(inLen); if (obuffer == null || obuffer.length < minLen) { obuffer = new byte[minLen]; } ostart = 0; ofinish = 0; } /** * Private convenience function, read in data from the underlying * input stream and process them with cipher. This method is called * when the processed bytes inside obuffer has been exhausted. *

* Entry condition: ostart = ofinish *

* Exit condition: ostart = 0 AND ostart <= ofinish *

* return (ofinish-ostart) (we have this many bytes for you) * return 0 (no data now, but could have more later) * return -1 (absolutely no more data) *

     * Note: Exceptions are only thrown after the stream is completely read.
     * For AEAD ciphers a read() of any length will internally cause the
     * whole stream to be read fully and verify the authentication tag before
     * returning decrypted data or exceptions.
     */
    private int getMoreData() throws IOException {
        if (done) {
            return -1;
        }

        // never read beyond the end of the current part's ciphertext
        int readLimit = ibuffer.length;
        if (partBytesRemain < ibuffer.length) {
            readLimit = (int) partBytesRemain;
        }

        int readin;
        if (partBytesRemain == 0) {
            readin = -1;
        } else {
            readin = in.read(ibuffer, 0, readLimit);
        }

        if (readin == -1) {
            // current part exhausted: flush any buffered cipher output
            ensureCapacity(0);
            try {
                ofinish = cipher.doFinal(obuffer, 0);
            } catch (Exception e) {
                throw new IOException(e);
            }

            // advance to the next part; part keys count down toward 1
            int nextPart = part - 1;
            if (parts.containsKey(nextPart)) {
                // reset cipher
                PartPadding partPadding = parts.get(nextPart);
                try {
                    cipher.init(Cipher.DECRYPT_MODE, key,
                            partPadding.getIv());
                } catch (Exception e) {
                    throw new IOException(e);
                }

                // update to the next part
                part = nextPart;

                // update the remaining bytes of the next part
                partBytesRemain = parts.get(nextPart).getSize();

                // Cannot call ByteStreams.skipFully since in may be shorter
                in.readNBytes(Constants.PADDING_BLOCK_SIZE);

                return ofinish;
            } else {
                // no more parts: end of stream
                done = true;
                if (ofinish == 0) {
                    return -1;
                } else {
                    return ofinish;
                }
            }
        }
        ensureCapacity(readin);
        try {
            ofinish = cipher.update(ibuffer, 0, readin, obuffer, ostart);
        } catch (ShortBufferException e) {
            throw new IOException(e);
        }
        partBytesRemain = partBytesRemain - readin;
        return ofinish;
    }

    /**
     * Reads the next byte of data from this input stream. The value
     * byte is returned as an int in the range
     * 0 to 255. If no byte is available
     * because the end of the stream has been reached, the value
     * -1 is returned. This method blocks until input data
     * is available, the end of the stream is detected, or an exception
     * is thrown.
     *
     * @return the next byte of data, or -1 if the end of the
     * stream is reached.
     * @throws IOException if an I/O error occurs.
*/ @Override public final int read() throws IOException { if (ostart >= ofinish) { // we loop for new data as the spec says we are blocking int i = 0; while (i == 0) { i = getMoreData(); } if (i == -1) { return -1; } } return (int) obuffer[ostart++] & 0xff; } /** * Reads up to b.length bytes of data from this input * stream into an array of bytes. *

* The read method of InputStream calls * the read method of three arguments with the arguments * b, 0, and b.length. * * @param b the buffer into which the data is read. * @return the total number of bytes read into the buffer, or * -1 is there is no more data because the end of * the stream has been reached. * @throws IOException if an I/O error occurs. * @see java.io.InputStream#read(byte[], int, int) */ @Override public final int read(byte[] b) throws IOException { return read(b, 0, b.length); } /** * Reads up to len bytes of data from this input stream * into an array of bytes. This method blocks until some input is * available. If the first argument is null, up to * len bytes are read and discarded. * * @param b the buffer into which the data is read. * @param off the start offset in the destination array * buf * @param len the maximum number of bytes read. * @return the total number of bytes read into the buffer, or * -1 if there is no more data because the end of * the stream has been reached. * @throws IOException if an I/O error occurs. * @see java.io.InputStream#read() */ @Override public final int read(byte[] b, int off, int len) throws IOException { if (ostart >= ofinish) { // we loop for new data as the spec says we are blocking int i = 0; while (i == 0) { i = getMoreData(); } if (i == -1) { return -1; } } if (len <= 0) { return 0; } int available = ofinish - ostart; if (len < available) { available = len; } if (b != null) { System.arraycopy(obuffer, ostart, b, off, available); } ostart = ostart + available; return available; } /** * Skips n bytes of input from the bytes that can be read * from this input stream without blocking. * *

Fewer bytes than requested might be skipped. * The actual number of bytes skipped is equal to n or * the result of a call to * {@link #available() available}, * whichever is smaller. * If n is less than zero, no bytes are skipped. * *

The actual number of bytes skipped is returned. * * @param n the number of bytes to be skipped. * @return the actual number of bytes skipped. * @throws IOException if an I/O error occurs. */ @Override public final long skip(long n) throws IOException { int available = ofinish - ostart; if (n > available) { n = available; } if (n < 0) { return 0; } ostart += (int) n; return n; } /** * Returns the number of bytes that can be read from this input * stream without blocking. The available method of * InputStream returns 0. This method * should be overridden by subclasses. * * @return the number of bytes that can be read from this input stream * without blocking. */ @Override public final int available() { return ofinish - ostart; } /** * Closes this input stream and releases any system resources * associated with the stream. *

* The close method of CipherInputStream * calls the close method of its underlying input * stream. * * @throws IOException if an I/O error occurs. */ @Override public final void close() throws IOException { if (closed) { return; } closed = true; in.close(); // Throw away the unprocessed data and throw no crypto exceptions. // AEAD ciphers are fully read before closing. Any authentication // exceptions would occur while reading. if (!done) { ensureCapacity(0); try { cipher.doFinal(obuffer, 0); } catch (Exception e) { // Catch exceptions as the rest of the stream is unused. } } obuffer = null; } /** * Tests if this input stream supports the mark * and reset methods, which it does not. * * @return false, since this class does not support the * mark and reset methods. * @see java.io.InputStream#mark(int) * @see java.io.InputStream#reset() */ @Override public final boolean markSupported() { return false; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/crypto/Encryption.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy.crypto; import java.io.IOException; import java.io.InputStream; import java.security.GeneralSecurityException; import java.security.SecureRandom; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; public class Encryption { private final InputStream cis; private final IvParameterSpec iv; private final int part; public Encryption(SecretKeySpec key, InputStream isRaw, int partNumber) throws GeneralSecurityException { iv = generateIV(); Cipher cipher = Cipher.getInstance(Constants.AES_CIPHER); cipher.init(Cipher.ENCRYPT_MODE, key, iv); cis = new CipherInputStream(isRaw, cipher); part = partNumber; } public final InputStream openStream() throws IOException { return new EncryptionInputStream(cis, part, iv); } private IvParameterSpec generateIV() { byte[] iv = new byte[Constants.AES_BLOCK_SIZE]; var randomSecureRandom = new SecureRandom(); randomSecureRandom.nextBytes(iv); return new IvParameterSpec(iv); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/crypto/EncryptionInputStream.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

package org.gaul.s3proxy.crypto;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

import javax.crypto.spec.IvParameterSpec;

/**
 * Wraps a ciphertext stream and appends a fixed-size padding trailer after
 * the wrapped stream is exhausted.  The trailer records the IV, part number
 * and plaintext byte count so the decryption side can reconstruct them.
 */
public class EncryptionInputStream extends InputStream {
    // part number written into the trailer
    private final int part;
    // IV written into the trailer
    private final IvParameterSpec iv;
    // true once the padding trailer has been emitted
    private boolean hasPadding;
    // running count of bytes served from the wrapped stream
    private long size;
    // current source: wrapped stream, then padding stream, then null (EOF)
    private InputStream in;

    public EncryptionInputStream(InputStream in, int part,
            IvParameterSpec iv) {
        this.part = part;
        this.iv = iv;
        this.in = in;
    }

    // Padding (64 byte)
    // Delimiter (8 byte)
    // IV (16 byte)
    // Part (4 byte)
    // Size (8 byte)
    // Version (2 byte)
    // Reserved (26 byte)
    /**
     * Advances to the next source stream: closes the current one, then
     * substitutes the padding trailer on first call and null (EOF) on the
     * second.  Unwritten trailer bytes remain zero (the reserved region).
     */
    final void padding() throws IOException {
        if (in != null) {
            in.close();
        }
        if (!hasPadding) {
            ByteBuffer bb = ByteBuffer.allocate(Constants.PADDING_BLOCK_SIZE);
            bb.put(Constants.DELIMITER);
            bb.put(iv.getIV());
            bb.putInt(part);
            bb.putLong(size);
            bb.putShort(Constants.VERSION);
            in = new ByteArrayInputStream(bb.array());
            hasPadding = true;
        } else {
            in = null;
        }
    }

    @Override
    public final int available() throws IOException {
        if (in == null) {
            return 0; // no way to signal EOF from available()
        }
        return in.available();
    }

    @Override
    public final int read() throws IOException {
        // Loop so that EOF of the wrapped stream falls through to the
        // padding trailer before returning -1.
        while (in != null) {
            int c = in.read();
            if (c != -1) {
                size++;
                return c;
            }
            padding();
        }
        return -1;
    }

    @Override
    public final int read(byte[] b, int off, int len) throws IOException {
        if (in == null) {
            return -1;
        } else if (b == null) {
            throw new NullPointerException();
        } else if (off < 0 || len < 0 || len > b.length - off) {
            throw new IndexOutOfBoundsException();
        } else if (len == 0) {
            return 0;
        }
        // Same transition as read(): exhaust the wrapped stream, then the
        // padding trailer, then report EOF.
        do {
            int n = in.read(b, off, len);
            if (n > 0) {
                size = size + n;
                return n;
            }
            padding();
        } while (in != null);
        return -1;
    }

    @Override
    public final void close() throws IOException {
        // Close every remaining source stream, remembering the first
        // IOException and suppressing the rest.
        IOException ioe = null;
        while (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                if (ioe == null) {
                    ioe = e;
                } else {
                    ioe.addSuppressed(e);
                }
            }
            padding();
        }
        if (ioe != null) {
            throw ioe;
        }
    }
}
================================================ FILE: src/main/java/org/gaul/s3proxy/crypto/PartPadding.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.crypto; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Arrays; import javax.crypto.spec.IvParameterSpec; import org.jclouds.blobstore.domain.Blob; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class PartPadding { private static final Logger logger = LoggerFactory.getLogger(PartPadding.class); private String delimiter; private IvParameterSpec iv; private int part; private long size; private short version; public static PartPadding readPartPaddingFromBlob(Blob blob) throws IOException { var partPadding = new PartPadding(); try (var is = blob.getPayload().openStream()) { byte[] paddingBytes = is.readAllBytes(); ByteBuffer bb = ByteBuffer.wrap(paddingBytes); byte[] delimiterBytes = new byte[Constants.PADDING_DELIMITER_LENGTH]; bb.get(delimiterBytes); partPadding.delimiter = new String(delimiterBytes, StandardCharsets.UTF_8); byte[] ivBytes = new byte[Constants.PADDING_IV_LENGTH]; bb.get(ivBytes); partPadding.iv = new IvParameterSpec(ivBytes); partPadding.part = bb.getInt(); partPadding.size = bb.getLong(); partPadding.version = bb.getShort(); logger.debug("delimiter {}", partPadding.delimiter); logger.debug("iv {}", 
Arrays.toString(ivBytes)); logger.debug("part {}", partPadding.part); logger.debug("size {}", partPadding.size); logger.debug("version {}", partPadding.version); return partPadding; } } public final String getDelimiter() { return delimiter; } public final IvParameterSpec getIv() { return iv; } public final int getPart() { return part; } public final long getSize() { return size; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudApiMetadata.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

package org.gaul.s3proxy.gcloudsdk;

import java.net.URI;
import java.util.Properties;
import java.util.Set;

import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.reference.BlobStoreConstants;
import org.jclouds.reflect.Reflection2;
import org.jclouds.rest.internal.BaseHttpApiMetadata;

/**
 * jclouds ApiMetadata describing the Google Cloud Storage SDK-backed
 * provider: its id, endpoint, credential names and the Guice module
 * ({@code GCloudBlobStoreContextModule}) that wires the blobstore view.
 */
@SuppressWarnings("rawtypes")
public final class GCloudApiMetadata extends BaseHttpApiMetadata {
    public GCloudApiMetadata() {
        this(builder());
    }

    protected GCloudApiMetadata(Builder builder) {
        super(builder);
    }

    private static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromApiMetadata(this);
    }

    /**
     * Returns the default jclouds properties for this API, with the user
     * metadata prefix set to the GCS convention ("x-goog-meta-").
     */
    public static Properties defaultProperties() {
        Properties properties = BaseHttpApiMetadata.defaultProperties();
        properties.setProperty(BlobStoreConstants.PROPERTY_USER_METADATA_PREFIX,
                "x-goog-meta-");
        return properties;
    }

    // Fake API client: BaseHttpApiMetadata requires an API interface, but
    // all calls go through the GCS SDK rather than a jclouds HTTP client.
    private interface GCloudClient {
    }

    public static final class Builder extends BaseHttpApiMetadata.Builder {
        protected Builder() {
            super(GCloudClient.class);
            id("google-cloud-storage-sdk")
                .name("Google Cloud Storage API")
                .identityName("Project ID")
                .credentialName("JSON Key or Path")
                .version("v1")
                .defaultEndpoint("https://storage.googleapis.com")
                .documentation(URI.create(
                        "https://cloud.google.com/storage/docs/json_api"))
                .defaultProperties(GCloudApiMetadata.defaultProperties())
                .view(Reflection2.typeToken(BlobStoreContext.class))
                .defaultModules(Set.of(
                        GCloudBlobStoreContextModule.class));
        }

        @Override
        public GCloudApiMetadata build() {
            return new GCloudApiMetadata(this);
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}


================================================
FILE: src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudBlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.gcloudsdk; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.channels.Channels; import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.NoCredentials; import com.google.cloud.ReadChannel; import com.google.cloud.storage.Acl; import com.google.cloud.storage.Blob; import com.google.cloud.storage.BlobId; import com.google.cloud.storage.BlobInfo; import com.google.cloud.storage.Bucket; import com.google.cloud.storage.BucketInfo; import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobField; import com.google.cloud.storage.Storage.BlobGetOption; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.Storage.BlobWriteOption; import com.google.cloud.storage.Storage.BucketField; import com.google.cloud.storage.Storage.BucketGetOption; import com.google.cloud.storage.Storage.ComposeRequest; import com.google.cloud.storage.Storage.CopyRequest; import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.hash.HashFunction; import 
com.google.common.hash.Hashing; import com.google.common.hash.HashingInputStream; import com.google.common.io.BaseEncoding; import com.google.common.net.HttpHeaders; import jakarta.inject.Inject; import jakarta.inject.Singleton; import org.gaul.s3proxy.PutOptions2; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.StorageType; import org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.BlobBuilderImpl; import org.jclouds.blobstore.domain.internal.BlobMetadataImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.domain.internal.StorageMetadataImpl; import org.jclouds.blobstore.internal.BaseBlobStore; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.BlobUtils; import org.jclouds.collect.Memoized; import org.jclouds.domain.Credentials; import org.jclouds.domain.Location; import org.jclouds.http.HttpCommand; import org.jclouds.http.HttpRequest; import org.jclouds.http.HttpResponse; import org.jclouds.http.HttpResponseException; import org.jclouds.io.ContentMetadata; import org.jclouds.io.ContentMetadataBuilder; import org.jclouds.io.PayloadSlicer; import org.jclouds.providers.ProviderMetadata; import org.jspecify.annotations.Nullable; @Singleton public final class 
GCloudBlobStore extends BaseBlobStore { private static final String STUB_BLOB_PREFIX = ".s3proxy/stubs/"; private static final String TARGET_BLOB_NAME_KEY = "s3proxy_target_blob_name"; private static final HashFunction MD5 = Hashing.md5(); // GCS compose supports up to 32 source objects private static final int MAX_COMPOSE_PARTS = 32; private final Storage storage; @Inject GCloudBlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds, ProviderMetadata provider) { super(context, blobUtils, defaultLocation, locations, slicer); var cred = creds.get(); var storageBuilder = StorageOptions.newBuilder(); if (cred.identity != null && !cred.identity.isEmpty()) { storageBuilder.setProjectId(cred.identity); } if (cred.credential != null && !cred.credential.isEmpty()) { try { var credentials = ServiceAccountCredentials.fromStream( new ByteArrayInputStream( cred.credential.getBytes(StandardCharsets.UTF_8))); storageBuilder.setCredentials(credentials); } catch (IOException ioe) { // Fall back to application default credentials try { storageBuilder.setCredentials( GoogleCredentials.getApplicationDefault()); } catch (IOException ioe2) { throw new RuntimeException( "Failed to initialize GCS credentials", ioe2); } } } else { // No credentials provided — use NoCredentials for emulator storageBuilder.setCredentials(NoCredentials.getInstance()); } var endpoint = provider.getEndpoint(); if (endpoint != null && !endpoint.isEmpty() && !endpoint.equals("https://storage.googleapis.com")) { storageBuilder.setHost(endpoint); } storage = storageBuilder.build().getService(); } @Override public PageSet list() { var set = ImmutableSet.builder(); for (Bucket bucket : storage.list().iterateAll()) { set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null, bucket.getName(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, 
toDate(bucket.getCreateTimeOffsetDateTime()), toDate(bucket.getUpdateTimeOffsetDateTime()), Map.of(), /*size=*/ null, Tier.STANDARD)); } return new PageSetImpl(set.build(), null); } @Override public PageSet list(String container, ListContainerOptions options) { var gcsOptions = new java.util.ArrayList(); if (options.getPrefix() != null) { gcsOptions.add(BlobListOption.prefix(options.getPrefix())); } if (options.getMaxResults() != null) { gcsOptions.add(BlobListOption.pageSize( options.getMaxResults())); } String marker = options.getMarker(); if (options.getDelimiter() != null) { gcsOptions.add(BlobListOption.delimiter(options.getDelimiter())); } com.google.api.gax.paging.Page page; try { page = storage.list(container, gcsOptions.toArray(new BlobListOption[0])); } catch (StorageException se) { translateAndRethrowException(se, container, null); throw se; } var set = ImmutableSet.builder(); Integer maxResults = options.getMaxResults(); int count = 0; boolean hasMore = false; String lastName = null; for (Blob blob : page.iterateAll()) { // Skip blobs at or before the marker (S3 marker is exclusive) if (marker != null && blob.getName().compareTo(marker) <= 0) { continue; } if (maxResults != null && count >= maxResults) { hasMore = true; break; } if (blob.isDirectory()) { set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH, /*id=*/ null, blob.getName(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, /*creationDate=*/ null, /*lastModified=*/ null, Map.of(), /*size=*/ null, Tier.STANDARD)); } else { set.add(new StorageMetadataImpl(StorageType.BLOB, /*id=*/ null, blob.getName(), /*location=*/ null, /*uri=*/ null, blob.getEtag(), toDate(blob.getCreateTimeOffsetDateTime()), toDate(blob.getUpdateTimeOffsetDateTime()), Map.of(), blob.getSize(), toTier(blob.getStorageClass()))); } lastName = blob.getName(); count++; } // Synthesize a next marker if we truncated results String nextMarker = hasMore ? 
lastName : null; return new PageSetImpl(set.build(), nextMarker); } @Override public boolean containerExists(String container) { return storage.get(container, BucketGetOption.fields(BucketField.NAME)) != null; } @Override public boolean createContainerInLocation(Location location, String container) { return createContainerInLocation(location, container, new CreateContainerOptions()); } @Override public boolean createContainerInLocation(Location location, String container, CreateContainerOptions options) { try { var bucketInfo = BucketInfo.newBuilder(container).build(); storage.create(bucketInfo); if (options.isPublicRead()) { try { storage.createAcl(container, Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER)); } catch (StorageException se2) { // ACL operations not supported (e.g., emulator) } } return true; } catch (StorageException se) { if (se.getCode() == 409) { return false; } throw se; } } @Override public void deleteContainer(String container) { try { // Delete all blobs first since GCS requires empty bucket var page = storage.list(container); for (Blob blob : page.iterateAll()) { storage.delete(blob.getBlobId()); } storage.delete(container); } catch (StorageException se) { if (se.getCode() != 404) { throw se; } } } @Override public boolean deleteContainerIfEmpty(String container) { var page = storage.list(container, BlobListOption.pageSize(1)); if (page.getValues().iterator().hasNext()) { return false; } try { storage.delete(container); return true; } catch (StorageException se) { if (se.getCode() == 404) { return true; } throw se; } } @Override public boolean blobExists(String container, String key) { return storage.get(BlobId.of(container, key), BlobGetOption.fields(BlobField.NAME)) != null; } @Override public org.jclouds.blobstore.domain.Blob getBlob(String container, String key, GetOptions options) { var gcsOptions = new java.util.ArrayList(); Blob gcsBlob; try { gcsBlob = storage.get(BlobId.of(container, key), gcsOptions.toArray(new BlobGetOption[0])); } 
catch (StorageException se) { translateAndRethrowException(se, container, key); throw se; } if (gcsBlob == null) { throw new KeyNotFoundException(container, key, ""); } Long rangeOffset = null; Long rangeEnd = null; if (!options.getRanges().isEmpty()) { var ranges = options.getRanges().get(0).split("-", 2); if (ranges[0].isEmpty()) { // trailing range: last N bytes long trailing = Long.parseLong(ranges[1]); long blobSz = gcsBlob.getSize(); rangeOffset = Math.max(0, blobSz - trailing); rangeEnd = blobSz - 1; } else if (ranges[1].isEmpty()) { rangeOffset = Long.parseLong(ranges[0]); } else { rangeOffset = Long.parseLong(ranges[0]); rangeEnd = Long.parseLong(ranges[1]); } } InputStream is; long contentLength; long blobSize = gcsBlob.getSize(); try { if (rangeOffset != null) { ReadChannel reader = gcsBlob.reader(); reader.seek(rangeOffset); if (rangeEnd != null) { reader.limit(rangeEnd + 1); contentLength = rangeEnd - rangeOffset + 1; } else { contentLength = blobSize - rangeOffset; } is = Channels.newInputStream(reader); } else { ReadChannel reader = gcsBlob.reader(); is = Channels.newInputStream(reader); contentLength = blobSize; } } catch (IOException ioe) { throw new RuntimeException(ioe); } var metadata = gcsBlob.getMetadata(); var blob = new BlobBuilderImpl() .name(key) .userMetadata(metadata != null ? metadata : Map.of()) .payload(is) .cacheControl(gcsBlob.getCacheControl()) .contentDisposition(gcsBlob.getContentDisposition()) .contentEncoding(gcsBlob.getContentEncoding()) .contentLanguage(gcsBlob.getContentLanguage()) .contentLength(contentLength) .contentType(gcsBlob.getContentType()) .build(); if (rangeOffset != null) { long end = rangeEnd != null ? 
rangeEnd : blobSize - 1; blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE, "bytes " + rangeOffset + "-" + end + "/" + blobSize); } var blobMeta = blob.getMetadata(); blobMeta.setETag(gcsBlob.getEtag()); blobMeta.setSize(blobSize); blobMeta.setTier(toTier(gcsBlob.getStorageClass())); blobMeta.setCreationDate( toDate(gcsBlob.getCreateTimeOffsetDateTime())); blobMeta.setLastModified( toDate(gcsBlob.getUpdateTimeOffsetDateTime())); return blob; } @Override public String putBlob(String container, org.jclouds.blobstore.domain.Blob blob) { return putBlob(container, blob, new PutOptions()); } @Override public String putBlob(String container, org.jclouds.blobstore.domain.Blob blob, PutOptions options) { var contentMetadata = blob.getMetadata().getContentMetadata(); var blobInfo = BlobInfo.newBuilder( BlobId.of(container, blob.getMetadata().getName())); blobInfo.setContentType(contentMetadata.getContentType()); blobInfo.setContentDisposition( contentMetadata.getContentDisposition()); blobInfo.setContentEncoding(contentMetadata.getContentEncoding()); blobInfo.setContentLanguage(contentMetadata.getContentLanguage()); blobInfo.setCacheControl(contentMetadata.getCacheControl()); var hash = contentMetadata.getContentMD5AsHashCode(); if (hash != null) { blobInfo.setMd5(hash.toString()); } if (blob.getMetadata().getUserMetadata() != null) { blobInfo.setMetadata(blob.getMetadata().getUserMetadata()); } if (blob.getMetadata().getTier() != null && blob.getMetadata().getTier() != Tier.STANDARD) { blobInfo.setStorageClass( toStorageClass(blob.getMetadata().getTier())); } var writeOptions = new java.util.ArrayList(); if (options instanceof PutOptions2 putOptions2) { String ifMatch = putOptions2.getIfMatch(); String ifNoneMatch = putOptions2.getIfNoneMatch(); if (ifNoneMatch != null && ifNoneMatch.equals("*")) { writeOptions.add(BlobWriteOption.doesNotExist()); } else if (ifMatch != null) { writeOptions.add( BlobWriteOption.generationMatch( getGeneration(container, 
blob.getMetadata().getName(), ifMatch))); } } try (var is = blob.getPayload().openStream()) { Blob gcsBlob = storage.createFrom(blobInfo.build(), is, writeOptions.toArray(new BlobWriteOption[0])); return gcsBlob.getEtag(); } catch (StorageException se) { translateAndRethrowException(se, container, null); throw se; } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { var source = BlobId.of(fromContainer, fromName); var targetBuilder = BlobInfo.newBuilder( BlobId.of(toContainer, toName)); var contentMetadata = options.contentMetadata(); if (contentMetadata != null) { if (contentMetadata.getCacheControl() != null) { targetBuilder.setCacheControl( contentMetadata.getCacheControl()); } if (contentMetadata.getContentDisposition() != null) { targetBuilder.setContentDisposition( contentMetadata.getContentDisposition()); } if (contentMetadata.getContentEncoding() != null) { targetBuilder.setContentEncoding( contentMetadata.getContentEncoding()); } if (contentMetadata.getContentLanguage() != null) { targetBuilder.setContentLanguage( contentMetadata.getContentLanguage()); } if (contentMetadata.getContentType() != null) { targetBuilder.setContentType( contentMetadata.getContentType()); } } var userMetadata = options.userMetadata(); if (userMetadata != null) { targetBuilder.setMetadata(userMetadata); } try { var copyRequest = CopyRequest.newBuilder() .setSource(source) .setTarget(targetBuilder.build()) .build(); var result = storage.copy(copyRequest); return result.getResult().getEtag(); } catch (StorageException se) { translateAndRethrowException(se, fromContainer, fromName); throw se; } } @Override public void removeBlob(String container, String key) { try { storage.delete(BlobId.of(container, key)); } catch (StorageException se) { if (se.getCode() != 404) { throw se; } } } @Override public BlobMetadata blobMetadata(String container, 
String key) { Blob gcsBlob; try { gcsBlob = storage.get(BlobId.of(container, key)); } catch (StorageException se) { if (se.getCode() == 404) { return null; } translateAndRethrowException(se, container, null); throw se; } if (gcsBlob == null) { return null; } Long size = gcsBlob.getSize(); return new BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null, /*uri=*/ null, gcsBlob.getEtag(), toDate(gcsBlob.getCreateTimeOffsetDateTime()), toDate(gcsBlob.getUpdateTimeOffsetDateTime()), gcsBlob.getMetadata() != null ? gcsBlob.getMetadata() : Map.of(), /*publicUri=*/ null, container, toContentMetadata(gcsBlob), size != null ? size : 0L, toTier(gcsBlob.getStorageClass())); } @Override protected boolean deleteAndVerifyContainerGone(String container) { try { storage.delete(container); } catch (StorageException se) { if (se.getCode() == 404) { return true; } throw se; } return true; } @Override public ContainerAccess getContainerAccess(String container) { var bucket = storage.get(container); if (bucket == null) { throw new ContainerNotFoundException(container, ""); } try { var acls = bucket.listAcls(); for (var acl : acls) { if (acl.getEntity().equals(Acl.User.ofAllUsers())) { return ContainerAccess.PUBLIC_READ; } } } catch (StorageException se) { // ACL operations not supported (e.g., emulator) } return ContainerAccess.PRIVATE; } @Override public void setContainerAccess(String container, ContainerAccess access) { try { if (access == ContainerAccess.PUBLIC_READ) { storage.createAcl(container, Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER)); } else { storage.deleteAcl(container, Acl.User.ofAllUsers()); } } catch (StorageException se) { // ACL operations not supported (e.g., emulator) } } @Override public BlobAccess getBlobAccess(String container, String key) { return BlobAccess.PRIVATE; } @Override public void setBlobAccess(String container, String key, BlobAccess access) { throw new UnsupportedOperationException( "unsupported in Google Cloud Storage"); } @Override public 
MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } String uploadKey = STUB_BLOB_PREFIX + UUID.randomUUID().toString(); String targetBlobName = blobMetadata.getName(); // Store stub blob with metadata for later use during complete var stubMetadata = new HashMap(); stubMetadata.put(TARGET_BLOB_NAME_KEY, targetBlobName); var contentMetadata = blobMetadata.getContentMetadata(); if (contentMetadata != null) { if (contentMetadata.getContentType() != null) { stubMetadata.put("s3proxy_content_type", contentMetadata.getContentType()); } if (contentMetadata.getContentDisposition() != null) { stubMetadata.put("s3proxy_content_disposition", contentMetadata.getContentDisposition()); } if (contentMetadata.getContentEncoding() != null) { stubMetadata.put("s3proxy_content_encoding", contentMetadata.getContentEncoding()); } if (contentMetadata.getContentLanguage() != null) { stubMetadata.put("s3proxy_content_language", contentMetadata.getContentLanguage()); } if (contentMetadata.getCacheControl() != null) { stubMetadata.put("s3proxy_cache_control", contentMetadata.getCacheControl()); } } var userMetadata = blobMetadata.getUserMetadata(); if (userMetadata != null) { for (var entry : userMetadata.entrySet()) { stubMetadata.put("s3proxy_user_" + entry.getKey(), entry.getValue()); } } if (blobMetadata.getTier() != null && blobMetadata.getTier() != Tier.STANDARD) { stubMetadata.put("s3proxy_tier", blobMetadata.getTier().name()); } var stubInfo = BlobInfo.newBuilder( BlobId.of(container, uploadKey)) .setMetadata(stubMetadata) .build(); storage.create(stubInfo, new byte[0]); return MultipartUpload.create(container, targetBlobName, uploadKey, blobMetadata, options); } @Override public void abortMultipartUpload(MultipartUpload mpu) { String uploadKey = mpu.id(); if (!uploadKey.startsWith(STUB_BLOB_PREFIX)) { throw new 
KeyNotFoundException(mpu.containerName(), uploadKey,
                "Multipart upload not found: " + uploadKey);
        }
        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());
        // Delete part blobs
        var page = storage.list(mpu.containerName(),
                BlobListOption.prefix(STUB_BLOB_PREFIX + nonce + "/"));
        for (Blob blob : page.iterateAll()) {
            storage.delete(blob.getBlobId());
        }
        // Delete stub
        if (!storage.delete(BlobId.of(mpu.containerName(), uploadKey))) {
            throw new KeyNotFoundException(mpu.containerName(), uploadKey,
                    "Multipart upload not found: " + uploadKey);
        }
    }

    /**
     * Completes a multipart upload: reads the target blob name and content
     * metadata from the stub blob, validates the part list, then either
     * copies the single part to the target or combines multiple parts via
     * GCS compose (recursively for more than MAX_COMPOSE_PARTS sources).
     * Part blobs, intermediate compose blobs, and the stub are deleted
     * afterwards.
     *
     * @return the ETag of the resulting target blob
     * @throws IllegalArgumentException if the stub is missing, the stub has
     *         no target name, the part list is null/empty, or part numbers
     *         are not strictly ascending
     */
    @Override
    public String completeMultipartUpload(MultipartUpload mpu, List parts) {
        String uploadKey = mpu.id();
        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());
        Blob stubBlob = storage.get(
                BlobId.of(mpu.containerName(), uploadKey));
        if (stubBlob == null) {
            throw new IllegalArgumentException(
                    "Upload not found: uploadId=" + uploadKey);
        }
        var stubMetadata = stubBlob.getMetadata();
        String targetBlobName = stubMetadata.get(TARGET_BLOB_NAME_KEY);
        if (targetBlobName == null) {
            throw new IllegalArgumentException(
                    "Stub blob missing target name: uploadId=" + uploadKey);
        }
        if (parts == null || parts.isEmpty()) {
            throw new IllegalArgumentException("Parts list cannot be empty");
        }
        // Enforce strictly ascending part numbers; duplicates also fail here.
        int previousPartNumber = 0;
        for (var part : parts) {
            if (part.partNumber() <= previousPartNumber) {
                throw new IllegalArgumentException(
                        "Parts must be in strictly ascending order");
            }
            previousPartNumber = part.partNumber();
        }
        // Build target blob info from stub metadata
        var targetBuilder = BlobInfo.newBuilder(
                BlobId.of(mpu.containerName(), targetBlobName));
        if (stubMetadata.containsKey("s3proxy_content_type")) {
            targetBuilder.setContentType(
                    stubMetadata.get("s3proxy_content_type"));
        }
        if (stubMetadata.containsKey("s3proxy_content_disposition")) {
            targetBuilder.setContentDisposition(
                    stubMetadata.get("s3proxy_content_disposition"));
        }
        if (stubMetadata.containsKey("s3proxy_content_encoding")) {
            targetBuilder.setContentEncoding(
                    stubMetadata.get("s3proxy_content_encoding"));
        }
        if (stubMetadata.containsKey("s3proxy_content_language")) {
            targetBuilder.setContentLanguage(
                    stubMetadata.get("s3proxy_content_language"));
        }
        if (stubMetadata.containsKey("s3proxy_cache_control")) {
            targetBuilder.setCacheControl(
                    stubMetadata.get("s3proxy_cache_control"));
        }
        if (stubMetadata.containsKey("s3proxy_tier")) {
            targetBuilder.setStorageClass(toStorageClass(
                    Tier.valueOf(stubMetadata.get("s3proxy_tier"))));
        }
        // Restore user metadata
        var userMetadata = new HashMap();
        for (var entry : stubMetadata.entrySet()) {
            if (entry.getKey().startsWith("s3proxy_user_")) {
                userMetadata.put(
                        entry.getKey().substring("s3proxy_user_".length()),
                        entry.getValue());
            }
        }
        if (!userMetadata.isEmpty()) {
            targetBuilder.setMetadata(userMetadata);
        }
        // If single part, just copy it to the target
        if (parts.size() == 1) {
            String partBlobName =
                    makePartBlobName(nonce, parts.get(0).partNumber());
            var source = BlobId.of(mpu.containerName(), partBlobName);
            var copyRequest = CopyRequest.newBuilder()
                    .setSource(source)
                    .setTarget(targetBuilder.build())
                    .build();
            var result = storage.copy(copyRequest);
            // Clean up
            storage.delete(source);
            storage.delete(BlobId.of(mpu.containerName(), uploadKey));
            return result.getResult().getEtag();
        }
        // GCS compose supports up to 32 parts.
        // For more parts, compose recursively.
        var sourceBlobIds = new java.util.ArrayList();
        for (var part : parts) {
            String partBlobName = makePartBlobName(nonce, part.partNumber());
            sourceBlobIds.add(BlobId.of(mpu.containerName(), partBlobName));
        }
        String eTag = composeRecursive(mpu.containerName(),
                targetBuilder.build(), sourceBlobIds, nonce);
        // Clean up part blobs and stub
        for (var blobId : sourceBlobIds) {
            storage.delete(blobId);
        }
        // Clean up any intermediate compose blobs
        var intermediatePage = storage.list(mpu.containerName(),
                BlobListOption.prefix(
                        STUB_BLOB_PREFIX + nonce + "/compose_"));
        for (Blob blob : intermediatePage.iterateAll()) {
            storage.delete(blob.getBlobId());
        }
        storage.delete(BlobId.of(mpu.containerName(), uploadKey));
        return eTag;
    }

    /**
     * Recursively compose blobs to handle more than 32 parts.
     * GCS compose supports max 32 sources, so for N > 32 parts we
     * compose in groups of 32, then compose those results.
     */
    // NOTE(review): every recursion level names its intermediates
    // "<stub>/<nonce>/compose_<i>" starting from 0, so a deeper level reuses
    // names that were sources at the previous level -- verify this is safe
    // for uploads needing more than one level of composition (> 32*32 parts).
    private String composeRecursive(String container, BlobInfo target,
            List sources, String nonce) {
        if (sources.size() <= MAX_COMPOSE_PARTS) {
            // Base case: a single compose call produces the final target.
            var composeBuilder = ComposeRequest.newBuilder();
            composeBuilder.setTarget(target);
            for (var source : sources) {
                composeBuilder.addSource(source.getName());
            }
            var result = storage.compose(composeBuilder.build());
            return result.getEtag();
        }
        // Compose in groups of MAX_COMPOSE_PARTS
        var intermediateIds = new java.util.ArrayList();
        int groupIndex = 0;
        for (int i = 0; i < sources.size(); i += MAX_COMPOSE_PARTS) {
            int end = Math.min(i + MAX_COMPOSE_PARTS, sources.size());
            var group = sources.subList(i, end);
            String intermediateName =
                    STUB_BLOB_PREFIX + nonce + "/compose_" + groupIndex;
            var intermediateInfo = BlobInfo.newBuilder(
                    BlobId.of(container, intermediateName)).build();
            var composeBuilder = ComposeRequest.newBuilder();
            composeBuilder.setTarget(intermediateInfo);
            for (var source : group) {
                composeBuilder.addSource(source.getName());
            }
            storage.compose(composeBuilder.build());
            intermediateIds.add(BlobId.of(container, intermediateName));
            groupIndex++;
        }
        // Recursively compose intermediates
        return composeRecursive(container, target, intermediateIds, nonce);
    }

    /**
     * Uploads one part of a multipart upload as a standalone part blob,
     * hashing the stream with MD5 as it is written.  If the payload carried
     * a Content-MD5 and it mismatches, the part blob is deleted and an
     * IllegalArgumentException is thrown.
     *
     * @return the part descriptor whose ETag is the lowercase hex MD5
     * @throws IllegalArgumentException on bad part number, missing
     *         Content-Length, or Content-MD5 mismatch
     */
    @Override
    public MultipartPart uploadMultipartPart(MultipartUpload mpu,
            int partNumber, org.jclouds.io.Payload payload) {
        if (partNumber < 1 || partNumber > 10_000) {
            throw new IllegalArgumentException(
                    "Part number must be between 1 and 10,000, got: " +
                    partNumber);
        }
        Long contentLength = payload.getContentMetadata()
                .getContentLength();
        if (contentLength == null) {
            throw new IllegalArgumentException(
                    "Content-Length is required");
        }
        String uploadKey = mpu.id();
        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());
        String partBlobName = makePartBlobName(nonce, partNumber);
        byte[] md5Hash;
        try (var is = payload.openStream();
                var his = new HashingInputStream(MD5, is)) {
            var partInfo = BlobInfo.newBuilder(
                    BlobId.of(mpu.containerName(), partBlobName)).build();
            storage.createFrom(partInfo, his);
            md5Hash = his.hash().asBytes();
            var providedMd5 = payload.getContentMetadata()
                    .getContentMD5AsHashCode();
            if (providedMd5 != null) {
                if (!MessageDigest.isEqual(md5Hash,
                        providedMd5.asBytes())) {
                    // Clean up the uploaded part
                    storage.delete(BlobId.of(mpu.containerName(),
                            partBlobName));
                    throw new IllegalArgumentException(
                            "Content-MD5 mismatch");
                }
            }
        } catch (StorageException se) {
            // translateAndRethrowException throws for the codes it maps;
            // anything unmapped falls through to the wrapping below.
            translateAndRethrowException(se, mpu.containerName(),
                    mpu.blobName());
            throw new RuntimeException((
                    "Failed to upload part %d for blob '%s' in " +
                    "container '%s': %s").formatted(
                            partNumber, mpu.blobName(), mpu.containerName(),
                            se.getMessage()), se);
        } catch (IOException ioe) {
            throw new RuntimeException((
                    "Failed to upload part %d for blob '%s' in " +
                    "container '%s': %s").formatted(
                            partNumber, mpu.blobName(), mpu.containerName(),
                            ioe.getMessage()), ioe);
        }
        String eTag = BaseEncoding.base16().lowerCase().encode(md5Hash);
        return MultipartPart.create(partNumber, contentLength, eTag, null);
    }

    /**
     * Lists the parts uploaded so far for one multipart upload by scanning
     * the "/part_" blobs under the upload's nonce prefix.  Blobs whose
     * trailing "_<n>" suffix does not parse as an integer are skipped.
     */
    @Override
    public List listMultipartUpload(MultipartUpload mpu) {
        String uploadKey = mpu.id();
        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());
        String prefix = STUB_BLOB_PREFIX + nonce + "/part_";
        var parts = ImmutableList.builder();
        var page = storage.list(mpu.containerName(),
                BlobListOption.prefix(prefix));
        for (Blob blob : page.iterateAll()) {
            String name = blob.getName();
            String partNumberStr = name.substring(
                    name.lastIndexOf('_') + 1);
            int partNumber;
            try {
                partNumber = Integer.parseInt(partNumberStr);
            } catch (NumberFormatException e) {
                continue;
            }
            // NOTE(review): part ETags are not persisted, so an empty string
            // is reported here.
            parts.add(MultipartPart.create(partNumber, blob.getSize(),
                    "", null));
        }
        return parts.build();
    }

    /**
     * Lists in-progress multipart uploads in a container by scanning stub
     * blobs under STUB_BLOB_PREFIX; part and intermediate compose blobs are
     * skipped, as are stubs missing the target-name metadata key.
     */
    @Override
    public List listMultipartUploads(String container) {
        var builder = ImmutableList.builder();
        var page = storage.list(container,
                BlobListOption.prefix(STUB_BLOB_PREFIX));
        for (Blob blob : page.iterateAll()) {
            String name = blob.getName();
            // Only look at stub blobs, not part blobs
            if (name.contains("/part_") || name.contains("/compose_")) {
                continue;
            }
            var metadata = blob.getMetadata();
            if (metadata == null ||
                    !metadata.containsKey(TARGET_BLOB_NAME_KEY)) {
                continue;
            }
            String targetBlobName = metadata.get(TARGET_BLOB_NAME_KEY);
            builder.add(MultipartUpload.create(container, targetBlobName,
                    name, null, null));
        }
        return builder.build();
    }

    @Override
    public long getMinimumMultipartPartSize() {
        // GCS minimum part is 5 MB except for last part
        return 5L * 1024 * 1024;
    }

    @Override
    public long getMaximumMultipartPartSize() {
        // 5 GiB
        return 5L * 1024 * 1024 * 1024;
    }

    @Override
    public int getMaximumNumberOfParts() {
        // With recursive compose we can handle many more than 32
        return 10_000;
    }

    @Override
    public InputStream streamBlob(String container, String name) {
        throw new UnsupportedOperationException("not yet implemented");
    }

    /** Builds a part blob name of the form "<stub><nonce>/part_NNNNN". */
    private static String makePartBlobName(String nonce, int partNumber) {
        return STUB_BLOB_PREFIX + nonce + "/part_%05d".formatted(partNumber);
    }

    /**
     * Get blob generation for conditional writes. GCS uses generations
     * rather than ETags for conditional operations.
     */
    private long getGeneration(String container, String name, String eTag) {
        Blob blob = storage.get(BlobId.of(container, name));
        if (blob == null) {
            throw new KeyNotFoundException(container, name, "");
        }
        // If the ETag doesn't match, the precondition fails.  "*" acts as a
        // wildcard that matches any existing blob.
        if (!eTag.equals("*") && !eTag.equals(blob.getEtag())) {
            var request = HttpRequest.builder()
                    .method("PUT")
                    .endpoint("https://storage.googleapis.com")
                    .build();
            var response = HttpResponse.builder()
                    .statusCode(412)
                    .build();
            throw new HttpResponseException(
                    new HttpCommand(request), response);
        }
        return blob.getGeneration();
    }

    /** Converts an OffsetDateTime to a legacy Date, or null for null. */
    private static Date toDate(
            java.time.@Nullable OffsetDateTime offsetDateTime) {
        if (offsetDateTime == null) {
            return null;
        }
        return new Date(offsetDateTime.toInstant().toEpochMilli());
    }

    /**
     * Maps a jclouds Tier to a GCS storage class: ARCHIVE -> ARCHIVE,
     * COLD -> COLDLINE, COOL/INFREQUENT -> NEARLINE, otherwise STANDARD.
     */
    private static com.google.cloud.storage.StorageClass toStorageClass(
            Tier tier) {
        if (tier == Tier.ARCHIVE) {
            return com.google.cloud.storage.StorageClass.ARCHIVE;
        } else if (tier == Tier.COLD) {
            return com.google.cloud.storage.StorageClass.COLDLINE;
        } else if (tier == Tier.COOL || tier == Tier.INFREQUENT) {
            return com.google.cloud.storage.StorageClass.NEARLINE;
        } else {
            return com.google.cloud.storage.StorageClass.STANDARD;
        }
    }

    /**
     * Maps a GCS storage class back to a jclouds Tier; null and unknown
     * classes map to STANDARD.  Inverse of toStorageClass except that
     * INFREQUENT collapses to COOL.
     */
    private static Tier toTier(
            com.google.cloud.storage.@Nullable StorageClass storageClass) {
        if (storageClass == null) {
            return Tier.STANDARD;
        } else if (storageClass.equals(
                com.google.cloud.storage.StorageClass.ARCHIVE)) {
            return Tier.ARCHIVE;
        } else if (storageClass.equals(
                com.google.cloud.storage.StorageClass.COLDLINE)) {
            return Tier.COLD;
        } else if (storageClass.equals(
                com.google.cloud.storage.StorageClass.NEARLINE)) {
            return Tier.COOL;
        } else {
            return Tier.STANDARD;
        }
    }

    /** Copies a GCS blob's content headers into jclouds ContentMetadata. */
    private static ContentMetadata toContentMetadata(Blob blob) {
        return ContentMetadataBuilder.create()
                .cacheControl(blob.getCacheControl())
                .contentDisposition(blob.getContentDisposition())
                .contentEncoding(blob.getContentEncoding())
                .contentLanguage(blob.getContentLanguage())
                .contentLength(blob.getSize())
                .contentType(blob.getContentType())
                .build();
    }

    /**
     * Translate StorageException to jclouds exceptions.
     */
    private static void translateAndRethrowException(StorageException se,
            String container, @Nullable String key) {
        switch (se.getCode()) {
        case 404:
            // Missing key vs. missing container depends on whether the
            // caller supplied a key.
            if (key != null) {
                var keyEx = new KeyNotFoundException(container, key, "");
                keyEx.initCause(se);
                throw keyEx;
            } else {
                var containerEx = new ContainerNotFoundException(
                        container, "");
                containerEx.initCause(se);
                throw containerEx;
            }
        case 412:
            // Precondition failure surfaces as a synthetic 412 response.
            var request = HttpRequest.builder()
                    .method("GET")
                    .endpoint("https://storage.googleapis.com")
                    .build();
            var response = HttpResponse.builder()
                    .statusCode(412)
                    .build();
            throw new HttpResponseException(
                    new HttpCommand(request), response, se);
        default:
            // Unmapped codes: return normally so the caller can wrap.
            break;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudBlobStoreContextModule.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.gcloudsdk;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.attr.ConsistencyModel;

/**
 * Guice module that wires the Google Cloud SDK-backed blob store into a
 * jclouds BlobStoreContext.
 */
public final class GCloudBlobStoreContextModule extends AbstractModule {
    @Override
    protected void configure() {
        // Advertise the STRICT consistency model for this provider.
        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);
        // One GCloudBlobStore instance serves the entire context.
        bind(BlobStore.class).to(GCloudBlobStore.class).in(Scopes.SINGLETON);
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudProviderMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.gcloudsdk;

import java.net.URI;
import java.util.Properties;

import com.google.auto.service.AutoService;

import org.jclouds.providers.ProviderMetadata;
import org.jclouds.providers.internal.BaseProviderMetadata;

/**
 * Implementation of org.jclouds.types.ProviderMetadata for Google Cloud
 * Storage using the official Google Cloud Storage SDK.
 */
@AutoService(ProviderMetadata.class)
public final class GCloudProviderMetadata extends BaseProviderMetadata {
    /** Builds metadata from the default builder configuration below. */
    public GCloudProviderMetadata() {
        super(builder());
    }

    public GCloudProviderMetadata(Builder builder) {
        super(builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromProviderMetadata(this);
    }

    /** Currently no provider-specific defaults; returns an empty set. */
    public static Properties defaultProperties() {
        var properties = new Properties();
        return properties;
    }

    /** Builder preconfigured with the GCS SDK provider identity. */
    public static final class Builder extends BaseProviderMetadata.Builder {
        protected Builder() {
            id("google-cloud-storage-sdk")
                    .name("Google Cloud Storage")
                    .apiMetadata(new GCloudApiMetadata())
                    .endpoint("https://storage.googleapis.com")
                    .homepage(URI.create(
                            "https://cloud.google.com/storage"))
                    .console(URI.create(
                            "https://console.cloud.google.com/storage"))
                    .linkedServices("google-cloud-storage")
                    .iso3166Codes("US", "EU")
                    .defaultProperties(
                            GCloudProviderMetadata.defaultProperties());
        }

        @Override
        public GCloudProviderMetadata build() {
            return new GCloudProviderMetadata(this);
        }

        @Override
        public Builder fromProviderMetadata(ProviderMetadata in) {
            super.fromProviderMetadata(in);
            return this;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/junit/S3ProxyExtension.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.junit;

import java.net.URI;

import org.gaul.s3proxy.AuthenticationType;
import org.junit.jupiter.api.extension.AfterEachCallback;
import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * A JUnit 5 Extension that manages an S3Proxy instance which tests
 * can use as an S3 API endpoint.
 */
public final class S3ProxyExtension
        implements AfterEachCallback, BeforeEachCallback {
    // All configuration and lifecycle handling is delegated to the shared
    // core, which the JUnit 4 S3ProxyRule also uses.
    private final S3ProxyJunitCore core;

    /** Fluent builder that forwards every option to the core builder. */
    public static final class Builder {
        private final S3ProxyJunitCore.Builder builder;

        private Builder() {
            builder = new S3ProxyJunitCore.Builder();
        }

        public Builder withCredentials(AuthenticationType authType,
                String accessKey, String secretKey) {
            builder.withCredentials(authType, accessKey, secretKey);
            return this;
        }

        public Builder withCredentials(String accessKey, String secretKey) {
            builder.withCredentials(accessKey, secretKey);
            return this;
        }

        public Builder withSecretStore(String path, String password) {
            builder.withSecretStore(path, password);
            return this;
        }

        public Builder withPort(int port) {
            builder.withPort(port);
            return this;
        }

        public Builder withBlobStoreProvider(String blobStoreProvider) {
            builder.withBlobStoreProvider(blobStoreProvider);
            return this;
        }

        public Builder ignoreUnknownHeaders() {
            builder.ignoreUnknownHeaders();
            return this;
        }

        public S3ProxyExtension build() {
            return new S3ProxyExtension(this);
        }
    }

    private S3ProxyExtension(Builder builder) {
        core = new S3ProxyJunitCore(builder.builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Starts the proxy before each test. */
    @Override
    public void beforeEach(ExtensionContext extensionContext)
            throws Exception {
        core.beforeEach();
    }

    /** Stops the proxy and cleans up after each test. */
    @Override
    public void afterEach(ExtensionContext extensionContext) {
        core.afterEach();
    }

    /** Returns the endpoint URI; only valid after beforeEach has run. */
    public URI getUri() {
        return core.getUri();
    }

    public String getAccessKey() {
        return core.getAccessKey();
    }

    public String getSecretKey() {
        return core.getSecretKey();
    }
}
================================================ FILE: src/main/java/org/gaul/s3proxy/junit/S3ProxyJunitCore.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.junit; import java.io.File; import java.io.IOException; import java.net.URI; import java.nio.file.Files; import java.util.Properties; import com.google.common.io.MoreFiles; import org.eclipse.jetty.util.component.AbstractLifeCycle; import org.gaul.s3proxy.AuthenticationType; import org.gaul.s3proxy.S3Proxy; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.StorageMetadata; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class S3ProxyJunitCore { private static final Logger logger = LoggerFactory.getLogger( S3ProxyJunitCore.class); private static final String LOCALHOST = "127.0.0.1"; private final String accessKey; private final String secretKey; private final String endpointFormat; private final S3Proxy s3Proxy; private final BlobStoreContext blobStoreContext; private URI endpointUri; private final File blobStoreLocation; public static final class Builder { private AuthenticationType authType = AuthenticationType.NONE; private String accessKey; private String secretKey; private String secretStorePath; private String secretStorePassword; private int port = -1; private boolean 
ignoreUnknownHeaders; private String blobStoreProvider = "filesystem"; public Builder withCredentials(AuthenticationType authType, String accessKey, String secretKey) { this.authType = authType; this.accessKey = accessKey; this.secretKey = secretKey; return this; } public Builder withCredentials(String accessKey, String secretKey) { return withCredentials(AuthenticationType.AWS_V2_OR_V4, accessKey, secretKey); } public Builder withSecretStore(String path, String password) { secretStorePath = path; secretStorePassword = password; return this; } public Builder withPort(int port) { this.port = port; return this; } public Builder withBlobStoreProvider(String blobStoreProvider) { this.blobStoreProvider = blobStoreProvider; return this; } public Builder ignoreUnknownHeaders() { ignoreUnknownHeaders = true; return this; } public S3ProxyJunitCore build() { return new S3ProxyJunitCore(this); } } S3ProxyJunitCore(Builder builder) { accessKey = builder.accessKey; secretKey = builder.secretKey; var properties = new Properties(); try { blobStoreLocation = Files.createTempDirectory("S3Proxy") .toFile(); properties.setProperty("jclouds.filesystem.basedir", blobStoreLocation.getCanonicalPath()); } catch (IOException e) { throw new RuntimeException("Unable to initialize Blob Store", e); } ContextBuilder blobStoreContextBuilder = ContextBuilder.newBuilder( builder.blobStoreProvider) .overrides(properties); if (!AuthenticationType.NONE.equals(builder.authType)) { blobStoreContextBuilder = blobStoreContextBuilder.credentials( accessKey, secretKey); } blobStoreContext = blobStoreContextBuilder.build( BlobStoreContext.class); S3Proxy.Builder s3ProxyBuilder = S3Proxy.builder() .blobStore(blobStoreContext.getBlobStore()) .awsAuthentication(builder.authType, accessKey, secretKey) .ignoreUnknownHeaders(builder.ignoreUnknownHeaders); if (builder.secretStorePath != null || builder.secretStorePassword != null) { s3ProxyBuilder.keyStore(builder.secretStorePath, builder.secretStorePassword); } 
int port = Math.max(builder.port, 0); endpointFormat = "http://%s:%d"; String endpoint = endpointFormat.formatted(LOCALHOST, port); s3ProxyBuilder.endpoint(URI.create(endpoint)); s3Proxy = s3ProxyBuilder.build(); } public final void beforeEach() throws Exception { logger.debug("S3 proxy is starting"); s3Proxy.start(); while (!s3Proxy.getState().equals(AbstractLifeCycle.STARTED)) { Thread.sleep(10); } endpointUri = URI.create(endpointFormat.formatted(LOCALHOST, s3Proxy.getPort())); logger.debug("S3 proxy is running"); } public final void afterEach() { logger.debug("S3 proxy is stopping"); try { s3Proxy.stop(); BlobStore blobStore = blobStoreContext.getBlobStore(); for (StorageMetadata metadata : blobStore.list()) { blobStore.deleteContainer(metadata.getName()); } blobStoreContext.close(); } catch (Exception e) { throw new RuntimeException("Unable to stop S3 proxy", e); } try { MoreFiles.deleteRecursively(blobStoreLocation.toPath()); } catch (IOException ioe) { // ignore } logger.debug("S3 proxy has stopped"); } public final URI getUri() { return endpointUri; } public final String getAccessKey() { return accessKey; } public final String getSecretKey() { return secretKey; } } ================================================ FILE: src/main/java/org/gaul/s3proxy/junit/S3ProxyRule.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

package org.gaul.s3proxy.junit;

import java.net.URI;

import com.google.common.annotations.Beta;

import org.gaul.s3proxy.AuthenticationType;
import org.junit.rules.ExternalResource;

/**
 * A JUnit Rule that manages an S3Proxy instance which tests can use as an S3
 * API endpoint.
 */
@Beta
public final class S3ProxyRule extends ExternalResource {
    // All configuration and lifecycle handling is delegated to the shared
    // core, which the JUnit 5 S3ProxyExtension also uses.
    private final S3ProxyJunitCore core;

    /** Fluent builder that forwards every option to the core builder. */
    public static final class Builder {
        private final S3ProxyJunitCore.Builder builder;

        private Builder() {
            builder = new S3ProxyJunitCore.Builder();
        }

        public Builder withCredentials(AuthenticationType authType,
                String accessKey, String secretKey) {
            builder.withCredentials(authType, accessKey, secretKey);
            return this;
        }

        public Builder withCredentials(String accessKey, String secretKey) {
            builder.withCredentials(accessKey, secretKey);
            return this;
        }

        public Builder withSecretStore(String path, String password) {
            builder.withSecretStore(path, password);
            return this;
        }

        public Builder withPort(int port) {
            builder.withPort(port);
            return this;
        }

        public Builder withBlobStoreProvider(String blobStoreProvider) {
            builder.withBlobStoreProvider(blobStoreProvider);
            return this;
        }

        public Builder ignoreUnknownHeaders() {
            builder.ignoreUnknownHeaders();
            return this;
        }

        public S3ProxyRule build() {
            return new S3ProxyRule(this);
        }
    }

    private S3ProxyRule(Builder builder) {
        core = new S3ProxyJunitCore(builder.builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Starts the proxy before the test. */
    @Override
    protected void before() throws Throwable {
        core.beforeEach();
    }

    /** Stops the proxy and cleans up after the test. */
    @Override
    protected void after() {
        core.afterEach();
    }

    /** Returns the endpoint URI; only valid after before() has run. */
    public URI getUri() {
        return core.getUri();
    }

    public String getAccessKey() {
        return core.getAccessKey();
    }

    public String getSecretKey() {
        return core.getSecretKey();
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/AbstractNio2BlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License,
Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.nio2blob; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryNotEmptyException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.UserDefinedFileAttributeView; import java.util.Date; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSortedSet; import com.google.common.hash.HashCode; import com.google.common.hash.HashFunction; import com.google.common.hash.Hashing; import com.google.common.hash.HashingInputStream; import com.google.common.io.BaseEncoding; import com.google.common.io.ByteSource; import com.google.common.io.ByteStreams; import com.google.common.net.HttpHeaders; import com.google.common.primitives.Longs; import jakarta.inject.Singleton; import jakarta.ws.rs.core.Response.Status; import org.jclouds.blobstore.BlobStore; import 
org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.ContainerNotFoundException; import org.jclouds.blobstore.KeyNotFoundException; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.ContainerAccess; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.StorageType; import org.jclouds.blobstore.domain.Tier; import org.jclouds.blobstore.domain.internal.BlobBuilderImpl; import org.jclouds.blobstore.domain.internal.PageSetImpl; import org.jclouds.blobstore.domain.internal.StorageMetadataImpl; import org.jclouds.blobstore.internal.BaseBlobStore; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.CreateContainerOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.blobstore.util.BlobStoreUtils; import org.jclouds.blobstore.util.BlobUtils; import org.jclouds.collect.Memoized; import org.jclouds.domain.Credentials; import org.jclouds.domain.Location; import org.jclouds.http.HttpCommand; import org.jclouds.http.HttpRequest; import org.jclouds.http.HttpResponse; import org.jclouds.http.HttpResponseException; import org.jclouds.io.Payload; import org.jclouds.io.PayloadSlicer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @Singleton public abstract class AbstractNio2BlobStore extends BaseBlobStore { private static final Logger logger = LoggerFactory.getLogger( AbstractNio2BlobStore.class); private static final String XATTR_CACHE_CONTROL = "user.cache-control"; private static final String XATTR_CONTENT_DISPOSITION = "user.content-disposition"; private static final String 
XATTR_CONTENT_ENCODING = "user.content-encoding"; private static final String XATTR_CONTENT_LANGUAGE = "user.content-language"; private static final String XATTR_CONTENT_MD5 = "user.content-md5"; private static final String XATTR_CONTENT_TYPE = "user.content-type"; private static final String XATTR_EXPIRES = "user.expires"; private static final String XATTR_STORAGE_TIER = "user.storage-tier"; private static final String XATTR_USER_METADATA_PREFIX = "user.user-metadata."; private static final Set NO_ATTRIBUTES = Set.of(); private static final String MULTIPART_PREFIX = ".mpus-"; @SuppressWarnings("deprecation") private static final HashFunction md5 = Hashing.md5(); private static final byte[] DIRECTORY_MD5 = md5.hashBytes(new byte[0]).asBytes(); private final Path root; protected AbstractNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds, Path root) { super(context, blobUtils, defaultLocation, locations, slicer); this.root = root; } protected final Path getRoot() { return root; } @Override public final PageSet list() { var set = ImmutableSortedSet.naturalOrder(); try (var stream = Files.newDirectoryStream(root)) { for (var path : stream) { var attr = Files.readAttributes(path, BasicFileAttributes.class); var lastModifiedTime = new Date( attr.lastModifiedTime().toMillis()); var creationTime = new Date(attr.creationTime().toMillis()); set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null, path.getFileName().toString(), /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, creationTime, lastModifiedTime, Map.of(), /*size=*/ null, Tier.STANDARD)); } } catch (IOException ioe) { throw new RuntimeException(ioe); } return new PageSetImpl(set.build(), null); } @Override public final PageSet list(String container, ListContainerOptions options) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } 
var delimiter = options.getDelimiter(); if ("".equals(delimiter)) { delimiter = null; } else if (delimiter != null && !delimiter.equals("/")) { throw new IllegalArgumentException("Delimiters other than / not supported"); } var prefix = options.getPrefix(); var dirPrefix = root.resolve(container); if (prefix != null) { int idx = prefix.lastIndexOf('/'); if (idx != -1) { dirPrefix = dirPrefix.resolve(prefix.substring(0, idx)); } } else { prefix = ""; } var containerPath = root.resolve(container); var pathPrefix = containerPath.resolve(prefix).normalize(); checkValidPath(containerPath, pathPrefix); logger.debug("Listing blobs at: {}", pathPrefix); var set = ImmutableSortedSet.naturalOrder(); try { listHelper(set, container, dirPrefix, pathPrefix, delimiter); var sorted = set.build(); if (options.getMarker() != null) { var found = false; for (var blob : sorted) { if (blob.getName().compareTo(options.getMarker()) > 0) { sorted = sorted.tailSet(blob); found = true; break; } } if (!found) { sorted = ImmutableSortedSet.of(); } } String marker = null; if (options.getMaxResults() != null) { // TODO: efficiency? 
var temp = ImmutableSortedSet.copyOf(sorted.stream().limit(options.getMaxResults().intValue()).collect(Collectors.toSet())); if (!temp.isEmpty()) { var next = sorted.higher(temp.last()); if (next != null) { marker = temp.last().getName(); } } sorted = temp; } return new PageSetImpl(sorted, marker); } catch (IOException ioe) { logger.error("unexpected exception", ioe); throw new RuntimeException(ioe); } } private void listHelper(ImmutableSortedSet.Builder builder, String container, Path parent, Path prefix, String delimiter) throws IOException { logger.debug("recursing at: {} with prefix: {}", parent, prefix); if (!Files.isDirectory(parent)) { // TODO: TOCTOU return; } try (var stream = Files.newDirectoryStream(parent)) { for (var path : stream) { logger.debug("examining: {}", path); if (!path.toAbsolutePath().toString().startsWith(root.resolve(prefix).toAbsolutePath().toString())) { // ignore } else if (Files.isDirectory(path)) { if (!"/".equals(delimiter)) { listHelper(builder, container, path, prefix, delimiter); } // Add a prefix if the directory blob exists or if the delimiter causes us not to recuse. 
if (safeGetXattrs(path).attributes().contains(XATTR_CONTENT_MD5) || "/".equals(delimiter)) { var name = path.toString().substring((root.resolve(container) + "/").length()); if (path.getFileSystem().getSeparator().equals("\\")) { name = name.replace('\\', '/'); } logger.debug("adding prefix: {}", name); builder.add(new StorageMetadataImpl( StorageType.RELATIVE_PATH, /*id=*/ null, name + "/", /*location=*/ null, /*uri=*/ null, /*eTag=*/ null, /*creationDate=*/ null, /*lastModified=*/ null, Map.of(), /*size=*/ null, Tier.STANDARD)); } } else { var name = path.toString().substring((root.resolve(container) + "/").length()); if (path.getFileSystem().getSeparator().equals("\\")) { name = name.replace('\\', '/'); } logger.debug("adding: {}", name); var attr = Files.readAttributes(path, BasicFileAttributes.class); var lastModifiedTime = new Date(attr.lastModifiedTime().toMillis()); var creationTime = new Date(attr.creationTime().toMillis()); String eTag = null; Tier tier = Tier.STANDARD; var xattrs = safeGetXattrs(path); if (xattrs.view() != null) { var view = xattrs.view(); var attributes = xattrs.attributes(); if (attributes.contains(XATTR_CONTENT_MD5)) { var buf = ByteBuffer.allocate(view.size(XATTR_CONTENT_MD5)); view.read(XATTR_CONTENT_MD5, buf); var etagBytes = buf.array(); if (etagBytes.length == 16) { // regular object var hashCode = HashCode.fromBytes(buf.array()); eTag = "\"" + hashCode + "\""; } else { // multi-part object eTag = new String(etagBytes, StandardCharsets.US_ASCII); } } var tierString = readStringAttributeIfPresent(view, attributes, XATTR_STORAGE_TIER); if (tierString != null) { tier = Tier.valueOf(tierString); } } builder.add(new StorageMetadataImpl(StorageType.BLOB, /*id=*/ null, name, /*location=*/ null, /*uri=*/ null, eTag, creationTime, lastModifiedTime, Map.of(), attr.size(), tier)); } } } catch (NoSuchFileException nsfe) { // ignore } } @Override public final boolean containerExists(String container) { return 
Files.exists(root.resolve(container)); } @Override public final boolean createContainerInLocation(Location location, String container) { return createContainerInLocation(location, container, new CreateContainerOptions()); } @Override public final boolean createContainerInLocation(Location location, String container, CreateContainerOptions options) { try { Files.createDirectory(root.resolve(container)); } catch (FileAlreadyExistsException faee) { return false; } catch (IOException ioe) { throw new RuntimeException(ioe); } setContainerAccess(container, options.isPublicRead() ? ContainerAccess.PUBLIC_READ : ContainerAccess.PRIVATE); return true; } @Override public final void deleteContainer(String container) { try { Files.deleteIfExists(root.resolve(container)); } catch (DirectoryNotEmptyException dnee) { // TODO: what to do? } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final boolean blobExists(String container, String key) { return blobMetadata(container, key) != null; } @Override public final Blob getBlob(String container, String key, GetOptions options) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } var containerPath = root.resolve(container); var path = containerPath.resolve(key); if (path.toString().equals("/")) { path = containerPath; } checkValidPath(containerPath, path); logger.debug("Getting blob at: {}", path); try { var isDirectory = Files.isDirectory(path); var attr = Files.readAttributes(path, BasicFileAttributes.class); var xattrs = safeGetXattrs(path); var view = xattrs.view(); var attributes = xattrs.attributes(); String cacheControl = null; String contentDisposition = null; String contentEncoding = null; String contentLanguage = null; String contentType = isDirectory ? 
"application/x-directory" : null; Date expires = null; HashCode hashCode = null; String eTag = null; var tier = Tier.STANDARD; var userMetadata = ImmutableMap.builder(); var lastModifiedTime = new Date(attr.lastModifiedTime().toMillis()); var creationTime = new Date(attr.creationTime().toMillis()); if (view != null) { cacheControl = readStringAttributeIfPresent(view, attributes, XATTR_CACHE_CONTROL); contentDisposition = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_DISPOSITION); contentEncoding = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_ENCODING); contentLanguage = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_LANGUAGE); if (!isDirectory) { contentType = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_TYPE); } } if (contentType == null && !isDirectory) { contentType = Files.probeContentType(path); if (contentType == null) { contentType = "application/octet-stream"; } } if (isDirectory) { if (!attributes.contains(XATTR_CONTENT_MD5)) { // Lacks directory marker -- implicit directory. 
return null; } } else if (attributes.contains(XATTR_CONTENT_MD5)) { var buf = ByteBuffer.allocate(view.size(XATTR_CONTENT_MD5)); view.read(XATTR_CONTENT_MD5, buf); var etagBytes = buf.array(); if (etagBytes.length == 16) { // regular object hashCode = HashCode.fromBytes(buf.array()); eTag = "\"" + hashCode + "\""; } else { // multi-part object eTag = new String(etagBytes, StandardCharsets.US_ASCII); } } if (attributes.contains(XATTR_EXPIRES)) { ByteBuffer buf = ByteBuffer.allocate(view.size(XATTR_EXPIRES)); view.read(XATTR_EXPIRES, buf); buf.flip(); expires = new Date(buf.asLongBuffer().get()); } if (view != null) { var tierString = readStringAttributeIfPresent(view, attributes, XATTR_STORAGE_TIER); if (tierString != null) { tier = Tier.valueOf(tierString); } for (String attribute : attributes) { if (!attribute.startsWith(XATTR_USER_METADATA_PREFIX)) { continue; } var value = readStringAttributeIfPresent(view, attributes, attribute); userMetadata.put(attribute.substring(XATTR_USER_METADATA_PREFIX.length()), value); } } // Handle range. String contentRange = null; InputStream inputStream; long size; if (isDirectory) { inputStream = ByteSource.empty().openStream(); size = 0; } else { inputStream = Files.newInputStream(path); // TODO: leaky on exception size = attr.size(); if (options.getRanges().size() > 0) { var range = options.getRanges().get(0); // HTTP uses a closed interval while Java array indexing uses a // half-open interval. 
long offset = 0; long last = size; if (range.startsWith("-")) { offset = last - Long.parseLong(range.substring(1)); if (offset < 0) { offset = 0; } } else if (range.endsWith("-")) { offset = Long.parseLong(range.substring(0, range.length() - 1)); } else if (range.contains("-")) { String[] firstLast = range.split("\\-", 2); offset = Long.parseLong(firstLast[0]); last = Long.parseLong(firstLast[1]); } else { throw new HttpResponseException("illegal range: " + range, null, HttpResponse.builder().statusCode(416).build()); } if (offset >= size) { throw new HttpResponseException("illegal range: " + range, null, HttpResponse.builder().statusCode(416).build()); } if (last + 1 > size) { last = size - 1; } inputStream.skipNBytes(offset); size = last - offset + 1; inputStream = ByteStreams.limit(inputStream, size); contentRange = "bytes " + offset + "-" + last + "/" + attr.size(); } } if (eTag != null) { eTag = maybeQuoteETag(eTag); if (options.getIfMatch() != null) { if (!eTag.equals(maybeQuoteETag(options.getIfMatch()))) { HttpResponse response = HttpResponse.builder().statusCode(Status.PRECONDITION_FAILED.getStatusCode()).addHeader(HttpHeaders.ETAG, eTag).build(); throw new HttpResponseException(new HttpCommand(HttpRequest.builder().method("GET").endpoint("http://stub").build()), response); } } if (options.getIfNoneMatch() != null) { if (eTag.equals(maybeQuoteETag(options.getIfNoneMatch()))) { HttpResponse response = HttpResponse.builder().statusCode(Status.NOT_MODIFIED.getStatusCode()).addHeader(HttpHeaders.ETAG, eTag).build(); throw new HttpResponseException(new HttpCommand(HttpRequest.builder().method("GET").endpoint("http://stub").build()), response); } } } if (options.getIfModifiedSince() != null) { Date modifiedSince = options.getIfModifiedSince(); if (lastModifiedTime.before(modifiedSince)) { @SuppressWarnings("rawtypes") HttpResponse.Builder response = HttpResponse.builder().statusCode(Status.NOT_MODIFIED.getStatusCode()); if (eTag != null) { 
response.addHeader(HttpHeaders.ETAG, eTag); } throw new HttpResponseException("%1$s is before %2$s".formatted(lastModifiedTime, modifiedSince), null, response.build()); } } if (options.getIfUnmodifiedSince() != null) { Date unmodifiedSince = options.getIfUnmodifiedSince(); if (lastModifiedTime.after(unmodifiedSince)) { @SuppressWarnings("rawtypes") HttpResponse.Builder response = HttpResponse.builder().statusCode(Status.PRECONDITION_FAILED.getStatusCode()); if (eTag != null) { response.addHeader(HttpHeaders.ETAG, eTag); } throw new HttpResponseException("%1$s is after %2$s".formatted(lastModifiedTime, unmodifiedSince), null, response.build()); } } Blob blob = new BlobBuilderImpl() .type(isDirectory ? StorageType.FOLDER : StorageType.BLOB) .name(key) .userMetadata(userMetadata.build()) .payload(inputStream) .cacheControl(cacheControl) .contentDisposition(contentDisposition) .contentEncoding(contentEncoding) .contentLanguage(contentLanguage) .contentLength(size) .contentMD5(hashCode) .contentType(contentType) .eTag(eTag) .expires(expires) .tier(tier) .build(); blob.getMetadata().setContainer(container); blob.getMetadata().setCreationDate(creationTime); blob.getMetadata().setLastModified(lastModifiedTime); blob.getMetadata().setSize(size); if (contentRange != null) { blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE, contentRange); } if (hashCode != null) { blob.getMetadata().setETag(BaseEncoding.base16().lowerCase().encode(hashCode.asBytes())); } return blob; } catch (NoSuchFileException nsfe) { return null; } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final String putBlob(String container, Blob blob) { return putBlob(container, blob, new PutOptions()); } @Override public final String putBlob(String container, Blob blob, PutOptions options) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } var containerPath = root.resolve(container); var path = 
containerPath.resolve(blob.getMetadata().getName()).normalize(); if (path.toString().equals("/")) { path = containerPath; } checkValidPath(containerPath, path); // TODO: should we use a known suffix to filter these out during list? var tmpPath = root.resolve(container).resolve(blob.getMetadata().getName() + "-" + UUID.randomUUID()); logger.debug("Creating blob at: {}", path); if (blob.getMetadata().getName().endsWith("/")) { try { logger.debug("Creating directory blob: {}", path); Files.createDirectories(path); } catch (FileAlreadyExistsException faee) { logger.debug("Parent directories already exist: {}", path.getParent()); } catch (IOException ioe) { throw new RuntimeException(ioe); } var view = Files.getFileAttributeView(path, UserDefinedFileAttributeView.class); if (view != null) { try { writeCommonMetadataAttr(view, blob); view.write(XATTR_CONTENT_MD5, ByteBuffer.wrap(DIRECTORY_MD5)); } catch (IOException ioe) { logger.debug("xattrs not supported on {}", path); } } return BaseEncoding.base16().lowerCase().encode(DIRECTORY_MD5); } // Create parent directories. 
try { Files.createDirectories(path.getParent()); } catch (FileAlreadyExistsException faee) { logger.debug("Parent directories already exist: {}", path.getParent()); } catch (IOException ioe) { throw new RuntimeException(ioe); } var metadata = blob.getMetadata().getContentMetadata(); try (var is = new HashingInputStream(md5, blob.getPayload().openStream()); var os = Files.newOutputStream(tmpPath)) { is.transferTo(os); var actualHashCode = is.hash(); var expectedHashCode = metadata.getContentMD5AsHashCode(); if (expectedHashCode != null && !actualHashCode.equals(expectedHashCode)) { Files.delete(tmpPath); throw returnResponseException(400); } var view = Files.getFileAttributeView(tmpPath, UserDefinedFileAttributeView.class); if (view != null) { try { var eTag = actualHashCode.asBytes(); view.write(XATTR_CONTENT_MD5, ByteBuffer.wrap(eTag)); writeStringAttributeIfPresent(view, XATTR_CACHE_CONTROL, metadata.getCacheControl()); writeStringAttributeIfPresent(view, XATTR_CONTENT_DISPOSITION, metadata.getContentDisposition()); writeStringAttributeIfPresent(view, XATTR_CONTENT_ENCODING, metadata.getContentEncoding()); writeStringAttributeIfPresent(view, XATTR_CONTENT_LANGUAGE, metadata.getContentLanguage()); writeStringAttributeIfPresent(view, XATTR_CONTENT_TYPE, metadata.getContentType()); var expires = metadata.getExpires(); if (expires != null) { ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES).putLong(expires.getTime()); buf.flip(); view.write(XATTR_EXPIRES, buf); } writeStringAttributeIfPresent(view, XATTR_STORAGE_TIER, blob.getMetadata().getTier().toString()); for (var entry : blob.getMetadata().getUserMetadata().entrySet()) { writeStringAttributeIfPresent(view, XATTR_USER_METADATA_PREFIX + entry.getKey(), entry.getValue()); } } catch (IOException e) { // TODO: //logger.debug("xattrs not supported on %s", path); } } setBlobAccessHelper(tmpPath, options.getBlobAccess()); Files.move(tmpPath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); 
return "\"" + actualHashCode + "\""; } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) { var blob = getBlob(fromContainer, fromName); if (blob == null) { throw new KeyNotFoundException(fromContainer, fromName, "while copying"); } var eTag = blob.getMetadata().getETag(); if (eTag != null) { eTag = maybeQuoteETag(eTag); if (options.ifMatch() != null && !maybeQuoteETag(options.ifMatch()).equals(eTag)) { throw returnResponseException(412); } if (options.ifNoneMatch() != null && maybeQuoteETag(options.ifNoneMatch()).equals(eTag)) { throw returnResponseException(412); } } var lastModified = blob.getMetadata().getLastModified(); if (lastModified != null) { if (options.ifModifiedSince() != null && lastModified.compareTo(options.ifModifiedSince()) <= 0) { throw returnResponseException(412); } if (options.ifUnmodifiedSince() != null && lastModified.compareTo(options.ifUnmodifiedSince()) >= 0) { throw returnResponseException(412); } } try (var is = blob.getPayload().openStream()) { var metadata = blob.getMetadata().getContentMetadata(); var builder = blobBuilder(toName).payload(is); Long contentLength = metadata.getContentLength(); if (contentLength != null) { builder.contentLength(contentLength); } var contentMetadata = options.contentMetadata(); if (contentMetadata != null) { String cacheControl = contentMetadata.getCacheControl(); if (cacheControl != null) { builder.cacheControl(cacheControl); } String contentDisposition = contentMetadata.getContentDisposition(); if (contentDisposition != null) { builder.contentDisposition(contentDisposition); } String contentEncoding = contentMetadata.getContentEncoding(); if (contentEncoding != null) { builder.contentEncoding(contentEncoding); } String contentLanguage = contentMetadata.getContentLanguage(); if (contentLanguage != null) { builder.contentLanguage(contentLanguage); } String 
contentType = contentMetadata.getContentType(); if (contentType != null) { builder.contentType(contentType); } } else { builder.cacheControl(metadata.getCacheControl()) .contentDisposition(metadata.getContentDisposition()) .contentEncoding(metadata.getContentEncoding()) .contentLanguage(metadata.getContentLanguage()) .contentType(metadata.getContentType()); } var userMetadata = options.userMetadata(); if (userMetadata != null) { builder.userMetadata(userMetadata); } else { builder.userMetadata(blob.getMetadata().getUserMetadata()); } return putBlob(toContainer, builder.build()); } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final void removeBlob(String container, String key) { try { var containerPath = root.resolve(container); var path = containerPath.resolve(key).normalize(); if (path.toString().equals("/")) { path = containerPath; } checkValidPath(containerPath, path); logger.debug("Deleting blob at: {}", path); Files.delete(path); removeEmptyParentDirectories(containerPath, path.getParent()); } catch (NoSuchFileException nsfe) { return; } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final BlobMetadata blobMetadata(String container, String key) { Blob blob = getBlob(container, key); if (blob == null) { return null; } try { blob.getPayload().openStream().close(); } catch (IOException ioe) { throw new RuntimeException(ioe); } return blob != null ? 
(BlobMetadata) BlobStoreUtils.copy(blob.getMetadata()) : null; } @Override protected final boolean deleteAndVerifyContainerGone(String container) { deleteContainer(container); return !containerExists(container); } @Override public final ContainerAccess getContainerAccess(String container) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } var path = root.resolve(container); Set permissions; try { permissions = Files.getPosixFilePermissions(path); } catch (UnsupportedOperationException uoe) { // Windows/SMB/other non-POSIX: default to PRIVATE return ContainerAccess.PRIVATE; } catch (IOException ioe) { throw new RuntimeException(ioe); } return permissions.contains(PosixFilePermission.OTHERS_READ) ? ContainerAccess.PUBLIC_READ : ContainerAccess.PRIVATE; } @Override public final void setContainerAccess(String container, ContainerAccess access) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } var path = root.resolve(container); Set permissions; try { permissions = new HashSet<>(Files.getPosixFilePermissions(path)); if (access == ContainerAccess.PRIVATE) { permissions.remove(PosixFilePermission.OTHERS_READ); } else if (access == ContainerAccess.PUBLIC_READ) { permissions.add(PosixFilePermission.OTHERS_READ); } Files.setPosixFilePermissions(path, permissions); } catch (UnsupportedOperationException uoe) { // Windows/SMB/other non-POSIX: ignore, cannot set permissions return; } catch (IOException ioe) { throw new RuntimeException(ioe); } } @Override public final BlobAccess getBlobAccess(String container, String key) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } if (!blobExists(container, key)) { throw new KeyNotFoundException(container, key, ""); } var containerPath = root.resolve(container); var path = containerPath.resolve(key).normalize(); if (path.toString().equals("/")) { path = containerPath; } checkValidPath(containerPath, path); Set 
permissions; try { permissions = Files.getPosixFilePermissions(path); } catch (UnsupportedOperationException uoe) { // Windows/SMB/other non-POSIX: default to PRIVATE return BlobAccess.PRIVATE; } catch (IOException ioe) { throw new RuntimeException(ioe); } return permissions.contains(PosixFilePermission.OTHERS_READ) ? BlobAccess.PUBLIC_READ : BlobAccess.PRIVATE; } @Override public final void setBlobAccess(String container, String key, BlobAccess access) { if (!containerExists(container)) { throw new ContainerNotFoundException(container, ""); } if (!blobExists(container, key)) { throw new KeyNotFoundException(container, key, ""); } var containerPath = root.resolve(container); var path = containerPath.resolve(key).normalize(); if (path.toString().equals("/")) { path = containerPath; } checkValidPath(containerPath, path); setBlobAccessHelper(path, access); } @Override public final MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) { var uploadId = UUID.randomUUID().toString(); // create a stub blob var blob = blobBuilder(MULTIPART_PREFIX + uploadId + "-" + blobMetadata.getName() + "-stub").payload(ByteSource.empty()).build(); putBlob(container, blob); return MultipartUpload.create(container, blobMetadata.getName(), uploadId, blobMetadata, options); } @Override public final void abortMultipartUpload(MultipartUpload mpu) { var parts = listMultipartUpload(mpu); for (var part : parts) { removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-" + part.partNumber()); } removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-stub"); } @Override public final String completeMultipartUpload(MultipartUpload mpu, List parts) { var metas = ImmutableList.builder(); long contentLength = 0; var md5Hasher = md5.newHasher(); for (var part : parts) { var meta = blobMetadata(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-" + part.partNumber()); 
contentLength += meta.getContentMetadata().getContentLength(); metas.add(meta); if (meta.getETag() != null) { var eTag = meta.getETag(); if (eTag.startsWith("\"") && eTag.endsWith("\"") && eTag.length() >= 2) { eTag = eTag.substring(1, eTag.length() - 1); } md5Hasher.putBytes(BaseEncoding.base16().lowerCase().decode(eTag)); } } var mpuETag = "\"" + md5Hasher.hash() + "-" + parts.size() + "\""; var blobBuilder = blobBuilder(mpu.blobName()) .userMetadata(mpu.blobMetadata().getUserMetadata()) .payload(new MultiBlobInputStream(this, metas.build())) .contentLength(contentLength) .eTag(mpuETag); var cacheControl = mpu.blobMetadata().getContentMetadata().getCacheControl(); if (cacheControl != null) { blobBuilder.cacheControl(cacheControl); } var contentDisposition = mpu.blobMetadata().getContentMetadata().getContentDisposition(); if (contentDisposition != null) { blobBuilder.contentDisposition(contentDisposition); } var contentEncoding = mpu.blobMetadata().getContentMetadata().getContentEncoding(); if (contentEncoding != null) { blobBuilder.contentEncoding(contentEncoding); } var contentLanguage = mpu.blobMetadata().getContentMetadata().getContentLanguage(); if (contentLanguage != null) { blobBuilder.contentLanguage(contentLanguage); } // intentionally not copying MD5 var contentType = mpu.blobMetadata().getContentMetadata().getContentType(); if (contentType != null) { blobBuilder.contentType(contentType); } var expires = mpu.blobMetadata().getContentMetadata().getExpires(); if (expires != null) { blobBuilder.expires(expires); } var tier = mpu.blobMetadata().getTier(); if (tier != null) { blobBuilder.tier(tier); } putBlob(mpu.containerName(), blobBuilder.build()); for (var part : parts) { removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-" + part.partNumber()); } removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-stub"); setBlobAccess(mpu.containerName(), mpu.blobName(), 
mpu.putOptions().getBlobAccess()); return mpuETag; } @Override public final MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) { var partName = MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-" + partNumber; var blob = blobBuilder(partName) .payload(payload) .build(); var partETag = putBlob(mpu.containerName(), blob); var metadata = blobMetadata(mpu.containerName(), partName); // TODO: racy, how to get this from payload? var partSize = metadata.getContentMetadata().getContentLength(); return MultipartPart.create(partNumber, partSize, partETag, metadata.getLastModified()); } @Override public final List listMultipartUpload(MultipartUpload mpu) { var parts = ImmutableList.builder(); var options = new ListContainerOptions().prefix(MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-").recursive(); while (true) { var pageSet = list(mpu.containerName(), options); for (var sm : pageSet) { if (sm.getName().endsWith("-stub")) { continue; } int partNumber = Integer.parseInt(sm.getName().substring((MULTIPART_PREFIX + mpu.id() + "-" + mpu.blobName() + "-").length())); long partSize = sm.getSize(); parts.add(MultipartPart.create(partNumber, partSize, sm.getETag(), sm.getLastModified())); } if (pageSet.isEmpty() || pageSet.getNextMarker() == null) { break; } options.afterMarker(pageSet.getNextMarker()); } return parts.build(); } @Override public final List listMultipartUploads(String container) { var mpus = ImmutableList.builder(); var options = new ListContainerOptions().prefix(MULTIPART_PREFIX).recursive(); int uuidLength = UUID.randomUUID().toString().length(); while (true) { var pageSet = list(container, options); for (StorageMetadata sm : pageSet) { if (!sm.getName().endsWith("-stub")) { continue; } var uploadId = sm.getName().substring(MULTIPART_PREFIX.length(), MULTIPART_PREFIX.length() + uuidLength); var blobName = sm.getName().substring(MULTIPART_PREFIX.length() + uuidLength + 1); int index = blobName.lastIndexOf('-'); 
blobName = blobName.substring(0, index); mpus.add(MultipartUpload.create(container, blobName, uploadId, null, null)); } if (pageSet.isEmpty() || pageSet.getNextMarker() == null) { break; } options.afterMarker(pageSet.getNextMarker()); } return mpus.build(); } @Override public final long getMinimumMultipartPartSize() { return 1; } @Override public final long getMaximumMultipartPartSize() { return 100 * 1024 * 1024; } @Override public final int getMaximumNumberOfParts() { return 50 * 1000; } @Override public final InputStream streamBlob(String container, String name) { throw new UnsupportedOperationException("not yet implemented"); } /** * Read the String representation of a filesystem attribute, or return null * if not present. */ private static String readStringAttributeIfPresent( UserDefinedFileAttributeView view, Set attr, String name) throws IOException { if (!attr.contains(name)) { return null; } ByteBuffer buf = ByteBuffer.allocate(view.size(name)); view.read(name, buf); return new String(buf.array(), StandardCharsets.UTF_8); } /** Write the String representation of a filesystem attribute. 
*/ private static void writeStringAttributeIfPresent( UserDefinedFileAttributeView view, String name, String value) throws IOException { if (value != null) { view.write(name, ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8))); } } private static final class MultiBlobInputStream extends InputStream { private final BlobStore blobStore; private final Iterator metas; private InputStream current; MultiBlobInputStream(BlobStore blobStore, List metas) { this.blobStore = blobStore; this.metas = metas.iterator(); } @Override public int read() throws IOException { while (true) { if (current == null) { if (!metas.hasNext()) { return -1; } BlobMetadata meta = metas.next(); current = blobStore.getBlob(meta.getContainer(), meta.getName()).getPayload().openStream(); } int result = current.read(); if (result == -1) { current.close(); current = null; continue; } return result & 0x000000FF; } } @Override public int read(byte[] b, int off, int len) throws IOException { while (true) { if (current == null) { if (!metas.hasNext()) { return -1; } BlobMetadata meta = metas.next(); current = blobStore.getBlob(meta.getContainer(), meta.getName()).getPayload().openStream(); } int result = current.read(b, off, len); if (result == -1) { current.close(); current = null; continue; } return result; } } @Override public void close() throws IOException { if (current != null) { current.close(); current = null; } } } private static HttpResponseException returnResponseException(int code) { var response = HttpResponse.builder().statusCode(code).build(); return new HttpResponseException(new HttpCommand(HttpRequest.builder() .method("GET") .endpoint("http://stub") .build()), response); } private static String maybeQuoteETag(String eTag) { if (!eTag.startsWith("\"") && !eTag.endsWith("\"")) { eTag = "\"" + eTag + "\""; } return eTag; } /** * AbstractNio2BlobStore implicitly creates directories when creating a key /a/b/c. 
* When removing /a/b/c, it must clean up /a and /a/b, unless a client explicitly created a subdirectory which has file attributes. */ private static void removeEmptyParentDirectories(Path containerPath, Path path) throws IOException { logger.debug("removing empty parents: {}", path); while (true) { var parent = path.getParent(); if (parent == null || path.equals(containerPath)) { break; } if (safeGetXattrs(path).attributes().contains(XATTR_CONTENT_MD5)) { break; } try { logger.debug("deleting: {}", path); Files.delete(path); } catch (DirectoryNotEmptyException dnee) { break; } path = path.getParent(); } } // TODO: call in other places private static void writeCommonMetadataAttr(UserDefinedFileAttributeView view, Blob blob) throws IOException { var metadata = blob.getMetadata().getContentMetadata(); writeStringAttributeIfPresent(view, XATTR_CACHE_CONTROL, metadata.getCacheControl()); writeStringAttributeIfPresent(view, XATTR_CONTENT_DISPOSITION, metadata.getContentDisposition()); writeStringAttributeIfPresent(view, XATTR_CONTENT_ENCODING, metadata.getContentEncoding()); writeStringAttributeIfPresent(view, XATTR_CONTENT_LANGUAGE, metadata.getContentLanguage()); writeStringAttributeIfPresent(view, XATTR_CONTENT_TYPE, metadata.getContentType()); var expires = metadata.getExpires(); if (expires != null) { var buf = ByteBuffer.allocate(Longs.BYTES).putLong(expires.getTime()); buf.flip(); view.write(XATTR_EXPIRES, buf); } writeStringAttributeIfPresent(view, XATTR_STORAGE_TIER, blob.getMetadata().getTier().toString()); for (var entry : blob.getMetadata().getUserMetadata().entrySet()) { writeStringAttributeIfPresent(view, XATTR_USER_METADATA_PREFIX + entry.getKey(), entry.getValue()); } } private record XattrState(UserDefinedFileAttributeView view, Set attributes) { static final XattrState EMPTY = new XattrState(null, NO_ATTRIBUTES); } /** * Safely read extended attributes for a path. 
Returns a view and attribute * set, or EMPTY if the filesystem does not support extended attributes * (e.g., Docker Desktop bind mounts via VirtioFS, some NFS/NAS mounts). */ private static XattrState safeGetXattrs(Path path) { var view = Files.getFileAttributeView(path, UserDefinedFileAttributeView.class); if (view == null) { return XattrState.EMPTY; } try { return new XattrState(view, Set.copyOf(view.list())); } catch (IOException e) { logger.debug("xattrs not supported on {}", path); return XattrState.EMPTY; } } private static void checkValidPath(Path container, Path path) { if (!path.normalize().startsWith(container)) { throw new IllegalArgumentException("Invalid key name: path traversal attempt detected: " + container + " " + path); } } private static void setBlobAccessHelper(Path path, BlobAccess access) { try { var permissions = new HashSet<>(Files.getPosixFilePermissions(path)); if (access == BlobAccess.PRIVATE) { permissions.remove(PosixFilePermission.OTHERS_READ); } else if (access == BlobAccess.PUBLIC_READ) { permissions.add(PosixFilePermission.OTHERS_READ); } Files.setPosixFilePermissions(path, permissions); } catch (UnsupportedOperationException uoe) { // Windows/SMB/other non-POSIX: ignore, cannot set permissions return; } catch (IOException ioe) { throw new RuntimeException(ioe); } } } ================================================ FILE: src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobApiMetadata.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.gaul.s3proxy.nio2blob;

import java.net.URI;
import java.util.Properties;
import java.util.Set;

import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.reflect.Reflection2;
import org.jclouds.rest.internal.BaseHttpApiMetadata;

/**
 * jclouds ApiMetadata for the "filesystem-nio2" blobstore.  The API client
 * interface is a placeholder: the store is implemented in-process and wired
 * up via FilesystemNio2BlobStoreContextModule.
 */
@SuppressWarnings("rawtypes")
public final class FilesystemNio2BlobApiMetadata extends BaseHttpApiMetadata {
    public FilesystemNio2BlobApiMetadata() {
        this(builder());
    }

    protected FilesystemNio2BlobApiMetadata(Builder builder) {
        super(builder);
    }

    private static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromApiMetadata(this);
    }

    public static Properties defaultProperties() {
        return BaseHttpApiMetadata.defaultProperties();
    }

    // Fake API client
    private interface FilesystemNio2BlobClient {
    }

    public static final class Builder extends BaseHttpApiMetadata.Builder {
        protected Builder() {
            super(FilesystemNio2BlobClient.class);
            id("filesystem-nio2")
                .name("Filesystem NIO.2 Blobstore")
                .identityName("Account Name")
                .credentialName("Access Key")
                .defaultEndpoint("http://localhost/")
                .documentation(URI.create(
                    "http://www.jclouds.org/documentation/userguide" +
                    "/blobstore-guide"))
                .defaultProperties(
                    FilesystemNio2BlobApiMetadata.defaultProperties())
                .view(Reflection2.typeToken(BlobStoreContext.class))
                .defaultModules(
                    Set.of(FilesystemNio2BlobStoreContextModule.class));
        }

        @Override
        public FilesystemNio2BlobApiMetadata build() {
            return new FilesystemNio2BlobApiMetadata(this);
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobProviderMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.nio2blob;

import java.util.Properties;

import com.google.auto.service.AutoService;

import org.jclouds.providers.ProviderMetadata;
import org.jclouds.providers.internal.BaseProviderMetadata;

/**
 * Implementation of org.jclouds.types.ProviderMetadata for NIO.2 filesystems.
 */
@AutoService(ProviderMetadata.class)
public final class FilesystemNio2BlobProviderMetadata
        extends BaseProviderMetadata {
    public FilesystemNio2BlobProviderMetadata() {
        super(builder());
    }

    public FilesystemNio2BlobProviderMetadata(Builder builder) {
        super(builder);
    }

    public static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromProviderMetadata(this);
    }

    public static Properties defaultProperties() {
        Properties properties = new Properties();
        // TODO: filesystem basedir
        return properties;
    }

    public static final class Builder extends BaseProviderMetadata.Builder {
        protected Builder() {
            id("filesystem-nio2")
                .name("NIO.2 filesystem blobstore")
                .apiMetadata(new FilesystemNio2BlobApiMetadata())
                .endpoint("https://127.0.0.1")  // TODO:
                .defaultProperties(
                    FilesystemNio2BlobProviderMetadata.defaultProperties());
        }

        @Override
        public FilesystemNio2BlobProviderMetadata build() {
            return new FilesystemNio2BlobProviderMetadata(this);
        }

        @Override
        public Builder fromProviderMetadata(
                ProviderMetadata in) {
            super.fromProviderMetadata(in);
            return this;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobStore.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.nio2blob;

import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.util.Set;

import com.google.common.base.Supplier;

import jakarta.inject.Inject;
import jakarta.inject.Named;
import jakarta.inject.Singleton;

import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.util.BlobUtils;
import org.jclouds.collect.Memoized;
import org.jclouds.domain.Credentials;
import org.jclouds.domain.Location;
import org.jclouds.filesystem.reference.FilesystemConstants;
import org.jclouds.io.PayloadSlicer;

/**
 * Blobstore rooted at {@code baseDir} on the default NIO.2 filesystem.
 * Fails fast at construction when the base directory does not exist.
 */
@Singleton
public final class FilesystemNio2BlobStore extends AbstractNio2BlobStore {
    // Generic type arguments restored; the extracted text had raw
    // Supplier parameters, which does not match the imported Location,
    // Credentials, and Set types.
    @Inject
    FilesystemNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils,
            Supplier<Location> defaultLocation,
            @Memoized Supplier<Set<? extends Location>> locations,
            PayloadSlicer slicer,
            @org.jclouds.location.Provider Supplier<Credentials> creds,
            @Named(FilesystemConstants.PROPERTY_BASEDIR) String baseDir) {
        super(context, blobUtils, defaultLocation, locations, slicer, creds,
                // cannot be closed
                FileSystems.getDefault().getPath(baseDir));
        // surface a misconfigured basedir immediately rather than on first use
        if (!Files.exists(getRoot())) {
            throw new RuntimeException(
                    new NoSuchFileException(getRoot().toString()));
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobStoreContextModule.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.nio2blob;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;

import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.attr.ConsistencyModel;

/**
 * Guice module binding the filesystem NIO.2 BlobStore implementation with
 * strict consistency semantics.
 */
public final class FilesystemNio2BlobStoreContextModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);
        bind(BlobStore.class).to(FilesystemNio2BlobStore.class)
                .in(Scopes.SINGLETON);
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobApiMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package org.gaul.s3proxy.nio2blob;

import java.net.URI;
import java.util.Properties;
import java.util.Set;

import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.reflect.Reflection2;
import org.jclouds.rest.internal.BaseHttpApiMetadata;

/**
 * jclouds ApiMetadata for the "transient-nio2" blobstore.  The API client
 * interface is a placeholder: the store is implemented in-process and wired
 * up via TransientNio2BlobStoreContextModule.
 */
@SuppressWarnings("rawtypes")
public final class TransientNio2BlobApiMetadata extends BaseHttpApiMetadata {
    public TransientNio2BlobApiMetadata() {
        this(builder());
    }

    protected TransientNio2BlobApiMetadata(Builder builder) {
        super(builder);
    }

    private static Builder builder() {
        return new Builder();
    }

    @Override
    public Builder toBuilder() {
        return builder().fromApiMetadata(this);
    }

    public static Properties defaultProperties() {
        return BaseHttpApiMetadata.defaultProperties();
    }

    // Fake API client
    private interface TransientNio2BlobClient {
    }

    public static final class Builder extends BaseHttpApiMetadata.Builder {
        protected Builder() {
            super(TransientNio2BlobClient.class);
            id("transient-nio2")
                .name("Transient NIO.2 Blobstore")
                .identityName("Account Name")
                .credentialName("Access Key")
                .defaultEndpoint("http://localhost/")
                .documentation(URI.create(
                    "http://www.jclouds.org/documentation/userguide" +
                    "/blobstore-guide"))
                .defaultProperties(
                    TransientNio2BlobApiMetadata.defaultProperties())
                .view(Reflection2.typeToken(BlobStoreContext.class))
                .defaultModules(
                    Set.of(TransientNio2BlobStoreContextModule.class));
        }

        @Override
        public TransientNio2BlobApiMetadata build() {
            return new TransientNio2BlobApiMetadata(this);
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}

================================================
FILE: src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobProviderMetadata.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.nio2blob; import java.util.Properties; import com.google.auto.service.AutoService; import org.jclouds.providers.ProviderMetadata; import org.jclouds.providers.internal.BaseProviderMetadata; /** * Implementation of org.jclouds.types.ProviderMetadata for NIO.2 filesystems. */ @AutoService(ProviderMetadata.class) public final class TransientNio2BlobProviderMetadata extends BaseProviderMetadata { public TransientNio2BlobProviderMetadata() { super(builder()); } public TransientNio2BlobProviderMetadata(Builder builder) { super(builder); } public static Builder builder() { return new Builder(); } @Override public Builder toBuilder() { return builder().fromProviderMetadata(this); } public static Properties defaultProperties() { Properties properties = new Properties(); // TODO: filesystem basedir return properties; } public static final class Builder extends BaseProviderMetadata.Builder { protected Builder() { id("transient-nio2") .name("Filesystem NIO.2 blobstore") .apiMetadata(new TransientNio2BlobApiMetadata()) .endpoint("https://127.0.0.1") // TODO: .defaultProperties( TransientNio2BlobProviderMetadata.defaultProperties()); } @Override public TransientNio2BlobProviderMetadata build() { return new TransientNio2BlobProviderMetadata(this); } @Override public Builder fromProviderMetadata( ProviderMetadata in) { super.fromProviderMetadata(in); return this; } } } ================================================ FILE: src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobStore.java ================================================ /* * Copyright 
2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.nio2blob; import java.nio.file.FileSystem; import java.util.Set; import com.google.common.base.Supplier; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import jakarta.inject.Inject; import jakarta.inject.Singleton; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.util.BlobUtils; import org.jclouds.collect.Memoized; import org.jclouds.domain.Credentials; import org.jclouds.domain.Location; import org.jclouds.io.PayloadSlicer; @Singleton public final class TransientNio2BlobStore extends AbstractNio2BlobStore { @Inject TransientNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds) { this(context, blobUtils, defaultLocation, locations, slicer, creds, Jimfs.newFileSystem(Configuration.unix().toBuilder() .setAttributeViews("posix", "user") .setWorkingDirectory("/") .build())); } // Helper to create Path private TransientNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils, Supplier defaultLocation, @Memoized Supplier> locations, PayloadSlicer slicer, @org.jclouds.location.Provider Supplier creds, FileSystem fs) { // TODO: close fs? 
super(context, blobUtils, defaultLocation, locations, slicer, creds, fs.getPath("")); } } ================================================ FILE: src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobStoreContextModule.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.nio2blob; import com.google.inject.AbstractModule; import com.google.inject.Scopes; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.attr.ConsistencyModel; public final class TransientNio2BlobStoreContextModule extends AbstractModule { @Override protected void configure() { bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT); bind(BlobStore.class).to(TransientNio2BlobStore.class).in(Scopes.SINGLETON); } } ================================================ FILE: src/main/resources/checkstyle.xml ================================================ ================================================ FILE: src/main/resources/copyright_header.txt ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

================================================
FILE: src/main/resources/run-docker-container.sh
================================================
#!/bin/sh
# Docker entry point: maps S3PROXY_*/JCLOUDS_* environment variables onto
# Java system properties and execs the s3proxy fat jar (PID 1 for signals).

# -p: tolerate a pre-existing basedir so container restarts and mounted
# volumes do not print a spurious mkdir error (plain mkdir fails when the
# directory already exists)
mkdir -p "${JCLOUDS_FILESYSTEM_BASEDIR}"

# S3PROXY_JAVA_OPTS is intentionally unquoted so multiple options split
exec java \
    $S3PROXY_JAVA_OPTS \
    -DLOG_LEVEL="${LOG_LEVEL}" \
    -Ds3proxy.endpoint="${S3PROXY_ENDPOINT}" \
    -Ds3proxy.secure-endpoint="${S3PROXY_SECURE_ENDPOINT}" \
    -Ds3proxy.virtual-host="${S3PROXY_VIRTUALHOST}" \
    -Ds3proxy.keystore-path="${S3PROXY_KEYSTORE_PATH}" \
    -Ds3proxy.keystore-password="${S3PROXY_KEYSTORE_PASSWORD}" \
    -Ds3proxy.authorization="${S3PROXY_AUTHORIZATION}" \
    -Ds3proxy.identity="${S3PROXY_IDENTITY}" \
    -Ds3proxy.credential="${S3PROXY_CREDENTIAL}" \
    -Ds3proxy.cors-allow-all="${S3PROXY_CORS_ALLOW_ALL}" \
    -Ds3proxy.cors-allow-origins="${S3PROXY_CORS_ALLOW_ORIGINS}" \
    -Ds3proxy.cors-allow-methods="${S3PROXY_CORS_ALLOW_METHODS}" \
    -Ds3proxy.cors-allow-headers="${S3PROXY_CORS_ALLOW_HEADERS}" \
    -Ds3proxy.cors-exposed-headers="${S3PROXY_CORS_EXPOSED_HEADERS}" \
    -Ds3proxy.cors-allow-credential="${S3PROXY_CORS_ALLOW_CREDENTIAL}" \
    -Ds3proxy.ignore-unknown-headers="${S3PROXY_IGNORE_UNKNOWN_HEADERS}" \
    -Ds3proxy.encrypted-blobstore="${S3PROXY_ENCRYPTED_BLOBSTORE}" \
    -Ds3proxy.encrypted-blobstore-password="${S3PROXY_ENCRYPTED_BLOBSTORE_PASSWORD}" \
    -Ds3proxy.encrypted-blobstore-salt="${S3PROXY_ENCRYPTED_BLOBSTORE_SALT}" \
    -Ds3proxy.v4-max-non-chunked-request-size="${S3PROXY_V4_MAX_NON_CHUNKED_REQ_SIZE:-134217728}" \
    -Ds3proxy.v4-max-chunk-size="${S3PROXY_V4_MAX_CHUNK_SIZE:-16777216}" \
    -Ds3proxy.read-only-blobstore="${S3PROXY_READ_ONLY_BLOBSTORE:-false}" \
    -Ds3proxy.no-cache-blobstore="${S3PROXY_NO_CACHE_BLOBSTORE:-false}" \
    -Ds3proxy.maximum-timeskew="${S3PROXY_MAXIMUM_TIMESKEW}" \
    -Ds3proxy.metrics.enabled="${S3PROXY_METRICS_ENABLED}" \
    -Ds3proxy.metrics.port="${S3PROXY_METRICS_PORT}" \
    -Ds3proxy.metrics.host="${S3PROXY_METRICS_HOST}" \
    -Ds3proxy.service-path="${S3PROXY_SERVICE_PATH}" \
    -Djclouds.provider="${JCLOUDS_PROVIDER}" \
    -Djclouds.identity="${JCLOUDS_IDENTITY}" \
    -Djclouds.credential="${JCLOUDS_CREDENTIAL}" \
    -Djclouds.endpoint="${JCLOUDS_ENDPOINT}" \
    -Djclouds.region="${JCLOUDS_REGION}" \
    -Djclouds.regions="${JCLOUDS_REGIONS}" \
    -Djclouds.keystone.version="${JCLOUDS_KEYSTONE_VERSION}" \
    -Djclouds.keystone.scope="${JCLOUDS_KEYSTONE_SCOPE}" \
    -Djclouds.keystone.project-domain-name="${JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME}" \
    -Djclouds.filesystem.basedir="${JCLOUDS_FILESYSTEM_BASEDIR}" \
    -Djclouds.azureblob.tenantId="${JCLOUDS_AZUREBLOB_TENANTID}" \
    -Djclouds.azureblob.auth="${JCLOUDS_AZUREBLOB_AUTH}" \
    -Djclouds.azureblob.account="${JCLOUDS_AZUREBLOB_ACCOUNT}" \
    -jar /opt/s3proxy/s3proxy \
    --properties /dev/null

================================================
FILE: src/test/java/org/gaul/s3proxy/AliasBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableList;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.io.ByteSource;

import org.assertj.core.api.Assertions;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;
import org.jclouds.blobstore.domain.BlobMetadata;
import org.jclouds.blobstore.domain.MultipartPart;
import org.jclouds.blobstore.domain.MultipartUpload;
import org.jclouds.blobstore.domain.PageSet;
import org.jclouds.blobstore.domain.StorageMetadata;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.io.Payloads;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Exercises AliasBlobStore against a transient backend: alias containers
 * must be rewritten to their backend names, while blob and multipart
 * operations pass through unchanged.
 */
public final class AliasBlobStoreTest {
    private String containerName;         // backend container
    private String aliasContainerName;    // client-visible alias
    private BlobStoreContext context;
    private BlobStore blobStore;          // raw backend store
    private BlobStore aliasBlobStore;     // store under test
    private List createdContainers;       // backend names to clean up

    @Before
    public void setUp() {
        containerName = TestUtils.createRandomContainerName();
        aliasContainerName = String.format("alias-%s", containerName);
        context = ContextBuilder
                .newBuilder("transient")
                .credentials("identity", "credential")
                .modules(List.of(new SLF4JLoggingModule()))
                .build(BlobStoreContext.class);
        blobStore = context.getBlobStore();
        var aliasesBuilder = new ImmutableBiMap.Builder();
        aliasesBuilder.put(aliasContainerName, containerName);
        aliasBlobStore = AliasBlobStore.newAliasBlobStore(
                blobStore, aliasesBuilder.build());
        createdContainers = new ArrayList<>();
    }

    @After
    public void tearDown() {
        if (this.context != null) {
            for (String container : this.createdContainers) {
                blobStore.deleteContainer(container);
            }
            context.close();
        }
    }

    // Creates via the alias store and records the *backend* name for cleanup.
    private void createContainer(String container) {
        assertThat(aliasBlobStore.createContainerInLocation(
                null, container)).isTrue();
        if (container.equals(aliasContainerName)) {
            createdContainers.add(containerName);
        } else {
            createdContainers.add(container);
        }
    }

    @Test
    public void testListNoAliasContainers() {
        String regularContainer = TestUtils.createRandomContainerName();
        createContainer(regularContainer);
        PageSet listing = aliasBlobStore.list();
        assertThat(listing.size()).isEqualTo(1);
        assertThat(listing.iterator().next().getName()).isEqualTo(
                regularContainer);
    }

    @Test
    public void testListAliasContainer() {
        createContainer(aliasContainerName);
        // alias store reports the alias name ...
        PageSet listing = aliasBlobStore.list();
        assertThat(listing.size()).isEqualTo(1);
        assertThat(listing.iterator().next().getName()).isEqualTo(
                aliasContainerName);
        // ... while the backend store holds the real name
        listing = blobStore.list();
        assertThat(listing.size()).isEqualTo(1);
        assertThat(listing.iterator().next().getName()).isEqualTo(
                containerName);
    }

    @Test
    public void testAliasBlob() throws IOException {
        createContainer(aliasContainerName);
        String blobName = TestUtils.createRandomBlobName();
        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);
        @SuppressWarnings("deprecation")
        String contentMD5 =
                Hashing.md5().hashBytes(content.read()).toString();
        Blob blob = aliasBlobStore.blobBuilder(blobName).payload(content)
                .build();
        String eTag = aliasBlobStore.putBlob(aliasContainerName, blob);
        assertThat(eTag).isEqualTo(contentMD5);
        BlobMetadata blobMetadata = aliasBlobStore.blobMetadata(
                aliasContainerName, blobName);
        assertThat(blobMetadata.getETag()).isEqualTo(contentMD5);
        blob = aliasBlobStore.getBlob(aliasContainerName, blobName);
        try (InputStream actual = blob.getPayload().openStream();
             InputStream expected = content.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    @Test
    public void testAliasMultipartUpload() throws IOException {
        createContainer(aliasContainerName);
        String blobName = TestUtils.createRandomBlobName();
        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);
        @SuppressWarnings("deprecation")
        HashCode contentHash = Hashing.md5().hashBytes(content.read());
        Blob blob = aliasBlobStore.blobBuilder(blobName).build();
        MultipartUpload mpu = aliasBlobStore.initiateMultipartUpload(
                aliasContainerName, blob.getMetadata(), PutOptions.NONE);
        // MPU should carry the alias name, not the backend name
        assertThat(mpu.containerName()).isEqualTo(aliasContainerName);
        MultipartPart part = aliasBlobStore.uploadMultipartPart(
                mpu, 1, Payloads.newPayload(content));
        assertThat(part.partETag()).isEqualTo(contentHash.toString());
        var parts = new ImmutableList.Builder();
        parts.add(part);
        String mpuETag = aliasBlobStore.completeMultipartUpload(mpu,
                parts.build());
        // S3-style MPU ETag: md5 of the part md5s, suffixed with part count
        @SuppressWarnings("deprecation")
        HashCode contentHash2 =
                Hashing.md5().hashBytes(contentHash.asBytes());
        assertThat(mpuETag).isEqualTo(
                String.format("\"%s-1\"", contentHash2));
        blob = aliasBlobStore.getBlob(aliasContainerName, blobName);
        try (InputStream actual = blob.getPayload().openStream();
             InputStream expected = content.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    @Test
    public void testParseDuplicateAliases() {
        var properties = new Properties();
        properties.setProperty(String.format("%s.alias",
                S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE), "bucket");
        properties.setProperty(String.format("%s.other-alias",
                S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE), "bucket");
        try {
            AliasBlobStore.parseAliases(properties);
            Assertions.failBecauseExceptionWasNotThrown(
                    IllegalArgumentException.class);
        } catch (IllegalArgumentException exc) {
            assertThat(exc.getMessage()).isEqualTo(
                    "Backend bucket bucket is aliased twice");
        }
    }
}

================================================
FILE: src/test/java/org/gaul/s3proxy/AwsS3SdkBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the
"License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Properties;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStoreContext;
import org.junit.Test;

/**
 * Smoke tests for the "aws-s3-sdk" jclouds provider: registration,
 * instantiation, and configuration overrides -- no backend is contacted.
 */
public final class AwsS3SdkBlobStoreTest {
    @Test
    public void testProviderRegistration() {
        // Verify that the provider is discoverable via jclouds
        var providers = ContextBuilder.newBuilder("aws-s3-sdk");
        assertThat(providers).isNotNull();
    }

    @Test
    public void testProviderMetadata() {
        var properties = new Properties();
        properties.setProperty("jclouds.identity", "test-identity");
        properties.setProperty("jclouds.credential", "test-credential");
        properties.setProperty("jclouds.endpoint", "http://localhost:9000");
        // This validates that the provider can be instantiated
        // without actually connecting to a backend
        try (BlobStoreContext context =
                ContextBuilder.newBuilder("aws-s3-sdk")
                    .overrides(properties)
                    .buildView(BlobStoreContext.class)) {
            assertThat(context).isNotNull();
            assertThat(context.getBlobStore()).isNotNull();
        }
    }

    @Test
    public void testCustomRegionConfiguration() {
        var properties = new Properties();
        properties.setProperty("jclouds.identity", "test-identity");
        properties.setProperty("jclouds.credential", "test-credential");
        properties.setProperty("jclouds.endpoint", "http://localhost:9000");
        properties.setProperty("aws-s3-sdk.region", "eu-west-1");
        // Verify that custom region configuration is accepted
        try (BlobStoreContext context =
                ContextBuilder.newBuilder("aws-s3-sdk")
                    .overrides(properties)
                    .buildView(BlobStoreContext.class)) {
            assertThat(context).isNotNull();
            assertThat(context.getBlobStore()).isNotNull();
        }
    }
}

================================================
FILE: src/test/java/org/gaul/s3proxy/AwsSdk2Test.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import org.jclouds.blobstore.BlobStoreContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.awscore.retry.AwsRetryStrategy;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.http.SdkHttpConfigurationOption;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ChecksumAlgorithm;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.utils.AttributeMap;

/**
 * End-to-end test of S3Proxy through the AWS SDK for Java v2 over the
 * secure endpoint with certificate checking disabled.
 */
public final class AwsSdk2Test {
    private BlobStoreContext context;
    private S3Client s3Client;
    private String containerName;

    @Before
    public void setUp() throws Exception {
        var info = TestUtils.startS3Proxy(
                System.getProperty("s3proxy.test.conf", "s3proxy.conf"));
        context = info.getBlobStore().getContext();
        // trust the self-signed test certificate
        var attributeMap = AttributeMap.builder()
                .put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true)
                .build();
        s3Client = S3Client.builder()
                .credentialsProvider(
                    StaticCredentialsProvider.create(
                        AwsBasicCredentials.create(info.getS3Identity(),
                            info.getS3Credential())))
                .region(Region.US_EAST_1)
                .endpointOverride(info.getSecureEndpoint())
                .httpClient(ApacheHttpClient.builder()
                    .buildWithDefaults(attributeMap))
                .overrideConfiguration(ClientOverrideConfiguration.builder()
                    .retryStrategy(AwsRetryStrategy.doNotRetry())
                    .build())
                .build();
        containerName = AwsSdkTest.createRandomContainerName();
        info.getBlobStore().createContainerInLocation(null, containerName);
    }

    @After
    public void tearDown() throws Exception {
        if (s3Client != null) {
            s3Client.close();
        }
        if (context != null) {
            context.getBlobStore().deleteContainer(containerName);
            context.close();
        }
    }

    @Test
    public void testPutObject() throws Exception {
        var key = "testPutObject";
        var byteSource = TestUtils.randomByteSource().slice(0, 1024);
        var putRequest = PutObjectRequest.builder()
                .bucket(containerName)
                .key(key)
                // TODO: parameterize test with JUnit 5
                //.checksumAlgorithm(ChecksumAlgorithm.CRC32)
                .checksumAlgorithm(ChecksumAlgorithm.CRC32_C)
                //.checksumAlgorithm(ChecksumAlgorithm.SHA1)
                //.checksumAlgorithm(ChecksumAlgorithm.SHA256)
                .build();
        s3Client.putObject(putRequest,
                RequestBody.fromBytes(byteSource.read()));
    }
}

================================================
FILE: src/test/java/org/gaul/s3proxy/AwsSdkAnonymousTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.Random;

import com.amazonaws.SDKGlobalConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.AnonymousAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.internal.SkipMd5CheckStrategy;
import com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.S3Object;
import com.google.common.io.ByteSource;
import org.jclouds.blobstore.BlobStoreContext;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Exercises S3Proxy with anonymous (unauthenticated) AWS SDK v1 clients
// against the s3proxy-anonymous.conf configuration.
public final class AwsSdkAnonymousTest {
    static {
        // The proxy serves a self-signed certificate; disable certificate
        // checking in the SDK and in HttpsURLConnection.
        System.setProperty(
                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,
                "true");
        AwsSdkTest.disableSslVerification();
    }

    private static final ByteSource BYTE_SOURCE = ByteSource.wrap(new byte[1]);

    private URI s3Endpoint;
    private URI httpEndpoint;
    private EndpointConfiguration s3EndpointConfig;
    private S3Proxy s3Proxy;
    private BlobStoreContext context;
    private String blobStoreType;
    private String containerName;
    private AWSCredentials awsCreds;
    private AmazonS3 client;
    private String servicePath;

    // Launches the proxy, builds an anonymous client, and creates a fresh
    // container for each test.
    @Before
    public void setUp() throws Exception {
        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(
                "s3proxy-anonymous.conf");
        awsCreds = new AnonymousAWSCredentials();
        context = info.getBlobStore().getContext();
        s3Proxy = info.getS3Proxy();
        httpEndpoint = info.getEndpoint();
        s3Endpoint = info.getSecureEndpoint();
        servicePath = info.getServicePath();
        s3EndpointConfig = new EndpointConfiguration(
                s3Endpoint.toString() + servicePath, "us-east-1");
        client = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        containerName = createRandomContainerName();
        info.getBlobStore().createContainerInLocation(null, containerName);
        blobStoreType = context.unwrap().getProviderMetadata().getId();
        if (Quirks.OPAQUE_ETAG.contains(blobStoreType)) {
            // Providers with opaque (non-MD5) ETags break the SDK's
            // client-side MD5 validation, so disable it.
            System.setProperty(
                    SkipMd5CheckStrategy
                            .DISABLE_GET_OBJECT_MD5_VALIDATION_PROPERTY,
                    "true");
            System.setProperty(
                    SkipMd5CheckStrategy
                            .DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,
                    "true");
        }
    }

    // Stops the proxy and deletes the per-test container.
    @After
    public void tearDown() throws Exception {
        if (s3Proxy != null) {
            s3Proxy.stop();
        }
        if (context != null) {
            context.getBlobStore().deleteContainer(containerName);
            context.close();
        }
    }

    // Anonymous ListBuckets must succeed.
    @Test
    public void testListBuckets() throws Exception {
        client.listBuckets(new ListBucketsPaginatedRequest());
    }

    // Anonymous chunked V4 upload followed by a content round-trip check.
    @Test
    public void testAwsV4SignatureChunkedAnonymous() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withChunkedEncodingDisabled(false)
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // The /healthz endpoint must return 200 with a JSON status body.
    @Test
    public void testHealthzEndpoint() throws Exception {
        // Prefer the plain-HTTP endpoint when configured.
        URI baseUri = httpEndpoint != null ? httpEndpoint : s3Endpoint;
        String path = (servicePath == null ? "" : servicePath) + "/healthz";
        URI healthzUri = new URI(baseUri.getScheme(), baseUri.getUserInfo(),
                baseUri.getHost(), baseUri.getPort(), path,
                baseUri.getQuery(), baseUri.getFragment());
        HttpURLConnection connection =
                (HttpURLConnection) healthzUri.toURL().openConnection();
        connection.setRequestMethod("GET");
        assertThat(connection.getResponseCode()).isEqualTo(200);
        String body;
        try (InputStream stream = connection.getInputStream()) {
            body = new String(stream.readAllBytes(), StandardCharsets.UTF_8);
        } finally {
            connection.disconnect();
        }
        assertThat(body).contains("\"status\":\"OK\"");
        assertThat(body).contains("\"gitHash\":\"");
        assertThat(body).contains("\"launchTime\":\"");
        assertThat(body).contains("\"currentTime\":\"");
        assertThat(body).startsWith("{").endsWith("}");
    }

    private static String createRandomContainerName() {
        return "s3proxy-" + new Random().nextInt(Integer.MAX_VALUE);
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/AwsSdkTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;

import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.X509Certificate;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.HttpMethod;
import com.amazonaws.SDKGlobalConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.internal.SkipMd5CheckStrategy;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.AccessControlList;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.Bucket;
import com.amazonaws.services.s3.model.BucketLoggingConfiguration;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.CopyPartRequest;
import com.amazonaws.services.s3.model.CopyPartResult;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsResult;
import com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.GroupGrantee;
import com.amazonaws.services.s3.model.HeadBucketRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ListObjectsV2Request;
import com.amazonaws.services.s3.model.ListObjectsV2Result;
import com.amazonaws.services.s3.model.ListPartsRequest;
import com.amazonaws.services.s3.model.MultipartUploadListing;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PartListing;
import com.amazonaws.services.s3.model.Permission;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.ResponseHeaderOverrides;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import com.google.common.collect.ImmutableList;
import com.google.common.io.ByteSource;
import org.assertj.core.api.Fail;
import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.rest.HttpClient;
import org.jspecify.annotations.Nullable;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;

// End-to-end tests that drive S3Proxy through the AWS SDK for Java v1
// client against a jclouds-backed blobstore.
public final class AwsSdkTest {
    static {
        // The proxy serves a self-signed certificate; disable certificate
        // checking in the SDK and in HttpsURLConnection.
        System.setProperty(
                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,
                "true");
        disableSslVerification();
    }

    private static final ByteSource BYTE_SOURCE = ByteSource.wrap(new byte[1]);
    // Forces the legacy V2 signer ("S3SignerType") with retries disabled.
    private static final ClientConfiguration V2_SIGNER_CONFIG =
            new ClientConfiguration()
                    .withMaxErrorRetry(0)
                    .withSignerOverride("S3SignerType");
    private static final long MINIMUM_MULTIPART_SIZE = 5 * 1024 * 1024;
    private static final int MINIO_PORT = 9000;
    private static final int LOCALSTACK_PORT = 4566;

    private URI s3Endpoint;
    private EndpointConfiguration s3EndpointConfig;
    private S3Proxy s3Proxy;
    private BlobStoreContext context;
    private URI blobStoreEndpoint;
    private String blobStoreType;
    private String containerName;
    private AWSCredentials awsCreds;
    private AmazonS3 client;
    private String servicePath;

    // Launches the proxy from s3proxy.conf (overridable via the
    // s3proxy.test.conf system property), builds an authenticated client
    // with retries disabled, and creates a fresh container.
    @Before
    public void setUp() throws Exception {
        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(
                System.getProperty("s3proxy.test.conf", "s3proxy.conf"));
        awsCreds = new BasicAWSCredentials(info.getS3Identity(),
                info.getS3Credential());
        context = info.getBlobStore().getContext();
        s3Proxy = info.getS3Proxy();
        s3Endpoint = info.getSecureEndpoint();
        servicePath = info.getServicePath();
        s3EndpointConfig = new EndpointConfiguration(
                s3Endpoint.toString() + servicePath, "us-east-1");
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(
                        new ClientConfiguration().withMaxErrorRetry(0))
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        containerName = createRandomContainerName();
        info.getBlobStore().createContainerInLocation(null, containerName);
        blobStoreEndpoint = URI.create(
                context.unwrap().getProviderMetadata().getEndpoint());
        blobStoreType = context.unwrap().getProviderMetadata().getId();
        if (Quirks.OPAQUE_ETAG.contains(blobStoreType)) {
            // Providers with opaque (non-MD5) ETags break the SDK's
            // client-side MD5 validation, so disable it.
            System.setProperty(
                    SkipMd5CheckStrategy
                            .DISABLE_GET_OBJECT_MD5_VALIDATION_PROPERTY,
                    "true");
            System.setProperty(
                    SkipMd5CheckStrategy
                            .DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,
                    "true");
        }
    }

    // Stops the proxy and deletes the per-test container.
    @After
    public void tearDown() throws Exception {
        if (s3Proxy != null) {
            s3Proxy.stop();
        }
        if (context != null) {
            context.getBlobStore().deleteContainer(containerName);
            context.close();
        }
    }

    // Round-trips an object using the legacy V2 signature.
    @Test
    public void testAwsV2Signature() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(V2_SIGNER_CONFIG)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // V2 signature with response header overrides on GET.
    @Test
    public void testAwsV2SignatureWithOverrideParameters() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(V2_SIGNER_CONFIG)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig).build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        String blobName = "foo";
        var headerOverride = new ResponseHeaderOverrides();
        String expectedContentDisposition = "attachment; " + blobName;
        headerOverride.setContentDisposition(expectedContentDisposition);
        String expectedContentType = "text/plain";
        headerOverride.setContentType(expectedContentType);
        var request = new GetObjectRequest(containerName, blobName);
        request.setResponseHeaders(headerOverride);

        S3Object object = client.getObject(request);
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
assertThat(object.getObjectMetadata().getContentDisposition())
                .isEqualTo(expectedContentDisposition);
        assertThat(object.getObjectMetadata().getContentType()).isEqualTo(
                expectedContentType);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Default V4 signature round-trip.
    @Test
    public void testAwsV4Signature() throws Exception {
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // V4 signature with chunked encoding and a signed payload.
    @Test
    public void testAwsV4SignatureChunkedSigned() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withChunkedEncodingDisabled(false)
                .withPayloadSigningEnabled(true)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        var object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (var actual = object.getObjectContent();
                var expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // V4 signature without chunked encoding.
    @Test
    public void testAwsV4SignatureNonChunked() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withChunkedEncodingDisabled(true)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // V4 signature with an unsigned payload.
    @Test
    public void testAwsV4SignaturePayloadUnsigned() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withChunkedEncodingDisabled(true)
                .withPayloadSigningEnabled(false)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, "foo");
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // An unknown access key must yield InvalidAccessKeyId.
    @Test
    public void testAwsV4SignatureBadIdentity() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials(
                                "bad-access-key", awsCreds.getAWSSecretKey())))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        try {
            client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                    metadata);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("InvalidAccessKeyId");
        }
    }

    // This randomly fails with SocketException: Broken pipe
    @Ignore
    @Test
    public void testAwsV4SignatureBadCredential() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials(
                                awsCreds.getAWSAccessKeyId(),
                                "bad-secret-key")))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        try {
            client.putObject(containerName, "foo", BYTE_SOURCE.openStream(),
                    metadata);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("SignatureDoesNotMatch");
        }
    }

    // Presigned GET URL with the V2 signer.
    @Test
    public void testAwsV2UrlSigning() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(V2_SIGNER_CONFIG)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        String blobName = "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        var expiration = new Date(System.currentTimeMillis() +
                TimeUnit.HOURS.toMillis(1));
        URL url = client.generatePresignedUrl(containerName, blobName,
                expiration, HttpMethod.GET);
        try (InputStream actual = url.openStream();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // V2 presigned URL with response header overrides.
    @Test
    public void testAwsV2UrlSigningWithOverrideParameters() throws Exception {
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(V2_SIGNER_CONFIG)
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
                .withEndpointConfiguration(s3EndpointConfig).build();
        String blobName = "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        GeneratePresignedUrlRequest generatePresignedUrlRequest =
                new GeneratePresignedUrlRequest(containerName, blobName);
        generatePresignedUrlRequest.setMethod(HttpMethod.GET);

        var headerOverride = new ResponseHeaderOverrides();
        headerOverride.setContentDisposition("attachment; " + blobName);
        headerOverride.setContentType("text/plain");
        generatePresignedUrlRequest.setResponseHeaders(headerOverride);

        var expiration = new Date(System.currentTimeMillis() +
                TimeUnit.HOURS.toMillis(1));
        generatePresignedUrlRequest.setExpiration(expiration);

        URL url = client.generatePresignedUrl(generatePresignedUrlRequest);
        URLConnection connection = url.openConnection();
        try (InputStream actual = connection.getInputStream();
                InputStream expected = BYTE_SOURCE.openStream()) {
            String value = connection.getHeaderField("Content-Disposition");
            assertThat(value).isEqualTo(headerOverride.getContentDisposition());
            value = connection.getHeaderField("Content-Type");
            assertThat(value).isEqualTo(headerOverride.getContentType());
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Presigned GET URL with the default V4 signer.
    @Test
    public void testAwsV4UrlSigning() throws Exception {
        String blobName = "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        var expiration = new Date(System.currentTimeMillis() +
                TimeUnit.HOURS.toMillis(1));
        URL url = client.generatePresignedUrl(containerName, blobName,
                expiration, HttpMethod.GET);
        try (InputStream actual = url.openStream();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Copies an object via a single-part multipart copy.
    @Test
    public void testMultipartCopy() throws Exception {
        assumeTrue(!blobStoreType.equals("azureblob-sdk"));
        // B2 requires two parts to issue an MPU
        assumeTrue(!blobStoreType.equals("b2"));
        String sourceBlobName = "testMultipartCopy-source";
        String targetBlobName = "testMultipartCopy-target";

        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, sourceBlobName,
                BYTE_SOURCE.openStream(), metadata);

        InitiateMultipartUploadRequest initiateRequest =
                new InitiateMultipartUploadRequest(containerName,
                        targetBlobName);
        InitiateMultipartUploadResult initResult =
client.initiateMultipartUpload(initiateRequest);
        String uploadId = initResult.getUploadId();

        var copyRequest = new CopyPartRequest()
                .withDestinationBucketName(containerName)
                .withDestinationKey(targetBlobName)
                .withSourceBucketName(containerName)
                .withSourceKey(sourceBlobName)
                .withUploadId(uploadId)
                .withFirstByte(0L)
                .withLastByte(BYTE_SOURCE.size() - 1)
                .withPartNumber(1);
        CopyPartResult copyPartResult = client.copyPart(copyRequest);

        CompleteMultipartUploadRequest completeRequest =
                new CompleteMultipartUploadRequest(
                        containerName, targetBlobName, uploadId,
                        List.of(copyPartResult.getPartETag()));
        client.completeMultipartUpload(completeRequest);

        S3Object object = client.getObject(containerName, targetBlobName);
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Two-part multipart upload just over the minimum part size.
    @Test
    public void testBigMultipartUpload() throws Exception {
        String key = "multipart-upload";
        long partSize = MINIMUM_MULTIPART_SIZE;
        long size = partSize + 1;
        ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);

        InitiateMultipartUploadRequest initRequest =
                new InitiateMultipartUploadRequest(containerName, key);
        InitiateMultipartUploadResult initResponse =
                client.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();

        ByteSource byteSource1 = byteSource.slice(0, partSize);
        var uploadRequest1 = new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(1)
                .withInputStream(byteSource1.openStream())
                .withPartSize(byteSource1.size());
        // Allow the SDK to rewind the full part on retry.
        uploadRequest1.getRequestClientOptions().setReadLimit(
                (int) byteSource1.size());
        UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);

        ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);
        var uploadRequest2 = new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(2)
                .withInputStream(byteSource2.openStream())
                .withPartSize(byteSource2.size());
        uploadRequest2.getRequestClientOptions().setReadLimit(
                (int) byteSource2.size());
        UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);

        CompleteMultipartUploadRequest completeRequest =
                new CompleteMultipartUploadRequest(
                        containerName, key, uploadId,
                        List.of(
                                uploadPartResult1.getPartETag(),
                                uploadPartResult2.getPartETag()));
        client.completeMultipartUpload(completeRequest);

        S3Object object = client.getObject(containerName, key);
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                size);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = byteSource.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Completing a second MPU on the same key replaces the first object.
    @Test
    public void testMultipartUploadReplace() throws Exception {
        String key = "multipart-upload";
        long partSize = MINIMUM_MULTIPART_SIZE;
        long size = partSize + 1;
        ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);

        // Create
        InitiateMultipartUploadRequest initRequest1 =
                new InitiateMultipartUploadRequest(containerName, key);
        InitiateMultipartUploadResult initResponse1 =
                client.initiateMultipartUpload(initRequest1);
        String uploadId1 = initResponse1.getUploadId();

        ByteSource byteSource1 = byteSource.slice(0, partSize);
        var uploadRequest1 = new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(key)
                .withUploadId(uploadId1)
                .withPartNumber(1)
                .withInputStream(byteSource1.openStream())
                .withPartSize(byteSource1.size());
        uploadRequest1.getRequestClientOptions().setReadLimit(
                (int) byteSource1.size());
        UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);

        CompleteMultipartUploadRequest completeRequest1 =
                new CompleteMultipartUploadRequest(
                        containerName, key, uploadId1,
                        List.of(uploadPartResult1.getPartETag()));
        client.completeMultipartUpload(completeRequest1);

        // Replace
        InitiateMultipartUploadRequest initRequest2 =
                new InitiateMultipartUploadRequest(containerName, key);
        InitiateMultipartUploadResult initResponse2 =
                client.initiateMultipartUpload(initRequest2);
        String uploadId2 = initResponse2.getUploadId();

        ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);
        var uploadRequest2 = new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(key)
                .withUploadId(uploadId2)
                .withPartNumber(1)
                .withInputStream(byteSource2.openStream())
                .withPartSize(byteSource2.size());
        uploadRequest2.getRequestClientOptions().setReadLimit(
                (int) byteSource2.size());
        UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);

        CompleteMultipartUploadRequest completeRequest2 =
                new CompleteMultipartUploadRequest(
                        containerName, key, uploadId2,
                        List.of(uploadPartResult2.getPartETag()));
        client.completeMultipartUpload(completeRequest2);

        // The object must now contain only the second upload's content.
        S3Object object = client.getObject(containerName, key);
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                byteSource2.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = byteSource2.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // TODO: testMultipartUploadConditionalCopy

    // Grants and revokes public-read on a blob via XML ACLs; granting
    // write must return NotImplemented.
    @Test
    public void testUpdateBlobXmlAcls() throws Exception {
        // TODO: assumeTrue(!blobStoreType.equals("transient-nio2"));
        assumeTrue(!Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType));
        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);
        String blobName = "testUpdateBlobXmlAcls-blob";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        AccessControlList acl = client.getObjectAcl(containerName, blobName);

        acl.grantPermission(GroupGrantee.AllUsers, Permission.Read);
        client.setObjectAcl(containerName, blobName, acl);
        assertThat(client.getObjectAcl(containerName, blobName)).isEqualTo(acl);

        acl.revokeAllPermissions(GroupGrantee.AllUsers);
        client.setObjectAcl(containerName, blobName, acl);
        assertThat(client.getObjectAcl(containerName, blobName)).isEqualTo(acl);

        acl.grantPermission(GroupGrantee.AllUsers, Permission.Write);
        try {
            client.setObjectAcl(containerName, blobName, acl);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
        }
    }

    // Unicode key round-trip and listing.
    @Test
    public void testUnicodeObject() throws Exception {
        String blobName = "ŪņЇЌœđЗ/☺ unicode € rocks ™";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        metadata = client.getObjectMetadata(containerName, blobName);
        assertThat(metadata).isNotNull();

        ObjectListing listing = client.listObjects(containerName);
        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
        assertThat(summaries).hasSize(1);
        S3ObjectSummary summary = summaries.iterator().next();
        assertThat(summary.getKey()).isEqualTo(blobName);
    }

    // Keys containing ASCII punctuation must round-trip and list.
    @Test
    public void testSpecialCharacters() throws Exception {
        // TODO: fixed in jclouds 2.6.1
        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);
        assumeTrue(blobStoreEndpoint.getPort() != LOCALSTACK_PORT);
        String prefix = "special !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~";
        if (blobStoreType.equals("azureblob") ||
                blobStoreType.equals("azureblob-sdk") ||
                blobStoreType.equals("b2")) {
            prefix = prefix.replace("\\", "");
        }
        if (blobStoreType.equals("azureblob") ||
                blobStoreType.equals("azureblob-sdk")) {
            // Avoid blob names that end with a dot (.), a forward slash (/), or
            // a sequence or combination of the two.
prefix = prefix.replace("./", "/") + ".";
        }
        String blobName = prefix + "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        ObjectListing listing = client.listObjects(new ListObjectsRequest()
                .withBucketName(containerName)
                .withPrefix(prefix));
        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
        assertThat(summaries).hasSize(1);
        S3ObjectSummary summary = summaries.iterator().next();
        assertThat(summary.getKey()).isEqualTo(blobName);
    }

    // Aborting an MPU must not disturb the existing object.
    @Test
    public void testAtomicMpuAbort() throws Exception {
        String key = "testAtomicMpuAbort";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, key, BYTE_SOURCE.openStream(),
                metadata);

        InitiateMultipartUploadRequest initRequest =
                new InitiateMultipartUploadRequest(containerName, key);
        InitiateMultipartUploadResult initResponse =
                client.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();

        client.abortMultipartUpload(new AbortMultipartUploadRequest(
                containerName, key, uploadId));

        S3Object object = client.getObject(containerName, key);
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                BYTE_SOURCE.size());
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // GET with response header overrides for cache/content headers.
    @Test
    public void testOverrideResponseHeader() throws Exception {
        String blobName = "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        String cacheControl = "no-cache";
        String contentDisposition = "attachment; filename=foo.html";
        String contentEncoding = "gzip";
        String contentLanguage = "en";
        String contentType = "text/html;charset=utf-8";
        String expires = "Wed, 13 Jul 2016 21:23:51 GMT";
        long expiresTime = 1468445031000L;

        var getObjectRequest = new GetObjectRequest(containerName, blobName);
        getObjectRequest.setResponseHeaders(
                new ResponseHeaderOverrides()
                        .withCacheControl(cacheControl)
                        .withContentDisposition(contentDisposition)
                        .withContentEncoding(contentEncoding)
                        .withContentLanguage(contentLanguage)
                        .withContentType(contentType)
                        .withExpires(expires));

        S3Object object = client.getObject(getObjectRequest);
        // Drain the body so the connection can be reused.
        try (InputStream is = object.getObjectContent()) {
            assertThat(is).isNotNull();
            is.transferTo(OutputStream.nullOutputStream());
        }

        ObjectMetadata responseMetadata = object.getObjectMetadata();
        assertThat(responseMetadata.getCacheControl()).isEqualTo(
                cacheControl);
        assertThat(responseMetadata.getContentDisposition()).isEqualTo(
                contentDisposition);
        assertThat(responseMetadata.getContentEncoding()).isEqualTo(
                contentEncoding);
        assertThat(responseMetadata.getContentLanguage()).isEqualTo(
                contentLanguage);
        assertThat(responseMetadata.getContentType()).isEqualTo(
                contentType);
        assertThat(responseMetadata.getHttpExpiresDate().getTime())
                .isEqualTo(expiresTime);
    }

    // Deleting zero keys must fail with MalformedXML.
    @Test
    public void testDeleteMultipleObjectsEmpty() throws Exception {
        var request = new DeleteObjectsRequest(containerName)
                .withKeys();
        try {
            client.deleteObjects(request);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("MalformedXML");
        }
    }

    // Multi-object delete with and without quiet mode.
    @Test
    public void testDeleteMultipleObjects() throws Exception {
        String blobName = "foo";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        var request = new DeleteObjectsRequest(containerName)
                .withKeys(blobName);

        // without quiet
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        DeleteObjectsResult result = client.deleteObjects(request);
        assertThat(result.getDeletedObjects()).hasSize(1);
        assertThat(result.getDeletedObjects().iterator().next().getKey())
                .isEqualTo(blobName);

        // with quiet
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        result = client.deleteObjects(request.withQuiet(true));
        assertThat(result.getDeletedObjects()).isEmpty();
    }

    // ListParts with a non-zero part-number-marker is not implemented.
    @Test
    public void testPartNumberMarker() throws Exception {
        String blobName = "test-part-number-marker";
        InitiateMultipartUploadResult result = client.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(containerName, blobName));
        var request = new ListPartsRequest(containerName, blobName,
                result.getUploadId());

        client.listParts(request.withPartNumberMarker(0));

        try {
            client.listParts(request.withPartNumberMarker(1));
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
        } finally {
            client.abortMultipartUpload(new AbortMultipartUploadRequest(
                    containerName, blobName, result.getUploadId()));
        }
    }

    // Fetches a public-read blob through the jclouds HTTP client.
    @Test
    public void testHttpClient() throws Exception {
        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);
        // aws-s3-sdk doesn't support jclouds HTTP client
        assumeTrue(!blobStoreType.equals("aws-s3-sdk"));
        assumeTrue(!blobStoreType.equals("google-cloud-storage-sdk"));
        String blobName = "blob-name";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {
            // Fall back to a bucket ACL when per-blob ACLs are unsupported.
            client.setBucketAcl(containerName,
                    CannedAccessControlList.PublicRead);
        } else {
            client.setObjectAcl(containerName, blobName,
                    CannedAccessControlList.PublicRead);
        }

        HttpClient httpClient = context.utils().http();
        var uri = new URI(s3Endpoint.getScheme(), s3Endpoint.getUserInfo(),
                s3Endpoint.getHost(), s3Proxy.getSecurePort(),
                servicePath + "/" + containerName + "/" + blobName,
                /*query=*/ null, /*fragment=*/ null);
        try (InputStream actual = httpClient.get(uri);
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // The per-test container must appear in ListBuckets.
    @Test
    public void testListBuckets() throws Exception {
        var builder = ImmutableList.builder();
        for (Bucket bucket : client.listBuckets(
                new ListBucketsPaginatedRequest()).getBuckets()) {
            builder.add(bucket.getName());
        }
        assertThat(builder.build()).contains(containerName);
    }

    // HeadBucket on an existing and a missing bucket.
    @Test
    public void testContainerExists() throws Exception {
        client.headBucket(new HeadBucketRequest(containerName));
        try {
            client.headBucket(new HeadBucketRequest(
                    createRandomContainerName()));
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("404 Not Found");
        }
    }

    // Duplicate bucket creation must fail with BucketAlreadyOwnedByYou.
    @Test
    public void testContainerCreateDelete() throws Exception {
        assumeTrue(blobStoreEndpoint.getPort() != LOCALSTACK_PORT);
        // LocalStack in us-east-1 returns 200 OK for duplicate bucket creation (legacy S3 behavior)
        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
        assumeTrue(!blobStoreType.equals("aws-s3-sdk"));
        String containerName2 = createRandomContainerName();
        client.createBucket(containerName2);
        try {
            client.createBucket(containerName2);
            // Clean up if the duplicate create unexpectedly succeeded.
            client.deleteBucket(containerName2);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("BucketAlreadyOwnedByYou");
        }
    }

    // Deleting the container makes HeadBucket fail with 404.
    @Test
    public void testContainerDelete() throws Exception {
        client.headBucket(new HeadBucketRequest(containerName));
        client.deleteBucket(containerName);
        try {
            client.headBucket(new HeadBucketRequest(containerName));
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("404 Not Found");
        }
    }

    // Helper: uploads BYTE_SOURCE under blobName and verifies the stored
    // content matches.
    private void putBlobAndCheckIt(String blobName) throws Exception {
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());

        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);

        S3Object object = client.getObject(containerName, blobName);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Round-trip blobs, including names with percent characters that
    // exercise URL escaping.
    @Test
    public void testBlobPutGet() throws Exception {
        putBlobAndCheckIt("blob");
        putBlobAndCheckIt("blob%");
        putBlobAndCheckIt("blob%%");
    }

    // A percent-escaped key must list back with its original name.
    @Test
    public void testBlobEscape() throws Exception {
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).isEmpty();
        putBlobAndCheckIt("blob%");
        listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).hasSize(1);
        assertThat(listing.getObjectSummaries().iterator().next().getKey())
                .isEqualTo("blob%");
    }

    // Listing grows as blobs are added.
    @Test
    public void testBlobList() throws Exception {
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).isEmpty();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        var builder = ImmutableList.builder();
        client.putObject(containerName, "blob1", BYTE_SOURCE.openStream(),
                metadata);
        listing = client.listObjects(containerName);
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            builder.add(summary.getKey());
        }
        assertThat(builder.build()).containsOnly("blob1");
        builder = ImmutableList.builder();
        client.putObject(containerName, "blob2", BYTE_SOURCE.openStream(),
                metadata);
        listing = client.listObjects(containerName);
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            builder.add(summary.getKey());
        }
        assertThat(builder.build()).containsOnly("blob1", "blob2");
    }

    // With a delimiter, nested keys collapse to a common prefix; without
    // one, they list recursively.
    @Test
    public void testBlobListRecursive() throws Exception {
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).isEmpty();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "prefix/blob1",
                BYTE_SOURCE.openStream(), metadata);
        client.putObject(containerName, "prefix/blob2",
                BYTE_SOURCE.openStream(), metadata);
        var builder = ImmutableList.builder();
        listing = client.listObjects(new ListObjectsRequest()
                .withBucketName(containerName)
                .withDelimiter("/"));
        assertThat(listing.getObjectSummaries()).isEmpty();
        for (String prefix : listing.getCommonPrefixes()) {
            builder.add(prefix);
        }
        assertThat(builder.build()).containsOnly("prefix/");
        builder = ImmutableList.builder();
        listing = client.listObjects(containerName);
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            builder.add(summary.getKey());
        }
        assertThat(builder.build()).containsOnly("prefix/blob1",
                "prefix/blob2");
        assertThat(listing.getCommonPrefixes()).isEmpty();
    }

    // Paged V1 listing: the marker is the last key returned, so paging with
    // maxKeys=1 yields blob1 then blob2.
    @Test
    public void testBlobListRecursiveImplicitMarker() throws Exception {
        assumeTrue(!Quirks.OPAQUE_MARKERS.contains(blobStoreType));
        assumeTrue(!blobStoreType.equals("transient-nio2"));  // TODO:
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).isEmpty();
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, "blob1", BYTE_SOURCE.openStream(),
                metadata);
        client.putObject(containerName, "blob2", BYTE_SOURCE.openStream(),
                metadata);
        listing = client.listObjects(new ListObjectsRequest()
                .withBucketName(containerName)
                .withMaxKeys(1));
        assertThat(listing.getObjectSummaries()).hasSize(1);
        assertThat(listing.getObjectSummaries().iterator().next().getKey())
                .isEqualTo("blob1");
        listing = client.listObjects(new ListObjectsRequest()
                .withBucketName(containerName)
                .withMaxKeys(1)
                .withMarker("blob1"));
        assertThat(listing.getObjectSummaries()).hasSize(1);
        assertThat(listing.getObjectSummaries().iterator().next().getKey())
                .isEqualTo("blob2");
    }

    // Paged V2 listing with start-after and continuation tokens across
    // keys "1".."4".
    @Test
    public void testBlobListV2() throws Exception {
        assumeTrue(!Quirks.OPAQUE_MARKERS.contains(blobStoreType));
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        for (int i = 1; i < 5; ++i) {
            client.putObject(containerName, String.valueOf(i),
                    BYTE_SOURCE.openStream(), metadata);
        }
        ListObjectsV2Result result = client.listObjectsV2(
                new ListObjectsV2Request()
                        .withBucketName(containerName)
                        .withMaxKeys(1)
                        .withStartAfter("1"));
        assertThat(result.getContinuationToken()).isEmpty();
        assertThat(result.getStartAfter()).isEqualTo("1");
        if (blobStoreEndpoint.getPort() != MINIO_PORT) {
            // Minio returns "2[minio_cache:v2,return:]"
            assertThat(result.getNextContinuationToken()).isEqualTo("2");
        }
        assertThat(result.isTruncated()).isTrue();
        assertThat(result.getObjectSummaries()).hasSize(1);
        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo("2");
        result = client.listObjectsV2(
                new ListObjectsV2Request()
                        .withBucketName(containerName)
                        .withMaxKeys(1)
                        .withContinuationToken(
                                result.getNextContinuationToken()));
        if (blobStoreEndpoint.getPort() != MINIO_PORT) {
            // Minio returns "2[minio_cache:v2,return:]"
            assertThat(result.getContinuationToken()).isEqualTo("2");
            assertThat(result.getNextContinuationToken()).isEqualTo("3");
        }
        assertThat(result.getStartAfter()).isEmpty();
        assertThat(result.isTruncated()).isTrue();
        assertThat(result.getObjectSummaries()).hasSize(1);
        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo("3");
        result = client.listObjectsV2(
                new ListObjectsV2Request()
                        .withBucketName(containerName)
                        .withMaxKeys(1)
                        .withContinuationToken(
                                result.getNextContinuationToken()));
        if (blobStoreEndpoint.getPort() != MINIO_PORT) {
            // Minio returns "3[minio_cache:v2,return:]"
            assertThat(result.getContinuationToken()).isEqualTo("3");
            assertThat(result.getNextContinuationToken()).isNull();
        }
        assertThat(result.getStartAfter()).isEmpty();
        if (blobStoreEndpoint.getPort() != MINIO_PORT) {
            // TODO: why does this fail?
            assertThat(result.isTruncated()).isFalse();
        }
        assertThat(result.getObjectSummaries()).hasSize(1);
        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo("4");
    }

    // HEAD-style metadata lookup returns the stored content length.
    @Test
    public void testBlobMetadata() throws Exception {
        String blobName = "blob";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        ObjectMetadata newMetadata = client.getObjectMetadata(containerName,
                blobName);
        assertThat(newMetadata.getContentLength())
                .isEqualTo(BYTE_SOURCE.size());
    }

    // Delete removes the blob; deleting an already-missing blob is
    // idempotent per the S3 API.
    @Test
    public void testBlobRemove() throws Exception {
        String blobName = "blob";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        assertThat(client.getObjectMetadata(containerName, blobName))
                .isNotNull();
        client.deleteObject(containerName, blobName);
        try {
            client.getObjectMetadata(containerName, blobName);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("404 Not Found");
        }
        // second delete of a missing key must not error
        client.deleteObject(containerName, blobName);
    }

    // Content-Type set on upload must survive a GET (exercises Jetty's
    // cached header handling).
    @Test
    public void testSinglepartUploadJettyCachedHeader() throws Exception {
        String blobName = "singlepart-upload-jetty-cached";
        String contentType = "text/plain";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        metadata.setContentType(contentType);
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        S3Object object = client.getObject(containerName, blobName);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
        ObjectMetadata newContentMetadata = object.getObjectMetadata();
        assertThat(newContentMetadata.getContentType()).isEqualTo(
                contentType);
    }

    // Single-part PUT round-trips system metadata and user metadata;
    // Quirks gates headers unsupported by the current backend.
    @Test
    public void testSinglepartUpload() throws Exception {
        String blobName = "singlepart-upload";
        String cacheControl = "max-age=3600";
        String contentDisposition = "attachment; filename=new.jpg";
        String contentEncoding = "gzip";
        String contentLanguage = "fr";
        String contentType = "audio/mp4";
        var userMetadata = Map.of(
                "key1", "value1",
                "key2", "value2");
        var metadata = new ObjectMetadata();
        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
            metadata.setCacheControl(cacheControl);
        }
        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
            metadata.setContentDisposition(contentDisposition);
        }
        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
            metadata.setContentEncoding(contentEncoding);
        }
        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
            metadata.setContentLanguage(contentLanguage);
        }
        metadata.setContentLength(BYTE_SOURCE.size());
        metadata.setContentType(contentType);
        // TODO: expires
        metadata.setUserMetadata(userMetadata);
        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),
                metadata);
        S3Object object = client.getObject(containerName, blobName);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = BYTE_SOURCE.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
        ObjectMetadata newContentMetadata = object.getObjectMetadata();
        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
            assertThat(newContentMetadata.getCacheControl()).isEqualTo(
                    cacheControl);
        }
        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
                    contentDisposition);
        }
        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentEncoding()).isEqualTo(
                    contentEncoding);
        }
        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentLanguage()).isEqualTo(
                    contentLanguage);
        }
        assertThat(newContentMetadata.getContentType()).isEqualTo(
                contentType);
        // TODO: expires
        assertThat(newContentMetadata.getUserMetadata()).isEqualTo(
                userMetadata);
    }

    //
    // TODO: fails for GCS (jclouds not implemented)
    // Two-part multipart upload round-trips content and metadata.
    @Test
    public void testMultipartUpload() throws Exception {
        String blobName = "multipart-upload";
        String cacheControl = "max-age=3600";
        String contentDisposition = "attachment; filename=new.jpg";
        String contentEncoding = "gzip";
        String contentLanguage = "fr";
        String contentType = "audio/mp4";
        var userMetadata = Map.of(
                "key1", "value1",
                "key2", "value2");
        var metadata = new ObjectMetadata();
        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
            metadata.setCacheControl(cacheControl);
        }
        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
            metadata.setContentDisposition(contentDisposition);
        }
        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
            metadata.setContentEncoding(contentEncoding);
        }
        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
            metadata.setContentLanguage(contentLanguage);
        }
        metadata.setContentType(contentType);
        // TODO: expires
        metadata.setUserMetadata(userMetadata);
        InitiateMultipartUploadResult result = client.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(containerName, blobName,
                        metadata));
        // first part at the minimum size, second part a single byte
        ByteSource byteSource = TestUtils.randomByteSource().slice(
                0, MINIMUM_MULTIPART_SIZE + 1);
        ByteSource byteSource1 = byteSource.slice(0, MINIMUM_MULTIPART_SIZE);
        ByteSource byteSource2 = byteSource.slice(MINIMUM_MULTIPART_SIZE, 1);
        UploadPartResult part1 = client.uploadPart(new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(blobName)
                .withUploadId(result.getUploadId())
                .withPartNumber(1)
                .withPartSize(byteSource1.size())
                .withInputStream(byteSource1.openStream()));
        UploadPartResult part2 = client.uploadPart(new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(blobName)
                .withUploadId(result.getUploadId())
                .withPartNumber(2)
                .withPartSize(byteSource2.size())
                .withInputStream(byteSource2.openStream()));
        client.completeMultipartUpload(new CompleteMultipartUploadRequest(
                containerName, blobName, result.getUploadId(),
                List.of(part1.getPartETag(), part2.getPartETag())));
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).hasSize(1);
        S3Object object = client.getObject(containerName, blobName);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = byteSource.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
        ObjectMetadata newContentMetadata = object.getObjectMetadata();
        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {
            assertThat(newContentMetadata.getCacheControl()).isEqualTo(
                    cacheControl);
        }
        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentDisposition()).isEqualTo(
                    contentDisposition);
        }
        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentEncoding()).isEqualTo(
                    contentEncoding);
        }
        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {
            assertThat(newContentMetadata.getContentLanguage()).isEqualTo(
                    contentLanguage);
        }
        assertThat(newContentMetadata.getContentType()).isEqualTo(
                contentType);
        // TODO: expires
        assertThat(newContentMetadata.getUserMetadata()).isEqualTo(
                userMetadata);
    }

    // this test runs for several minutes
    @Ignore
    @Test
    public void testMaximumMultipartUpload() throws Exception {
        // skip with remote blobstores to avoid excessive run-times
        assumeTrue(blobStoreType.equals("filesystem") ||
                blobStoreType.equals("transient"));
        String blobName = "multipart-upload";
        int numParts = 32;
        long partSize = MINIMUM_MULTIPART_SIZE;
        ByteSource byteSource = TestUtils.randomByteSource().slice(
                0, partSize * numParts);
        InitiateMultipartUploadResult result = client.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(containerName, blobName));
        var parts = ImmutableList.builder();
        for (int i = 0; i < numParts; ++i) {
            ByteSource partByteSource = byteSource.slice(
                    i * partSize, partSize);
            UploadPartResult partResult = client.uploadPart(
                    new UploadPartRequest()
                            .withBucketName(containerName)
                            .withKey(blobName)
                            .withUploadId(result.getUploadId())
                            .withPartNumber(i + 1)
                            .withPartSize(partByteSource.size())
                            .withInputStream(partByteSource.openStream()));
            parts.add(partResult.getPartETag());
        }
        client.completeMultipartUpload(new CompleteMultipartUploadRequest(
                containerName, blobName, result.getUploadId(), parts.build()));
        ObjectListing listing = client.listObjects(containerName);
        assertThat(listing.getObjectSummaries()).hasSize(1);
        S3Object object = client.getObject(containerName, blobName);
        ObjectMetadata contentMetadata = object.getObjectMetadata();
        assertThat(contentMetadata.getContentLength()).isEqualTo(
                partSize * numParts);
        try (InputStream actual = object.getObjectContent();
                InputStream expected = byteSource.openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Aborting an upload removes it from the upload and part listings
    // without creating an object.
    @Test
    public void testMultipartUploadAbort() throws Exception {
        assumeTrue(!blobStoreType.equals("google-cloud-storage"));
        // TODO: fixed in jclouds 2.6.1
        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);
        String blobName = "multipart-upload-abort";
        ByteSource byteSource = TestUtils.randomByteSource().slice(
                0, MINIMUM_MULTIPART_SIZE);
        InitiateMultipartUploadResult result = client.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(containerName, blobName));
        // TODO: google-cloud-storage and openstack-swift cannot list multipart
        // uploads
        MultipartUploadListing multipartListing = client.listMultipartUploads(
                new ListMultipartUploadsRequest(containerName));
        assertThat(multipartListing.getMultipartUploads()).hasSize(1);
        PartListing partListing = client.listParts(new ListPartsRequest(
                containerName, blobName, result.getUploadId()));
        assertThat(partListing.getParts()).isEmpty();
        client.uploadPart(new UploadPartRequest()
                .withBucketName(containerName)
                .withKey(blobName)
                .withUploadId(result.getUploadId())
                .withPartNumber(1)
                .withPartSize(byteSource.size())
                .withInputStream(byteSource.openStream()));
        multipartListing = client.listMultipartUploads(new
ListMultipartUploadsRequest(containerName)); assertThat(multipartListing.getMultipartUploads()).hasSize(1); partListing = client.listParts(new ListPartsRequest( containerName, blobName, result.getUploadId())); assertThat(partListing.getParts()).hasSize(1); client.abortMultipartUpload(new AbortMultipartUploadRequest( containerName, blobName, result.getUploadId())); multipartListing = client.listMultipartUploads( new ListMultipartUploadsRequest(containerName)); assertThat(multipartListing.getMultipartUploads()).isEmpty(); ObjectListing listing = client.listObjects(containerName); assertThat(listing.getObjectSummaries()).isEmpty(); } // TODO: Fails since B2 returns the Cache-Control header on reads but does // not accept it on writes. @Test public void testCopyObjectPreserveMetadata() throws Exception { if (blobStoreType.equals("azureblob") || blobStoreType.equals("azureblob-sdk")) { // Azurite does not support copying blobs assumeTrue(!blobStoreEndpoint.getHost().equals("127.0.0.1")); } String fromName = "from-name"; String toName = "to-name"; String cacheControl = "max-age=3600"; String contentDisposition = "attachment; filename=old.jpg"; String contentEncoding = "gzip"; String contentLanguage = "en"; String contentType = "audio/ogg"; var userMetadata = Map.of( "key1", "value1", "key2", "value2"); var metadata = new ObjectMetadata(); if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) { metadata.setCacheControl(cacheControl); } metadata.setContentLength(BYTE_SOURCE.size()); if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) { metadata.setContentDisposition(contentDisposition); } if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) { metadata.setContentEncoding(contentEncoding); } if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) { metadata.setContentLanguage(contentLanguage); } metadata.setContentType(contentType); // TODO: expires metadata.setUserMetadata(userMetadata); client.putObject(containerName, fromName, 
BYTE_SOURCE.openStream(), metadata); client.copyObject(containerName, fromName, containerName, toName); S3Object object = client.getObject(containerName, toName); try (InputStream actual = object.getObjectContent(); InputStream expected = BYTE_SOURCE.openStream()) { assertThat(actual).hasSameContentAs(expected); } ObjectMetadata contentMetadata = object.getObjectMetadata(); assertThat(contentMetadata.getContentLength()).isEqualTo( BYTE_SOURCE.size()); if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) { assertThat(contentMetadata.getCacheControl()).isEqualTo( cacheControl); } if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) { assertThat(contentMetadata.getContentDisposition()).isEqualTo( contentDisposition); } if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) { assertThat(contentMetadata.getContentEncoding()).isEqualTo( contentEncoding); } if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) { assertThat(contentMetadata.getContentLanguage()).isEqualTo( contentLanguage); } assertThat(contentMetadata.getContentType()).isEqualTo( contentType); // TODO: expires assertThat(contentMetadata.getUserMetadata()).isEqualTo( userMetadata); } @Test public void testCopyObjectReplaceMetadata() throws Exception { if (blobStoreType.equals("azureblob") || blobStoreType.equals("azureblob-sdk")) { // Azurite does not support copying blobs assumeTrue(!blobStoreEndpoint.getHost().equals("127.0.0.1")); } String fromName = "from-name"; String toName = "to-name"; var metadata = new ObjectMetadata(); metadata.setContentLength(BYTE_SOURCE.size()); if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) { metadata.setCacheControl("max-age=3600"); } if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) { metadata.setContentDisposition("attachment; filename=old.jpg"); } if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) { metadata.setContentEncoding("compress"); } if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) { 
metadata.setContentLanguage("en"); } metadata.setContentType("audio/ogg"); // TODO: expires metadata.setUserMetadata(Map.of( "key1", "value1", "key2", "value2")); client.putObject(containerName, fromName, BYTE_SOURCE.openStream(), metadata); String cacheControl = "max-age=1800"; String contentDisposition = "attachment; filename=new.jpg"; String contentEncoding = "gzip"; String contentLanguage = "fr"; String contentType = "audio/mp4"; var contentMetadata = new ObjectMetadata(); if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) { contentMetadata.setCacheControl(cacheControl); } if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) { contentMetadata.setContentDisposition(contentDisposition); } if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) { contentMetadata.setContentEncoding(contentEncoding); } if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) { contentMetadata.setContentLanguage(contentLanguage); } contentMetadata.setContentType(contentType); // TODO: expires var userMetadata = Map.of( "key3", "value3", "key4", "value4"); contentMetadata.setUserMetadata(userMetadata); client.copyObject(new CopyObjectRequest( containerName, fromName, containerName, toName) .withNewObjectMetadata(contentMetadata)); S3Object object = client.getObject(containerName, toName); try (InputStream actual = object.getObjectContent(); InputStream expected = BYTE_SOURCE.openStream()) { assertThat(actual).hasSameContentAs(expected); } ObjectMetadata toContentMetadata = object.getObjectMetadata(); if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) { assertThat(contentMetadata.getCacheControl()).isEqualTo( cacheControl); } if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) { assertThat(toContentMetadata.getContentDisposition()).isEqualTo( contentDisposition); } if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) { assertThat(toContentMetadata.getContentEncoding()).isEqualTo( contentEncoding); } if 
(!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) { assertThat(toContentMetadata.getContentLanguage()).isEqualTo( contentLanguage); } assertThat(toContentMetadata.getContentType()).isEqualTo( contentType); // TODO: expires assertThat(toContentMetadata.getUserMetadata()).isEqualTo( userMetadata); } @Test public void testConditionalGet() throws Exception { assumeTrue(!blobStoreType.equals("b2")); // TODO: assumeTrue(!blobStoreType.equals("google-cloud-storage-sdk")); assumeTrue(!blobStoreType.equals("transient-nio2")); String blobName = "blob-name"; var metadata = new ObjectMetadata(); metadata.setContentLength(BYTE_SOURCE.size()); PutObjectResult result = client.putObject(containerName, blobName, BYTE_SOURCE.openStream(), metadata); S3Object object = client.getObject( new GetObjectRequest(containerName, blobName) .withMatchingETagConstraint(result.getETag())); try (InputStream is = object.getObjectContent()) { assertThat(is).isNotNull(); is.transferTo(OutputStream.nullOutputStream()); } object = client.getObject( new GetObjectRequest(containerName, blobName) .withNonmatchingETagConstraint(result.getETag())); assertThat(object).isNull(); } @Test public void testStorageClass() throws Exception { // Minio only supports STANDARD and REDUCED_REDUNDANCY assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT); // TODO: assumeTrue(!blobStoreType.equals("google-cloud-storage-sdk")); String blobName = "test-storage-class"; var metadata = new ObjectMetadata(); metadata.setContentLength(BYTE_SOURCE.size()); var request = new PutObjectRequest( containerName, blobName, BYTE_SOURCE.openStream(), metadata) .withStorageClass("STANDARD_IA"); client.putObject(request); metadata = client.getObjectMetadata(containerName, blobName); assertThat(metadata.getStorageClass()).isEqualTo("STANDARD_IA"); } @Test public void testGetObjectRange() throws Exception { var blobName = "test-range"; var metadata = new ObjectMetadata(); var byteSource = TestUtils.randomByteSource().slice(0, 1024); 
        metadata.setContentLength(byteSource.size());
        var request = new PutObjectRequest(
                containerName, blobName, byteSource.openStream(), metadata);
        client.putObject(request);
        var object = client.getObject(
                new GetObjectRequest(containerName, blobName)
                        .withRange(42, 101));
        // HTTP ranges are inclusive, hence the +1
        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
                101 - 42 + 1);
        try (var actual = object.getObjectContent();
                var expected = byteSource.slice(42, 101 - 42 + 1)
                        .openStream()) {
            assertThat(actual).hasSameContentAs(expected);
        }
    }

    // Unsupported request headers (tagging) must fail with NotImplemented.
    @Test
    public void testUnknownHeader() throws Exception {
        String blobName = "test-unknown-header";
        var metadata = new ObjectMetadata();
        metadata.setContentLength(BYTE_SOURCE.size());
        var request = new PutObjectRequest(
                containerName, blobName, BYTE_SOURCE.openStream(), metadata)
                .withTagging(new ObjectTagging(List.of()));
        try {
            client.putObject(request);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
        }
    }

    // Buckets have no policy by default.
    @Test
    public void testGetBucketPolicy() throws Exception {
        try {
            client.getBucketPolicy(containerName);
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("NoSuchPolicy");
        }
    }

    // Unsupported bucket sub-resources (logging) must fail with
    // NotImplemented.
    @Test
    public void testUnknownParameter() throws Exception {
        try {
            client.setBucketLoggingConfiguration(
                    new SetBucketLoggingConfigurationRequest(
                            containerName, new BucketLoggingConfiguration()));
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
        }
    }

    // A custom BlobStoreLocator routes each access key to its own backend
    // and rejects unknown keys.
    @Test
    public void testBlobStoreLocator() throws Exception {
        assumeTrue(blobStoreType.equals("filesystem") ||
                blobStoreType.equals("transient"));
        final BlobStore blobStore1 = context.getBlobStore();
        final BlobStore blobStore2 = ContextBuilder
                .newBuilder(blobStoreType)
                .credentials("other-identity", "credential")
                .build(BlobStoreContext.class)
                .getBlobStore();
        s3Proxy.setBlobStoreLocator(new BlobStoreLocator() {
            @Override
            public Map.@Nullable Entry locateBlobStore(
                    String identity, String container, String blob) {
                if (identity.equals(awsCreds.getAWSAccessKeyId())) {
                    return Map.entry(awsCreds.getAWSSecretKey(), blobStore1);
                } else if (identity.equals("other-identity")) {
                    return Map.entry("credential", blobStore2);
                } else {
                    // unknown identity -> authentication failure
                    return null;
                }
            }
        });
        // check first access key
        var buckets = client.listBuckets(
                new ListBucketsPaginatedRequest()).getBuckets();
        assertThat(buckets).hasSize(1);
        assertThat(buckets.get(0).getName()).isEqualTo(containerName);
        // check second access key
        client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(
                        new ClientConfiguration().withMaxErrorRetry(0))
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("other-identity",
                                "credential")))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        buckets = client.listBuckets(
                new ListBucketsPaginatedRequest()).getBuckets();
        assertThat(buckets).isEmpty();
        // check invalid access key
        client = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("bad-identity", "credential")))
                .withEndpointConfiguration(s3EndpointConfig)
                .build();
        try {
            client.listBuckets(new ListBucketsPaginatedRequest());
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            assertThat(e.getErrorCode()).isEqualTo("InvalidAccessKeyId");
        }
    }

    // Path-traversal keys must be rejected on copy.
    @Test
    public void testCopyRelativePath() throws Exception {
        assumeTrue(!blobStoreType.equals("azureblob-sdk"));
        try {
            client.copyObject(new CopyObjectRequest(
                    containerName, "../evil.txt", containerName, "good.txt"));
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            // expected
        }
    }

    // Path-traversal keys must be rejected on delete for path-backed
    // stores; other backends may no-op.
    @Test
    public void testDeleteRelativePath() throws Exception {
        try {
            client.deleteObject(containerName, "../evil.txt");
            if (blobStoreType.equals("filesystem") ||
                    blobStoreType.equals("filesystem-nio2") ||
                    blobStoreType.equals("transient-nio2")) {
                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
            }
        } catch (AmazonS3Exception e) {
            // expected
        }
    }

    // Path-traversal keys must be rejected on get.
    @Test
    public void testGetRelativePath() throws Exception {
        try {
            client.getObject(containerName, "../evil.txt");
            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
        } catch (AmazonS3Exception e) {
            // expected
        }
    }

    // Path-traversal keys must be rejected on put for path-backed stores.
    @Test
    public void testPutRelativePath() throws Exception {
        try {
            var metadata = new ObjectMetadata();
            metadata.setContentLength(BYTE_SOURCE.size());
            client.putObject(containerName, "../evil.txt",
                    BYTE_SOURCE.openStream(), metadata);
            if (blobStoreType.equals("filesystem") ||
                    blobStoreType.equals("filesystem-nio2") ||
                    blobStoreType.equals("transient-nio2")) {
                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
            }
        } catch (AmazonS3Exception e) {
            // expected
        }
    }

    // Path-traversal prefixes must be rejected on list for path-backed
    // stores.
    @Test
    public void testListRelativePath() throws Exception {
        assumeTrue(!blobStoreType.equals("filesystem"));
        try {
            client.listObjects(new ListObjectsRequest()
                    .withBucketName(containerName)
                    .withPrefix("../evil/"));
            if (blobStoreType.equals("filesystem") ||
                    blobStoreType.equals("filesystem-nio2") ||
                    blobStoreType.equals("transient-nio2")) {
                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
            }
        } catch (AmazonS3Exception e) {
            // expected
        }
    }

    // Trust manager that accepts any certificate chain -- test-only, used
    // to talk to S3Proxy's self-signed TLS endpoint.
    private static final class NullX509TrustManager
            implements X509TrustManager {
        @Override
        @Nullable
        public X509Certificate[] getAcceptedIssuers() {
            return null;
        }

        @Override
        public void checkClientTrusted(X509Certificate[] certs,
                String authType) {
        }

        @Override
        public void checkServerTrusted(X509Certificate[] certs,
                String authType) {
        }
    }

    // Globally disables JVM TLS certificate and hostname verification.
    // Test-only: never use this outside the test suite.
    static void disableSslVerification() {
        try {
            // Create a trust manager that does not validate certificate chains
            var trustAllCerts = new TrustManager[] {
                new NullX509TrustManager()
            };
            // Install the all-trusting trust manager
            SSLContext sc = SSLContext.getInstance("SSL");
            sc.init(null, trustAllCerts, new java.security.SecureRandom());
            HttpsURLConnection.setDefaultSSLSocketFactory(
                    sc.getSocketFactory());
            // Create all-trusting host name verifier
            var allHostsValid = new HostnameVerifier() {
                @Override
                public boolean verify(String hostname, SSLSession session) {
                    return true;
                }
            };
            // Install the all-trusting host verifier
            HttpsURLConnection.setDefaultHostnameVerifier(allHostsValid);
        } catch (KeyManagementException | NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }

    // Random bucket name with the s3proxy- prefix shared by all tests.
    static String createRandomContainerName() {
        return "s3proxy-" + new Random().nextInt(Integer.MAX_VALUE);
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingAllowAllResponseTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.KeyManagementException; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.Date; import java.util.Random; import java.util.concurrent.TimeUnit; import com.amazonaws.HttpMethod; import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.google.common.io.ByteSource; import com.google.common.net.HttpHeaders; import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpOptions; import org.apache.http.config.Registry; import org.apache.http.config.RegistryBuilder; import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.conn.ssl.TrustStrategy; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.ssl.SSLContextBuilder; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class CrossOriginResourceSharingAllowAllResponseTest { 
static { System.setProperty( SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY, "true"); AwsSdkTest.disableSslVerification(); } private URI s3Endpoint; private EndpointConfiguration s3EndpointConfig; private S3Proxy s3Proxy; private BlobStoreContext context; private String containerName; private AWSCredentials awsCreds; private AmazonS3 s3Client; private String servicePath; private CloseableHttpClient httpClient; private URI presignedGET; @Before public void setUp() throws Exception { TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy( "s3proxy-cors-allow-all.conf"); awsCreds = new BasicAWSCredentials(info.getS3Identity(), info.getS3Credential()); context = info.getBlobStore().getContext(); s3Proxy = info.getS3Proxy(); s3Endpoint = info.getSecureEndpoint(); servicePath = info.getServicePath(); s3EndpointConfig = new EndpointConfiguration( s3Endpoint.toString() + servicePath, "us-east-1"); s3Client = AmazonS3ClientBuilder.standard() .withCredentials(new AWSStaticCredentialsProvider(awsCreds)) .withEndpointConfiguration(s3EndpointConfig) .build(); httpClient = getHttpClient(); containerName = createRandomContainerName(); info.getBlobStore().createContainerInLocation(null, containerName); s3Client.setBucketAcl(containerName, CannedAccessControlList.PublicRead); String blobName = "test"; ByteSource payload = ByteSource.wrap("blob-content".getBytes( StandardCharsets.UTF_8)); Blob blob = info.getBlobStore().blobBuilder(blobName) .payload(payload).contentLength(payload.size()).build(); info.getBlobStore().putBlob(containerName, blob); var expiration = new Date(System.currentTimeMillis() + TimeUnit.HOURS.toMillis(1)); presignedGET = s3Client.generatePresignedUrl(containerName, blobName, expiration, HttpMethod.GET).toURI(); } @After public void tearDown() throws Exception { if (s3Proxy != null) { s3Proxy.stop(); } if (context != null) { context.getBlobStore().deleteContainer(containerName); context.close(); } if (httpClient != null) { httpClient.close(); } 
} @Test public void testCorsPreflight() throws Exception { // Allowed origin, method and header combination var request = new HttpOptions(presignedGET); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "GET"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS, "Accept, Content-Type"); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("*"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, HEAD, PUT, POST, DELETE"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue()) .isEqualTo("Accept, Content-Type"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("*"); } @Test public void testCorsActual() throws Exception { var request = new HttpGet(presignedGET); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("*"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( 
HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, HEAD, PUT, POST, DELETE"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("*"); } @Test public void testNonCors() throws Exception { var request = new HttpGet(presignedGET); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isFalse(); } private static String createRandomContainerName() { return "s3proxy-" + new Random().nextInt(Integer.MAX_VALUE); } private static CloseableHttpClient getHttpClient() throws KeyManagementException, NoSuchAlgorithmException, KeyStoreException { // Relax SSL Certificate check var sslContext = new SSLContextBuilder().loadTrustMaterial( null, new TrustStrategy() { @Override public boolean isTrusted(X509Certificate[] arg0, String arg1) throws CertificateException { return true; } }).build(); Registry registry = RegistryBuilder .create() .register("http", PlainConnectionSocketFactory.INSTANCE) .register("https", new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE)).build(); PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(registry); return HttpClients.custom().setConnectionManager(connectionManager) .build(); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingResponseTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.KeyManagementException; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.Date; import java.util.Random; import java.util.concurrent.TimeUnit; import com.amazonaws.HttpMethod; import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.google.common.io.ByteSource; import com.google.common.net.HttpHeaders; import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpOptions; import org.apache.http.config.Registry; import org.apache.http.config.RegistryBuilder; import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.conn.ssl.NoopHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.conn.ssl.TrustStrategy; import 
org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.ssl.SSLContextBuilder; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class CrossOriginResourceSharingResponseTest { static { System.setProperty( SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY, "true"); AwsSdkTest.disableSslVerification(); } private URI s3Endpoint; private EndpointConfiguration s3EndpointConfig; private S3Proxy s3Proxy; private BlobStoreContext context; private String containerName; private AWSCredentials awsCreds; private AmazonS3 s3Client; private String servicePath; private CloseableHttpClient httpClient; private URI presignedGET; private URI publicGET; @Before public void setUp() throws Exception { TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy( "s3proxy-cors.conf"); awsCreds = new BasicAWSCredentials(info.getS3Identity(), info.getS3Credential()); context = info.getBlobStore().getContext(); s3Proxy = info.getS3Proxy(); s3Endpoint = info.getSecureEndpoint(); servicePath = info.getServicePath(); s3EndpointConfig = new EndpointConfiguration( s3Endpoint.toString() + servicePath, "us-east-1"); s3Client = AmazonS3ClientBuilder.standard() .withCredentials(new AWSStaticCredentialsProvider(awsCreds)) .withEndpointConfiguration(s3EndpointConfig) .build(); httpClient = getHttpClient(); containerName = createRandomContainerName(); info.getBlobStore().createContainerInLocation(null, containerName); s3Client.setBucketAcl(containerName, CannedAccessControlList.PublicRead); String blobName = "test"; ByteSource payload = ByteSource.wrap("blob-content".getBytes( StandardCharsets.UTF_8)); Blob blob = info.getBlobStore().blobBuilder(blobName) .payload(payload).contentLength(payload.size()).build(); 
info.getBlobStore().putBlob(containerName, blob); var expiration = new Date(System.currentTimeMillis() + TimeUnit.HOURS.toMillis(1)); presignedGET = s3Client.generatePresignedUrl(containerName, blobName, expiration, HttpMethod.GET).toURI(); publicGET = s3Client.getUrl(containerName, blobName).toURI(); } @After public void tearDown() throws Exception { if (s3Proxy != null) { s3Proxy.stop(); } if (context != null) { context.getBlobStore().deleteContainer(containerName); context.close(); } if (httpClient != null) { httpClient.close(); } } @Test public void testCorsPreflight() throws Exception { // Allowed origin and method var request = new HttpOptions(presignedGET); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "GET"); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("https://example.com"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, PUT"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("ETag"); // Allowed origin, method and header request.reset(); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "GET"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS, "Accept"); response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( 
HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("https://example.com"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, PUT"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue()) .isEqualTo("Accept"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("ETag"); // Allowed origin, method and header combination request.reset(); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "GET"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS, "Accept, Content-Type"); response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("https://example.com"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, PUT"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue()) .isEqualTo("Accept, Content-Type"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("ETag"); } @Test public void testCorsPreflightPublicRead() throws Exception { // No CORS headers var request = new HttpOptions(publicGET); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) 
.isEqualTo(HttpStatus.SC_BAD_REQUEST); // Not allowed method request.reset(); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "PATCH"); response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_BAD_REQUEST); // Allowed origin and method request.reset(); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "GET"); request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS, "Accept, Content-Type"); response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("https://example.com"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, PUT"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue()) .isEqualTo("Accept, Content-Type"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("ETag"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS)) .isNull(); } @Test public void testCorsActual() throws Exception { var request = new HttpGet(presignedGET); request.setHeader(HttpHeaders.ORIGIN, "https://example.com"); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue(); assertThat(response.getFirstHeader( 
HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue()) .isEqualTo("https://example.com"); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue(); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue()) .isEqualTo("GET, PUT"); assertThat(response.getFirstHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue()) .isEqualTo("ETag"); } @Test public void testNonCors() throws Exception { var request = new HttpGet(presignedGET); HttpResponse response = httpClient.execute(request); assertThat(response.getStatusLine().getStatusCode()) .isEqualTo(HttpStatus.SC_OK); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isFalse(); assertThat(response.containsHeader( HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isFalse(); } private static String createRandomContainerName() { return "s3proxy-" + new Random().nextInt(Integer.MAX_VALUE); } private static CloseableHttpClient getHttpClient() throws KeyManagementException, NoSuchAlgorithmException, KeyStoreException { // Relax SSL Certificate check var sslContext = new SSLContextBuilder().loadTrustMaterial( null, new TrustStrategy() { @Override public boolean isTrusted(X509Certificate[] arg0, String arg1) throws CertificateException { return true; } }).build(); Registry registry = RegistryBuilder .create() .register("http", PlainConnectionSocketFactory.INSTANCE) .register("https", new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE)).build(); PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(registry); return HttpClients.custom().setConnectionManager(connectionManager) .build(); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingRuleTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use 
this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.util.List; import org.junit.Before; import org.junit.Test; public final class CrossOriginResourceSharingRuleTest { private CrossOriginResourceSharing corsAll; private CrossOriginResourceSharing corsCfg; private CrossOriginResourceSharing corsOff; @Before public void setUp() throws Exception { // CORS Allow All corsAll = new CrossOriginResourceSharing(); // CORS Configured corsCfg = new CrossOriginResourceSharing( List.of("https://example\\.com", "https://.+\\.example\\.com", "https://example\\.cloud"), List.of("GET", "PUT"), List.of("Accept", "Content-Type"), List.of(), "true"); // CORS disabled corsOff = new CrossOriginResourceSharing(null, null, null, null, null); } @Test public void testCorsOffOrigin() throws Exception { String probe = ""; assertThat(corsOff.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isFalse(); probe = "https://example.com"; assertThat(corsOff.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isFalse(); } @Test public void testCorsOffMethod() throws Exception { String probe = ""; assertThat(corsOff.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); probe = "GET"; assertThat(corsOff.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); } @Test public void testCorsOffHeader() throws Exception { String probe = ""; assertThat(corsOff.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept"; 
assertThat(corsOff.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept, Content-Type"; assertThat(corsOff.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); } @Test public void testCorsAllOrigin() throws Exception { String probe = ""; assertThat(corsAll.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isFalse(); probe = "https://example.com"; assertThat(corsAll.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isTrue(); probe = "https://sub.example.com"; assertThat(corsAll.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isTrue(); } @Test public void testCorsAllMethod() throws Exception { String probe = ""; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); probe = "PATCH"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); probe = "GET"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); probe = "PUT"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); probe = "POST"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); probe = "HEAD"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); probe = "DELETE"; assertThat(corsAll.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); } @Test public void testCorsAllHeader() throws Exception { String probe = ""; assertThat(corsAll.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept"; assertThat(corsAll.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isTrue(); probe = "Accept, Content-Type"; assertThat(corsAll.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isTrue(); } @Test public void testCorsCfgOrigin() throws Exception { String probe = ""; assertThat(corsCfg.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isFalse(); 
probe = "https://example.org"; assertThat(corsCfg.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isFalse(); probe = "https://example.com"; assertThat(corsCfg.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isTrue(); probe = "https://sub.example.com"; assertThat(corsCfg.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isTrue(); probe = "https://example.cloud"; assertThat(corsCfg.isOriginAllowed(probe)) .as("check '%s' as origin", probe).isTrue(); } @Test public void testCorsCfgMethod() throws Exception { String probe = ""; assertThat(corsCfg.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); probe = "PATCH"; assertThat(corsCfg.isMethodAllowed(probe)) .as("check '%s' as method", probe).isFalse(); probe = "GET"; assertThat(corsCfg.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); probe = "PUT"; assertThat(corsCfg.isMethodAllowed(probe)) .as("check '%s' as method", probe).isTrue(); } @Test public void testCorsCfgHeader() throws Exception { String probe = ""; assertThat(corsCfg.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept-Language"; assertThat(corsCfg.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept, Accept-Encoding"; assertThat(corsCfg.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isFalse(); probe = "Accept"; assertThat(corsCfg.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isTrue(); probe = "Accept, Content-Type"; assertThat(corsCfg.isEveryHeaderAllowed(probe)) .as("check '%s' as header", probe).isTrue(); } @Test public void testAllowCredentials() { assertThat(corsOff.isAllowCredentials()).isFalse(); assertThat(corsCfg.isAllowCredentials()).isTrue(); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/EncryptedBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the 
Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.stream.Collectors; import org.gaul.s3proxy.crypto.Constants; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobAccess; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.domain.StorageType; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.GetOptions; import org.jclouds.blobstore.options.ListContainerOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.http.HttpResponseException; import org.jclouds.io.Payload; import org.jclouds.io.Payloads; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Assert; import 
org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings("UnstableApiUsage") public final class EncryptedBlobStoreTest { private static final Logger logger = LoggerFactory.getLogger(EncryptedBlobStoreTest.class); private BlobStoreContext context; private BlobStore blobStore; private String containerName; private BlobStore encryptedBlobStore; private static Blob makeBlob(BlobStore blobStore, String blobName, InputStream is, long contentLength) { return blobStore.blobBuilder(blobName) .payload(is) .contentLength(contentLength) .build(); } private static Blob makeBlob(BlobStore blobStore, String blobName, byte[] payload, long contentLength) { return blobStore.blobBuilder(blobName) .payload(payload) .contentLength(contentLength) .build(); } private static Blob makeBlobWithContentType(BlobStore blobStore, String blobName, long contentLength, InputStream is, String contentType) { return blobStore.blobBuilder(blobName) .payload(is) .contentLength(contentLength) .contentType(contentType) .build(); } @Before public void setUp() throws Exception { String password = "Password1234567!"; String salt = "12345678"; containerName = TestUtils.createRandomContainerName(); //noinspection UnstableApiUsage context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); blobStore = context.getBlobStore(); blobStore.createContainerInLocation(null, containerName); var properties = new Properties(); properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE, "true"); properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD, password); properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_SALT, salt); encryptedBlobStore = EncryptedBlobStore.newEncryptedBlobStore(blobStore, properties); } @After public void tearDown() throws Exception { if (context != null) { blobStore.deleteContainer(containerName); 
context.close(); } } @Test public void testBlobNotExists() { String blobName = TestUtils.createRandomBlobName(); Blob blob = encryptedBlobStore.getBlob(containerName, blobName); assertThat(blob).isNull(); blob = encryptedBlobStore.getBlob(containerName, blobName, new GetOptions()); assertThat(blob).isNull(); } @Test public void testBlobNotEncrypted() throws Exception { var tests = new String[] { "1", // only 1 char "123456789A12345", // lower then the AES block "123456789A1234567", // one byte bigger then the AES block "123456789A123456123456789B123456123456789C" + "1234123456789A123456123456789B123456123456789C1234" }; Map contentLengths = new HashMap<>(); for (String content : tests) { String blobName = TestUtils.createRandomBlobName(); InputStream is = new ByteArrayInputStream( content.getBytes(StandardCharsets.UTF_8)); contentLengths.put(blobName, (long) content.length()); Blob blob = makeBlob(blobStore, blobName, is, content.length()); blobStore.putBlob(containerName, blob); blob = encryptedBlobStore.getBlob(containerName, blobName); try (InputStream blobIs = blob.getPayload().openStream()) { var reader = new BufferedReader(new InputStreamReader(blobIs)); String plaintext = reader.lines().collect(Collectors.joining()); logger.debug("plaintext {}", plaintext); assertThat(content).isEqualTo(plaintext); } var options = new GetOptions(); blob = encryptedBlobStore.getBlob(containerName, blobName, options); try (InputStream blobIs = blob.getPayload().openStream()) { var reader = new BufferedReader(new InputStreamReader(blobIs)); String plaintext = reader.lines().collect(Collectors.joining()); logger.debug("plaintext {} with empty options ", plaintext); assertThat(content).isEqualTo(plaintext); } } PageSet blobs = encryptedBlobStore.list(containerName, new ListContainerOptions()); for (StorageMetadata blob : blobs) { assertThat(blob.getSize()).isEqualTo( contentLengths.get(blob.getName())); } blobs = encryptedBlobStore.list(); StorageMetadata metadata = 
blobs.iterator().next(); assertThat(StorageType.CONTAINER).isEqualTo(metadata.getType()); } @Test public void testListEncrypted() { var contents = new String[] { "1", // only 1 char "123456789A12345", // lower then the AES block "123456789A1234567", // one byte bigger then the AES block "123456789A123456123456789B123456123456789C1234" }; Map contentLengths = new HashMap<>(); for (String content : contents) { String blobName = TestUtils.createRandomBlobName(); InputStream is = new ByteArrayInputStream( content.getBytes(StandardCharsets.UTF_8)); contentLengths.put(blobName, (long) content.length()); Blob blob = makeBlob(encryptedBlobStore, blobName, is, content.length()); encryptedBlobStore.putBlob(containerName, blob); } PageSet blobs = encryptedBlobStore.list(containerName); for (StorageMetadata blob : blobs) { assertThat(blob.getSize()).isEqualTo( contentLengths.get(blob.getName())); } blobs = encryptedBlobStore.list(containerName, new ListContainerOptions()); for (StorageMetadata blob : blobs) { assertThat(blob.getSize()).isEqualTo( contentLengths.get(blob.getName())); encryptedBlobStore.removeBlob(containerName, blob.getName()); } blobs = encryptedBlobStore.list(containerName, new ListContainerOptions()); assertThat(blobs.size()).isEqualTo(0); } @Test public void testListEncryptedMultipart() { String blobName = TestUtils.createRandomBlobName(); var contentParts = new String[] { "123456789A123456123456789B123456123456789C1234", "123456789D123456123456789E123456123456789F123456", "123456789G123456123456789H123456123456789I123" }; String content = contentParts[0] + contentParts[1] + contentParts[2]; BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName, content.getBytes(StandardCharsets.UTF_8), content.length()).getMetadata(); MultipartUpload mpu = encryptedBlobStore.initiateMultipartUpload(containerName, blobMetadata, new PutOptions()); Payload payload1 = Payloads.newByteArrayPayload( contentParts[0].getBytes(StandardCharsets.UTF_8)); Payload payload2 
= Payloads.newByteArrayPayload(
        contentParts[1].getBytes(StandardCharsets.UTF_8));
        Payload payload3 = Payloads.newByteArrayPayload(
                contentParts[2].getBytes(StandardCharsets.UTF_8));
        encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);
        encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);
        encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);
        // Listed part sizes must reflect the plaintext part sizes, not the
        // sizes of the stored ciphertext.
        List parts = encryptedBlobStore.listMultipartUpload(mpu);
        int index = 0;
        for (MultipartPart part : parts) {
            assertThat((long) contentParts[index].length()).isEqualTo(
                    part.partSize());
            index++;
        }
        encryptedBlobStore.completeMultipartUpload(mpu, parts);
        // Every list variant must report the plaintext blob size.
        PageSet blobs = encryptedBlobStore.list(containerName);
        StorageMetadata metadata = blobs.iterator().next();
        assertThat((long) content.length()).isEqualTo(metadata.getSize());
        var options = new ListContainerOptions();
        blobs = encryptedBlobStore.list(containerName, options.withDetails());
        metadata = blobs.iterator().next();
        assertThat((long) content.length()).isEqualTo(metadata.getSize());
        blobs = encryptedBlobStore.list();
        metadata = blobs.iterator().next();
        assertThat(StorageType.CONTAINER).isEqualTo(metadata.getType());
        List singleList = new ArrayList<>();
        singleList.add(blobName);
        encryptedBlobStore.removeBlobs(containerName, singleList);
        blobs = encryptedBlobStore.list(containerName);
        assertThat(blobs.size()).isEqualTo(0);
    }

    /**
     * Exercises random offset, tail, and bounded-range reads through the
     * encrypting store for a blob that was written unencrypted via the
     * backing store.
     */
    @Test
    public void testBlobNotEncryptedRanges() throws Exception {
        for (int run = 0; run < 100; run++) {
            var tests = new String[] {
                "123456789A12345", // shorter than the AES block
                "123456789A1234567", // one byte bigger than the AES block
                "123456789A123456123456789B123456123456789C" +
                "1234123456789A123456123456789B123456123456789C1234"
            };
            for (String content : tests) {
                String blobName = TestUtils.createRandomBlobName();
                var rand = new Random();
                InputStream is = new ByteArrayInputStream(
                        content.getBytes(StandardCharsets.UTF_8));
                // Write via the plain store so the stored payload is
                // unencrypted.
                Blob blob = makeBlob(blobStore, blobName, is,
                        content.length());
                blobStore.putBlob(containerName, blob);
                var options = new GetOptions();
                int offset = rand.nextInt(content.length() - 1);
                logger.debug("content {} with offset {}", content, offset);
                options.startAt(offset);
                blob = encryptedBlobStore.getBlob(containerName, blobName,
                        options);
                try (InputStream blobIs = blob.getPayload().openStream()) {
                    var reader = new BufferedReader(
                            new InputStreamReader(blobIs));
                    String plaintext = reader.lines().collect(
                            Collectors.joining());
                    logger.debug("plaintext {} with offset {}", plaintext,
                            offset);
                    assertThat(plaintext).isEqualTo(content.substring(offset));
                }
                options = new GetOptions();
                int tail = rand.nextInt(content.length());
                if (tail == 0) {
                    tail++;
                }
                logger.debug("content {} with tail {}", content, tail);
                options.tail(tail);
                blob = encryptedBlobStore.getBlob(containerName, blobName,
                        options);
                try (InputStream blobIs = blob.getPayload().openStream()) {
                    var reader = new BufferedReader(
                            new InputStreamReader(blobIs));
                    String plaintext = reader.lines().collect(
                            Collectors.joining());
                    logger.debug("plaintext {} with tail {}", plaintext, tail);
                    assertThat(plaintext).isEqualTo(
                            content.substring(content.length() - tail));
                }
                options = new GetOptions();
                offset = 1;
                int end = content.length() - 2;
                logger.debug("content {} with range {}-{}", content, offset,
                        end);
                options.range(offset, end);
                blob = encryptedBlobStore.getBlob(containerName, blobName,
                        options);
                try (InputStream blobIs = blob.getPayload().openStream()) {
                    var reader = new BufferedReader(
                            new InputStreamReader(blobIs));
                    String plaintext = reader.lines().collect(
                            Collectors.joining());
                    logger.debug("plaintext {} with range {}-{}", plaintext,
                            offset, end);
                    assertThat(plaintext).isEqualTo(
                            content.substring(offset, end + 1));
                }
            }
        }
    }

    /**
     * Round-trips several content sizes through the encrypting store,
     * checks the stored ciphertext differs from the plaintext, and
     * exercises blobExists plus get/set of blob ACLs.
     */
    @Test
    public void testEncryptContent() throws Exception {
        var tests = new String[] {
            "1", // only 1 char
            "123456789A12345", // shorter than the AES block
            "123456789A1234567", // one byte bigger than the AES block
            "123456789A123456123456789B123456123456789C1234"
        };
        for (String content : tests) {
            String blobName =
TestUtils.createRandomBlobName();
            String contentType = "plain/text";
            InputStream is = new ByteArrayInputStream(
                    content.getBytes(StandardCharsets.UTF_8));
            Blob blob = makeBlobWithContentType(encryptedBlobStore, blobName,
                    content.length(), is, contentType);
            encryptedBlobStore.putBlob(containerName, blob);
            blob = encryptedBlobStore.getBlob(containerName, blobName);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(plaintext).isEqualTo(content);
            }
            // Reading the raw suffixed blob via the backing store must not
            // yield the plaintext.
            blob = blobStore.getBlob(containerName,
                    blobName + Constants.S3_ENC_SUFFIX);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String encrypted = reader.lines().collect(Collectors.joining());
                logger.debug("encrypted {}", encrypted);
                assertThat(content).isNotEqualTo(encrypted);
            }
            assertThat(encryptedBlobStore.blobExists(containerName,
                    blobName)).isTrue();
            BlobAccess access = encryptedBlobStore.getBlobAccess(containerName,
                    blobName);
            assertThat(access).isEqualTo(BlobAccess.PRIVATE);
            encryptedBlobStore.setBlobAccess(containerName, blobName,
                    BlobAccess.PUBLIC_READ);
            access = encryptedBlobStore.getBlobAccess(containerName, blobName);
            assertThat(access).isEqualTo(BlobAccess.PUBLIC_READ);
        }
    }

    /**
     * Same round-trip as testEncryptContent but puts with PutOptions and
     * additionally checks content-type preservation on the stored blob and
     * server-side copy of an encrypted blob.
     */
    @Test
    public void testEncryptContentWithOptions() throws Exception {
        var tests = new String[] {
            "1", // only 1 char
            "123456789A12345", // shorter than the AES block
            "123456789A1234567", // one byte bigger than the AES block
            "123456789A123456123456789B123456123456789C1234"
        };
        for (String content : tests) {
            String blobName = TestUtils.createRandomBlobName();
            String contentType = "plain/text; charset=utf-8";
            InputStream is = new ByteArrayInputStream(
                    content.getBytes(StandardCharsets.UTF_8));
            Blob blob = makeBlobWithContentType(encryptedBlobStore, blobName,
                    content.length(), is, contentType);
            var options = new PutOptions();
            encryptedBlobStore.putBlob(containerName, blob, options);
            blob = encryptedBlobStore.getBlob(containerName, blobName);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(content).isEqualTo(plaintext);
            }
            blob = blobStore.getBlob(containerName,
                    blobName + Constants.S3_ENC_SUFFIX);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String encrypted = reader.lines().collect(Collectors.joining());
                logger.debug("encrypted {}", encrypted);
                assertThat(content).isNotEqualTo(encrypted);
            }
            // The stored (suffixed) blob keeps the original content type.
            BlobMetadata metadata =
                    encryptedBlobStore.blobMetadata(containerName,
                            blobName + Constants.S3_ENC_SUFFIX);
            assertThat(contentType).isEqualTo(
                    metadata.getContentMetadata().getContentType());
            encryptedBlobStore.copyBlob(containerName, blobName,
                    containerName, blobName + "-copy", CopyOptions.NONE);
            blob = blobStore.getBlob(containerName,
                    blobName + Constants.S3_ENC_SUFFIX);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String encrypted = reader.lines().collect(Collectors.joining());
                logger.debug("encrypted {}", encrypted);
                assertThat(content).isNotEqualTo(encrypted);
            }
            // The copy must decrypt to the same plaintext.
            blob = encryptedBlobStore.getBlob(containerName,
                    blobName + "-copy");
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(content).isEqualTo(plaintext);
            }
        }
    }

    /**
     * Uploads three parts through the encrypting store and verifies the
     * completed blob decrypts to the concatenated content.
     */
    @Test
    public void testEncryptMultipartContent() throws Exception {
        String blobName = TestUtils.createRandomBlobName();
        String content1 = "123456789A123456123456789B123456123456789C1234";
        String content2 =
"123456789D123456123456789E123456123456789F123456";
        String content3 = "123456789G123456123456789H123456123456789I123";
        String content = content1 + content2 + content3;
        BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,
                content.getBytes(StandardCharsets.UTF_8),
                content.length()).getMetadata();
        MultipartUpload mpu =
                encryptedBlobStore.initiateMultipartUpload(containerName,
                        blobMetadata, new PutOptions());
        Payload payload1 = Payloads.newByteArrayPayload(
                content1.getBytes(StandardCharsets.UTF_8));
        Payload payload2 = Payloads.newByteArrayPayload(
                content2.getBytes(StandardCharsets.UTF_8));
        Payload payload3 = Payloads.newByteArrayPayload(
                content3.getBytes(StandardCharsets.UTF_8));
        encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);
        encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);
        encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);
        // The in-progress upload must be visible until completed.
        List mpus = encryptedBlobStore.listMultipartUploads(containerName);
        assertThat(mpus.size()).isEqualTo(1);
        List parts = encryptedBlobStore.listMultipartUpload(mpu);
        assertThat(mpus.get(0).id()).isEqualTo(mpu.id());
        encryptedBlobStore.completeMultipartUpload(mpu, parts);
        Blob blob = encryptedBlobStore.getBlob(containerName, blobName);
        try (InputStream blobIs = blob.getPayload().openStream()) {
            var reader = new BufferedReader(new InputStreamReader(blobIs));
            String plaintext = reader.lines().collect(Collectors.joining());
            logger.debug("plaintext {}", plaintext);
            assertThat(plaintext).isEqualTo(content);
        }
        // The stored ciphertext must not match the plaintext.
        blob = blobStore.getBlob(containerName,
                blobName + Constants.S3_ENC_SUFFIX);
        try (InputStream blobIs = blob.getPayload().openStream()) {
            var reader = new BufferedReader(new InputStreamReader(blobIs));
            String encrypted = reader.lines().collect(Collectors.joining());
            logger.debug("encrypted {}", encrypted);
            assertThat(content).isNotEqualTo(encrypted);
        }
    }

    /**
     * Reads an encrypted single-part blob at every start offset and checks
     * the decrypted suffix and the Content-Range header.
     */
    @Test
    public void testReadPartial() throws Exception {
        for (int offset = 0; offset < 60; offset++) {
            logger.debug("Test with offset {}", offset);
            String blobName = TestUtils.createRandomBlobName();
            String content = "123456789A123456123456789B123456123456789" +
                    "C123456789D123456789E12345";
            InputStream is = new ByteArrayInputStream(
                    content.getBytes(StandardCharsets.UTF_8));
            Blob blob = makeBlob(encryptedBlobStore, blobName, is,
                    content.length());
            encryptedBlobStore.putBlob(containerName, blob);
            var options = new GetOptions();
            options.startAt(offset);
            blob = encryptedBlobStore.getBlob(containerName, blobName,
                    options);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(plaintext).isEqualTo(content.substring(offset));
            }
            // NOTE(review): a zero offset expects an end of 0 here — this
            // pins the current Content-Range behavior of the store; confirm
            // against EncryptedBlobStore before changing.
            long expectedEndRange = (offset != 0) ? content.length() : 0;
            assertThat(blob.getAllHeaders().get("Content-Range"))
                    .contains("bytes " + offset + "-" + expectedEndRange +
                            "/" + content.length());
        }
    }

    /**
     * Reads an encrypted single-part blob with every tail length and checks
     * the decrypted suffix and the Content-Range header.
     */
    @Test
    public void testReadTail() throws Exception {
        for (int length = 1; length < 60; length++) {
            logger.debug("Test with length {}", length);
            String blobName = TestUtils.createRandomBlobName();
            String content = "123456789A123456123456789B123456123456789C" +
                    "123456789D123456789E12345";
            InputStream is = new ByteArrayInputStream(
                    content.getBytes(StandardCharsets.UTF_8));
            Blob blob = makeBlob(encryptedBlobStore, blobName, is,
                    content.length());
            encryptedBlobStore.putBlob(containerName, blob);
            var options = new GetOptions();
            options.tail(length);
            blob = encryptedBlobStore.getBlob(containerName, blobName,
                    options);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(plaintext).isEqualTo(
                        content.substring(content.length() - length));
            }
            assertThat(blob.getAllHeaders().get("Content-Range"))
                    .contains("bytes " + 0 + "-" + length + "/" +
content.length());
        }
    }

    /** Random-end bounded-range reads over a single-part encrypted blob. */
    @Test
    public void testReadPartialWithRandomEnd() throws Exception {
        for (int run = 0; run < 100; run++) {
            for (int offset = 0; offset < 50; offset++) {
                var rand = new Random();
                int end = offset + rand.nextInt(20) + 2;
                int size = end - offset + 1;
                logger.debug("Test with offset {} and end {} size {}",
                        offset, end, size);
                String blobName = TestUtils.createRandomBlobName();
                String content =
                        "123456789A123456-123456789B123456-123456789C123456-" +
                        "123456789D123456-123456789E123456";
                InputStream is = new ByteArrayInputStream(
                        content.getBytes(StandardCharsets.UTF_8));
                Blob blob = makeBlob(encryptedBlobStore, blobName, is,
                        content.length());
                encryptedBlobStore.putBlob(containerName, blob);
                var options = new GetOptions();
                options.range(offset, end);
                blob = encryptedBlobStore.getBlob(containerName, blobName,
                        options);
                try (InputStream blobIs = blob.getPayload().openStream()) {
                    var reader = new BufferedReader(
                            new InputStreamReader(blobIs));
                    String plaintext = reader.lines().collect(
                            Collectors.joining());
                    logger.debug("plaintext {}", plaintext);
                    assertThat(plaintext).hasSize(size);
                    assertThat(plaintext).isEqualTo(
                            content.substring(offset, end + 1));
                }
                assertThat(blob.getAllHeaders().get("Content-Range"))
                        .contains("bytes " + offset + "-" + end + "/" +
                                content.length());
            }
        }
    }

    /** Offset reads across part boundaries of a multipart encrypted blob. */
    @Test
    public void testMultipartReadPartial() throws Exception {
        for (int offset = 0; offset < 130; offset++) {
            logger.debug("Test with offset {}", offset);
            String blobName = TestUtils.createRandomBlobName();
            String content1 =
                    "PART1-789A123456123456789B123456123456789C1234";
            String content2 =
                    "PART2-789D123456123456789E123456123456789F123456";
            String content3 =
                    "PART3-789G123456123456789H123456123456789I123";
            String content = content1 + content2 + content3;
            BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,
                    content.getBytes(StandardCharsets.UTF_8),
                    content.length()).getMetadata();
            MultipartUpload mpu =
                    encryptedBlobStore.initiateMultipartUpload(containerName,
                            blobMetadata, new PutOptions());
            Payload payload1 = Payloads.newByteArrayPayload(
                    content1.getBytes(StandardCharsets.UTF_8));
            Payload payload2 = Payloads.newByteArrayPayload(
                    content2.getBytes(StandardCharsets.UTF_8));
            Payload payload3 = Payloads.newByteArrayPayload(
                    content3.getBytes(StandardCharsets.UTF_8));
            encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);
            encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);
            encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);
            List parts = encryptedBlobStore.listMultipartUpload(mpu);
            encryptedBlobStore.completeMultipartUpload(mpu, parts);
            var options = new GetOptions();
            options.startAt(offset);
            Blob blob = encryptedBlobStore.getBlob(containerName, blobName,
                    options);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(plaintext).isEqualTo(content.substring(offset));
            }
        }
    }

    /** Tail reads across part boundaries of a multipart encrypted blob. */
    @Test
    public void testMultipartReadTail() throws Exception {
        for (int length = 1; length < 130; length++) {
            logger.debug("Test with length {}", length);
            String blobName = TestUtils.createRandomBlobName();
            String content1 =
                    "PART1-789A123456123456789B123456123456789C1234";
            String content2 =
                    "PART2-789D123456123456789E123456123456789F123456";
            String content3 =
                    "PART3-789G123456123456789H123456123456789I123";
            String content = content1 + content2 + content3;
            BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,
                    content.getBytes(StandardCharsets.UTF_8),
                    content.length()).getMetadata();
            MultipartUpload mpu =
                    encryptedBlobStore.initiateMultipartUpload(containerName,
                            blobMetadata, new PutOptions());
            Payload payload1 = Payloads.newByteArrayPayload(
                    content1.getBytes(StandardCharsets.UTF_8));
            Payload payload2 = Payloads.newByteArrayPayload(
                    content2.getBytes(StandardCharsets.UTF_8));
            Payload payload3 = Payloads.newByteArrayPayload(
content3.getBytes(StandardCharsets.UTF_8));
            encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);
            encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);
            encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);
            List parts = encryptedBlobStore.listMultipartUpload(mpu);
            encryptedBlobStore.completeMultipartUpload(mpu, parts);
            var options = new GetOptions();
            options.tail(length);
            Blob blob = encryptedBlobStore.getBlob(containerName, blobName,
                    options);
            try (InputStream blobIs = blob.getPayload().openStream()) {
                var reader = new BufferedReader(new InputStreamReader(blobIs));
                String plaintext = reader.lines().collect(Collectors.joining());
                logger.debug("plaintext {}", plaintext);
                assertThat(plaintext).isEqualTo(
                        content.substring(content.length() - length));
            }
        }
    }

    /**
     * Random-end bounded-range reads across part boundaries of a multipart
     * encrypted blob.
     */
    @Test
    public void testMultipartReadPartialWithRandomEnd() throws Exception {
        for (int run = 0; run < 100; run++) {
            // total len = 139
            for (int offset = 0; offset < 70; offset++) {
                var rand = new Random();
                int end = offset + rand.nextInt(60) + 2;
                int size = end - offset + 1;
                logger.debug("Test with offset {} and end {} size {}",
                        offset, end, size);
                String blobName = TestUtils.createRandomBlobName();
                String content1 =
                        "PART1-789A123456123456789B123456123456789C1234";
                String content2 =
                        "PART2-789D123456123456789E123456123456789F123456";
                String content3 =
                        "PART3-789G123456123456789H123456123456789I123";
                String content = content1 + content2 + content3;
                BlobMetadata blobMetadata = makeBlob(encryptedBlobStore,
                        blobName, content.getBytes(StandardCharsets.UTF_8),
                        content.length()).getMetadata();
                MultipartUpload mpu =
                        encryptedBlobStore.initiateMultipartUpload(
                                containerName, blobMetadata, new PutOptions());
                Payload payload1 = Payloads.newByteArrayPayload(
                        content1.getBytes(StandardCharsets.UTF_8));
                Payload payload2 = Payloads.newByteArrayPayload(
                        content2.getBytes(StandardCharsets.UTF_8));
                Payload payload3 = Payloads.newByteArrayPayload(
                        content3.getBytes(StandardCharsets.UTF_8));
                encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);
                encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);
                encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);
                List parts = encryptedBlobStore.listMultipartUpload(mpu);
                encryptedBlobStore.completeMultipartUpload(mpu, parts);
                var options = new GetOptions();
                options.range(offset, end);
                Blob blob = encryptedBlobStore.getBlob(containerName,
                        blobName, options);
                try (InputStream blobIs = blob.getPayload().openStream()) {
                    var reader = new BufferedReader(
                            new InputStreamReader(blobIs));
                    String plaintext = reader.lines().collect(
                            Collectors.joining());
                    logger.debug("plaintext {}", plaintext);
                    assertThat(plaintext).isEqualTo(
                            content.substring(offset, end + 1));
                }
            }
        }
    }

    /** If-None-Match with the blob's own ETag must yield a 304 response. */
    @Test
    public void testReadConditional() {
        String blobName = TestUtils.createRandomBlobName();
        String content = "Hello world.";
        InputStream is =
                new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));
        Blob blob = makeBlob(encryptedBlobStore, blobName, is,
                content.length());
        encryptedBlobStore.putBlob(containerName, blob);
        GetOptions options = new GetOptions();
        blob = encryptedBlobStore.getBlob(containerName, blobName, options);
        String etag = blob.getMetadata().getETag();
        GetOptions conditionalOptions =
                GetOptions.Builder.ifETagDoesntMatch(etag);
        var e = Assert.assertThrows(HttpResponseException.class,
                () -> encryptedBlobStore.getBlob(containerName, blobName,
                        conditionalOptions));
        assertThat(e.getResponse().getStatusCode()).isEqualTo(304);
    }

    /**
     * A 0-0 range must return exactly one byte and the matching
     * Content-Range header.
     */
    @Test
    public void testReadDoubleZeroRange() throws IOException {
        String blobName = TestUtils.createRandomBlobName();
        String content = "Hello world.";
        InputStream is =
                new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));
        Blob blob = makeBlob(encryptedBlobStore, blobName, is,
                content.length());
        encryptedBlobStore.putBlob(containerName, blob);
        GetOptions rangeOptions = new GetOptions();
        rangeOptions.getRanges().add("0-0");
        var result = encryptedBlobStore.getBlob(containerName, blobName,
rangeOptions); assertThat(result.getPayload().openStream().readAllBytes().length).isEqualTo(1); assertThat(result.getAllHeaders().get("Content-Range")) .contains("bytes 0-0/" + content.length()); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/EventualBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import com.google.common.io.ByteSource; import com.google.common.net.MediaType; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.io.ContentMetadata; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class EventualBlobStoreTest { private static final int DELAY = 5; private static 
final TimeUnit DELAY_UNIT = TimeUnit.SECONDS; private static final ByteSource BYTE_SOURCE = TestUtils.randomByteSource().slice(0, 1024); private BlobStoreContext nearContext; private BlobStoreContext farContext; private BlobStore nearBlobStore; private BlobStore farBlobStore; private String containerName; private ScheduledExecutorService executorService; private BlobStore eventualBlobStore; @Before public void setUp() throws Exception { containerName = createRandomContainerName(); nearContext = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); nearBlobStore = nearContext.getBlobStore(); nearBlobStore.createContainerInLocation(null, containerName); farContext = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); farBlobStore = farContext.getBlobStore(); farBlobStore.createContainerInLocation(null, containerName); executorService = Executors.newScheduledThreadPool(1); eventualBlobStore = EventualBlobStore.newEventualBlobStore( nearBlobStore, farBlobStore, executorService, DELAY, DELAY_UNIT, 1.0); } @After public void tearDown() throws Exception { if (nearContext != null) { nearBlobStore.deleteContainer(containerName); nearContext.close(); } if (farContext != null) { farBlobStore.deleteContainer(containerName); farContext.close(); } if (executorService != null) { executorService.shutdown(); } } @Test public void testReadAfterCreate() throws Exception { String blobName = createRandomBlobName(); Blob blob = makeBlob(eventualBlobStore, blobName); eventualBlobStore.putBlob(containerName, blob); assertThat(eventualBlobStore.getBlob(containerName, blobName)) .isNull(); delay(); validateBlob(eventualBlobStore.getBlob(containerName, blobName)); } @Test public void testReadAfterDelete() throws Exception { String blobName = createRandomBlobName(); Blob blob = 
makeBlob(eventualBlobStore, blobName); eventualBlobStore.putBlob(containerName, blob); assertThat(eventualBlobStore.getBlob(containerName, blobName)) .isNull(); delay(); eventualBlobStore.removeBlob(containerName, blobName); validateBlob(eventualBlobStore.getBlob(containerName, blobName)); delay(); assertThat(eventualBlobStore.getBlob(containerName, blobName)) .isNull(); } @Test public void testOverwriteAfterDelete() throws Exception { String blobName = createRandomBlobName(); Blob blob = makeBlob(eventualBlobStore, blobName); eventualBlobStore.putBlob(containerName, blob); delay(); eventualBlobStore.removeBlob(containerName, blobName); blob = makeBlob(eventualBlobStore, blobName); eventualBlobStore.putBlob(containerName, blob); delay(); validateBlob(eventualBlobStore.getBlob(containerName, blobName)); } @Test public void testReadAfterCopy() throws Exception { String fromName = createRandomBlobName(); String toName = createRandomBlobName(); Blob blob = makeBlob(eventualBlobStore, fromName); eventualBlobStore.putBlob(containerName, blob); delay(); eventualBlobStore.copyBlob(containerName, fromName, containerName, toName, CopyOptions.NONE); assertThat(eventualBlobStore.getBlob(containerName, toName)) .isNull(); delay(); validateBlob(eventualBlobStore.getBlob(containerName, toName)); } @Test public void testReadAfterMultipartUpload() throws Exception { String blobName = createRandomBlobName(); Blob blob = makeBlob(eventualBlobStore, blobName); MultipartUpload mpu = eventualBlobStore.initiateMultipartUpload( containerName, blob.getMetadata(), new PutOptions()); MultipartPart part = eventualBlobStore.uploadMultipartPart(mpu, /*partNumber=*/ 1, blob.getPayload()); eventualBlobStore.completeMultipartUpload(mpu, List.of(part)); assertThat(eventualBlobStore.getBlob(containerName, blobName)) .isNull(); delay(); validateBlob(eventualBlobStore.getBlob(containerName, blobName)); } @Test public void testListAfterCreate() throws Exception { String blobName = 
createRandomBlobName(); Blob blob = makeBlob(eventualBlobStore, blobName); eventualBlobStore.putBlob(containerName, blob); assertThat(eventualBlobStore.list(containerName)).isEmpty(); delay(); assertThat(eventualBlobStore.list(containerName)).isNotEmpty(); } private static String createRandomContainerName() { return "container-" + new Random().nextInt(Integer.MAX_VALUE); } private static String createRandomBlobName() { return "blob-" + new Random().nextInt(Integer.MAX_VALUE); } private static Blob makeBlob(BlobStore blobStore, String blobName) throws IOException { return blobStore.blobBuilder(blobName) .payload(BYTE_SOURCE) .contentDisposition("attachment; filename=foo.mp4") .contentEncoding("compress") .contentLength(BYTE_SOURCE.size()) .contentType(MediaType.MP4_AUDIO) .contentMD5(BYTE_SOURCE.hash(TestUtils.MD5)) .userMetadata(Map.of("key", "value")) .build(); } private static void validateBlob(Blob blob) throws IOException { assertThat(blob).isNotNull(); ContentMetadata contentMetadata = blob.getMetadata().getContentMetadata(); assertThat(contentMetadata.getContentDisposition()) .isEqualTo("attachment; filename=foo.mp4"); assertThat(contentMetadata.getContentEncoding()) .isEqualTo("compress"); assertThat(contentMetadata.getContentLength()) .isEqualTo(BYTE_SOURCE.size()); assertThat(contentMetadata.getContentType()) .isEqualTo(MediaType.MP4_AUDIO.toString()); assertThat(blob.getMetadata().getUserMetadata()) .isEqualTo(Map.of("key", "value")); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = BYTE_SOURCE.openStream()) { assertThat(actual).hasSameContentAs(expected); } } private static void delay() throws InterruptedException { DELAY_UNIT.sleep(1 + DELAY); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/GlobBlobStoreLocatorTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may 
not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.nio.file.FileSystems; import java.nio.file.PathMatcher; import java.util.List; import java.util.Map; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSortedMap; import com.google.common.collect.Maps; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.Before; import org.junit.Test; public final class GlobBlobStoreLocatorTest { private BlobStore blobStoreOne; private BlobStore blobStoreTwo; @Before public void setUp() { blobStoreOne = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class).getBlobStore(); blobStoreTwo = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class).getBlobStore(); } @Test public void testLocateIdentity() { var credsMap = ImmutableSortedMap.of( "id1", Map.entry("one", blobStoreOne), "id2", Map.entry("two", blobStoreTwo)); var locator = new GlobBlobStoreLocator( credsMap, Map.of()); assertThat(locator.locateBlobStore("id2", null, null).getKey()) .isEqualTo("two"); assertThat(locator.locateBlobStore(null, null, null).getKey()) .isEqualTo("one"); assertThat(locator.locateBlobStore("foo", null, null)).isNull(); } 
@Test
public void testLocateContainer() {
    // Must support null keys
    var credsMap = ImmutableMap.of(
            "id1", Map.entry("one", blobStoreOne),
            "id2", Map.entry("two", blobStoreTwo));
    var globMap = Map.of(
            FileSystems.getDefault().getPathMatcher("glob:container1"),
            Map.entry("id1", blobStoreOne),
            FileSystems.getDefault().getPathMatcher("glob:container2"),
            Map.entry("id2", blobStoreTwo));
    var locator = new GlobBlobStoreLocator(credsMap, globMap);
    // Container glob wins when no identity is supplied.
    assertThat(locator.locateBlobStore(null, "container1", null)
            .getValue()).isSameAs(blobStoreOne);
    assertThat(locator.locateBlobStore(null, "container2", null)
            .getValue()).isSameAs(blobStoreTwo);
    // Known identity with an unmatched container falls back to creds.
    assertThat(locator.locateBlobStore("id1", "foo", null)
            .getValue()).isSameAs(blobStoreOne);
    assertThat(locator.locateBlobStore("id2", "foo", null)
            .getValue()).isSameAs(blobStoreTwo);
    // Unknown identity never matches, even with a matching container.
    assertThat(locator.locateBlobStore("foo", "container1", null))
            .isNull();
    assertThat(locator.locateBlobStore("foo", "container2", null))
            .isNull();
}

@Test
public void testLocateGlob() {
    // Explicit type witnesses: the value type is Map.Entry<String,
    // BlobStore> and one entry carries a null BlobStore, so inference
    // from the arguments alone is not possible.
    // (Reconstructed: the extraction stripped the generic arguments.)
    var credsMap = ImmutableSortedMap.<String, Map.Entry<String, BlobStore>>of(
            "id0", Maps.immutableEntry("zero", null),
            "id1", Map.entry("one", blobStoreOne),
            "id2", Map.entry("two", blobStoreTwo));
    var globMap = Map.<PathMatcher, Map.Entry<String, BlobStore>>of(
            FileSystems.getDefault().getPathMatcher(
                    "glob:{one,two}"),
            Map.entry("id1", blobStoreOne),
            FileSystems.getDefault().getPathMatcher("glob:cont?X*"),
            Map.entry("id2", blobStoreTwo));
    var locator = new GlobBlobStoreLocator(credsMap, globMap);
    // Alternation glob matches both "one" and "two".
    assertThat(locator.locateBlobStore(null, "one", null)
            .getValue()).isSameAs(blobStoreOne);
    assertThat(locator.locateBlobStore("id1", "two", null)
            .getValue()).isSameAs(blobStoreOne);
    // '?' matches one char, '*' matches any suffix.
    assertThat(locator.locateBlobStore("id2", "cont5X.extra", null)
            .getValue()).isSameAs(blobStoreTwo);
}

@Test
public void testGlobLocatorAnonymous() {
    // Must support null keys
    // (Reconstructed generic witnesses; entries carry a null identity.)
    var globMap = ImmutableMap.<PathMatcher, Map.Entry<String, BlobStore>>of(
            FileSystems.getDefault().getPathMatcher("glob:one"),
            Maps.immutableEntry(null, blobStoreOne),
            FileSystems.getDefault().getPathMatcher("glob:two"),
            Maps.immutableEntry(null, blobStoreTwo));
    var locator = new GlobBlobStoreLocator(
            ImmutableMap.of(), globMap);
    // With no identity and no container the first glob entry wins.
    assertThat(locator.locateBlobStore(null, null, null)
            .getValue()).isSameAs(blobStoreOne);
    assertThat(locator.locateBlobStore(null, "one", null)
            .getValue()).isSameAs(blobStoreOne);
    assertThat(locator.locateBlobStore(null, "two", null)
            .getValue()).isSameAs(blobStoreTwo);
}
}


================================================
FILE: src/test/java/org/gaul/s3proxy/LatencyBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import com.google.common.io.ByteSource; import org.assertj.core.api.Assertions; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.io.Payload; import org.jclouds.io.Payloads; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class LatencyBlobStoreTest { private BlobStoreContext context; private BlobStore delegate; private String containerName; @Before public void setUp() throws Exception { containerName = createRandomContainerName(); context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); delegate = context.getBlobStore(); delegate.createContainerInLocation(null, containerName); } @After public void tearDown() throws Exception { if (context != null) { delegate.deleteContainer(containerName); context.close(); } } @Test public void testLoadProperties() throws Exception { String propertiesString = "s3proxy.latency-blobstore.*.latency=1000\n" + "s3proxy.latency-blobstore.put.speed=10"; InputStream stream = new ByteArrayInputStream(propertiesString.getBytes()); Properties properties = new Properties(); properties.load(stream); Map latencies = LatencyBlobStore.parseLatencies(properties); Map speeds = LatencyBlobStore.parseSpeeds(properties); assertThat(latencies.containsKey("*")).isTrue(); 
assertThat(latencies.get("*")).isEqualTo(1000L); assertThat(speeds.containsKey("put")).isTrue(); assertThat(speeds.get("put")).isEqualTo(10); assertThat(speeds.containsKey("*")).isFalse(); } @Test public void testAllLatency() { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", 1000L)), Map.ofEntries()); long timeTaken = time(() -> latencyBlobStore.containerExists(containerName)); assertThat(timeTaken).isGreaterThanOrEqualTo(1000L); } @Test public void testSpecificLatency() { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", 0L), Map.entry("container-exists", 1000L)), Map.ofEntries()); long timeTaken = time(() -> latencyBlobStore.containerExists(containerName)); assertThat(timeTaken).isGreaterThanOrEqualTo(1000L); } @Test public void testAllSpeed() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(), Map.ofEntries(Map.entry("*", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob)); assertThat(timeTaken).isGreaterThanOrEqualTo(1000L); } @Test public void testSpecificSpeed() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(), Map.ofEntries(Map.entry("*", 1000L), Map.entry("put", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); long timeTaken = time(() -> 
latencyBlobStore.putBlob(containerName, blob)); assertThat(timeTaken).isGreaterThanOrEqualTo(1000L); } @Test public void testInvalidLatency() { Assertions.assertThatIllegalArgumentException().isThrownBy(() -> LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", -1000L)), Map.ofEntries())); } @Test public void testInvalidSpeed() { Assertions.assertThatIllegalArgumentException().isThrownBy(() -> LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(), Map.ofEntries(Map.entry("*", 0L)))); Assertions.assertThatIllegalArgumentException().isThrownBy(() -> LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(), Map.ofEntries(Map.entry("*", -1000L)))); } @Test public void testLatencyAndSpeed() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", 1000L)), Map.ofEntries(Map.entry("put", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob)); assertThat(timeTaken).isGreaterThanOrEqualTo(2000L); } @Test public void testLatencyAndSpeedWithEmptyContent() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("put", 1000L)), Map.ofEntries(Map.entry("put", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 0); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob)); 
assertThat(timeTaken).isGreaterThanOrEqualTo(1000L); } @Test public void testMultipleOperations() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", 1000L)), Map.ofEntries(Map.entry("get", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); long timeTaken = time(() -> { latencyBlobStore.putBlob(containerName, blob); consume(latencyBlobStore.getBlob(containerName, blobName)); }); assertThat(timeTaken).isGreaterThanOrEqualTo(3000L); } @Test public void testSimultaneousOperations() throws Exception { BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate, Map.ofEntries(Map.entry("*", 1000L)), Map.ofEntries(Map.entry("get", 1L))); String blobName = createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Payload payload = Payloads.newByteSourcePayload(content); payload.getContentMetadata().setContentLength(content.size()); Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build(); latencyBlobStore.putBlob(containerName, blob); ExecutorService executorService = null; try { executorService = Executors.newFixedThreadPool(5); List> tasks = new ArrayList<>(); for (int i = 0; i < 5; i++) { tasks.add(Executors.callable(() -> consume(latencyBlobStore.getBlob(containerName, blobName)))); } final ExecutorService service = executorService; long timeTaken = time(() -> { try { service.invokeAll(tasks); } catch (Exception e) { // Ignore } }); assertThat(timeTaken).isGreaterThanOrEqualTo(2000L); } finally { if (executorService != null) { executorService.shutdown(); } } } private static String createRandomContainerName() { return "container-" + new Random().nextInt(Integer.MAX_VALUE); } 
private static String createRandomBlobName() { return "blob-" + new Random().nextInt(Integer.MAX_VALUE); } private static long time(Runnable runnable) { long startTime = System.currentTimeMillis(); runnable.run(); return System.currentTimeMillis() - startTime; } private static void consume(Blob blob) { try (InputStream stream = blob.getPayload().openStream()) { stream.readAllBytes(); } catch (IOException ioe) { // Ignore } } } ================================================ FILE: src/test/java/org/gaul/s3proxy/NoCacheBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.time.Instant;
import java.util.Date;

import org.jclouds.blobstore.options.GetOptions;
import org.junit.jupiter.api.Test;

/**
 * Tests for {@link NoCacheBlobStore#resetCacheHeaders}: range options must
 * survive the reset while conditional (cache-validation) options are
 * cleared.
 */
public final class NoCacheBlobStoreTest {
    @Test
    public void testResetCacheHeadersKeepRange() {
        // A plain byte range carries no cache semantics and is preserved.
        var options = GetOptions.Builder.range(1, 5);
        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);
        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());
    }

    @Test
    public void testResetCacheHeadersKeepTail() {
        // tail/startAt are also range forms; all of them must survive.
        var options = GetOptions.Builder.range(1, 5).tail(3).startAt(10);
        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);
        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());
    }

    @Test
    public void testResetCacheHeadersRangeDropCache() {
        // Mixed request: ranges stay, If-None-Match/If-Modified-Since go.
        var options = GetOptions.Builder
                .range(1, 5)
                .tail(3)
                .startAt(10)
                .ifETagDoesntMatch("abc")
                .ifModifiedSince(Date.from(Instant.EPOCH));
        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);
        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());
        assertThat(optionsResult.getIfNoneMatch()).isEqualTo(null);
        // Cast disambiguates the AssertJ isEqualTo overload for null Date.
        assertThat(optionsResult.getIfModifiedSince()).isEqualTo((Date) null);
    }

    @Test
    public void testResetCacheHeadersNoRange() {
        // No range at all: If-Match/If-Unmodified-Since are still cleared.
        var options = GetOptions.Builder
                .ifETagMatches("abc")
                .ifUnmodifiedSince(Date.from(Instant.EPOCH));
        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);
        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());
        assertThat(optionsResult.getIfMatch()).isEqualTo(null);
        assertThat(optionsResult.getIfUnmodifiedSince()).isEqualTo((Date) null);
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/NullBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.List; import java.util.Map; import java.util.Random; import com.google.common.io.ByteSource; import com.google.common.net.MediaType; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.io.ContentMetadata; import org.jclouds.io.Payload; import org.jclouds.io.Payloads; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class NullBlobStoreTest { private static final ByteSource BYTE_SOURCE = TestUtils.randomByteSource().slice(0, 1024); private BlobStoreContext context; private BlobStore blobStore; private String containerName; private BlobStore nullBlobStore; @Before public void setUp() throws Exception { containerName = createRandomContainerName(); context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); blobStore = context.getBlobStore(); 
blobStore.createContainerInLocation(null, containerName); nullBlobStore = NullBlobStore.newNullBlobStore(blobStore); } @After public void tearDown() throws Exception { if (context != null) { blobStore.deleteContainer(containerName); context.close(); } } @Test public void testCreateBlobGetBlob() throws Exception { String blobName = createRandomBlobName(); Blob blob = makeBlob(nullBlobStore, blobName); nullBlobStore.putBlob(containerName, blob); blob = nullBlobStore.getBlob(containerName, blobName); validateBlobMetadata(blob.getMetadata()); // content differs, only compare length try (InputStream actual = blob.getPayload().openStream(); InputStream expected = BYTE_SOURCE.openStream()) { long actualLength = actual.transferTo( OutputStream.nullOutputStream()); long expectedLength = expected.transferTo( OutputStream.nullOutputStream()); assertThat(actualLength).isEqualTo(expectedLength); } PageSet pageSet = nullBlobStore.list( containerName); assertThat(pageSet).hasSize(1); StorageMetadata sm = pageSet.iterator().next(); assertThat(sm.getName()).isEqualTo(blobName); assertThat(sm.getSize()).isEqualTo(0); } @Test public void testCreateBlobBlobMetadata() throws Exception { String blobName = createRandomBlobName(); Blob blob = makeBlob(nullBlobStore, blobName); nullBlobStore.putBlob(containerName, blob); BlobMetadata metadata = nullBlobStore.blobMetadata(containerName, blobName); validateBlobMetadata(metadata); } @Test public void testCreateMultipartBlobGetBlob() throws Exception { String blobName = "multipart-upload"; BlobMetadata blobMetadata = makeBlob(nullBlobStore, blobName) .getMetadata(); MultipartUpload mpu = nullBlobStore.initiateMultipartUpload( containerName, blobMetadata, new PutOptions()); ByteSource byteSource = TestUtils.randomByteSource().slice( 0, nullBlobStore.getMinimumMultipartPartSize() + 1); ByteSource byteSource1 = byteSource.slice( 0, nullBlobStore.getMinimumMultipartPartSize()); ByteSource byteSource2 = byteSource.slice( 
nullBlobStore.getMinimumMultipartPartSize(), 1); Payload payload1 = Payloads.newByteSourcePayload(byteSource1); Payload payload2 = Payloads.newByteSourcePayload(byteSource2); payload1.getContentMetadata().setContentLength(byteSource1.size()); payload2.getContentMetadata().setContentLength(byteSource2.size()); MultipartPart part1 = nullBlobStore.uploadMultipartPart(mpu, 1, payload1); MultipartPart part2 = nullBlobStore.uploadMultipartPart(mpu, 2, payload2); List parts = nullBlobStore.listMultipartUpload(mpu); assertThat(parts.get(0).partNumber()).isEqualTo(1); assertThat(parts.get(0).partSize()).isEqualTo(byteSource1.size()); assertThat(parts.get(0).partETag()).isEqualTo(part1.partETag()); assertThat(parts.get(1).partNumber()).isEqualTo(2); assertThat(parts.get(1).partSize()).isEqualTo(byteSource2.size()); assertThat(parts.get(1).partETag()).isEqualTo(part2.partETag()); assertThat(nullBlobStore.listMultipartUpload(mpu)).hasSize(2); nullBlobStore.completeMultipartUpload(mpu, parts); Blob newBlob = nullBlobStore.getBlob(containerName, blobName); validateBlobMetadata(newBlob.getMetadata()); // content differs, only compare length try (InputStream actual = newBlob.getPayload().openStream(); InputStream expected = byteSource.openStream()) { long actualLength = actual.transferTo( OutputStream.nullOutputStream()); long expectedLength = expected.transferTo( OutputStream.nullOutputStream()); assertThat(actualLength).isEqualTo(expectedLength); } nullBlobStore.removeBlob(containerName, blobName); assertThat(nullBlobStore.list(containerName)).isEmpty(); } private static String createRandomContainerName() { return "container-" + new Random().nextInt(Integer.MAX_VALUE); } private static String createRandomBlobName() { return "blob-" + new Random().nextInt(Integer.MAX_VALUE); } private static Blob makeBlob(BlobStore blobStore, String blobName) throws IOException { return blobStore.blobBuilder(blobName) .payload(BYTE_SOURCE) .contentDisposition("attachment; filename=foo.mp4") 
.contentEncoding("compress") .contentLength(BYTE_SOURCE.size()) .contentType(MediaType.MP4_AUDIO) .contentMD5(BYTE_SOURCE.hash(TestUtils.MD5)) .userMetadata(Map.of("key", "value")) .build(); } private static void validateBlobMetadata(BlobMetadata metadata) throws IOException { assertThat(metadata).isNotNull(); ContentMetadata contentMetadata = metadata.getContentMetadata(); assertThat(contentMetadata.getContentDisposition()) .isEqualTo("attachment; filename=foo.mp4"); assertThat(contentMetadata.getContentEncoding()) .isEqualTo("compress"); assertThat(contentMetadata.getContentType()) .isEqualTo(MediaType.MP4_AUDIO.toString()); assertThat(metadata.getUserMetadata()) .isEqualTo(Map.of("key", "value")); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/PrefixBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Properties; import com.google.common.collect.ImmutableList; import com.google.common.io.ByteSource; import org.assertj.core.api.Assertions; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.MultipartPart; import org.jclouds.blobstore.domain.MultipartUpload; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.io.Payloads; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class PrefixBlobStoreTest { private String containerName; private String prefix; private BlobStoreContext context; private BlobStore blobStore; private BlobStore prefixBlobStore; @Before public void setUp() { containerName = TestUtils.createRandomContainerName(); prefix = "forward-prefix/"; context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); blobStore = context.getBlobStore(); blobStore.createContainerInLocation(null, containerName); prefixBlobStore = PrefixBlobStore.newPrefixBlobStore( blobStore, Map.of(containerName, prefix)); } @After public void tearDown() { if (context != null) { blobStore.clearContainer(containerName); blobStore.deleteContainer(containerName); context.close(); } } @Test public void testPutAndGetBlob() throws IOException { ByteSource content = TestUtils.randomByteSource().slice(0, 256); Blob blob = prefixBlobStore.blobBuilder("object.txt") .payload(content) .build(); prefixBlobStore.putBlob(containerName, blob); 
assertThat(blobStore.blobExists(containerName, prefix + "object.txt")).isTrue(); Blob stored = prefixBlobStore.getBlob(containerName, "object.txt"); assertThat(stored).isNotNull(); assertThat(stored.getMetadata().getName()).isEqualTo("object.txt"); try (InputStream expected = content.openStream(); InputStream actual = stored.getPayload().openStream()) { assertThat(actual).hasSameContentAs(expected); } } @Test public void testListTrimsPrefix() throws IOException { ByteSource content = TestUtils.randomByteSource().slice(0, 64); prefixBlobStore.putBlob(containerName, prefixBlobStore.blobBuilder( "file-one.txt").payload(content).build()); blobStore.putBlob(containerName, blobStore.blobBuilder( prefix + "file-two.txt").payload(content).build()); blobStore.putBlob(containerName, blobStore.blobBuilder( "outside.txt").payload(content).build()); PageSet listing = prefixBlobStore.list(containerName); List names = ImmutableList.copyOf(listing).stream() .map(StorageMetadata::getName) .collect(ImmutableList.toImmutableList()); assertThat(names).containsExactlyInAnyOrder( "file-one.txt", "file-two.txt"); assertThat(listing.getNextMarker()).isNull(); } @Test public void testClearContainerKeepsOtherObjects() { ByteSource content = TestUtils.randomByteSource().slice(0, 32); prefixBlobStore.putBlob(containerName, prefixBlobStore.blobBuilder( "inside.txt").payload(content).build()); blobStore.putBlob(containerName, blobStore.blobBuilder( "outside.txt").payload(content).build()); prefixBlobStore.clearContainer(containerName); assertThat(blobStore.blobExists(containerName, prefix + "inside.txt")).isFalse(); assertThat(blobStore.blobExists(containerName, "outside.txt")).isTrue(); } @Test public void testMultipartUploadUsesPrefix() throws IOException { ByteSource content = TestUtils.randomByteSource().slice(0, 512); Blob blob = prefixBlobStore.blobBuilder("archive.bin").build(); MultipartUpload mpu = prefixBlobStore.initiateMultipartUpload( containerName, blob.getMetadata(), 
PutOptions.NONE); assertThat(mpu.containerName()).isEqualTo(containerName); assertThat(mpu.blobName()).isEqualTo("archive.bin"); MultipartPart part = prefixBlobStore.uploadMultipartPart( mpu, 1, Payloads.newPayload(content)); prefixBlobStore.completeMultipartUpload(mpu, List.of(part)); assertThat(blobStore.blobExists(containerName, prefix + "archive.bin")).isTrue(); } @Test public void testListMultipartUploadsTrimsPrefix() { Blob blob = prefixBlobStore.blobBuilder("pending.bin").build(); MultipartUpload mpu = prefixBlobStore.initiateMultipartUpload( containerName, blob.getMetadata(), PutOptions.NONE); try { List uploads = prefixBlobStore.listMultipartUploads(containerName); assertThat(uploads).hasSize(1); assertThat(uploads.get(0).blobName()).isEqualTo("pending.bin"); } finally { prefixBlobStore.abortMultipartUpload(mpu); } } @Test public void testParseRejectsEmptyPrefix() { var properties = new Properties(); properties.setProperty(String.format("%s.bucket", S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE), ""); try { PrefixBlobStore.parsePrefixes(properties); Assertions.failBecauseExceptionWasNotThrown( IllegalArgumentException.class); } catch (IllegalArgumentException exc) { assertThat(exc.getMessage()).isEqualTo( "Prefix for bucket bucket must not be empty"); } } } ================================================ FILE: src/test/java/org/gaul/s3proxy/ReadOnlyBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.util.List; import java.util.Random; import org.assertj.core.api.Fail; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.options.PutOptions; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class ReadOnlyBlobStoreTest { private BlobStoreContext context; private BlobStore blobStore; private String containerName; private BlobStore readOnlyBlobStore; @Before public void setUp() throws Exception { containerName = createRandomContainerName(); context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); blobStore = context.getBlobStore(); blobStore.createContainerInLocation(null, containerName); readOnlyBlobStore = ReadOnlyBlobStore.newReadOnlyBlobStore(blobStore); } @After public void tearDown() throws Exception { if (context != null) { blobStore.deleteContainer(containerName); context.close(); } } @Test public void testContainerExists() throws Exception { assertThat(readOnlyBlobStore.containerExists(containerName)).isTrue(); assertThat(readOnlyBlobStore.containerExists( containerName + "-fake")).isFalse(); } @Test public void testPutBlob() throws Exception { try { readOnlyBlobStore.putBlob(containerName, null); Fail.failBecauseExceptionWasNotThrown( UnsupportedOperationException.class); } catch (UnsupportedOperationException ne) { // expected } } @Test public void testPutBlobOptions() throws Exception { try { readOnlyBlobStore.putBlob(containerName, null, new PutOptions()); Fail.failBecauseExceptionWasNotThrown( UnsupportedOperationException.class); } catch 
(UnsupportedOperationException ne) { // expected } } private static String createRandomContainerName() { return "container-" + new Random().nextInt(Integer.MAX_VALUE); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/RegexBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.io.InputStream; import java.util.AbstractMap.SimpleEntry; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Random; import java.util.regex.Pattern; import com.google.common.hash.Hashing; import com.google.common.io.ByteSource; import org.assertj.core.api.Assertions; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.BlobMetadata; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class RegexBlobStoreTest { private BlobStoreContext context; private BlobStore delegate; private String containerName; @Before public void setUp() throws Exception { containerName = createRandomContainerName(); context = ContextBuilder .newBuilder("transient") .credentials("identity", 
"credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); delegate = context.getBlobStore(); delegate.createContainerInLocation(null, containerName); } @After public void tearDown() throws Exception { if (context != null) { delegate.deleteContainer(containerName); context.close(); } } @Test public void testRemoveSomeCharsFromName() throws IOException { var regexes = List.>of( new SimpleEntry( Pattern.compile("[^a-zA-Z0-9/_.]"), "_")); BlobStore regexBlobStore = RegexBlobStore.newRegexBlobStore(delegate, regexes); String initialBlobName = "test/remove:badchars-folder/blob.txt"; String targetBlobName = "test/remove_badchars_folder/blob.txt"; ByteSource content = TestUtils.randomByteSource().slice(0, 1024); @SuppressWarnings("deprecation") String contentHash = Hashing.md5().hashBytes(content.read()).toString(); Blob blob = regexBlobStore.blobBuilder(initialBlobName).payload( content).build(); String eTag = regexBlobStore.putBlob(containerName, blob); assertThat(eTag).isEqualTo(contentHash); BlobMetadata blobMetadata = regexBlobStore.blobMetadata( containerName, targetBlobName); assertThat(blobMetadata.getETag()).isEqualTo(contentHash); blob = regexBlobStore.getBlob(containerName, targetBlobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } blob = regexBlobStore.getBlob(containerName, initialBlobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } } @Test public void testParseMatchWithoutReplace() { var properties = new Properties(); properties.put( String.format("%s.%s.sample1", S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE, S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH), "test"); properties.put( String.format("%s.%s.sample2", S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE, S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH), 
"test"); properties.put( String.format("%s.%s.sample1", S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE, S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_REPLACE), "test"); try { RegexBlobStore.parseRegexs(properties); Assertions.failBecauseExceptionWasNotThrown( IllegalArgumentException.class); } catch (IllegalArgumentException exc) { assertThat(exc.getMessage()).isEqualTo( "Regex sample2 has no replace property associated"); } } private static String createRandomContainerName() { return "container-" + new Random().nextInt(Integer.MAX_VALUE); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/ShardedBlobStoreTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.gaul.s3proxy; import static org.assertj.core.api.Assertions.assertThat; import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Map; import com.google.common.io.ByteSource; import org.jclouds.ContextBuilder; import org.jclouds.blobstore.BlobStore; import org.jclouds.blobstore.BlobStoreContext; import org.jclouds.blobstore.domain.Blob; import org.jclouds.blobstore.domain.PageSet; import org.jclouds.blobstore.domain.StorageMetadata; import org.jclouds.blobstore.options.CopyOptions; import org.jclouds.logging.slf4j.config.SLF4JLoggingModule; import org.junit.After; import org.junit.Before; import org.junit.Test; public final class ShardedBlobStoreTest { private int shards; private String prefix; private String containerName; private BlobStoreContext context; private BlobStore blobStore; private BlobStore shardedBlobStore; private List createdContainers; private Map prefixesMap; @Before public void setUp() { containerName = TestUtils.createRandomContainerName(); shards = 10; prefix = TestUtils.createRandomContainerName(); context = ContextBuilder .newBuilder("transient") .credentials("identity", "credential") .modules(List.of(new SLF4JLoggingModule())) .build(BlobStoreContext.class); blobStore = context.getBlobStore(); var shardsMap = Map.of(containerName, shards); prefixesMap = Map.of(containerName, prefix); shardedBlobStore = ShardedBlobStore.newShardedBlobStore( blobStore, shardsMap, prefixesMap); createdContainers = new ArrayList<>(); } @After public void tearDown() { if (this.context != null) { for (String container : this.createdContainers) { blobStore.deleteContainer(container); } context.close(); } } private void createContainer(String container) { String prefix = this.prefixesMap.get(container); if (prefix != null) { for (int n = 0; n < this.shards; ++n) { this.createdContainers.add( String.format("%s-%d", this.prefix, n)); } } else { this.createdContainers.add(container); } 
assertThat(shardedBlobStore.createContainerInLocation( null, container)).isTrue(); } public int countShards() { PageSet listing = blobStore.list(); int blobStoreShards = 0; for (StorageMetadata entry: listing) { if (entry.getName().startsWith(prefix)) { blobStoreShards++; } } return blobStoreShards; } @Test public void testCreateContainer() { this.createContainer(containerName); assertThat(blobStore.containerExists(containerName)).isFalse(); assertThat(this.countShards()).isEqualTo(this.shards); } @Test public void testDeleteContainer() { this.createContainer(containerName); assertThat(this.countShards()).isEqualTo(this.shards); assertThat(shardedBlobStore.deleteContainerIfEmpty(containerName)) .isTrue(); assertThat(this.countShards()).isZero(); } @Test public void testPutBlob() throws Exception { String blobName = "foo"; String blobName2 = "bar"; ByteSource content = TestUtils.randomByteSource().slice(0, 1024); ByteSource content2 = TestUtils.randomByteSource().slice(1024, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); Blob blob2 = shardedBlobStore.blobBuilder(blobName2).payload(content2) .build(); createContainer(containerName); shardedBlobStore.putBlob(containerName, blob); shardedBlobStore.putBlob(containerName, blob2); blob = shardedBlobStore.getBlob(containerName, blobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } blob2 = shardedBlobStore.getBlob(containerName, blobName2); try (InputStream actual = blob2.getPayload().openStream(); InputStream expected = content2.openStream()) { assertThat(actual).hasSameContentAs(expected); } String blobContainer = null; String blob2Container = null; for (int i = 0; i < shards; i++) { String shard = String.format("%s-%d", prefix, i); for (StorageMetadata entry : blobStore.list(shard)) { if (entry.getName().equals(blobName)) { blobContainer = shard; } if 
(entry.getName().equals(blobName2)) { blob2Container = shard; } } } assertThat(blobContainer).isNotNull(); assertThat(blob2Container).isNotNull(); assertThat(blobContainer).isNotEqualTo(blob2Container); } @Test public void testDeleteBlob() { String blobName = TestUtils.createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); this.createContainer(containerName); shardedBlobStore.putBlob(containerName, blob); assertThat(shardedBlobStore.blobExists(containerName, blobName)) .isTrue(); shardedBlobStore.removeBlob(containerName, blobName); assertThat(shardedBlobStore.blobExists(containerName, blobName)) .isFalse(); } @Test public void testPutBlobUnsharded() throws Exception { String unshardedContainer = TestUtils.createRandomContainerName(); String blobName = TestUtils.createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); this.createContainer(unshardedContainer); shardedBlobStore.putBlob(unshardedContainer, blob); blob = blobStore.getBlob(unshardedContainer, blobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } } @Test public void testCopyBlob() throws Exception { String blobName = TestUtils.createRandomBlobName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); this.createContainer(containerName); shardedBlobStore.putBlob(containerName, blob); String copyBlobName = TestUtils.createRandomBlobName(); shardedBlobStore.copyBlob( containerName, blobName, containerName, copyBlobName, CopyOptions.NONE); blob = shardedBlobStore.getBlob(containerName, copyBlobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = 
content.openStream()) { assertThat(actual).hasSameContentAs(expected); } } @Test public void testCopyBlobUnshardedToSharded() throws Exception { String blobName = TestUtils.createRandomBlobName(); String unshardedContainer = TestUtils.createRandomContainerName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); this.createContainer(containerName); this.createContainer(unshardedContainer); shardedBlobStore.putBlob(unshardedContainer, blob); shardedBlobStore.copyBlob( unshardedContainer, blobName, containerName, blobName, CopyOptions.NONE); blob = shardedBlobStore.getBlob(containerName, blobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } } @Test public void testCopyBlobShardedToUnsharded() throws Exception { String blobName = TestUtils.createRandomBlobName(); String unshardedContainer = TestUtils.createRandomContainerName(); ByteSource content = TestUtils.randomByteSource().slice(0, 1024); Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content) .build(); this.createContainer(containerName); this.createContainer(unshardedContainer); shardedBlobStore.putBlob(containerName, blob); shardedBlobStore.copyBlob( containerName, blobName, unshardedContainer, blobName, CopyOptions.NONE); blob = shardedBlobStore.getBlob(unshardedContainer, blobName); try (InputStream actual = blob.getPayload().openStream(); InputStream expected = content.openStream()) { assertThat(actual).hasSameContentAs(expected); } } } ================================================ FILE: src/test/java/org/gaul/s3proxy/TestUtils.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.util.List;
import java.util.Properties;
import java.util.Random;

import com.google.common.base.Strings;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.google.common.io.ByteSource;
import com.google.common.io.MoreFiles;
import com.google.common.io.Resources;

import org.eclipse.jetty.util.component.AbstractLifeCycle;
import org.jclouds.Constants;
import org.jclouds.ContextBuilder;
import org.jclouds.JcloudsVersion;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.context.BlobStoreContext;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;

/**
 * Shared helpers for the test suite: deterministic pseudo-random content,
 * random container/blob names, and a one-call S3Proxy launcher that reads a
 * classpath properties file and wires up the backing BlobStore.
 */
final class TestUtils {
    // MD5 is deprecated in Guava but required because S3 ETags are MD5 hashes.
    @SuppressWarnings("deprecation")
    static final HashFunction MD5 = Hashing.md5();

    private TestUtils() {
        throw new AssertionError("intentionally unimplemented");
    }

    /** Returns a repeatable random ByteSource seeded with 0. */
    static ByteSource randomByteSource() {
        return randomByteSource(0);
    }

    /** Returns a repeatable random ByteSource for the given seed. */
    static ByteSource randomByteSource(long seed) {
        return new RandomByteSource(seed);
    }

    /** ByteSource whose streams replay the same seeded random bytes. */
    private static final class RandomByteSource extends ByteSource {
        private final long seed;

        RandomByteSource(long seed) {
            this.seed = seed;
        }

        @Override
        public InputStream openStream() {
            return new RandomInputStream(seed);
        }
    }

    /**
     * Infinite stream of seeded pseudo-random bytes; never signals EOF.
     * Callers bound reads via ByteSource.slice.
     */
    private static final class RandomInputStream extends InputStream {
        private final Random random;
        private boolean closed;

        RandomInputStream(long seed) {
            this.random = new Random(seed);
        }

        @Override
        public synchronized int read() throws IOException {
            if (closed) {
                throw new IOException("Stream already closed");
            }
            // return value between 0 and 255
            return random.nextInt() & 0xff;
        }

        @Override
        public synchronized int read(byte[] b) throws IOException {
            return read(b, 0, b.length);
        }

        @Override
        public synchronized int read(byte[] b, int off, int len)
                throws IOException {
            // always fills len bytes -- the stream never runs out
            for (int i = 0; i < len; ++i) {
                b[off + i] = (byte) read();
            }
            return len;
        }

        @Override
        public void close() throws IOException {
            super.close();
            closed = true;
        }
    }

    /** Bundle of everything a test needs from a launched S3Proxy instance. */
    static final class S3ProxyLaunchInfo {
        private S3Proxy s3Proxy;
        private final Properties properties = new Properties();
        private String s3Identity;
        private String s3Credential;
        private BlobStore blobStore;
        private URI endpoint;
        private URI secureEndpoint;
        private String servicePath;

        S3Proxy getS3Proxy() {
            return s3Proxy;
        }

        Properties getProperties() {
            return properties;
        }

        String getS3Identity() {
            return s3Identity;
        }

        String getS3Credential() {
            return s3Credential;
        }

        String getServicePath() {
            return servicePath;
        }

        BlobStore getBlobStore() {
            return blobStore;
        }

        URI getEndpoint() {
            return endpoint;
        }

        URI getSecureEndpoint() {
            return secureEndpoint;
        }
    }

    /**
     * Launches S3Proxy configured from the named classpath properties file.
     * Builds the backing jclouds BlobStore, optionally wraps it with
    * encryption, starts the proxy, waits for Jetty to report STARTED, and
     * rewrites the endpoints with the actual (possibly ephemeral) ports.
     */
    static S3ProxyLaunchInfo startS3Proxy(String configFile) throws Exception {
        var info = new S3ProxyLaunchInfo();

        try (InputStream is = Resources.asByteSource(Resources.getResource(
                configFile)).openStream()) {
            info.getProperties().load(is);
        }

        String provider = info.getProperties().getProperty(
                Constants.PROPERTY_PROVIDER);
        String identity = info.getProperties().getProperty(
                Constants.PROPERTY_IDENTITY);
        String credential = info.getProperties().getProperty(
                Constants.PROPERTY_CREDENTIAL);
        if (provider.equals("google-cloud-storage") ||
                provider.equals("google-cloud-storage-sdk")) {
            // GCS credential may be a path to a key file; inline its contents
            if (credential != null && !credential.isEmpty()) {
                var path = FileSystems.getDefault().getPath(credential);
                if (Files.exists(path)) {
                    credential = MoreFiles.asCharSource(path,
                            StandardCharsets.UTF_8).read();
                }
            }
            identity = Strings.nullToEmpty(identity);
            credential = Strings.nullToEmpty(credential);
            info.getProperties().remove(Constants.PROPERTY_CREDENTIAL);
        }
        String endpoint = info.getProperties().getProperty(
                Constants.PROPERTY_ENDPOINT);
        ContextBuilder builder = ContextBuilder
                .newBuilder(provider)
                .credentials(identity, credential)
                .modules(List.of(new SLF4JLoggingModule()))
                .overrides(info.getProperties());
        if (!Strings.isNullOrEmpty(endpoint)) {
            builder.endpoint(endpoint);
        }
        BlobStoreContext context = builder.build(BlobStoreContext.class);
        info.blobStore = context.getBlobStore();
        String encrypted = info.getProperties().getProperty(
                S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE);
        if (encrypted != null && encrypted.equals("true")) {
            info.blobStore = EncryptedBlobStore.newEncryptedBlobStore(
                    info.blobStore, info.getProperties());
        }

        S3Proxy.Builder s3ProxyBuilder = S3Proxy.Builder.fromProperties(
                info.getProperties());
        s3ProxyBuilder.blobStore(info.blobStore);
        info.endpoint = s3ProxyBuilder.getEndpoint();
        info.secureEndpoint = s3ProxyBuilder.getSecureEndpoint();
        info.s3Identity = s3ProxyBuilder.getIdentity();
        info.s3Credential = s3ProxyBuilder.getCredential();
        info.servicePath = s3ProxyBuilder.getServicePath();
        info.getProperties().setProperty(Constants.PROPERTY_USER_AGENT,
                String.format("s3proxy/%s jclouds/%s java/%s",
                        TestUtils.class.getPackage()
                                .getImplementationVersion(),
                        JcloudsVersion.get(),
                        System.getProperty("java.version")));

        // resolve relative path for tests
        String keyStorePath = info.getProperties().getProperty(
                S3ProxyConstants.PROPERTY_KEYSTORE_PATH);
        String keyStorePassword = info.getProperties().getProperty(
                S3ProxyConstants.PROPERTY_KEYSTORE_PASSWORD);
        if (keyStorePath != null || keyStorePassword != null) {
            s3ProxyBuilder.keyStore(
                    Resources.getResource(keyStorePath).toString(),
                    keyStorePassword);
        }

        info.s3Proxy = s3ProxyBuilder.build();
        info.s3Proxy.start();
        // busy-wait until Jetty reports STARTED
        while (!info.s3Proxy.getState().equals(AbstractLifeCycle.STARTED)) {
            Thread.sleep(1);
        }

        // reset endpoint to handle zero port
        info.endpoint = new URI(info.endpoint.getScheme(),
                info.endpoint.getUserInfo(), info.endpoint.getHost(),
                info.s3Proxy.getPort(), info.endpoint.getPath(),
                info.endpoint.getQuery(), info.endpoint.getFragment());
        if (info.secureEndpoint != null) {
            info.secureEndpoint = new URI(info.secureEndpoint.getScheme(),
                    info.secureEndpoint.getUserInfo(),
                    info.secureEndpoint.getHost(),
                    info.s3Proxy.getSecurePort(),
                    info.secureEndpoint.getPath(),
                    info.secureEndpoint.getQuery(),
                    info.secureEndpoint.getFragment());
        }

        return info;
    }

    static String createRandomContainerName() {
        return "container-" + new Random().nextInt(Integer.MAX_VALUE);
    }

    static String createRandomBlobName() {
        return "blob-" + new Random().nextInt(Integer.MAX_VALUE);
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/TierBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Tier;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.io.Payloads;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.jclouds.s3.domain.ObjectMetadata.StorageClass;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests for StorageClassBlobStore configured with DEEP_ARCHIVE: writes made
 * through the wrapper should land in the ARCHIVE tier while blobs written
 * directly to the delegate keep the STANDARD tier.
 */
@SuppressWarnings("UnstableApiUsage")
public final class TierBlobStoreTest {
    private BlobStoreContext context;
    private BlobStore blobStore;
    private String containerName;
    private BlobStore tierBlobStore;

    @Before
    public void setUp() throws Exception {
        containerName = TestUtils.createRandomContainerName();
        //noinspection UnstableApiUsage
        context = ContextBuilder
                .newBuilder("transient")
                .credentials("identity", "credential")
                .modules(List.of(new SLF4JLoggingModule()))
                .build(BlobStoreContext.class);
        blobStore = context.getBlobStore();
        blobStore.createContainerInLocation(null, containerName);
        // wrapper forces the DEEP_ARCHIVE S3 storage class on writes
        tierBlobStore = StorageClassBlobStore.newStorageClassBlobStore(
                blobStore, StorageClass.DEEP_ARCHIVE.toString());
    }

    @After
    public void tearDown() throws Exception {
        if (context != null) {
            blobStore.deleteContainer(containerName);
            context.close();
        }
    }

    /** A blob put through the wrapper reports the ARCHIVE tier. */
    @Test
    public void testPutNewBlob() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = tierBlobStore.blobBuilder(blobName).payload(content)
                .build();
        tierBlobStore.putBlob(containerName, blob);

        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);
        assertThat(blobMetadata.getTier()).isEqualTo(Tier.ARCHIVE);
    }

    /** A blob put directly into the delegate keeps the STANDARD tier. */
    @Test
    public void testGetExistingBlob() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = blobStore.blobBuilder(blobName).payload(content).build();
        blobStore.putBlob(containerName, blob);

        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);
        assertThat(blobMetadata.getTier()).isEqualTo(Tier.STANDARD);
    }

    /** A multipart upload through the wrapper also lands in ARCHIVE. */
    @Test
    public void testPutNewMpu() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = tierBlobStore.blobBuilder(blobName).payload(content)
                .build();
        var mpu = tierBlobStore.initiateMultipartUpload(
                containerName, blob.getMetadata(), new PutOptions());
        var payload = Payloads.newByteSourcePayload(content);
        tierBlobStore.uploadMultipartPart(mpu, 1, payload);
        var parts = tierBlobStore.listMultipartUpload(mpu);
        tierBlobStore.completeMultipartUpload(mpu, parts);

        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);
        assertThat(blobMetadata.getTier()).isEqualTo(Tier.ARCHIVE);
    }

    /** A multipart upload made directly to the delegate stays STANDARD. */
    @Test
    public void testGetExistingMpu() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = blobStore.blobBuilder(blobName).payload(content).build();
        var mpu = blobStore.initiateMultipartUpload(
                containerName, blob.getMetadata(), new PutOptions());
        var payload = Payloads.newByteSourcePayload(content);
        blobStore.uploadMultipartPart(mpu, 1, payload);
        var parts = blobStore.listMultipartUpload(mpu);
        blobStore.completeMultipartUpload(mpu, parts);

        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);
        assertThat(blobMetadata.getTier()).isEqualTo(Tier.STANDARD);
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/UserMetadataReplacerBlobStoreTest.java
================================================
/*
 * Copyright 2014-2026 Andrew Gaul
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;
import java.util.Map;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.options.PutOptions;
import org.jclouds.logging.slf4j.config.SLF4JLoggingModule;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests for UserMetadataReplacerBlobStore configured to replace "-" with "_":
 * metadata keys and values are rewritten on write to the delegate and mapped
 * back to the original characters on read through the wrapper.
 */
@SuppressWarnings("UnstableApiUsage")
public final class UserMetadataReplacerBlobStoreTest {
    private BlobStoreContext context;
    private BlobStore blobStore;
    private String containerName;
    // TODO: better name?
    private BlobStore userMetadataReplacerBlobStore;

    @Before
    public void setUp() throws Exception {
        containerName = TestUtils.createRandomContainerName();
        //noinspection UnstableApiUsage
        context = ContextBuilder
                .newBuilder("transient")
                .credentials("identity", "credential")
                .modules(List.of(new SLF4JLoggingModule()))
                .build(BlobStoreContext.class);
        blobStore = context.getBlobStore();
        blobStore.createContainerInLocation(null, containerName);
        // replaces "-" with "_" in user metadata keys and values
        userMetadataReplacerBlobStore = UserMetadataReplacerBlobStore
                .newUserMetadataReplacerBlobStore(blobStore, "-", "_");
    }

    @After
    public void tearDown() throws Exception {
        if (context != null) {
            blobStore.deleteContainer(containerName);
            context.close();
        }
    }

    /** putBlob rewrites metadata in the delegate; reads map it back. */
    @Test
    public void testPutNewBlob() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = userMetadataReplacerBlobStore.blobBuilder(blobName)
                .payload(content)
                .userMetadata(Map.of("my-key", "my-value-"))
                .build();
        userMetadataReplacerBlobStore.putBlob(containerName, blob);

        // check underlying blobStore
        var mutableBlobMetadata = blobStore.getBlob(containerName, blobName)
                .getMetadata();
        var userMetadata = mutableBlobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        var entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my_key");
        assertThat(entry.getValue()).isEqualTo("my_value_");

        // check getBlob
        mutableBlobMetadata = userMetadataReplacerBlobStore.getBlob(
                containerName, blobName).getMetadata();
        userMetadata = mutableBlobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my-key");
        assertThat(entry.getValue()).isEqualTo("my-value-");

        // check blobMetadata
        var blobMetadata = userMetadataReplacerBlobStore.blobMetadata(
                containerName, blobName);
        userMetadata = blobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my-key");
        assertThat(entry.getValue()).isEqualTo("my-value-");
    }

    /** Same round-trip checks for metadata set on a multipart upload. */
    @Test
    public void testPutNewMultipartBlob() {
        var blobName = TestUtils.createRandomBlobName();
        var content = TestUtils.randomByteSource().slice(0, 1024);
        var blob = userMetadataReplacerBlobStore.blobBuilder(blobName)
                .payload(content)
                .userMetadata(Map.of("my-key", "my-value-"))
                .build();
        var mpu = userMetadataReplacerBlobStore.initiateMultipartUpload(
                containerName, blob.getMetadata(), new PutOptions());
        var part = userMetadataReplacerBlobStore.uploadMultipartPart(
                mpu, 1, blob.getPayload());
        userMetadataReplacerBlobStore.completeMultipartUpload(
                mpu, List.of(part));

        // check underlying blobStore
        var mutableBlobMetadata = blobStore.getBlob(containerName, blobName)
                .getMetadata();
        var userMetadata = mutableBlobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        var entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my_key");
        assertThat(entry.getValue()).isEqualTo("my_value_");

        // check getBlob
        mutableBlobMetadata = userMetadataReplacerBlobStore.getBlob(
                containerName, blobName).getMetadata();
        userMetadata = mutableBlobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my-key");
        assertThat(entry.getValue()).isEqualTo("my-value-");

        // check blobMetadata
        var blobMetadata = userMetadataReplacerBlobStore.blobMetadata(
                containerName, blobName);
        userMetadata = blobMetadata.getUserMetadata();
        assertThat(userMetadata).hasSize(1);
        entry = userMetadata.entrySet().iterator().next();
        assertThat(entry.getKey()).isEqualTo("my-key");
        assertThat(entry.getValue()).isEqualTo("my-value-");
    }
}


================================================
FILE: src/test/java/org/gaul/s3proxy/junit/S3ProxyExtensionTest.java
================================================
/*
 * Copyright 2014-2026 Andrew
Gaul * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gaul.s3proxy.junit; import static org.assertj.core.api.Assertions.assertThat; import java.util.List; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.regions.Regions; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.model.Bucket; import com.amazonaws.services.s3.model.ListBucketsPaginatedRequest; import com.amazonaws.services.s3.model.S3ObjectSummary; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.RegisterExtension; /** * This is an example of how one would use the S3Proxy JUnit extension in a unit * test as opposed to a proper test of the S3ProxyExtension class. 
*/ public class S3ProxyExtensionTest { @RegisterExtension static final S3ProxyExtension EXTENSION = S3ProxyExtension .builder() .withCredentials("access", "secret") .build(); private static final String MY_TEST_BUCKET = "my-test-bucket"; private AmazonS3 s3Client; @BeforeEach public final void setUp() throws Exception { s3Client = AmazonS3ClientBuilder .standard() .withCredentials( new AWSStaticCredentialsProvider( new BasicAWSCredentials( EXTENSION.getAccessKey(), EXTENSION.getSecretKey()))) .withEndpointConfiguration( new AwsClientBuilder.EndpointConfiguration( EXTENSION.getUri().toString(), Regions.US_EAST_1.getName())) .build(); s3Client.createBucket(MY_TEST_BUCKET); } @Test public final void listBucket() { List buckets = s3Client.listBuckets(new ListBucketsPaginatedRequest()).getBuckets(); assertThat(buckets).hasSize(1); assertThat(buckets.get(0).getName()) .isEqualTo(MY_TEST_BUCKET); } @Test public final void uploadFile() throws Exception { String testInput = "content"; s3Client.putObject(MY_TEST_BUCKET, "file.txt", testInput); List summaries = s3Client .listObjects(MY_TEST_BUCKET) .getObjectSummaries(); assertThat(summaries).hasSize(1); assertThat(summaries.get(0).getKey()).isEqualTo("file.txt"); assertThat(summaries.get(0).getSize()).isEqualTo(testInput.length()); } @Test public final void doesBucketExistV2() { assertThat(s3Client.doesBucketExistV2(MY_TEST_BUCKET)).isTrue(); // Issue #299 assertThat(s3Client.doesBucketExistV2("nonexistingbucket")).isFalse(); } @Test public final void createExtensionWithoutCredentials() { S3ProxyExtension extension = S3ProxyExtension .builder() .build(); assertThat(extension.getAccessKey()).isNull(); assertThat(extension.getSecretKey()).isNull(); assertThat(extension.getUri()).isNull(); } } ================================================ FILE: src/test/java/org/gaul/s3proxy/junit/S3ProxyRuleTest.java ================================================ /* * Copyright 2014-2026 Andrew Gaul * * Licensed under the Apache License, 
Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.gaul.s3proxy.junit;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.Bucket;
import com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;
import com.amazonaws.services.s3.model.S3ObjectSummary;

import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

/**
 * This is an example of how one would use the S3Proxy JUnit rule in a unit
 * test as opposed to a proper test of the S3ProxyRule class.
 */
public class S3ProxyRuleTest {
    private static final String MY_TEST_BUCKET = "my-test-bucket";

    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    @Rule
    public S3ProxyRule s3Proxy = S3ProxyRule
            .builder()
            .withCredentials("access", "secret")
            .build();

    private AmazonS3 s3Client;

    @Before
    public final void setUp() throws Exception {
        s3Client = AmazonS3ClientBuilder
                .standard()
                .withCredentials(
                        new AWSStaticCredentialsProvider(
                                new BasicAWSCredentials(
                                        s3Proxy.getAccessKey(),
                                        s3Proxy.getSecretKey())))
                .withEndpointConfiguration(
                        new EndpointConfiguration(
                                s3Proxy.getUri().toString(),
                                Regions.US_EAST_1.getName()))
                .build();
        s3Client.createBucket(MY_TEST_BUCKET);
    }

    @Test
    public final void listBucket() {
        // Restored the element type (List<Bucket>) stripped in transit.
        List<Bucket> buckets = s3Client
                .listBuckets(new ListBucketsPaginatedRequest()).getBuckets();
        assertThat(buckets).hasSize(1);
        assertThat(buckets.get(0).getName())
                .isEqualTo(MY_TEST_BUCKET);
    }

    @Test
    public final void uploadFile() throws Exception {
        String testInput = "content";
        s3Client.putObject(MY_TEST_BUCKET, "file.txt", testInput);

        // Restored the element type (List<S3ObjectSummary>).
        List<S3ObjectSummary> summaries = s3Client
                .listObjects(MY_TEST_BUCKET)
                .getObjectSummaries();
        assertThat(summaries).hasSize(1);
        assertThat(summaries.get(0).getKey()).isEqualTo("file.txt");
        assertThat(summaries.get(0).getSize()).isEqualTo(testInput.length());
    }

    @Test
    public final void doesBucketExistV2() {
        assertThat(s3Client.doesBucketExistV2(MY_TEST_BUCKET)).isTrue();

        // Issue #299
        assertThat(s3Client.doesBucketExistV2("nonexistingbucket")).isFalse();
    }

    @Test
    public final void createExtensionWithoutCredentials() {
        // renamed local "extension" -> "rule": this class exercises the
        // JUnit 4 rule, not the JUnit 5 extension
        S3ProxyRule rule = S3ProxyRule
                .builder()
                .build();
        assertThat(rule.getAccessKey()).isNull();
        assertThat(rule.getSecretKey()).isNull();
        assertThat(rule.getUri()).isNull();
    }
}


================================================
FILE: src/test/resources/logback.xml
================================================
[s3proxy] %.-1p %d{MM-dd HH:mm:ss.SSS} %t %c{30}:%L %X{clientId}|%X{sessionId}:%X{messageId}:%X{fileId}]
%m%n ${LOG_LEVEL:-info}


================================================
FILE: src/test/resources/run-s3-tests.sh
================================================
#!/bin/bash
# Runs the Ceph s3-tests suite against a locally launched S3Proxy instance.

set -o errexit
set -o nounset

# Optional first argument selects a config; remaining args pass through to pytest via tox.
# Example single test: ./src/test/resources/run-s3-tests.sh s3proxy-localstack.conf \
#     s3tests_boto3/functional/test_s3.py::test_bucket_list_delimiter_prefix
S3PROXY_CONF="s3proxy.conf"
if (($# > 0)) && [[ "$1" == *.conf ]]; then
    S3PROXY_CONF="$1"
    shift
fi
# allow a bare "--" separator before pytest arguments
if (($# > 0)) && [[ "$1" == -- ]]; then
    shift
fi

S3PROXY_BIN="${PWD}/target/s3proxy"
S3PROXY_PORT="${S3PROXY_PORT:-8081}"
export S3TEST_CONF="${PWD}/src/test/resources/s3-tests.conf"
TOX_TEST_ARGS=("$@")

# launch S3Proxy using HTTP and a fixed port
# (rewrite the endpoint in the selected config and drop any secure endpoint)
sed "s,^\(s3proxy.endpoint\)=.*,\1=http://127.0.0.1:${S3PROXY_PORT}," \
    < "src/test/resources/$S3PROXY_CONF" | grep -v secure-endpoint > target/s3proxy.conf
java -DLOG_LEVEL=${LOG_LEVEL:-info} -jar $S3PROXY_BIN --properties target/s3proxy.conf &
S3PROXY_PID=$!

# ensure the background proxy is killed when the script exits
function finish {
    kill $S3PROXY_PID
}
trap finish EXIT

# wait for S3Proxy to start
# (bash /dev/tcp probe; loops up to 30 seconds)
for i in $(seq 30); do
    if exec 3<>"/dev/tcp/localhost/${S3PROXY_PORT}"; then
        exec 3<&-  # Close for read
        exec 3>&-  # Close for write
        break
    fi
    sleep 1
done

# pytest -m expression excluding features S3Proxy does not implement
tags='not fails_on_s3proxy'\
' and not appendobject'\
' and not bucket_policy'\
' and not checksum'\
' and not copy'\
' and not cors'\
' and not encryption'\
' and not fails_strict_rfc2616'\
' and not iam_tenant'\
' and not lifecycle'\
' and not object_lock'\
' and not policy'\
' and not policy_status'\
' and not s3select'\
' and not s3website'\
' and not sse_s3'\
' and not tagging'\
' and not test_of_sts'\
' and not user_policy'\
' and not versioning'\
' and not webidentity_test'

# add per-backend exclusions based on the selected config
if [ "${S3PROXY_CONF}" = "s3proxy-azurite.conf" ]; then
    tags="${tags} and not fails_on_s3proxy_azureblob"
elif [ "${S3PROXY_CONF}" = "s3proxy-fake-gcs-server.conf" ]; then
    tags="${tags} and not fails_on_s3proxy_gcs"
elif [ "${S3PROXY_CONF}" = "s3proxy-minio.conf" ]; then
    tags="${tags} and not fails_on_s3proxy_minio"
elif [[ "${S3PROXY_CONF}" == s3proxy-localstack*.conf ]]; then
    tags="${tags} and not fails_on_s3proxy_localstack and not fails_on_s3proxy_minio and not fails_on_aws"
elif [ "${S3PROXY_CONF}" = "s3proxy-transient-nio2.conf" ]; then
    tags="${tags} and not fails_on_s3proxy_nio2"
fi

# execute s3-tests
pushd s3-tests
if [ ${#TOX_TEST_ARGS[@]} -eq 0 ]; then
    tox -- -m "${tags}"
else
    tox -- -m "${tags}" "${TOX_TEST_ARGS[@]}"
fi


================================================
FILE: src/test/resources/s3-tests.conf
================================================
[DEFAULT]
## this section is just used as default for all the "s3 *"
## sections, you can place these variables also directly there

## replace with e.g.
"localhost" to run against local software host = 127.0.0.1 ## uncomment the port to use something other than 80 port = 8081 ## say "no" to disable TLS is_secure = no [fixtures] ## all the buckets created will start with this prefix; ## {random} will be filled with random characters to pad ## the prefix to 30 characters long, and avoid collisions bucket prefix = s3proxy-{random}- [s3 main] ## the tests assume two accounts are defined, "main" and "alt". ## user_id is a 64-character hexstring user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef email = tester@ceph.com ## display name typically looks more like a unix login, "jdoe" etc display_name = CustomersName@amazon.com ## replace these with your access keys access_key = local-identity secret_key = local-credential [s3 alt] ## another user account, used for ACL-related tests user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 display_name = john.doe ## the "alt" user needs to have email set, too email = john.doe@example.com access_key = local-identity secret_key = local-credential [s3 tenant] user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef display_name = testx$tenanteduser email = tenanteduser@example.com access_key = local-identity secret_key = local-credential tenant = testx [iam] #used for iam operations in sts-tests #email from vstart.sh email = s3@example.com #user_id from vstart.sh user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef #access_key from vstart.sh access_key = ABCDEFGHIJKLMNOPQRST #secret_key vstart.sh secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn #display_name from vstart.sh display_name = youruseridhere # iam account root user for iam_account tests [iam root] access_key = AAAAAAAAAAAAAAAAAAaa secret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa user_id = RGW11111111111111111 email = account1@ceph.com # iam account root user in a different account than [iam root] [iam alt root] access_key = 
BBBBBBBBBBBBBBBBBBbb secret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb user_id = RGW22222222222222222 email = account2@ceph.com [webidentity] # TODO: obvious garbage #used for assume role with web identity test in sts-tests #all parameters will be obtained from ceph/qa/tasks/keycloak.py token= aud= sub= azp= user_token=] thumbprint= KC_REALM= ================================================ FILE: src/test/resources/s3proxy-anonymous.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=none s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore ================================================ FILE: src/test/resources/s3proxy-azurite.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=azureblob-sdk jclouds.endpoint=http://127.0.0.1:10000/devstoreaccount1 jclouds.identity=devstoreaccount1 jclouds.credential=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== ================================================ FILE: src/test/resources/s3proxy-cors-allow-all.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none 
s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password s3proxy.cors-allow-all=true jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore ================================================ FILE: src/test/resources/s3proxy-cors.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password s3proxy.cors-allow-origins=https://example\.com https://.+\.example\.com https://example\.cloud s3proxy.cors-allow-methods=GET PUT s3proxy.cors-allow-headers=Accept Content-Type s3proxy.cors-exposed-headers=ETag jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore ================================================ FILE: src/test/resources/s3proxy-encryption.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 
jclouds.filesystem.basedir=/tmp/blobstore s3proxy.encrypted-blobstore=true s3proxy.encrypted-blobstore-password=1234567890123456 s3proxy.encrypted-blobstore-salt=12345678 ================================================ FILE: src/test/resources/s3proxy-fake-gcs-server.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=google-cloud-storage-sdk jclouds.endpoint=http://localhost:4443 jclouds.identity=identity jclouds.credential= ================================================ FILE: src/test/resources/s3proxy-filesystem-nio2.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=filesystem-nio2 jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore ================================================ FILE: src/test/resources/s3proxy-localstack-aws-s3-sdk.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks 
s3proxy.keystore-password=password jclouds.provider=aws-s3-sdk jclouds.identity=remote-identity jclouds.credential=remote-credential jclouds.endpoint=http://127.0.0.1:4566 # Region may be needed by the backend to locate the bucket (default: us-east-1) aws-s3-sdk.region=us-east-1 # Conditional writes mode: "native" (default) or "emulated" aws-s3-sdk.conditional-writes=native ================================================ FILE: src/test/resources/s3proxy-localstack-s3.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=s3 jclouds.identity=remote-identity jclouds.credential=remote-credential jclouds.endpoint=http://127.0.0.1:4566 ================================================ FILE: src/test/resources/s3proxy-transient-nio2.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=transient-nio2 jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore ================================================ FILE: src/test/resources/s3proxy.conf ================================================ s3proxy.endpoint=http://127.0.0.1:0 s3proxy.secure-endpoint=https://127.0.0.1:0 #s3proxy.service-path=s3proxy # authorization must be 
aws-v2, aws-v4, aws-v2-or-v4, or none s3proxy.authorization=aws-v2-or-v4 s3proxy.identity=local-identity s3proxy.credential=local-credential s3proxy.keystore-path=keystore.jks s3proxy.keystore-password=password jclouds.provider=transient jclouds.identity=remote-identity jclouds.credential=remote-credential # endpoint is optional for some providers #jclouds.endpoint=http://127.0.0.1:8081 jclouds.filesystem.basedir=/tmp/blobstore