[
  {
    "path": ".dockerignore",
    "content": "# Exclude everything from context that is not used by COPY steps in the Dockerfile\n*\n!/target/s3proxy\n!/src/main/resources/run-docker-container.sh\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "# To get started with Dependabot version updates, you'll need to specify which\n# package ecosystems to update and where the package manifests are located.\n# Please see the documentation for all configuration options:\n# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates\n\nversion: 2\nupdates:\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      interval: \"monthly\"\n  - package-ecosystem: \"maven\"\n    directory: \"/\" # Location of package manifests\n    schedule:\n      interval: \"monthly\"\n    open-pull-requests-limit: 20\n"
  },
  {
    "path": ".github/workflows/ci-main.yml",
    "content": "name: Main CI\n\non:\n  push:\n    branches:\n      - \"master\"\n    tags:\n      - \"*\"\n  pull_request:\n    branches:\n      - \"*\"\n\npermissions:\n  contents: read\n\nenv:\n  dockerhub_publish: ${{ secrets.DOCKER_PASS != '' }}\n\njobs:\n\n  meta:\n    runs-on: ubuntu-24.04-arm\n    outputs:\n      container_tags: ${{ steps.docker_action_meta.outputs.tags }}\n      container_labels: ${{ steps.docker_action_meta.outputs.labels }}\n      container_buildtime: ${{ fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.created'] }}\n      container_version: ${{ fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.version'] }}\n      container_revision: ${{ fromJSON(steps.docker_action_meta.outputs.json).labels['org.opencontainers.image.revision'] }}\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v6\n        with:\n          submodules: false\n          persist-credentials: false\n      - name: Docker meta\n        id: docker_action_meta\n        uses: docker/metadata-action@v6.0.0\n        with:\n          images: |\n            name=ghcr.io/${{ github.repository }}/container\n            name=andrewgaul/s3proxy,enable=${{ env.dockerhub_publish }}\n          flavor: |\n            latest=auto\n          tags: |\n            type=sha,format=long\n            type=sha\n            type=match,pattern=s3proxy-(.*),group=1\n            type=ref,event=branch\n            type=ref,event=pr\n            type=ref,event=tag\n          labels: |\n            org.opencontainers.image.licenses=Apache-2.0\n  runTests:\n    runs-on: ubuntu-24.04-arm\n    needs: [meta]\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          submodules: \"recursive\"\n\n      - uses: actions/setup-java@v5\n        with:\n          distribution: \"temurin\"\n          java-version: \"17\"\n          cache: \"maven\"\n      - uses: actions/setup-python@v6\n        with:\n          
python-version: \"3.11\"\n          cache: \"pip\"\n\n      # Run tests\n      - name: Maven Set version\n        run: |\n          mvn versions:set -DnewVersion=${{ needs.meta.outputs.container_version }}\n      - name: Maven Package\n        run: |\n          mvn verify -DskipTests\n      - name: Maven Test\n        run: |\n          mvn test\n\n      - name: Maven Test with transient-nio2\n        run: |\n          # TODO: run other test classes\n          mvn test -Ds3proxy.test.conf=s3proxy-transient-nio2.conf -Dtest=AwsSdkTest\n\n      - name: Maven Test with filesystem-nio2\n        run: |\n          # TODO: run other test classes\n          mkdir /tmp/blobstore\n          mvn test -Ds3proxy.test.conf=s3proxy-filesystem-nio2.conf -Dtest=AwsSdkTest\n\n      - name: Install s3-tests\n        run: |\n          python -m pip install --upgrade pip\n          pip install tox tox-gh-actions\n      - name: Run s3-tests\n        run: |\n          ./src/test/resources/run-s3-tests.sh\n      - name: Run s3-tests with transient-nio2\n        run: |\n          ./src/test/resources/run-s3-tests.sh s3proxy-transient-nio2.conf\n\n      # Store the target\n      - uses: actions/upload-artifact@v7\n        with:\n          name: s3proxy\n          path: target/s3proxy\n      - uses: actions/upload-artifact@v7\n        with:\n          name: pom\n          path: pom.xml\n\n  azuriteTests:\n    runs-on: ubuntu-24.04-arm\n    needs: [meta]\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          submodules: \"recursive\"\n      - uses: actions/setup-java@v5\n        with:\n          distribution: \"temurin\"\n          java-version: \"17\"\n          cache: \"maven\"\n      - uses: actions/setup-python@v6\n        with:\n          python-version: \"3.11\"\n          cache: \"pip\"\n\n      - name: Maven Package\n        run: |\n          mvn package -DskipTests\n\n      - name: Install Azurite\n        run: npx --yes --loglevel info azurite@3.35 --version\n      - name: 
Start Azurite\n        shell: bash\n        run: npx --yes --package azurite@3.35 azurite-blob &\n      - name: Maven Test with Azurite\n        run: |\n          # TODO: run other test classes\n          mvn test -Ds3proxy.test.conf=s3proxy-azurite.conf -Dtest=AwsSdkTest\n\n      - name: Install s3-tests\n        run: |\n          python -m pip install --upgrade pip\n          pip install tox tox-gh-actions\n      - name: Run s3-tests with Azurite\n        run: |\n          ./src/test/resources/run-s3-tests.sh s3proxy-azurite.conf\n          kill $(pidof node)\n\n  localstackTests:\n    runs-on: ubuntu-24.04-arm\n    needs: [meta]\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          submodules: \"recursive\"\n      - uses: actions/setup-java@v5\n        with:\n          distribution: \"temurin\"\n          java-version: \"17\"\n          cache: \"maven\"\n      - uses: actions/setup-python@v6\n        with:\n          python-version: \"3.11\"\n          cache: \"pip\"\n\n      - name: Maven Package\n        run: |\n          mvn package -DskipTests\n\n      - name: Install LocalStack\n        run: docker pull localstack/localstack:4.11.1\n      - name: Start LocalStack\n        run: |\n          docker run -d --name localstack -p 4566:4566 localstack/localstack:4.11.1\n          # Wait for LocalStack to be ready\n          for i in $(seq 30); do\n            if curl -s http://127.0.0.1:4566/_localstack/health | grep -q '\"s3\"'; then\n              break\n            fi\n            sleep 1\n          done\n      - name: Maven Test with LocalStack (s3)\n        run: |\n          # TODO: run other test classes\n          mvn test -Ds3proxy.test.conf=s3proxy-localstack-s3.conf -Dtest=AwsSdkTest\n\n      - name: Maven Test with LocalStack (aws-s3-sdk)\n        run: |\n          # TODO: run other test classes\n          mvn test -Ds3proxy.test.conf=s3proxy-localstack-aws-s3-sdk.conf -Dtest=AwsSdkTest\n\n      - name: Install s3-tests\n        run: 
|\n          python -m pip install --upgrade pip\n          pip install tox tox-gh-actions\n      - name: Run s3-tests with LocalStack (s3)\n        run: |\n          ./src/test/resources/run-s3-tests.sh s3proxy-localstack-s3.conf\n      - name: Run s3-tests with LocalStack (aws-s3-sdk)\n        run: |\n          ./src/test/resources/run-s3-tests.sh s3proxy-localstack-aws-s3-sdk.conf\n          docker stop localstack\n\n  fakeGcsServerTests:\n    runs-on: ubuntu-24.04-arm\n    needs: [meta]\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          submodules: \"recursive\"\n      - uses: actions/setup-java@v5\n        with:\n          distribution: \"temurin\"\n          java-version: \"17\"\n          cache: \"maven\"\n      - uses: actions/setup-python@v6\n        with:\n          python-version: \"3.11\"\n          cache: \"pip\"\n\n      - name: Maven Package\n        run: |\n          mvn package -DskipTests\n\n      - name: Install fake-gcs-server\n        run: go install github.com/fsouza/fake-gcs-server@latest\n      - name: Start fake-gcs-server\n        run: $HOME/go/bin/fake-gcs-server -backend memory -scheme http -host 127.0.0.1 &\n      - name: Maven Test with fake-gcs-server\n        run: |\n          # TODO: run other test classes\n          STORAGE_EMULATOR_HOST=http://localhost:4443 mvn test -Ds3proxy.test.conf=s3proxy-fake-gcs-server.conf -Dtest=AwsSdkTest\n\n      - name: Install s3-tests\n        run: |\n          python -m pip install --upgrade pip\n          pip install tox tox-gh-actions\n      - name: Run s3-tests with fake-gcs-server\n        run: |\n          # TODO:\n          #STORAGE_EMULATOR_HOST=http://localhost:4443 ./src/test/resources/run-s3-tests.sh s3proxy-fake-gcs-server.conf\n          kill $(pidof fake-gcs-server)\n\n  Containerize:\n    runs-on: ubuntu-24.04-arm\n    needs: [runTests, azuriteTests, localstackTests, fakeGcsServerTests, meta]\n    permissions:\n      contents: read\n      packages: write\n    
steps:\n      - uses: actions/checkout@v6\n      - uses: actions/download-artifact@v8\n        with:\n          name: s3proxy\n          path: target\n      - uses: actions/download-artifact@v8\n        with:\n          name: pom\n          path: .\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v4\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to DockerHub\n        uses: docker/login-action@v4\n        if: github.event_name != 'pull_request' && env.dockerhub_publish == 'true'\n        with:\n          username: ${{ secrets.DOCKER_USER }}\n          password: ${{ secrets.DOCKER_PASS }}\n\n      - name: Login to GHCR\n        uses: docker/login-action@v4\n        if: github.event_name != 'pull_request'\n        with:\n          registry: ghcr.io\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Build and push\n        uses: docker/build-push-action@v7\n        with:\n          context: .\n          platforms: linux/amd64,linux/arm64\n          push: ${{ github.event_name != 'pull_request' }}\n          tags: ${{ needs.meta.outputs.container_tags }}\n          labels: ${{ needs.meta.outputs.container_labels }}\n          build-args: |\n            BUILDTIME=${{ needs.meta.outputs.container_buildtime }}\n            VERSION=${{ needs.meta.outputs.container_version }}\n            REVISION=${{ needs.meta.outputs.container_revision }}\n          cache-from: type=gha\n          cache-to: type=gha,mode=max\n"
  },
  {
    "path": ".gitignore",
    "content": "s3proxy.iml\n.idea/\n\n# Eclipse project configuration files\n.classpath\n.project\n.settings\n\n# MAC stuff\n.DS_Store\n\n# below is default github .ignore for java\n*.class\n# Mobile Tools for Java (J2ME)\n.mtj.tmp/\n# Package Files #\n*.jar\n*.war\n*.ear\n# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml\nhs_err_pid*\ntarget/\n\n# files created during tests\n__blobstorage__/\nAzuriteConfig\n__azurite_db*\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"s3-tests\"]\n\tpath = s3-tests\n\turl = https://github.com/gaul/s3-tests.git\n"
  },
  {
    "path": ".mailmap",
    "content": "Hironao Sekine <phant.acc+github@gmail.com>\nSheng Hu <s.huhot@gmail.com>\n"
  },
  {
    "path": ".mvn/maven.config",
    "content": "-Daether.checksums.algorithms=SHA-512,SHA-256,SHA-1,MD5\n"
  },
  {
    "path": ".releaserc",
    "content": "{\n    \"tagFormat\": 's3proxy-${version}',\n        \"branches\": [\n            {\n                \"name\": 'master',\n                prerelease: false\n            },\n            {\n                \"name\": 'releases\\/+([0-9])?(\\.\\d+)(\\.\\d+|z|$)',\n                prerelease: false\n            },\n            {\n                \"name\": 'next',\n                prerelease: false\n            },\n            {\n                name: 'next-major',\n                prerelease: true\n            },\n            {\n                name: 'develop',\n                prerelease: true\n            },\n            {\n                name: 'develop\\/.*',\n                prerelease: true\n            }\n        ]\n}\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM docker.io/library/eclipse-temurin:21-jre\nLABEL maintainer=\"Andrew Gaul <andrew@gaul.org>\"\n\nWORKDIR /opt/s3proxy\n\nRUN apt-get update && \\\n    apt-get install -y dumb-init && \\\n    rm -rf /var/lib/apt/lists/*\n\nCOPY \\\n    target/s3proxy \\\n    src/main/resources/run-docker-container.sh \\\n    /opt/s3proxy/\n\nENV \\\n    LOG_LEVEL=\"info\" \\\n    S3PROXY_AUTHORIZATION=\"aws-v2-or-v4\" \\\n    S3PROXY_ENDPOINT=\"http://0.0.0.0:80\" \\\n    S3PROXY_IDENTITY=\"local-identity\" \\\n    S3PROXY_CREDENTIAL=\"local-credential\" \\\n    S3PROXY_VIRTUALHOST=\"\" \\\n    S3PROXY_KEYSTORE_PATH=\"keystore.jks\" \\\n    S3PROXY_KEYSTORE_PASSWORD=\"password\" \\\n    S3PROXY_CORS_ALLOW_ALL=\"false\" \\\n    S3PROXY_CORS_ALLOW_ORIGINS=\"\" \\\n    S3PROXY_CORS_ALLOW_METHODS=\"\" \\\n    S3PROXY_CORS_ALLOW_HEADERS=\"\" \\\n    S3PROXY_CORS_ALLOW_CREDENTIAL=\"\" \\\n    S3PROXY_V4_MAX_CHUNK_SIZE=\"16777216\" \\\n    S3PROXY_IGNORE_UNKNOWN_HEADERS=\"false\" \\\n    S3PROXY_ENCRYPTED_BLOBSTORE=\"\" \\\n    S3PROXY_ENCRYPTED_BLOBSTORE_PASSWORD=\"\" \\\n    S3PROXY_ENCRYPTED_BLOBSTORE_SALT=\"\" \\\n    S3PROXY_READ_ONLY_BLOBSTORE=\"false\" \\\n    S3PROXY_METRICS_ENABLED=\"false\" \\\n    S3PROXY_METRICS_PORT=\"9090\" \\\n    S3PROXY_METRICS_HOST=\"0.0.0.0\" \\\n    JCLOUDS_PROVIDER=\"filesystem-nio2\" \\\n    JCLOUDS_ENDPOINT=\"\" \\\n    JCLOUDS_REGION=\"\" \\\n    JCLOUDS_REGIONS=\"us-east-1\" \\\n    JCLOUDS_IDENTITY=\"remote-identity\" \\\n    JCLOUDS_CREDENTIAL=\"remote-credential\" \\\n    JCLOUDS_KEYSTONE_VERSION=\"\" \\\n    JCLOUDS_KEYSTONE_SCOPE=\"\" \\\n    JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME=\"\" \\\n    JCLOUDS_FILESYSTEM_BASEDIR=\"/data\"\n\nEXPOSE 80 443\n\nENTRYPOINT [\"/usr/bin/dumb-init\", \"--\"]\n\nCMD [\"/opt/s3proxy/run-docker-container.sh\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        https://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       https://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# S3Proxy\n\n[![Github All Releases](https://img.shields.io/github/downloads/gaul/s3proxy/total.svg)](https://github.com/gaul/s3proxy/releases/)\n[![Docker Pulls](https://img.shields.io/docker/pulls/andrewgaul/s3proxy.svg)](https://hub.docker.com/r/andrewgaul/s3proxy/)\n[![Maven Central](https://img.shields.io/maven-central/v/org.gaul/s3proxy.svg)](https://search.maven.org/#search%7Cga%7C1%7Ca%3A%22s3proxy%22)\n[![Twitter Follow](https://img.shields.io/twitter/follow/S3Proxy.svg?style=social&label=Follow)](https://twitter.com/S3Proxy)\n\nS3Proxy implements the\n[S3 API](https://en.wikipedia.org/wiki/Amazon_S3#S3_API_and_competing_services)\nand *proxies* requests, enabling several use cases:\n\n* translation from S3 to Backblaze B2, EMC Atmos, Google Cloud, Microsoft Azure, and OpenStack Swift\n* testing without Amazon by using the local filesystem\n* extension via middlewares\n* embedding into Java applications\n\n## Usage with Docker\n\n[Docker Hub](https://hub.docker.com/r/andrewgaul/s3proxy/) hosts a Docker image\nand has instructions on how to run it.\n\n## Usage without Docker\n\nUsers can [download releases](https://github.com/gaul/s3proxy/releases)\nfrom GitHub.  Developers can build the project by running `mvn package` which\nproduces a binary at `target/s3proxy`.  S3Proxy requires Java 17 or newer to\nrun.\n\nConfigure S3Proxy via a properties file.  An example using the local\nfile system as the storage backend with anonymous access:\n\n```\ns3proxy.authorization=none\ns3proxy.endpoint=http://127.0.0.1:8080\njclouds.provider=filesystem\njclouds.filesystem.basedir=/tmp/s3proxy\n```\n\nFirst create the filesystem basedir:\n\n```\nmkdir /tmp/s3proxy\n```\n\nNext run S3Proxy.  
Linux and Mac OS X users can run the executable jar:\n\n```\nchmod +x s3proxy\ns3proxy --properties s3proxy.conf\n```\n\nWindows users must explicitly invoke java:\n\n```\njava -jar s3proxy --properties s3proxy.conf\n```\n\nFinally test by creating a bucket then listing all the buckets:\n\n```\n$ curl --request PUT http://localhost:8080/testbucket\n\n$ curl http://localhost:8080/\n<?xml version=\"1.0\" ?><ListAllMyBucketsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner><ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID><DisplayName>CustomersName@amazon.com</DisplayName></Owner><Buckets><Bucket><Name>testbucket</Name><CreationDate>2015-08-05T22:16:24.000Z</CreationDate></Bucket></Buckets></ListAllMyBucketsResult>\n```\n\n## Usage with Java\n\nMaven Central hosts S3Proxy artifacts and the wiki has\n[instructions on Java use](https://github.com/gaul/s3proxy/wiki/Using-S3Proxy-in-Java-projects).\n\n## Supported storage backends\n\n* atmos\n* aws-s3 (Amazon-only, deprecated)\n* aws-s3-sdk (S3-compatible backends via AWS SDK, recommended)\n* azureblob (deprecated)\n* azureblob-sdk (recommended)\n* b2\n* filesystem (on-disk storage, deprecated)\n* filesystem-nio2 (on-disk storage, recommended)\n* google-cloud-storage (deprecated)\n* google-cloud-storage-sdk (recommended)\n* openstack-swift\n* rackspace-cloudfiles-uk and rackspace-cloudfiles-us\n* s3 (non-Amazon, deprecated)\n* transient (in-memory storage, deprecated)\n* transient-nio2 (in-memory storage, recommended)\n\nSee the wiki for [examples of configurations](https://github.com/gaul/s3proxy/wiki/Storage-backend-examples).\n\n## Assigning buckets to backends\n\nS3Proxy can be configured to assign buckets to different backends with the same\ncredentials. 
The configuration in the properties file is as follows:\n```\ns3proxy.bucket-locator.1=bucket\ns3proxy.bucket-locator.2=another-bucket\n```\n\nIn addition to the explicit names, [glob syntax](https://docs.oracle.com/javase/tutorial/essential/io/fileOps.html#glob) can be used to configure many\nbuckets for a given backend.\n\nA bucket (or a glob) cannot be assigned to multiple backends.\n\n## Middlewares\n\nS3Proxy can modify its behavior based on middlewares:\n\n* [bucket aliasing](https://github.com/gaul/s3proxy/wiki/Middleware-alias-blobstore)\n* [bucket prefix scoping](https://github.com/gaul/s3proxy/wiki/Middleware-prefix-blobstore)\n* [bucket locator](https://github.com/gaul/s3proxy/wiki/Middleware-bucket-locator)\n* [eventual consistency modeling](https://github.com/gaul/s3proxy/wiki/Middleware---eventual-consistency)\n* [large object mocking](https://github.com/gaul/s3proxy/wiki/Middleware-large-object-mocking)\n* [latency](https://github.com/gaul/s3proxy/wiki/Middleware-latency)\n* [read-only](https://github.com/gaul/s3proxy/wiki/Middleware-read-only)\n* [regex rename blobs](https://github.com/gaul/s3proxy/wiki/Middleware-regex)\n* [sharded backend containers](https://github.com/gaul/s3proxy/wiki/Middleware-sharded-backend)\n* [storage class override](https://github.com/gaul/s3proxy/wiki/Middleware-storage-class-override)\n* [user metadata replacer](https://github.com/gaul/s3proxy/wiki/Middleware-user-metadata-replacer)\n* [no cache override](https://github.com/gaul/s3proxy/wiki/Middleware-no-cache)\n\n## SSL Support\n\nS3Proxy can listen on HTTPS by setting the `secure-endpoint` and [configuring a keystore](http://wiki.eclipse.org/Jetty/Howto/Configure_SSL#Generating_Keys_and_Certificates_with_JDK_keytool). 
You can read more about how configure S3Proxy for SSL Support in [the dedicated wiki page](https://github.com/gaul/s3proxy/wiki/SSL-support) with Docker, Kubernetes or simply Java.\n\n## Limitations\n\nS3Proxy has broad compatibility with the S3 API, however, it does not support:\n\n* ACLs other than private and public-read\n* BitTorrent hosting\n* bucket logging\n* bucket policies\n* [CORS bucket operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html#how-do-i-enable-cors) like getting or setting the CORS configuration for a bucket. S3Proxy only supports a static configuration (see below).\n* hosting static websites\n* object server-side encryption\n* object tagging\n* object versioning, see [#74](https://github.com/gaul/s3proxy/issues/74)\n* POST upload policies, see [#73](https://github.com/gaul/s3proxy/issues/73)\n* requester pays buckets\n* [select object content](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)\n\nS3Proxy emulates the following operations:\n\n* conditional PUT object when using If-Match or If-None-Match, unless the `azureblob-sdk` provider is used\n* copy multi-part objects, see [#76](https://github.com/gaul/s3proxy/issues/76)\n\nS3Proxy has basic CORS preflight and actual request/response handling. It can be configured within the properties\nfile (and corresponding ENV variables for Docker):\n\n```\ns3proxy.cors-allow-origins=https://example\\.com https://.+\\.example\\.com https://example\\.cloud\ns3proxy.cors-allow-methods=GET PUT\ns3proxy.cors-allow-headers=Accept Content-Type\ns3proxy.cors-allow-credential=true\n```\n\nCORS cannot be configured per bucket. 
`s3proxy.cors-allow-all=true` will accept any origin and header.\nActual CORS requests are supported for GET, PUT, POST, HEAD and DELETE methods.\n\nThe wiki collects\n[compatibility notes](https://github.com/gaul/s3proxy/wiki/Storage-backend-compatibility)\nfor specific storage backends.\n\n## Support\n\n* [GitHub issues](https://github.com/gaul/s3proxy/issues)\n* [Stack Overflow](https://stackoverflow.com/questions/tagged/s3proxy)\n* [commercial support](mailto:andrew@gaul.org)\n\n## References\n\n* [Apache jclouds](https://jclouds.apache.org/) provides storage backend support for S3Proxy\n* [Ceph s3-tests](https://github.com/ceph/s3-tests) help maintain and improve compatibility with the S3 API\n* [fake-s3](https://github.com/jubos/fake-s3), [gofakes3](https://github.com/johannesboyne/gofakes3), [minio](https://github.com/minio/minio), [S3 ninja](https://github.com/scireum/s3ninja), and [s3rver](https://github.com/jamhall/s3rver) provide functionality similar to S3Proxy when using the filesystem backend\n* [GlacierProxy](https://github.com/bouncestorage/glacier-proxy) and [SwiftProxy](https://github.com/bouncestorage/swiftproxy) provide similar functionality for the Amazon Glacier and OpenStack Swift APIs\n* [s3mock](https://github.com/adobe/S3Mock) - Adobe's s3 mock implementation\n* [sbt-s3](https://github.com/localytics/sbt-s3) runs S3Proxy via the Scala Build Tool\n* [swift3](https://github.com/openstack/swift3) provides an S3 middleware for OpenStack Swift\n* [Zenko](https://www.zenko.io/) provide similar multi-cloud functionality\n\n## License\n\nCopyright (C) 2014-2026 Andrew Gaul\n\nLicensed under the Apache License, Version 2.0\n"
  },
  {
    "path": "docs/Encryption.md",
    "content": "S3Proxy\n\n# Encryption \n\n## Motivation \nThe motivation behind this implementation is to provide a fully transparent and secure encryption to the s3 client while having the ability to write into different clouds.\n\n## Cipher mode\nThe chosen cipher is ```AES/CFB/NoPadding``` because it provides the ability to read from an offset like in the middle of a ```Blob```.\nWhile reading from an offset the decryption process needs to consider the previous 16 bytes of the AES block.\n\n### Key generation\nThe encryption uses a 128-bit key that will be derived from a given password and salt in combination with random initialization vector that will be stored in each part padding.\n\n## How a blob is encrypted \nEvery uploaded part get a padding of 64 bytes that includes the necessary information for decryption. The input stream from a s3 client is passed through ```CipherInputStream``` and piped to append the 64 byte part padding at the end the encrypted stream. The encrypted input stream is then processed by the ```BlobStore``` to save the ```Blob```.\n\n| Name      | Byte size | Description                                                    |\n|-----------|-----------|----------------------------------------------------------------|\n| Delimiter | 8 byte    | The delimiter is used to detect if the ```Blob``` is encrypted |\n| IV        | 16 byte   | AES initialization vector                                      |\n| Part      | 4 byte    | The part number                                                |\n| Size      | 8 byte    | The unencrypted size of the ```Blob```                         |\n| Version   | 2 byte    | Version can be used in the future if changes are necessary     |\n| Reserved  | 26 byte   | Reserved for future use                                        |\n\n### Multipart handling \nA single ```Blob``` can be uploaded by the client into multiple parts. 
After the completion all parts are concatenated into a single ```Blob```.\nThis procedure will result in multiple parts and paddings being held by a single ```Blob```.\n\n### Single blob example\n```\n-------------------------------------\n| ENCRYPTED BYTES         | PADDING |\n-------------------------------------\n```\n\n### Multipart blob example\n```\n-------------------------------------------------------------------------------------\n| ENCRYPTED BYTES | PADDING | ENCRYPTED BYTES | PADDING | ENCRYPTED BYTES | PADDING |\n-------------------------------------------------------------------------------------\n```\n\n## How a blob is decrypted\nThe decryption is way more complex than the encryption. Decryption process needs to take care of the following circumstances:\n- decryption of the entire ```Blob```\n- decryption from a specific offset by skipping initial bytes \n- decryption of bytes by reading from the end (tail)\n- decryption of a specific byte range like middle of the ```Blob```\n- decryption of all previous situation by considering a underlying multipart ```Blob```\n\n### Single blob decryption \nFirst the ```BlobMetadata``` is requested to get the encrypted ```Blob``` size. The last 64 bytes of ```PartPadding``` are fetched and inspected to detect if a decryption is necessary.\nThe cipher is than initialized with the IV and the key.\n\n### Multipart blob decryption \nThe process is similar to the single ```Blob``` decryption but with the difference that a list of parts is computed by fetching all ```PartPadding``` from end to the beginning.\n\n## Blob suffix\nEach stored ```Blob``` will get a suffix named ```.s3enc``` this helps to determine if a ```Blob``` is encrypted. For the s3 client the ```.s3enc``` suffix is not visible and the ```Blob``` size will always show the unencrypted size.  
\n\n## Tested jClouds provider\n- S3\n    - Minio\n    - OBS from OpenTelekomCloud\n- AWS S3\n- Azure\n- GCP\n- Local\n\n## Limitation \n- All blobs are encrypted with the same key that is derived from a given password \n- No support for re-encryption\n- Returned eTag always differs therefore clients should not verify it\n- Decryption of a ```Blob``` will always result in multiple calls against the backend for instance a GET will result in a HEAD + GET because the size of the blob needs to be determined \n"
  },
  {
    "path": "docs/Logging.md",
    "content": "# Logging\n\n## Configuration\n\nThe following environment variables can be used to configure logging\n\n* LOG_LEVEL default value \"info\" used to configure log level\n* LOG_APPENDER default value \"STDOUT\" produce string formatted logs \"CONTAINER\" used to produce json formatted logs"
  },
  {
    "path": "pom.xml",
    "content": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n  <modelVersion>4.0.0</modelVersion>\n\n  <groupId>org.gaul</groupId>\n  <artifactId>s3proxy</artifactId>\n  <version>3.2.0-SNAPSHOT</version>\n  <packaging>jar</packaging>\n\n  <name>S3Proxy</name>\n  <url>https://github.com/gaul/s3proxy</url>\n  <description>Access other storage backends via the S3 API</description>\n\n  <licenses>\n    <license>\n      <name>The Apache Software License, Version 2.0</name>\n      <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>\n      <distribution>repo</distribution>\n    </license>\n  </licenses>\n\n  <scm>\n    <connection>scm:git:git@github.com:gaul/s3proxy.git</connection>\n    <developerConnection>scm:git:git@github.com:gaul/s3proxy.git</developerConnection>\n    <url>git@github.com:gaul/s3proxy.git</url>\n  </scm>\n\n  <developers>\n    <developer>\n      <name>Andrew Gaul</name>\n      <id>gaul</id>\n      <email>andrew@gaul.org</email>\n    </developer>\n  </developers>\n\n  <distributionManagement>\n    <snapshotRepository>\n      <id>sonatype-central-portal</id>\n      <name>Sonatype Central Portal</name>\n      <url>https://central.sonatype.com/repository/maven-snapshots/</url>\n    </snapshotRepository>\n    <repository>\n      <id>sonatype-central-portal</id>\n      <name>Sonatype Central Portal</name>\n      <url>https://repo.maven.apache.org/maven2/</url>\n    </repository>\n  </distributionManagement>\n\n  <profiles>\n    <profile>\n      <id>release</id>\n      <build>\n        <plugins>\n          <plugin>\n            <groupId>org.apache.maven.plugins</groupId>\n            <artifactId>maven-gpg-plugin</artifactId>\n            <version>3.2.8</version>\n            <executions>\n              <execution>\n                <id>sign-artifacts</id>\n                
<phase>verify</phase>\n                <goals>\n                  <goal>sign</goal>\n                </goals>\n              </execution>\n            </executions>\n          </plugin>\n        </plugins>\n      </build>\n    </profile>\n  </profiles>\n\n  <build>\n    <extensions>\n      <extension>\n        <groupId>eu.maveniverse.maven.njord</groupId>\n        <artifactId>extension</artifactId>\n        <version>${njord.version}</version>\n      </extension>\n    </extensions>\n    <plugins>\n      <plugin>\n        <groupId>eu.maveniverse.maven.plugins</groupId>\n        <artifactId>njord</artifactId>\n        <version>${njord.version}</version>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-enforcer-plugin</artifactId>\n        <version>3.6.2</version>\n        <executions>\n          <execution>\n            <id>enforce-maven</id>\n            <goals>\n              <goal>enforce</goal>\n            </goals>\n            <configuration>\n              <rules>\n                <requireMavenVersion>\n                  <version>3.6.3</version>\n                </requireMavenVersion>\n              </rules>\n            </configuration>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-clean-plugin</artifactId>\n        <version>3.5.0</version>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-install-plugin</artifactId>\n        <version>3.1.4</version>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-deploy-plugin</artifactId>\n        <version>3.1.4</version>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-checkstyle-plugin</artifactId>\n        <version>3.6.0</version>\n        <executions>\n      
    <execution>\n            <id>check</id>\n            <phase>verify</phase>\n            <goals>\n              <goal>check</goal>\n            </goals>\n          </execution>\n        </executions>\n        <configuration>\n          <configLocation>src/main/resources/checkstyle.xml</configLocation>\n          <headerLocation>src/main/resources/copyright_header.txt</headerLocation>\n          <includeTestSourceDirectory>true</includeTestSourceDirectory>\n          <violationSeverity>warning</violationSeverity>\n          <failOnViolation>true</failOnViolation>\n        </configuration>\n        <dependencies>\n          <dependency>\n            <groupId>com.puppycrawl.tools</groupId>\n            <artifactId>checkstyle</artifactId>\n            <version>12.3.1</version>\n          </dependency>\n        </dependencies>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-resources-plugin</artifactId>\n        <version>3.5.0</version>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-compiler-plugin</artifactId>\n        <version>3.15.0</version>\n        <configuration>\n          <source>${java.version}</source>\n          <target>${java.version}</target>\n          <showDeprecation>true</showDeprecation>\n          <showWarnings>true</showWarnings>\n          <fork>true</fork>\n          <compilerArgs>\n            <arg>-Xlint</arg>\n            <arg>-XDcompilePolicy=simple</arg>\n            <arg>--should-stop=ifError=FLOW</arg>\n            <arg>-Xplugin:ErrorProne -Xep:JavaUtilDate:OFF -Xep:DefaultCharset:OFF -Xep:StringCaseLocaleUsage:OFF -Xep:ProtectedMembersInFinalClass:OFF -Xep:JavaTimeDefaultTimeZone:OFF</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED</arg>\n            
<arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.model=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED</arg>\n            <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED</arg>\n            <arg>-J--add-opens=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED</arg>\n            <arg>-J--add-opens=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED</arg>\n          </compilerArgs>\n          <annotationProcessorPaths>\n            <path>\n              <groupId>com.google.auto.service</groupId>\n              <artifactId>auto-service</artifactId>\n              <version>1.1.1</version>\n            </path>\n            <path>\n              <groupId>com.google.errorprone</groupId>\n              <artifactId>error_prone_core</artifactId>\n              <version>2.36.0</version>\n            </path>\n          </annotationProcessorPaths>\n        </configuration>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-jar-plugin</artifactId>\n        <version>3.5.0</version>\n        <configuration>\n          <archive>\n            <manifest>\n              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>\n              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>\n            </manifest>\n          </archive>\n        </configuration>\n      </plugin>\n      <plugin>\n        <groupId>io.github.git-commit-id</groupId>\n        <artifactId>git-commit-id-maven-plugin</artifactId>\n        <version>9.0.2</version>\n        
<executions>\n          <execution>\n            <goals>\n              <goal>revision</goal>\n            </goals>\n          </execution>\n        </executions>\n        <configuration>\n          <generateGitPropertiesFile>true</generateGitPropertiesFile>\n          <includeOnlyProperties>\n            <includeOnlyProperty>git.commit.id.abbrev</includeOnlyProperty>\n            <includeOnlyProperty>git.commit.id</includeOnlyProperty>\n          </includeOnlyProperties>\n        </configuration>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-javadoc-plugin</artifactId>\n        <version>3.12.0</version>\n        <executions>\n          <execution>\n            <id>attach-javadocs</id>\n            <goals>\n              <goal>jar</goal>\n            </goals>\n          </execution>\n        </executions>\n        <configuration>\n          <doclint>all,-missing</doclint>\n        </configuration>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-shade-plugin</artifactId>\n        <version>3.6.2</version>\n        <executions>\n          <execution>\n            <phase>package</phase>\n            <goals>\n              <goal>shade</goal>\n            </goals>\n            <configuration>\n             <createDependencyReducedPom>false</createDependencyReducedPom>\n             <filters>\n               <filter>\n                 <artifact>org.eclipse.jetty:*</artifact>\n                 <excludes>\n                   <exclude>META-INF/MANIFEST.MF</exclude>\n                   <exclude>META-INF/LICENSE</exclude>\n                   <exclude>META-INF/NOTICE.txt</exclude>\n                   <exclude>about.html</exclude>\n                 </excludes>\n               </filter>\n               <filter>\n                 <artifact>org.eclipse.jetty.ee10:*</artifact>\n                 <excludes>\n                   
<exclude>META-INF/MANIFEST.MF</exclude>\n                   <exclude>META-INF/LICENSE</exclude>\n                   <exclude>META-INF/NOTICE.txt</exclude>\n                   <exclude>about.html</exclude>\n                 </excludes>\n               </filter>\n             </filters>\n             <artifactSet>\n                <includes>\n                  <include>org.eclipse.jetty:*</include>\n                  <include>org.eclipse.jetty.ee10:*</include>\n                </includes>\n              </artifactSet>\n              <relocations>\n                <relocation>\n                  <pattern>org.eclipse.jetty</pattern>\n                  <shadedPattern>${shade.prefix}.org.eclipse.jetty</shadedPattern>\n                </relocation>\n              </relocations>\n            </configuration>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-assembly-plugin</artifactId>\n        <version>3.8.0</version>\n        <configuration>\n          <descriptors>\n            <descriptor>src/main/assembly/jar-with-dependencies.xml</descriptor>\n          </descriptors>\n          <archive>\n            <manifest>\n              <mainClass>org.gaul.s3proxy.Main</mainClass>\n              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>\n            </manifest>\n          </archive>\n        </configuration>\n        <executions>\n          <execution>\n            <id>make-assembly</id>\n            <phase>package</phase>\n            <goals>\n              <goal>single</goal>\n            </goals>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-source-plugin</artifactId>\n        <version>3.4.0</version>\n        <executions>\n          <execution>\n            <id>attach-sources</id>\n            <goals>\n              
<goal>jar-no-fork</goal>\n            </goals>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <groupId>org.apache.maven.plugins</groupId>\n        <artifactId>maven-surefire-plugin</artifactId>\n        <version>${surefire.version}</version>\n        <dependencies>\n          <dependency>\n            <groupId>org.apache.maven.surefire</groupId>\n            <artifactId>surefire-junit47</artifactId>\n            <version>${surefire.version}</version>\n          </dependency>\n          <dependency>\n            <groupId>org.apache.maven.surefire</groupId>\n            <artifactId>surefire-junit-platform</artifactId>\n            <version>${surefire.version}</version>\n          </dependency>\n        </dependencies>\n        <configuration>\n          <parallel>classes</parallel>\n          <threadCount>1</threadCount>\n          <argLine>-Xmx512m</argLine>\n          <redirectTestOutputToFile>true</redirectTestOutputToFile>\n          <forkedProcessTimeoutInSeconds>1800</forkedProcessTimeoutInSeconds>\n          <runOrder>random</runOrder>\n          <trimStackTrace>false</trimStackTrace>\n          <properties>\n            <property>\n              <name>junit</name>\n              <value>false</value>\n            </property>\n          </properties>\n        </configuration>\n      </plugin>\n      <plugin>\n        <groupId>com.github.spotbugs</groupId>\n        <artifactId>spotbugs-maven-plugin</artifactId>\n        <version>4.9.8.3</version>\n        <configuration>\n          <effort>Max</effort>\n          <omitVisitors>CrossSiteScripting,DefaultEncodingDetector,FindNullDeref</omitVisitors>\n          <plugins>\n            <plugin>\n              <groupId>jp.skypencil.findbugs.slf4j</groupId>\n              <artifactId>bug-pattern</artifactId>\n              <version>1.5.0</version>\n            </plugin>\n          </plugins>\n        </configuration>\n      </plugin>\n      <plugin>\n        
<groupId>org.skife.maven</groupId>\n        <artifactId>really-executable-jar-maven-plugin</artifactId>\n        <version>2.1.1</version>\n        <configuration>\n          <inputFile>target/s3proxy-${project.version}-jar-with-dependencies.jar</inputFile>\n          <programFile>s3proxy</programFile>\n        </configuration>\n        <executions>\n          <execution>\n            <phase>package</phase>\n            <goals>\n              <goal>really-executable-jar</goal>\n            </goals>\n          </execution>\n        </executions>\n      </plugin>\n      <plugin>\n        <groupId>org.gaul</groupId>\n        <artifactId>modernizer-maven-plugin</artifactId>\n        <version>${modernizer.version}</version>\n        <executions>\n          <execution>\n            <id>modernizer</id>\n            <phase>verify</phase>\n            <goals>\n              <goal>modernizer</goal>\n            </goals>\n          </execution>\n        </executions>\n        <configuration>\n          <javaVersion>${java.version}</javaVersion>\n        </configuration>\n      </plugin>\n    </plugins>\n  </build>\n\n  <properties>\n    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>\n    <java.version>17</java.version>\n    <aws-sdk.version>1.12.797</aws-sdk.version>\n    <aws-sdkv2.version>2.42.31</aws-sdkv2.version>\n    <jclouds.version>2.7.0</jclouds.version>\n    <jetty.version>12.1.8</jetty.version>\n    <modernizer.version>3.3.0</modernizer.version>\n    <njord.version>0.7.5</njord.version>\n    <opentelemetry.version>1.60.1</opentelemetry.version>\n    <opentelemetry-semconv.version>1.40.0</opentelemetry-semconv.version>\n    <slf4j.version>2.0.17</slf4j.version>\n    <shade.prefix>${project.groupId}.shaded</shade.prefix>\n    <surefire.version>3.5.5</surefire.version>\n  </properties>\n\n  <dependencyManagement>\n    <dependencies>\n      <dependency>\n        <groupId>com.fasterxml.jackson</groupId>\n        <artifactId>jackson-bom</artifactId>\n  
      <version>2.21.2</version>\n        <type>pom</type>\n        <scope>import</scope>\n      </dependency>\n      <dependency>\n        <groupId>io.opentelemetry</groupId>\n        <artifactId>opentelemetry-bom</artifactId>\n        <version>${opentelemetry.version}</version>\n        <type>pom</type>\n        <scope>import</scope>\n      </dependency>\n      <dependency>\n        <groupId>org.junit</groupId>\n        <artifactId>junit-bom</artifactId>\n        <version>6.0.3</version>\n        <type>pom</type>\n        <scope>import</scope>\n      </dependency>\n    </dependencies>\n  </dependencyManagement>\n\n  <dependencies>\n    <dependency>\n      <groupId>io.opentelemetry</groupId>\n      <artifactId>opentelemetry-api</artifactId>\n    </dependency>\n    <dependency>\n      <groupId>io.opentelemetry</groupId>\n      <artifactId>opentelemetry-sdk</artifactId>\n    </dependency>\n    <dependency>\n      <groupId>io.opentelemetry</groupId>\n      <artifactId>opentelemetry-exporter-prometheus</artifactId>\n      <version>1.60.1-alpha</version>\n    </dependency>\n    <dependency>\n      <groupId>io.opentelemetry.semconv</groupId>\n      <artifactId>opentelemetry-semconv</artifactId>\n      <version>${opentelemetry-semconv.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>com.amazonaws</groupId>\n      <artifactId>aws-java-sdk-s3</artifactId>\n      <version>${aws-sdk.version}</version>\n      <scope>test</scope>\n      <exclusions>\n        <exclusion>\n          <groupId>commons-logging</groupId>\n          <artifactId>commons-logging</artifactId>\n        </exclusion>\n      </exclusions>\n    </dependency>\n    <dependency>\n      <groupId>com.amazonaws</groupId>\n      <artifactId>aws-java-sdk-sts</artifactId>\n      <version>${aws-sdk.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>args4j</groupId>\n      <artifactId>args4j</artifactId>\n      <version>2.37</version>\n    </dependency>\n    <dependency>\n     
 <groupId>ch.qos.logback</groupId>\n      <artifactId>logback-classic</artifactId>\n      <version>1.5.32</version>\n    </dependency>\n    <dependency>\n      <groupId>com.google.cloud</groupId>\n      <artifactId>google-cloud-storage</artifactId>\n      <version>2.64.1</version>\n    </dependency>\n    <dependency>\n      <groupId>com.azure</groupId>\n      <artifactId>azure-storage-blob</artifactId>\n      <version>12.32.0</version>\n    </dependency>\n    <dependency>\n      <groupId>com.azure</groupId>\n      <artifactId>azure-identity</artifactId>\n      <version>1.18.2</version>\n    </dependency>\n    <dependency>\n      <groupId>com.google.auto.service</groupId>\n      <artifactId>auto-service</artifactId>\n      <version>1.1.1</version>\n    </dependency>\n    <dependency>\n      <groupId>com.google.guava</groupId>\n      <artifactId>guava</artifactId>\n      <version>33.5.0-jre</version>\n    </dependency>\n    <dependency>\n      <groupId>com.google.jimfs</groupId>\n      <artifactId>jimfs</artifactId>\n      <version>1.3.1</version>\n    </dependency>\n    <dependency>\n      <groupId>javax.xml.bind</groupId>\n      <artifactId>jaxb-api</artifactId>\n      <version>2.3.1</version>\n    </dependency>\n    <dependency>\n      <groupId>junit</groupId>\n      <artifactId>junit</artifactId>\n      <version>4.13.2</version>\n      <!-- Required for S3ProxyRule -->\n      <scope>provided</scope>\n    </dependency>\n    <dependency>\n      <groupId>org.junit.jupiter</groupId>\n      <artifactId>junit-jupiter</artifactId>\n      <version>6.0.3</version>\n      <!-- Required for S3ProxyExtension -->\n      <scope>provided</scope>\n    </dependency>\n    <dependency>\n      <groupId>org.junit.platform</groupId>\n      <artifactId>junit-platform-launcher</artifactId>\n      <scope>test</scope>\n    </dependency>\n    <dependency>\n      <groupId>com.fasterxml.jackson.dataformat</groupId>\n      <artifactId>jackson-dataformat-xml</artifactId>\n      
<version>2.21.2</version>\n    </dependency>\n    <dependency>\n      <groupId>com.github.spotbugs</groupId>\n      <artifactId>spotbugs-annotations</artifactId>\n      <version>4.9.8</version>\n      <scope>provided</scope>\n    </dependency>\n    <dependency>\n      <groupId>org.jspecify</groupId>\n      <artifactId>jspecify</artifactId>\n      <version>1.0.0</version>\n    </dependency>\n    <dependency>\n      <groupId>org.apache.jclouds</groupId>\n      <artifactId>jclouds-allblobstore</artifactId>\n      <version>${jclouds.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.apache.jclouds.api</groupId>\n      <artifactId>filesystem</artifactId>\n      <version>${jclouds.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.apache.jclouds.driver</groupId>\n      <artifactId>jclouds-slf4j</artifactId>\n      <version>${jclouds.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.assertj</groupId>\n      <artifactId>assertj-core</artifactId>\n      <scope>test</scope>\n      <!-- we need to use the same version as in jclouds because we pull in their tests -->\n      <version>3.27.7</version>\n    </dependency>\n    <dependency>\n      <groupId>org.eclipse.jetty.ee10</groupId>\n      <artifactId>jetty-ee10-servlet</artifactId>\n      <version>${jetty.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.gaul</groupId>\n      <artifactId>modernizer-maven-annotations</artifactId>\n      <version>${modernizer.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.slf4j</groupId>\n      <artifactId>slf4j-api</artifactId>\n      <version>${slf4j.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>org.slf4j</groupId>\n      <artifactId>jcl-over-slf4j</artifactId>\n      <version>${slf4j.version}</version>\n    </dependency>\n    <!-- tests dependencies -->\n    <dependency>\n      <groupId>software.amazon.awssdk</groupId>\n      
<artifactId>s3</artifactId>\n      <version>${aws-sdkv2.version}</version>\n    </dependency>\n    <dependency>\n      <groupId>software.amazon.awssdk</groupId>\n      <artifactId>sts</artifactId>\n      <version>${aws-sdkv2.version}</version>\n    </dependency>\n  </dependencies>\n</project>\n"
  },
  {
    "path": "src/main/assembly/jar-with-dependencies.xml",
    "content": "<assembly xmlns=\"http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0\"\n  xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n  xsi:schemaLocation=\"http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd\">\n  <id>jar-with-dependencies</id>\n  <formats>\n    <format>jar</format>\n  </formats>\n  <includeBaseDirectory>false</includeBaseDirectory>\n  <containerDescriptorHandlers>\n    <containerDescriptorHandler>\n      <handlerName>metaInf-services</handlerName>\n    </containerDescriptorHandler>\n  </containerDescriptorHandlers>\n  <dependencySets>\n    <dependencySet>\n      <excludes>\n        <exclude>org.eclipse.jetty:*</exclude>\n        <exclude>org.eclipse.jetty.ee10:*</exclude>\n      </excludes>\n      <outputDirectory>/</outputDirectory>\n      <useProjectArtifact>true</useProjectArtifact>\n      <unpack>true</unpack>\n      <scope>runtime</scope>\n    </dependencySet>\n  </dependencySets>\n  <fileSets>\n    <fileSet>\n      <directory>${project.basedir}/src/main/config</directory>\n      <outputDirectory>/</outputDirectory>\n      <includes>\n        <include>logback.xml</include>\n      </includes>\n      <useDefaultExcludes>true</useDefaultExcludes>\n    </fileSet>\n  </fileSets>\n</assembly>\n"
  },
  {
    "path": "src/main/config/logback.xml",
    "content": "<configuration>\n  <appender name=\"STDOUT\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>[s3proxy] %.-1p %d{MM-dd HH:mm:ss.SSS} %t %c{30}:%L %X{clientId}|%X{sessionId}:%X{messageId}:%X{fileId}] %m%n</pattern>\n    </encoder>\n    <filter class=\"ch.qos.logback.classic.filter.ThresholdFilter\">\n      <level>${LOG_LEVEL:-info}</level>\n    </filter>\n  </appender>\n  <appender name=\"CONTAINER\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder class=\"ch.qos.logback.classic.encoder.JsonEncoder\"/>\n    <filter class=\"ch.qos.logback.classic.filter.ThresholdFilter\">\n      <level>${LOG_LEVEL:-info}</level>\n    </filter>\n  </appender>\n\n  <logger name=\"org.eclipse.jetty\" level=\"${JETTY_LOG_LEVEL:-info}\" />\n  <logger name=\"org.gaul.shaded.org.eclipse.jetty\" level=\"${JETTY_LOG_LEVEL:-info}\" />\n\n  <root level=\"${LOG_LEVEL:-info}\">\n    <appender-ref ref=\"${LOG_APPENDER:-STDOUT}\" />\n  </root>\n</configuration>\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/AccessControlPolicy.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Collection;\n\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;\nimport com.google.common.base.MoreObjects;\n\n/** Represent an Amazon AccessControlPolicy for a container or object. */\n// CHECKSTYLE:OFF\nfinal class AccessControlPolicy {\n    @JacksonXmlProperty(localName = \"Owner\")\n    Owner owner;\n    @JacksonXmlProperty(localName = \"AccessControlList\")\n    AccessControlList aclList;\n\n    @Override\n    public String toString() {\n        return MoreObjects.toStringHelper(AccessControlList.class)\n                .add(\"owner\", owner)\n                .add(\"aclList\", aclList)\n                .toString();\n    }\n\n    static final class Owner {\n        @JacksonXmlProperty(localName = \"ID\")\n        String id;\n        @JacksonXmlProperty(localName = \"DisplayName\")\n        String displayName;\n\n        @Override\n        public String toString() {\n            return MoreObjects.toStringHelper(Owner.class)\n                    .add(\"id\", id)\n                    .add(\"displayName\", displayName)\n                    .toString();\n        }\n    }\n\n    static final class AccessControlList {\n        @JacksonXmlProperty(localName = \"Grant\")\n       
 @JacksonXmlElementWrapper(useWrapping = false)\n        Collection<Grant> grants;\n\n        @Override\n        public String toString() {\n            return MoreObjects.toStringHelper(AccessControlList.class)\n                    .add(\"grants\", grants)\n                    .toString();\n        }\n\n        static final class Grant {\n            @JacksonXmlProperty(localName = \"Grantee\")\n            Grantee grantee;\n            @JacksonXmlProperty(localName = \"Permission\")\n            String permission;\n\n            @Override\n            public String toString() {\n                return MoreObjects.toStringHelper(Grant.class)\n                        .add(\"grantee\", grantee)\n                        .add(\"permission\", permission)\n                        .toString();\n            }\n\n            static final class Grantee {\n                @JacksonXmlProperty(namespace = \"xsi\", localName = \"type\",\n                        isAttribute = true)\n                String type;\n                @JacksonXmlProperty(localName = \"ID\")\n                String id;\n                @JacksonXmlProperty(localName = \"DisplayName\")\n                String displayName;\n                @JacksonXmlProperty(localName = \"EmailAddress\")\n                String emailAddress;\n                @JacksonXmlProperty(localName = \"URI\")\n                String uri;\n\n                @Override\n                public String toString() {\n                    return MoreObjects.toStringHelper(Grantee.class)\n                            .add(\"type\", type)\n                            .add(\"id\", id)\n                            .add(\"displayName\", displayName)\n                            .add(\"emailAddress\", emailAddress)\n                            .add(\"uri\", uri)\n                            .toString();\n                }\n            }\n        }\n    }\n}\n// CHECKSTYLE:ON\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/AliasBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static java.util.Objects.requireNonNull;\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\n\nimport com.google.common.collect.BiMap;\nimport com.google.common.collect.ImmutableBiMap;\nimport com.google.common.collect.ImmutableList;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.MutableStorageMetadata;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport 
org.jclouds.domain.Location;\nimport org.jclouds.io.Payload;\n\n/**\n * This class implements a middleware to alias buckets to a different name.\n * The aliases are configured as:\n *   s3proxy.alias-blobstore.&lt;alias name&gt; = &lt;backend bucket&gt;\n *\n * The aliases appear in bucket listings if the configured\n * backend buckets are present. Requests for all other buckets are unaffected.\n */\npublic final class AliasBlobStore extends ForwardingBlobStore {\n    private final BiMap<String, String> aliases;\n\n    private AliasBlobStore(BlobStore delegate,\n                           BiMap<String, String> aliases) {\n        super(delegate);\n        this.aliases = requireNonNull(aliases);\n    }\n\n    static BlobStore newAliasBlobStore(BlobStore delegate,\n                                       BiMap<String, String> aliases) {\n        return new AliasBlobStore(delegate, aliases);\n    }\n\n    private MultipartUpload getDelegateMpu(MultipartUpload mpu) {\n        return MultipartUpload.create(\n                getContainer(mpu.containerName()),\n                mpu.blobName(),\n                mpu.id(),\n                mpu.blobMetadata(),\n                mpu.putOptions());\n    }\n\n    public static ImmutableBiMap<String, String> parseAliases(\n            Properties properties) {\n        Map<String, String> backendBuckets = new HashMap<>();\n        for (String key : properties.stringPropertyNames()) {\n            if (key.startsWith(S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE)) {\n                String virtualBucket = key.substring(\n                        S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE.length() + 1);\n                String backendBucket = properties.getProperty(key);\n                checkArgument(\n                        !backendBuckets.containsKey(backendBucket),\n                        \"Backend bucket %s is aliased twice\",\n                        backendBucket);\n                backendBuckets.put(backendBucket, virtualBucket);\n  
          }\n        }\n        return ImmutableBiMap.copyOf(backendBuckets).inverse();\n    }\n\n    private String getContainer(String container) {\n        return this.aliases.getOrDefault(container, container);\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n                                             String container) {\n        return this.delegate().createContainerInLocation(location,\n                getContainer(container));\n    }\n\n    @Override\n    public boolean createContainerInLocation(\n            Location location, String container,\n            CreateContainerOptions options) {\n        return delegate().createContainerInLocation(\n                location, getContainer(container), options);\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        return delegate().containerExists(getContainer(container));\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        return delegate().getContainerAccess(getContainer(container));\n    }\n\n    @Override\n    public void setContainerAccess(String container,\n                                   ContainerAccess containerAccess) {\n        delegate().setContainerAccess(getContainer(container), containerAccess);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        PageSet<? 
extends StorageMetadata> upstream = this.delegate().list();\n        var results = new ImmutableList.Builder<StorageMetadata>();\n        for (StorageMetadata sm : upstream) {\n            if (aliases.containsValue(sm.getName())) {\n                MutableStorageMetadata bucketAlias =\n                        new MutableStorageMetadataImpl();\n                bucketAlias.setName(aliases.inverse().get(sm.getName()));\n                bucketAlias.setCreationDate(sm.getCreationDate());\n                bucketAlias.setETag(sm.getETag());\n                bucketAlias.setId(sm.getProviderId());\n                bucketAlias.setLastModified(sm.getLastModified());\n                bucketAlias.setLocation(sm.getLocation());\n                bucketAlias.setSize(sm.getSize());\n                bucketAlias.setTier(sm.getTier());\n                bucketAlias.setType(sm.getType());\n                // TODO: the URI should be rewritten to use the alias\n                bucketAlias.setUri(sm.getUri());\n                bucketAlias.setUserMetadata(sm.getUserMetadata());\n                results.add(bucketAlias);\n            } else {\n                results.add(sm);\n            }\n        }\n        return new PageSetImpl<>(results.build(), upstream.getNextMarker());\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        return delegate().list(getContainer(container));\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(\n            String container, ListContainerOptions options) {\n        return delegate().list(getContainer(container), options);\n    }\n\n    @Override\n    public void clearContainer(String container) {\n        delegate().clearContainer(getContainer(container));\n    }\n\n    @Override\n    public void clearContainer(String container, ListContainerOptions options) {\n        delegate().clearContainer(getContainer(container), options);\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        delegate().deleteContainer(getContainer(container));\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        return delegate().deleteContainerIfEmpty(getContainer(container));\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        return delegate().blobExists(getContainer(container), name);\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n        return delegate().blobMetadata(getContainer(container), name);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName) {\n        return delegate().getBlob(getContainer(containerName), blobName);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName,\n                        GetOptions getOptions) {\n        return delegate().getBlob(getContainer(containerName), blobName,\n                getOptions);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return delegate().putBlob(getContainer(containerName), blob);\n    }\n\n    @Override\n    public String putBlob(final String containerName, Blob blob,\n                          final PutOptions options) {\n        return delegate().putBlob(getContainer(containerName), blob,\n                options);\n    }\n\n    @Override\n    public void removeBlob(final String containerName, final String 
blobName) {\n        delegate().removeBlob(getContainer(containerName), blobName);\n    }\n\n    @Override\n    public void removeBlobs(final String containerName,\n                            final Iterable<String> blobNames) {\n        delegate().removeBlobs(getContainer(containerName), blobNames);\n    }\n\n    @Override\n    public String copyBlob(final String fromContainer, final String fromName,\n                           final String toContainer, final String toName,\n                           final CopyOptions options) {\n        return delegate().copyBlob(getContainer(fromContainer), fromName,\n                getContainer(toContainer), toName, options);\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(\n            String container, BlobMetadata blobMetadata, PutOptions options) {\n        MultipartUpload mpu = delegate().initiateMultipartUpload(\n                getContainer(container), blobMetadata, options);\n        return MultipartUpload.create(container, blobMetadata.getName(),\n                mpu.id(), mpu.blobMetadata(), mpu.putOptions());\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        delegate().abortMultipartUpload(getDelegateMpu(mpu));\n    }\n\n    @Override\n    public String completeMultipartUpload(final MultipartUpload mpu,\n                                          final List<MultipartPart> parts) {\n        return delegate().completeMultipartUpload(getDelegateMpu(mpu), parts);\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n                                             int partNumber, Payload payload) {\n        return delegate().uploadMultipartPart(getDelegateMpu(mpu), partNumber,\n                payload);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/AuthenticationType.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport com.google.common.base.CaseFormat;\n\npublic enum AuthenticationType {\n    AWS_V2,\n    AWS_V4,\n    AWS_V2_OR_V4,\n    NONE;\n\n    static AuthenticationType fromString(String string) {\n        return AuthenticationType.valueOf(CaseFormat.LOWER_HYPHEN.to(\n                CaseFormat.UPPER_UNDERSCORE, string));\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/AwsHttpHeaders.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nfinal class AwsHttpHeaders {\n    static final String ACL = \"x-amz-acl\";\n    static final String API_VERSION = \"x-amz-api-version\";\n    static final String CHECKSUM_ALGORITHM = \"x-amz-checksum-algorithm\";\n    static final String CHECKSUM_CRC32 = \"x-amz-checksum-crc32\";\n    static final String CHECKSUM_CRC32C = \"x-amz-checksum-crc32c\";\n    static final String CHECKSUM_CRC64NVME = \"x-amz-checksum-crc64nvme\";\n    static final String CHECKSUM_MODE = \"x-amz-checksum-mode\";\n    static final String CHECKSUM_SHA1 = \"x-amz-checksum-sha1\";\n    static final String CHECKSUM_SHA256 = \"x-amz-checksum-sha256\";\n    static final String CONTENT_SHA256 = \"x-amz-content-sha256\";\n    static final String COPY_SOURCE = \"x-amz-copy-source\";\n    static final String COPY_SOURCE_IF_MATCH = \"x-amz-copy-source-if-match\";\n    static final String COPY_SOURCE_IF_MODIFIED_SINCE =\n            \"x-amz-copy-source-if-modified-since\";\n    static final String COPY_SOURCE_IF_NONE_MATCH =\n            \"x-amz-copy-source-if-none-match\";\n    static final String COPY_SOURCE_IF_UNMODIFIED_SINCE =\n            \"x-amz-copy-source-if-unmodified-since\";\n    static final String COPY_SOURCE_RANGE = \"x-amz-copy-source-range\";\n    static final String DATE = \"x-amz-date\";\n    static final String 
DECODED_CONTENT_LENGTH =\n            \"x-amz-decoded-content-length\";\n    static final String METADATA_DIRECTIVE = \"x-amz-metadata-directive\";\n    static final String REQUEST_ID = \"x-amz-request-id\";\n    static final String SDK_CHECKSUM_ALGORITHM = \"x-amz-sdk-checksum-algorithm\";\n    static final String STORAGE_CLASS = \"x-amz-storage-class\";\n    static final String TRAILER = \"x-amz-trailer\";\n    static final String TRANSFER_ENCODING = \"x-amz-te\";\n    static final String USER_AGENT = \"x-amz-user-agent\";\n\n    private AwsHttpHeaders() {\n        throw new AssertionError(\"intentionally unimplemented\");\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/AwsSignature.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.IOException;\nimport java.nio.charset.StandardCharsets;\nimport java.security.InvalidKeyException;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\nimport java.util.ArrayList;\nimport java.util.Base64;\nimport java.util.Collection;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.Set;\nimport java.util.regex.Pattern;\n\nimport javax.crypto.Mac;\nimport javax.crypto.spec.SecretKeySpec;\n\nimport com.google.common.base.Joiner;\nimport com.google.common.base.Splitter;\nimport com.google.common.base.Strings;\nimport com.google.common.collect.SortedSetMultimap;\nimport com.google.common.collect.TreeMultimap;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.net.HttpHeaders;\nimport com.google.common.net.PercentEscaper;\n\nimport jakarta.servlet.http.HttpServletRequest;\n\nimport org.jspecify.annotations.Nullable;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nfinal class AwsSignature {\n    private static final Logger logger = LoggerFactory.getLogger(\n            AwsSignature.class);\n    private static final PercentEscaper AWS_URL_PARAMETER_ESCAPER =\n            new PercentEscaper(\"-_.~\", false);\n    private static final Set<String> SIGNED_SUBRESOURCES = Set.of(\n            \"acl\",\n    
        \"delete\",\n            \"lifecycle\",\n            \"location\",\n            \"logging\",\n            \"notification\",\n            \"partNumber\",\n            \"policy\",\n            \"requestPayment\",\n            \"response-cache-control\",\n            \"response-content-disposition\",\n            \"response-content-encoding\",\n            \"response-content-language\",\n            \"response-content-type\",\n            \"response-expires\",\n            \"torrent\",\n            \"uploadId\",\n            \"uploads\",\n            \"versionId\",\n            \"versioning\",\n            \"versions\",\n            \"website\"\n    );\n    private static final Pattern REPEATING_WHITESPACE = Pattern.compile(\"\\\\s+\");\n\n    private AwsSignature() { }\n\n    /**\n     * Create Amazon V2 signature.  Reference:\n     * http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html\n     */\n    static String createAuthorizationSignature(\n            HttpServletRequest request, String uri, String credential,\n            boolean queryAuth, boolean bothDateHeader) {\n        // sort Amazon headers\n        SortedSetMultimap<String, String> canonicalizedHeaders =\n                TreeMultimap.create();\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            Collection<String> headerValues = Collections.list(\n                    request.getHeaders(headerName));\n            headerName = headerName.toLowerCase();\n            if (!headerName.startsWith(\"x-amz-\") || (bothDateHeader &&\n                  headerName.equalsIgnoreCase(AwsHttpHeaders.DATE))) {\n                continue;\n            }\n            if (headerValues.isEmpty()) {\n                canonicalizedHeaders.put(headerName, \"\");\n            }\n            for (String headerValue : headerValues) {\n                canonicalizedHeaders.put(headerName,\n                        Strings.nullToEmpty(headerValue));\n            }\n        
}\n\n        // Build string to sign\n        var builder = new StringBuilder()\n                .append(request.getMethod())\n                .append('\\n')\n                .append(Strings.nullToEmpty(request.getHeader(\n                        HttpHeaders.CONTENT_MD5)))\n                .append('\\n')\n                .append(Strings.nullToEmpty(request.getHeader(\n                        HttpHeaders.CONTENT_TYPE)))\n                .append('\\n');\n        String expires = request.getParameter(\"Expires\");\n        if (queryAuth) {\n            // If expires is not nil, then it is query string sign\n            // If expires is nil, maybe also query string sign\n            // So should check other accessid param, presign to judge.\n            // not the expires\n            builder.append(Strings.nullToEmpty(expires));\n        }  else {\n            if (!bothDateHeader) {\n                if (canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {\n                    builder.append(\"\");\n                } else {\n                    builder.append(request.getHeader(HttpHeaders.DATE));\n                }\n            }  else {\n                if (!canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {\n                    builder.append(request.getHeader(AwsHttpHeaders.DATE));\n                }  else {\n                    // panic\n                }\n            }\n        }\n\n        builder.append('\\n');\n        for (var entry : canonicalizedHeaders.entries()) {\n            builder.append(entry.getKey()).append(':')\n                    .append(entry.getValue()).append('\\n');\n        }\n        builder.append(uri);\n\n        char separator = '?';\n        List<String> subresources = Collections.list(\n                request.getParameterNames());\n        Collections.sort(subresources);\n        for (String subresource : subresources) {\n            if (SIGNED_SUBRESOURCES.contains(subresource)) {\n                
builder.append(separator).append(subresource);\n\n                String value = request.getParameter(subresource);\n                if (!\"\".equals(value)) {\n                    builder.append('=').append(value);\n                }\n                separator = '&';\n            }\n        }\n\n        String stringToSign = builder.toString();\n        logger.trace(\"stringToSign: {}\", stringToSign);\n\n        // Sign string\n        Mac mac;\n        try {\n            mac = Mac.getInstance(\"HmacSHA1\");\n            mac.init(new SecretKeySpec(credential.getBytes(\n                    StandardCharsets.UTF_8), \"HmacSHA1\"));\n        } catch (InvalidKeyException | NoSuchAlgorithmException e) {\n            throw new RuntimeException(e);\n        }\n        return Base64.getEncoder().encodeToString(mac.doFinal(\n                stringToSign.getBytes(StandardCharsets.UTF_8)));\n    }\n\n    private static byte[] signMessage(byte[] data, byte[] key, String algorithm)\n            throws InvalidKeyException, NoSuchAlgorithmException {\n        Mac mac = Mac.getInstance(algorithm);\n        mac.init(new SecretKeySpec(key, algorithm));\n        return mac.doFinal(data);\n    }\n\n    /**\n     * Derive the AWS SigV4 signing key from the credential and auth header.\n     */\n    static byte[] deriveSigningKeyV4(S3AuthorizationHeader authHeader,\n            String credential)\n            throws InvalidKeyException, NoSuchAlgorithmException {\n        String algorithm = authHeader.getHmacAlgorithm();\n        byte[] dateKey = signMessage(\n                authHeader.getDate().getBytes(StandardCharsets.UTF_8),\n                (\"AWS4\" + credential).getBytes(StandardCharsets.UTF_8),\n                algorithm);\n        byte[] dateRegionKey = signMessage(\n                authHeader.getRegion().getBytes(StandardCharsets.UTF_8),\n                dateKey,\n                algorithm);\n        byte[] dateRegionServiceKey = signMessage(\n                
authHeader.getService().getBytes(StandardCharsets.UTF_8),\n                dateRegionKey, algorithm);\n        return signMessage(\n                \"aws4_request\".getBytes(StandardCharsets.UTF_8),\n                dateRegionServiceKey, algorithm);\n    }\n\n    private static String getMessageDigest(byte[] payload, String algorithm)\n            throws NoSuchAlgorithmException {\n        MessageDigest md = MessageDigest.getInstance(algorithm);\n        byte[] hash = md.digest(payload);\n        return BaseEncoding.base16().lowerCase().encode(hash);\n    }\n\n    @Nullable\n    private static List<String> extractSignedHeaders(String authorization) {\n        int index = authorization.indexOf(\"SignedHeaders=\");\n        if (index < 0) {\n            return null;\n        }\n        int endSigned = authorization.indexOf(',', index);\n        if (endSigned < 0) {\n            return null;\n        }\n        int startHeaders = authorization.indexOf('=', index);\n        return Splitter.on(';').splitToList(authorization.substring(\n                startHeaders + 1, endSigned));\n    }\n\n    private static String buildCanonicalHeaders(HttpServletRequest request,\n            List<String> signedHeaders) {\n        List<String> headers = new ArrayList<>(\n                /*initialCapacity=*/ signedHeaders.size());\n        for (String header : signedHeaders) {\n            headers.add(header.toLowerCase());\n        }\n        Collections.sort(headers);\n\n        var headersWithValues = new StringBuilder();\n        boolean firstHeader = true;\n        for (String header : headers) {\n            if (firstHeader) {\n                firstHeader = false;\n            } else {\n                headersWithValues.append('\\n');\n            }\n            headersWithValues.append(header);\n            headersWithValues.append(':');\n\n            boolean firstValue = true;\n            for (String value : Collections.list(request.getHeaders(header))) {\n                if 
(firstValue) {\n                    firstValue = false;\n                } else {\n                    headersWithValues.append(',');\n                }\n                value = value.trim();\n                if (!value.startsWith(\"\\\"\")) {\n                    value = REPEATING_WHITESPACE.matcher(value).replaceAll(\" \");\n                }\n                headersWithValues.append(value);\n            }\n        }\n\n        return headersWithValues.toString();\n    }\n\n    private static String buildCanonicalQueryString(\n            HttpServletRequest request) {\n        // The parameters are required to be sorted\n        List<String> parameters = Collections.list(request.getParameterNames());\n        Collections.sort(parameters);\n        List<String> queryParameters = new ArrayList<>();\n\n        for (String key : parameters) {\n            if (key.equals(\"X-Amz-Signature\")) {\n                continue;\n            }\n            // re-encode keys and values in AWS normalized form\n            String value = request.getParameter(key);\n            queryParameters.add(AWS_URL_PARAMETER_ESCAPER.escape(key) +\n                    \"=\" + AWS_URL_PARAMETER_ESCAPER.escape(value));\n        }\n        return Joiner.on(\"&\").join(queryParameters);\n    }\n\n    private static String createCanonicalRequest(HttpServletRequest request,\n                                                 String uri, byte[] payload,\n                                                 String hashAlgorithm)\n            throws IOException, NoSuchAlgorithmException {\n        String authorizationHeader = request.getHeader(\"Authorization\");\n        String xAmzContentSha256 = request.getHeader(\n                AwsHttpHeaders.CONTENT_SHA256);\n        if (xAmzContentSha256 == null) {\n            xAmzContentSha256 = request.getParameter(\"X-Amz-SignedHeaders\");\n        }\n        String digest;\n        if (authorizationHeader == null) {\n            digest = 
\"UNSIGNED-PAYLOAD\";\n        } else if (\"STREAMING-AWS4-HMAC-SHA256-PAYLOAD\".equals(\n                xAmzContentSha256)) {\n            digest = \"STREAMING-AWS4-HMAC-SHA256-PAYLOAD\";\n        } else if (\"STREAMING-UNSIGNED-PAYLOAD-TRAILER\".equals(xAmzContentSha256)) {\n            digest = \"STREAMING-UNSIGNED-PAYLOAD-TRAILER\";\n        } else if (\"UNSIGNED-PAYLOAD\".equals(xAmzContentSha256)) {\n            digest = \"UNSIGNED-PAYLOAD\";\n        } else {\n            digest = getMessageDigest(payload, hashAlgorithm);\n        }\n        List<String> signedHeaders;\n        if (authorizationHeader != null) {\n            signedHeaders = extractSignedHeaders(authorizationHeader);\n        } else {\n            signedHeaders = Splitter.on(';').splitToList(request.getParameter(\n                    \"X-Amz-SignedHeaders\"));\n        }\n\n        /*\n         * CORS Preflight\n         *\n         * The signature is based on the canonical request, which includes the\n         * HTTP Method.\n         * For presigned URLs, the method must be replaced for OPTIONS request\n         * to match\n         */\n        String method = request.getMethod();\n        if (\"OPTIONS\".equals(method)) {\n            String corsMethod = request.getHeader(\n                    HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD);\n            if (corsMethod != null) {\n                method = corsMethod;\n            }\n        }\n\n        String canonicalRequest = Joiner.on(\"\\n\").join(\n                method,\n                uri,\n                buildCanonicalQueryString(request),\n                buildCanonicalHeaders(request, signedHeaders) + \"\\n\",\n                Joiner.on(';').join(signedHeaders),\n                digest);\n\n        return getMessageDigest(\n                canonicalRequest.getBytes(StandardCharsets.UTF_8),\n                hashAlgorithm);\n    }\n\n    /**\n     * Create v4 signature.  
Reference:\n     * http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html\n     */\n    static String createAuthorizationSignatureV4(\n            HttpServletRequest request, S3AuthorizationHeader authHeader,\n            byte[] payload, String uri, String credential)\n            throws InvalidKeyException, IOException, NoSuchAlgorithmException,\n            S3Exception {\n        String canonicalRequest = createCanonicalRequest(request, uri, payload,\n                authHeader.getHashAlgorithm());\n        String algorithm = authHeader.getHmacAlgorithm();\n        byte[] signingKey = deriveSigningKeyV4(authHeader, credential);\n        String date = request.getHeader(AwsHttpHeaders.DATE);\n        if (date == null) {\n            date = request.getParameter(\"X-Amz-Date\");\n        }\n        String signatureString = \"AWS4-HMAC-SHA256\\n\" +\n                date + \"\\n\" +\n                authHeader.getDate() + \"/\" + authHeader.getRegion() +\n                \"/s3/aws4_request\\n\" +\n                canonicalRequest;\n        byte[] signature = signMessage(\n                signatureString.getBytes(StandardCharsets.UTF_8),\n                signingKey, algorithm);\n        return BaseEncoding.base16().lowerCase().encode(signature);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/BlobStoreLocator.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Map;\n\nimport org.jclouds.blobstore.BlobStore;\n\npublic interface BlobStoreLocator {\n    Map.Entry<String, BlobStore> locateBlobStore(String identity,\n            String container, String blob);\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/CaseInsensitiveImmutableMultimap.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Collection;\n\nimport com.google.common.collect.ForwardingMultimap;\nimport com.google.common.collect.ImmutableMultimap;\nimport com.google.common.collect.Multimap;\n\nfinal class CaseInsensitiveImmutableMultimap\n        extends ForwardingMultimap<String, String> {\n    private final Multimap<String, String> inner;\n\n    CaseInsensitiveImmutableMultimap(Multimap<String, String> map) {\n        var builder = ImmutableMultimap.<String, String>builder();\n        for (var entry : map.entries()) {\n            builder.put(lower(entry.getKey()), entry.getValue());\n        }\n        this.inner = builder.build();\n    }\n\n    @Override\n    protected Multimap<String, String> delegate() {\n        return inner;\n    }\n\n    @Override\n    public Collection<String> get(String key) {\n        return inner.get(lower(key));\n    }\n\n    private static String lower(String key) {\n        return key == null ? null : key.toLowerCase();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/ChunkedInputStream.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.FilterInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.ByteBuffer;\nimport java.nio.charset.StandardCharsets;\nimport java.security.InvalidKeyException;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\nimport java.util.Base64;\n\nimport javax.crypto.Mac;\nimport javax.crypto.spec.SecretKeySpec;\n\nimport com.google.common.hash.Hasher;\nimport com.google.common.hash.Hashing;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.io.ByteStreams;\n\nimport org.jspecify.annotations.Nullable;\n\n/**\n * Parse an AWS v4 signature chunked stream.  
Reference:\n * https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html\n */\nfinal class ChunkedInputStream extends FilterInputStream {\n    private static final int MAX_LINE_LENGTH = 4096;\n    private static final String EMPTY_SHA256 =\n            \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\";\n    private byte[] chunk;\n    private int currentIndex;\n    private int currentLength;\n    private String currentSignature;\n    private final int maxChunkSize;\n    private final Hasher hasher;\n    private final byte @Nullable [] signingKey;\n    @Nullable private final String hmacAlgorithm;\n    @Nullable private final String timestamp;\n    @Nullable private final String scope;\n    @Nullable private String previousSignature;\n\n    ChunkedInputStream(InputStream is, int maxChunkSize) {\n        super(is);\n        this.maxChunkSize = maxChunkSize;\n        hasher = null;\n        signingKey = null;\n        hmacAlgorithm = null;\n        timestamp = null;\n        scope = null;\n    }\n\n    @SuppressWarnings(\"deprecation\")\n    ChunkedInputStream(InputStream is, int maxChunkSize,\n            @Nullable String trailer) {\n        super(is);\n        this.maxChunkSize = maxChunkSize;\n        if (\"x-amz-checksum-crc32\".equals(trailer)) {\n            hasher = Hashing.crc32().newHasher();\n        } else if (\"x-amz-checksum-crc32c\".equals(trailer)) {\n            hasher = Hashing.crc32c().newHasher();\n        } else if (\"x-amz-checksum-sha1\".equals(trailer)) {\n            hasher = Hashing.sha1().newHasher();\n        } else if (\"x-amz-checksum-sha256\".equals(trailer)) {\n            hasher = Hashing.sha256().newHasher();\n        } else {\n            // TODO: Guava does not support x-amz-checksum-crc64nvme\n            hasher = null;\n        }\n        signingKey = null;\n        hmacAlgorithm = null;\n        timestamp = null;\n        scope = null;\n    }\n\n    /**\n     * Construct a chunked stream that verifies 
the per-chunk signature chain\n     * used by STREAMING-AWS4-HMAC-SHA256-PAYLOAD.\n     *\n     * @param seedSignature the Authorization header signature (hex-encoded)\n     * @param signingKey    the AWS SigV4 signing key\n     * @param hmacAlgorithm HMAC algorithm name (e.g. \"HmacSHA256\")\n     * @param timestamp     full ISO8601 request timestamp (x-amz-date)\n     * @param scope         credential scope (date/region/service/aws4_request)\n     */\n    ChunkedInputStream(InputStream is, int maxChunkSize,\n            String seedSignature, byte[] signingKey, String hmacAlgorithm,\n            String timestamp, String scope) {\n        super(is);\n        this.maxChunkSize = maxChunkSize;\n        this.hasher = null;\n        this.signingKey = signingKey.clone();\n        this.hmacAlgorithm = hmacAlgorithm;\n        this.timestamp = timestamp;\n        this.scope = scope;\n        this.previousSignature = seedSignature;\n    }\n\n    @Override\n    public int read() throws IOException {\n        while (currentIndex == currentLength) {\n            String line = readLine(in);\n            if (line.equals(\"\")) {\n                return -1;\n            }\n            String[] parts = line.split(\";\", 2);\n            if (parts[0].startsWith(\"x-amz-checksum-\")) {\n                String[] checksumParts = parts[0].split(\":\", 2);\n                var expectedHash = checksumParts[1];\n                var actualHash = switch (checksumParts[0]) {\n                case \"x-amz-checksum-crc32\", \"x-amz-checksum-crc32c\" -> ByteBuffer.allocate(4).putInt(hasher.hash().asInt()).array(); // Use big-endian to match AWS\n                case \"x-amz-checksum-sha1\", \"x-amz-checksum-sha256\" -> hasher.hash().asBytes();\n                default -> throw new IllegalArgumentException(\"Unknown value: \" + checksumParts[0]);\n                };\n                if (!expectedHash.equals(Base64.getEncoder().encodeToString(actualHash))) {\n                    throw new 
IOException(new S3Exception(S3ErrorCode.BAD_DIGEST));\n                }\n                currentLength = 0;\n            } else {\n                currentLength = Integer.parseInt(parts[0], 16);\n                if (currentLength < 0 || currentLength > maxChunkSize) {\n                    throw new IOException(\n                            \"chunk size exceeds maximum: \" + currentLength);\n                }\n            }\n            if (parts.length > 1) {\n                String sigPart = parts[1];\n                int eq = sigPart.indexOf('=');\n                currentSignature = eq >= 0 ? sigPart.substring(eq + 1) : sigPart;\n            } else {\n                currentSignature = null;\n            }\n            chunk = new byte[currentLength];\n            currentIndex = 0;\n            ByteStreams.readFully(in, chunk);\n            if (hasher != null) {\n                hasher.putBytes(chunk);\n            }\n            if (signingKey != null) {\n                verifyChunkSignature(chunk, currentSignature);\n            }\n            if (currentLength == 0) {\n                return -1;\n            }\n            // consume trailing \\r\\n\n            readLine(in);\n        }\n        return chunk[currentIndex++] & 0xFF;\n    }\n\n    @Override\n    public int read(byte[] b, int off, int len) throws IOException {\n        int i;\n        for (i = 0; i < len; ++i) {\n            int ch = read();\n            if (ch == -1) {\n                break;\n            }\n            b[off + i] = (byte) ch;\n        }\n        if (i == 0) {\n            return -1;\n        }\n        return i;\n    }\n\n    private void verifyChunkSignature(byte[] data, @Nullable String signature)\n            throws IOException {\n        if (signature == null) {\n            throw new IOException(new S3Exception(\n                    S3ErrorCode.SIGNATURE_DOES_NOT_MATCH));\n        }\n        String chunkHash;\n        try {\n            MessageDigest md = 
MessageDigest.getInstance(\"SHA-256\");\n            chunkHash = BaseEncoding.base16().lowerCase()\n                    .encode(md.digest(data));\n        } catch (NoSuchAlgorithmException e) {\n            throw new IOException(e);\n        }\n        String stringToSign = \"AWS4-HMAC-SHA256-PAYLOAD\\n\" +\n                timestamp + \"\\n\" +\n                scope + \"\\n\" +\n                previousSignature + \"\\n\" +\n                EMPTY_SHA256 + \"\\n\" +\n                chunkHash;\n        String expected;\n        try {\n            Mac mac = Mac.getInstance(hmacAlgorithm);\n            mac.init(new SecretKeySpec(signingKey, hmacAlgorithm));\n            expected = BaseEncoding.base16().lowerCase().encode(\n                    mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8)));\n        } catch (InvalidKeyException | NoSuchAlgorithmException e) {\n            throw new IOException(e);\n        }\n        if (!constantTimeEquals(expected, signature)) {\n            throw new IOException(new S3Exception(\n                    S3ErrorCode.SIGNATURE_DOES_NOT_MATCH));\n        }\n        previousSignature = signature;\n    }\n\n    private static boolean constantTimeEquals(String a, String b) {\n        if (a.length() != b.length()) {\n            return false;\n        }\n        int diff = 0;\n        for (int i = 0; i < a.length(); i++) {\n            diff |= a.charAt(i) ^ b.charAt(i);\n        }\n        return diff == 0;\n    }\n\n    /**\n     * Read a \\r\\n terminated line from an InputStream.\n     *\n     * @return line without the newline or empty String if InputStream is empty\n     */\n    private static String readLine(InputStream is) throws IOException {\n        var builder = new StringBuilder();\n        while (true) {\n            int ch = is.read();\n            if (ch == '\\r') {\n                ch = is.read();\n                if (ch == '\\n') {\n                    break;\n                } else {\n                    throw 
new IOException(\"unexpected char after \\\\r: \" + ch);\n                }\n            } else if (ch == -1) {\n                if (builder.length() > 0) {\n                    throw new IOException(\"unexpected end of stream\");\n                }\n                break;\n            }\n            if (builder.length() >= MAX_LINE_LENGTH) {\n                throw new IOException(\"chunk header too long\");\n            }\n            builder.append((char) ch);\n        }\n        return builder.toString();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/CompleteMultipartUploadRequest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Collection;\n\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;\n\n// CHECKSTYLE:OFF\nfinal class CompleteMultipartUploadRequest {\n    @JacksonXmlProperty(localName = \"Part\")\n    @JacksonXmlElementWrapper(useWrapping = false)\n    Collection<Part> parts;\n\n    static final class Part {\n        @JacksonXmlProperty(localName = \"PartNumber\")\n        int partNumber;\n        @JacksonXmlProperty(localName = \"ETag\")\n        String eTag;\n\n        // TODO: unsupported checksums\n        @JacksonXmlProperty(localName = \"ChecksumCRC32\")\n        String checksumCRC32;\n        @JacksonXmlProperty(localName = \"ChecksumCRC32C\")\n        String checksumCRC32C;\n        @JacksonXmlProperty(localName = \"ChecksumSHA1\")\n        String checksumSHA1;\n        @JacksonXmlProperty(localName = \"ChecksumSHA256\")\n        String checksumSHA256;\n    }\n}\n// CHECKSTYLE:ON\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/CreateBucketRequest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;\n\n// CHECKSTYLE:OFF\nfinal class CreateBucketRequest {\n    @JacksonXmlProperty(localName = \"LocationConstraint\")\n    String locationConstraint;\n}\n// CHECKSTYLE:ON\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/CrossOriginResourceSharing.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Objects;\nimport java.util.Set;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport com.google.common.base.Joiner;\nimport com.google.common.base.Splitter;\nimport com.google.common.base.Strings;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic final class CrossOriginResourceSharing {\n    protected static final List<String> SUPPORTED_METHODS =\n            List.of(\"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\");\n\n    private static final String HEADER_VALUE_SEPARATOR = \", \";\n    private static final String ALLOW_ANY_ORIGIN = \"*\";\n    private static final String ALLOW_ANY_HEADER = \"*\";\n    private static final String EXPOSE_ALL_HEADERS = \"*\";\n    private static final String ALLOW_CREDENTIALS = \"true\";\n\n    private static final Logger logger = LoggerFactory.getLogger(\n            CrossOriginResourceSharing.class);\n\n    private final String allowedMethodsRaw;\n    private final String allowedHeadersRaw;\n    private final String exposedHeadersRaw;\n    private final boolean anyOriginAllowed;\n    // Enforce ordering of values\n    private final List<Pattern> allowedOrigins;\n    private final List<String> allowedMethods;\n    private final List<String> 
allowedHeaders;\n    private final List<String> exposedHeaders;\n    private final String allowCredentials;\n\n    public CrossOriginResourceSharing() {\n        // CORS Allow all\n        this(List.of(ALLOW_ANY_ORIGIN), SUPPORTED_METHODS,\n            List.of(ALLOW_ANY_HEADER),\n            List.of(EXPOSE_ALL_HEADERS), \"\");\n    }\n\n    public CrossOriginResourceSharing(List<String> allowedOrigins,\n            List<String> allowedMethods,\n            List<String> allowedHeaders,\n            List<String> exposedHeaders,\n            String allowCredentials) {\n        Set<Pattern> allowedPattern = new HashSet<Pattern>();\n        boolean anyOriginAllowed = false;\n\n        if (allowedOrigins != null) {\n            if (allowedOrigins.contains(ALLOW_ANY_ORIGIN)) {\n                anyOriginAllowed = true;\n            } else {\n                for (String origin : allowedOrigins) {\n                    allowedPattern.add(Pattern.compile(\n                        origin, Pattern.CASE_INSENSITIVE));\n                }\n            }\n        }\n        this.anyOriginAllowed = anyOriginAllowed;\n        this.allowedOrigins = List.copyOf(allowedPattern);\n\n        if (allowedMethods == null) {\n            this.allowedMethods = List.of();\n        } else {\n            this.allowedMethods = List.copyOf(allowedMethods);\n        }\n        this.allowedMethodsRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(\n                this.allowedMethods);\n\n        if (allowedHeaders == null) {\n            this.allowedHeaders = List.of();\n        } else {\n            this.allowedHeaders = List.copyOf(allowedHeaders);\n        }\n        this.allowedHeadersRaw = Joiner.on(HEADER_VALUE_SEPARATOR).join(\n                this.allowedHeaders);\n\n        if (exposedHeaders == null) {\n            this.exposedHeaders = List.of();\n        } else {\n            this.exposedHeaders = List.copyOf(exposedHeaders);\n        }\n        this.exposedHeadersRaw = 
Joiner.on(HEADER_VALUE_SEPARATOR).join(\n                this.exposedHeaders);\n\n        this.allowCredentials = allowCredentials;\n\n        logger.info(\"CORS allowed origins: {}\", allowedOrigins);\n        logger.info(\"CORS allowed methods: {}\", allowedMethods);\n        logger.info(\"CORS allowed headers: {}\", allowedHeaders);\n        logger.info(\"CORS exposed headers: {}\", exposedHeaders);\n        logger.info(\"CORS allow credentials: {}\", allowCredentials);\n    }\n\n    public String getAllowedMethods() {\n        return this.allowedMethodsRaw;\n    }\n\n    public String getExposedHeaders() {\n        return this.exposedHeadersRaw;\n    }\n\n    public String getAllowedOrigin(String origin) {\n        if (this.anyOriginAllowed) {\n            return ALLOW_ANY_ORIGIN;\n        } else {\n            return origin;\n        }\n    }\n\n    public boolean isOriginAllowed(String origin) {\n        if (!Strings.isNullOrEmpty(origin)) {\n            if (this.anyOriginAllowed) {\n                logger.debug(\"CORS origin allowed: {}\", origin);\n                return true;\n            } else {\n                for (Pattern pattern : this.allowedOrigins) {\n                    Matcher matcher = pattern.matcher(origin);\n                    if (matcher.matches()) {\n                        logger.debug(\"CORS origin allowed: {}\", origin);\n                        return true;\n                    }\n                }\n            }\n        }\n        logger.debug(\"CORS origin not allowed: {}\", origin);\n        return false;\n    }\n\n    public boolean isMethodAllowed(String method) {\n        if (!Strings.isNullOrEmpty(method)) {\n            if (this.allowedMethods.contains(method)) {\n                logger.debug(\"CORS method allowed: {}\", method);\n                return true;\n            }\n        }\n        logger.debug(\"CORS method not allowed: {}\", method);\n        return false;\n    }\n\n    public boolean isEveryHeaderAllowed(String 
headers) {\n        boolean result = false;\n\n        if (!Strings.isNullOrEmpty(headers)) {\n            if (this.allowedHeadersRaw.equals(ALLOW_ANY_HEADER)) {\n                result = true;\n            } else {\n                for (String header : Splitter.on(HEADER_VALUE_SEPARATOR).split(\n                        headers)) {\n                    result = this.allowedHeaders.contains(header);\n                    if (!result) {\n                        // First not matching header breaks\n                        break;\n                    }\n                }\n            }\n        }\n\n        if (result) {\n            logger.debug(\"CORS headers allowed: {}\", headers);\n        } else {\n            logger.debug(\"CORS headers not allowed: {}\", headers);\n        }\n\n        return result;\n    }\n\n    public boolean isAllowCredentials() {\n        return ALLOW_CREDENTIALS.equals(allowCredentials);\n    }\n\n    @Override\n    public boolean equals(Object object) {\n        if (this == object) {\n            return true;\n        }\n        if (!(object instanceof CrossOriginResourceSharing that)) {\n            return false;\n        }\n\n        return this.allowedOrigins.equals(that.allowedOrigins) &&\n                this.allowedMethodsRaw.equals(that.allowedMethodsRaw) &&\n                this.allowedHeadersRaw.equals(that.allowedHeadersRaw);\n    }\n\n    @Override\n    public int hashCode() {\n        return Objects.hash(this.allowedOrigins, this.allowedMethodsRaw,\n                this.allowedHeadersRaw);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/DeleteMultipleObjectsRequest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Collection;\n\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;\nimport com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;\n\n// CHECKSTYLE:OFF\nfinal class DeleteMultipleObjectsRequest {\n    @JacksonXmlProperty(localName = \"Quiet\")\n    boolean quiet;\n\n    @JacksonXmlProperty(localName = \"Object\")\n    @JacksonXmlElementWrapper(useWrapping = false)\n    Collection<S3Object> objects;\n\n    static final class S3Object {\n        @JacksonXmlProperty(localName = \"Key\")\n        String key;\n        @JacksonXmlProperty(localName = \"VersionID\")\n        String versionId;\n    }\n}\n// CHECKSTYLE:ON\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/EncryptedBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.UncheckedIOException;\nimport java.nio.charset.StandardCharsets;\nimport java.security.GeneralSecurityException;\nimport java.security.spec.KeySpec;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.regex.Matcher;\n\nimport javax.crypto.SecretKey;\nimport javax.crypto.SecretKeyFactory;\nimport javax.crypto.spec.PBEKeySpec;\nimport javax.crypto.spec.SecretKeySpec;\n\nimport com.google.common.base.Strings;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.hash.HashCode;\nimport com.google.common.hash.Hashing;\nimport com.google.common.net.HttpHeaders;\n\nimport org.gaul.s3proxy.crypto.Constants;\nimport org.gaul.s3proxy.crypto.Decryption;\nimport org.gaul.s3proxy.crypto.Encryption;\nimport org.gaul.s3proxy.crypto.PartPadding;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobBuilder;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport 
org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.MutableBlobMetadata;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.io.MutableContentMetadata;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.Payloads;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n@SuppressWarnings(\"UnstableApiUsage\")\npublic final class EncryptedBlobStore extends ForwardingBlobStore {\n    private final Logger logger =\n        LoggerFactory.getLogger(EncryptedBlobStore.class);\n    private SecretKeySpec secretKey;\n\n    private EncryptedBlobStore(BlobStore blobStore, Properties properties)\n            throws IllegalArgumentException {\n        super(blobStore);\n\n        String password = properties.getProperty(\n            S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD);\n        checkArgument(!Strings.isNullOrEmpty(password),\n            \"Password for encrypted blobstore is not set\");\n\n        String salt = properties.getProperty(\n            S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_SALT);\n        checkArgument(!Strings.isNullOrEmpty(salt),\n            \"Salt for encrypted blobstore is not set\");\n        initStore(password, salt);\n    }\n\n    static BlobStore newEncryptedBlobStore(BlobStore blobStore,\n        Properties properties) throws IOException {\n        return new EncryptedBlobStore(blobStore, properties);\n    }\n\n    
private void initStore(String password, String salt)\n            throws IllegalArgumentException {\n        try {\n            SecretKeyFactory factory =\n                SecretKeyFactory.getInstance(\"PBKDF2WithHmacSHA256\");\n            KeySpec spec =\n                new PBEKeySpec(password.toCharArray(), salt.getBytes(), 65536,\n                    128);\n            SecretKey tmp = factory.generateSecret(spec);\n            secretKey = new SecretKeySpec(tmp.getEncoded(), \"AES\");\n        } catch (GeneralSecurityException e) {\n            throw new IllegalArgumentException(e);\n        }\n    }\n\n    private Blob cipheredBlob(String container, Blob blob, InputStream payload,\n        long contentLength,\n        boolean addEncryptedMetadata) {\n\n        // make a copy of the blob with the new payload stream\n        BlobMetadata blobMeta = blob.getMetadata();\n        ContentMetadata contentMeta = blob.getMetadata().getContentMetadata();\n        Map<String, String> userMetadata = blobMeta.getUserMetadata();\n        String contentType = contentMeta.getContentType();\n\n        // suffix the content type with -s3enc if we need to encrypt\n        if (addEncryptedMetadata) {\n            blobMeta = setEncryptedSuffix(blobMeta);\n        } else {\n            // remove the -s3enc suffix while decrypting\n            // but not if it contains a multipart meta\n            if (!blobMeta.getUserMetadata()\n                .containsKey(Constants.METADATA_IS_ENCRYPTED_MULTIPART)) {\n                blobMeta = removeEncryptedSuffix(blobMeta);\n            }\n        }\n\n        // we do not set contentMD5 as it will not match due to the encryption\n        Blob cipheredBlob = blobBuilder(container)\n            .name(blobMeta.getName())\n            .type(blobMeta.getType())\n            .tier(blobMeta.getTier())\n            .userMetadata(userMetadata)\n            .payload(payload)\n            .cacheControl(contentMeta.getCacheControl())\n            
.contentDisposition(contentMeta.getContentDisposition())\n            .contentEncoding(contentMeta.getContentEncoding())\n            .contentLanguage(contentMeta.getContentLanguage())\n            .contentLength(contentLength)\n            .contentType(contentType)\n            .build();\n\n        cipheredBlob.getMetadata().setUri(blobMeta.getUri());\n        cipheredBlob.getMetadata().setETag(blobMeta.getETag());\n        cipheredBlob.getMetadata().setLastModified(blobMeta.getLastModified());\n        cipheredBlob.getMetadata().setSize(blobMeta.getSize());\n        cipheredBlob.getMetadata().setPublicUri(blobMeta.getPublicUri());\n        cipheredBlob.getMetadata().setContainer(blobMeta.getContainer());\n\n        return cipheredBlob;\n    }\n\n    private Blob encryptBlob(String container, Blob blob) {\n\n        try {\n            // open the streams and pass them through the encryption\n            InputStream isRaw = blob.getPayload().openStream();\n            Encryption encryption =\n                new Encryption(secretKey, isRaw, 1);\n            InputStream is = encryption.openStream();\n\n            // adjust the encrypted content length by\n            // adding the padding block size\n            long contentLength =\n                blob.getMetadata().getContentMetadata().getContentLength() +\n                    Constants.PADDING_BLOCK_SIZE;\n\n            return cipheredBlob(container, blob, is, contentLength, true);\n        } catch (IOException | GeneralSecurityException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    private Payload encryptPayload(Payload payload, int partNumber) {\n\n        try {\n            // open the streams and pass them through the encryption\n            InputStream isRaw = payload.openStream();\n            Encryption encryption =\n                new Encryption(secretKey, isRaw, partNumber);\n            InputStream is = encryption.openStream();\n\n            Payload cipheredPayload = 
Payloads.newInputStreamPayload(is);\n            MutableContentMetadata contentMetadata =\n                payload.getContentMetadata();\n            HashCode md5 = null;\n            contentMetadata.setContentMD5(md5);\n            cipheredPayload.setContentMetadata(payload.getContentMetadata());\n            cipheredPayload.setSensitive(payload.isSensitive());\n\n            // adjust the encrypted content length by\n            // adding the padding block size\n            long contentLength =\n                payload.getContentMetadata().getContentLength() +\n                    Constants.PADDING_BLOCK_SIZE;\n            cipheredPayload.getContentMetadata()\n                .setContentLength(contentLength);\n\n            return cipheredPayload;\n        } catch (IOException | GeneralSecurityException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    private Blob decryptBlob(Decryption decryption, String container,\n        Blob blob) {\n        try {\n            // handle blob does not exist\n            if (blob == null) {\n                return null;\n            }\n\n            // open the streams and pass them through the decryption\n            InputStream isRaw = blob.getPayload().openStream();\n            InputStream is = decryption.openStream(isRaw);\n\n            // adjust the content length if the blob is encrypted\n            long contentLength =\n                blob.getMetadata().getContentMetadata().getContentLength();\n            if (decryption.isEncrypted()) {\n                contentLength = decryption.getContentLength();\n            }\n\n            return cipheredBlob(container, blob, is, contentLength, false);\n        } catch (IOException e) {\n            throw new UncheckedIOException(e);\n        }\n    }\n\n    // filter the list by showing the unencrypted blob size\n    private PageSet<? extends StorageMetadata> filteredList(\n        PageSet<? 
extends StorageMetadata> pageSet) {\n        var builder = ImmutableSet.<StorageMetadata>builder();\n        for (StorageMetadata sm : pageSet) {\n            if (sm instanceof BlobMetadata bm) {\n                MutableBlobMetadata mbm =\n                    new MutableBlobMetadataImpl(bm);\n\n                // if blob is encrypted remove the -s3enc suffix\n                // from content type\n                if (isEncrypted(mbm)) {\n                    mbm = removeEncryptedSuffix(bm);\n                    mbm = calculateBlobSize(mbm);\n                }\n\n                builder.add(mbm);\n            } else if (sm.getName() != null && isEncrypted(sm.getName())) {\n                // non-BlobMetadata list entries (e.g. from S3 list backends)\n                // still need the .s3enc suffix stripped from the name\n                var msm = new MutableStorageMetadataImpl(sm);\n                msm.setName(removeEncryptedSuffix(sm.getName()));\n                builder.add(msm);\n            } else {\n                builder.add(sm);\n            }\n        }\n\n        // make sure the marker do not show blob with .s3enc suffix\n        String marker = pageSet.getNextMarker();\n        if (marker != null && isEncrypted(marker)) {\n            marker = removeEncryptedSuffix(marker);\n        }\n        return new PageSetImpl<>(builder.build(), marker);\n    }\n\n    private boolean isEncrypted(BlobMetadata blobMeta) {\n        return isEncrypted(blobMeta.getName());\n    }\n\n    private boolean isEncrypted(String blobName) {\n        return blobName.endsWith(Constants.S3_ENC_SUFFIX);\n    }\n\n    private MutableBlobMetadata setEncryptedSuffix(BlobMetadata blobMeta) {\n        var bm = new MutableBlobMetadataImpl(blobMeta);\n        if (blobMeta.getName() != null && !isEncrypted(blobMeta.getName())) {\n            bm.setName(blobNameWithSuffix(blobMeta.getName()));\n        }\n\n        return bm;\n    }\n\n    private String removeEncryptedSuffix(String blobName) 
{\n        return blobName.substring(0,\n            blobName.length() - Constants.S3_ENC_SUFFIX.length());\n    }\n\n    private MutableBlobMetadata removeEncryptedSuffix(BlobMetadata blobMeta) {\n        var bm = new MutableBlobMetadataImpl(blobMeta);\n        if (isEncrypted(bm.getName())) {\n            String blobName = bm.getName();\n            bm.setName(removeEncryptedSuffix(blobName));\n        }\n\n        return bm;\n    }\n\n    private MutableBlobMetadata calculateBlobSize(BlobMetadata blobMeta) {\n        MutableBlobMetadata mbm = removeEncryptedSuffix(blobMeta);\n\n        // we are using on non-s3 backends like azure or gcp a metadata key to\n        // calculate the part padding sizes that needs to be removed\n        if (mbm.getUserMetadata()\n            .containsKey(Constants.METADATA_ENCRYPTION_PARTS)) {\n            int parts = Integer.parseInt(\n                mbm.getUserMetadata().get(Constants.METADATA_ENCRYPTION_PARTS));\n            int partPaddingSizes = Constants.PADDING_BLOCK_SIZE * parts;\n            long size = blobMeta.getSize() - partPaddingSizes;\n            mbm.setSize(size);\n            mbm.getContentMetadata().setContentLength(size);\n        } else {\n            // on s3 backends like aws or minio we rely on the eTag suffix\n            Matcher matcher =\n                Constants.MPU_ETAG_SUFFIX_PATTERN.matcher(blobMeta.getETag());\n            if (matcher.find()) {\n                int parts = Integer.parseInt(matcher.group(1));\n                int partPaddingSizes = Constants.PADDING_BLOCK_SIZE * parts;\n                long size = blobMeta.getSize() - partPaddingSizes;\n                mbm.setSize(size);\n                mbm.getContentMetadata().setContentLength(size);\n            } else {\n                // if there is also no eTag suffix then get the number of parts from last padding\n                var options = new GetOptions()\n                    .range(blobMeta.getSize() - Constants.PADDING_BLOCK_SIZE, 
blobMeta.getSize());\n                var name = blobNameWithSuffix(blobMeta.getName());\n                var blob = delegate().getBlob(blobMeta.getContainer(), name, options);\n                try {\n                    PartPadding lastPartPadding = PartPadding.readPartPaddingFromBlob(blob);\n                    int parts = lastPartPadding.getPart();\n                    int partPaddingSizes = Constants.PADDING_BLOCK_SIZE * parts;\n                    long size = blobMeta.getSize() - partPaddingSizes;\n                    mbm.setSize(size);\n                    mbm.getContentMetadata().setContentLength(size);\n                } catch (IOException e) {\n                    throw new UncheckedIOException(\"Failed to read part-padding from encrypted blob\", e);\n                }\n            }\n        }\n\n        return mbm;\n    }\n\n    private boolean multipartRequiresStub() {\n        String blobStoreType = getBlobStoreType();\n        return Quirks.MULTIPART_REQUIRES_STUB.contains(blobStoreType);\n    }\n\n    private String blobNameWithSuffix(String container, String name) {\n        String nameWithSuffix = blobNameWithSuffix(name);\n        if (delegate().blobExists(container, nameWithSuffix)) {\n            name = nameWithSuffix;\n        }\n        return name;\n    }\n\n    private String blobNameWithSuffix(String name) {\n        return name + Constants.S3_ENC_SUFFIX;\n    }\n\n    private String getBlobStoreType() {\n        return delegate().getContext().unwrap().getProviderMetadata().getId();\n    }\n\n    private String generateUploadId(String container, String blobName) {\n        String path = container + \"/\" + blobName;\n        @SuppressWarnings(\"deprecation\")\n        var hash = Hashing.md5();\n        return hash.hashBytes(path.getBytes(StandardCharsets.UTF_8)).toString();\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName) {\n        return getBlob(containerName, blobName, new GetOptions());\n    }\n\n  
  @Override\n    public Blob getBlob(String containerName, String blobName,\n        GetOptions getOptions) {\n\n        // adjust the blob name\n        blobName = blobNameWithSuffix(blobName);\n\n        // get the metadata to determine the blob size\n        BlobMetadata meta = delegate().blobMetadata(containerName, blobName);\n\n        try {\n            // we have a blob that ends with .s3enc\n            if (meta != null) {\n                // init defaults\n                long offset = 0;\n                long end = 0;\n                long length = -1;\n\n                if (getOptions.getRanges().size() > 0) {\n                    // S3 doesn't allow multiple ranges\n                    String range = getOptions.getRanges().get(0);\n                    String[] ranges = range.split(\"-\", 2);\n\n                    if (ranges[0].isEmpty()) {\n                        // handle to read from the end\n                        end = Long.parseLong(ranges[1]);\n                        length = end;\n                    } else if (ranges[1].isEmpty()) {\n                        // handle to read from an offset till the end\n                        offset = Long.parseLong(ranges[0]);\n                    } else {\n                        // handle to read from an offset\n                        offset = Long.parseLong(ranges[0]);\n                        end = Long.parseLong(ranges[1]);\n                        length = end - offset + 1;\n                    }\n                }\n\n                // init decryption\n                Decryption decryption =\n                    new Decryption(secretKey, delegate(), meta, offset, length);\n\n                if (decryption.isEncrypted() &&\n                    getOptions.getRanges().size() > 0) {\n                    // clear current ranges to avoid multiple ranges\n                    getOptions.getRanges().clear();\n\n                    long startAt = decryption.getStartAt();\n                    long endAt = 
decryption.getEncryptedSize();\n\n                    if (offset == 0 && end > 0 && length == end) {\n                        // handle to read from the end\n                        startAt = decryption.calculateTail();\n                    } else if (offset > 0 && end > 0) {\n                        // handle to read from an offset\n                        endAt = decryption.calculateEndAt(end);\n                    }\n\n                    getOptions.range(startAt, endAt);\n                }\n\n                Blob blob =\n                    delegate().getBlob(containerName, blobName, getOptions);\n                Blob decryptedBlob = decryptBlob(decryption, containerName, blob);\n                if (!getOptions.getRanges().isEmpty()) {\n                    long decryptedSize = decryption.getUnencryptedSize();\n                    long endRange = (offset != 0 && end == 0) ? decryptedSize : end;\n                    decryptedBlob.getAllHeaders()\n                        .put(HttpHeaders.CONTENT_RANGE, \"bytes \" + offset + \"-\" + endRange +\n                            \"/\" + decryptedSize);\n                }\n                return decryptedBlob;\n            } else {\n                // we suppose to return a unencrypted blob\n                // since no metadata was found\n                blobName = removeEncryptedSuffix(blobName);\n                return delegate().getBlob(containerName, blobName, getOptions);\n            }\n\n        } catch (IOException e) {\n            throw new UncheckedIOException(e);\n        }\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return delegate().putBlob(containerName,\n            encryptBlob(containerName, blob));\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n        PutOptions putOptions) {\n        return delegate().putBlob(containerName,\n            encryptBlob(containerName, blob), putOptions);\n    }\n\n    @Override\n    
public String copyBlob(String fromContainer, String fromName,\n        String toContainer, String toName, CopyOptions options) {\n\n        // if we copy an encrypted blob\n        // make sure to add suffix to the destination blob name\n        String blobName = blobNameWithSuffix(fromName);\n        if (delegate().blobExists(fromContainer, blobName)) {\n            fromName = blobName;\n            toName = blobNameWithSuffix(toName);\n        }\n\n        return delegate().copyBlob(fromContainer, fromName, toContainer, toName,\n            options);\n    }\n\n    @Override\n    public void removeBlob(String container, String name) {\n        name = blobNameWithSuffix(container, name);\n        delegate().removeBlob(container, name);\n    }\n\n    @Override\n    public void removeBlobs(String container, Iterable<String> names) {\n        List<String> filteredNames = new ArrayList<>();\n\n        // filter the list of blobs to determine\n        // if we need to delete encrypted blobs\n        for (String name : names) {\n            name = blobNameWithSuffix(container, name);\n            filteredNames.add(name);\n        }\n\n        delegate().removeBlobs(container, filteredNames);\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String name) {\n        name = blobNameWithSuffix(container, name);\n        return delegate().getBlobAccess(container, name);\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        name = blobNameWithSuffix(container, name);\n        return delegate().blobExists(container, name);\n    }\n\n    @Override\n    public void setBlobAccess(String container, String name,\n        BlobAccess access) {\n        name = blobNameWithSuffix(container, name);\n        delegate().setBlobAccess(container, name, access);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        PageSet<? 
extends StorageMetadata> pageSet = delegate().list();\n        return filteredList(pageSet);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        PageSet<? extends StorageMetadata> pageSet = delegate().list(container);\n        return filteredList(pageSet);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container,\n        ListContainerOptions options) {\n        PageSet<? extends StorageMetadata> pageSet =\n            delegate().list(container, options);\n        return filteredList(pageSet);\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n        BlobMetadata blobMetadata, PutOptions options) {\n        MutableBlobMetadata mbm = new MutableBlobMetadataImpl(blobMetadata);\n        mbm = setEncryptedSuffix(mbm);\n\n        MultipartUpload mpu =\n            delegate().initiateMultipartUpload(container, mbm, options);\n\n        // handle non-s3 backends\n        // by setting a metadata key for multipart stubs\n        if (multipartRequiresStub()) {\n            mbm.getUserMetadata()\n                .put(Constants.METADATA_IS_ENCRYPTED_MULTIPART, \"true\");\n\n            if (getBlobStoreType().equals(\"azureblob\")) {\n                // use part 0 as a placeholder\n                delegate().uploadMultipartPart(mpu, 0,\n                    Payloads.newStringPayload(\"dummy\"));\n\n                // since azure does not have a uploadId\n                // we use the sha256 of the path\n                String uploadId = generateUploadId(container, mbm.getName());\n\n                mpu = MultipartUpload.create(mpu.containerName(),\n                    mpu.blobName(), uploadId, mpu.blobMetadata(), options);\n            } else if (getBlobStoreType().equals(\"google-cloud-storage\")) {\n                mbm.getUserMetadata()\n                    .put(Constants.METADATA_MULTIPART_KEY, mbm.getName());\n\n                // since gcp 
does not have a uploadId\n                // we use the sha256 of the path\n                String uploadId = generateUploadId(container, mbm.getName());\n\n                // to emulate later the list of multipart uploads\n                // we create a placeholder\n                BlobBuilder builder =\n                    blobBuilder(Constants.MPU_FOLDER + uploadId)\n                        .payload(\"\")\n                        .userMetadata(mbm.getUserMetadata());\n                delegate().putBlob(container, builder.build(), options);\n\n                // final mpu on gcp\n                mpu = MultipartUpload.create(mpu.containerName(),\n                    mpu.blobName(), uploadId, mpu.blobMetadata(), options);\n            }\n        }\n\n        return mpu;\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        List<MultipartUpload> mpus = new ArrayList<>();\n\n        // emulate list of multipart uploads on gcp\n        if (getBlobStoreType().equals(\"google-cloud-storage\")) {\n            var options = new ListContainerOptions();\n            PageSet<? 
extends StorageMetadata> mpuList =\n                delegate().list(container,\n                    options.prefix(Constants.MPU_FOLDER));\n\n            // find all blobs in .mpu folder and build the list\n            for (StorageMetadata blob : mpuList) {\n                Map<String, String> meta = blob.getUserMetadata();\n                if (meta.containsKey(Constants.METADATA_MULTIPART_KEY)) {\n                    String blobName =\n                        meta.get(Constants.METADATA_MULTIPART_KEY);\n                    String uploadId =\n                        blob.getName()\n                            .substring(blob.getName().lastIndexOf(\"/\") + 1);\n                    MultipartUpload mpu =\n                        MultipartUpload.create(container,\n                            blobName, uploadId, null, null);\n                    mpus.add(mpu);\n                }\n            }\n        } else {\n            mpus = delegate().listMultipartUploads(container);\n        }\n\n        List<MultipartUpload> filtered = new ArrayList<>();\n        // filter the list uploads by removing the .s3enc suffix\n        for (MultipartUpload mpu : mpus) {\n            String blobName = mpu.blobName();\n            if (isEncrypted(blobName)) {\n                blobName = removeEncryptedSuffix(mpu.blobName());\n\n                String uploadId = mpu.id();\n\n                // since azure not have a uploadId\n                // we use the sha256 of the path\n                if (getBlobStoreType().equals(\"azureblob\")) {\n                    uploadId = generateUploadId(container, mpu.blobName());\n                }\n\n                MultipartUpload mpuWithoutSuffix =\n                    MultipartUpload.create(mpu.containerName(),\n                        blobName, uploadId, mpu.blobMetadata(),\n                        mpu.putOptions());\n\n                filtered.add(mpuWithoutSuffix);\n            } else {\n                filtered.add(mpu);\n            }\n        
}\n        return filtered;\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        mpu = filterMultipartUpload(mpu);\n        List<MultipartPart> parts = delegate().listMultipartUpload(mpu);\n        List<MultipartPart> filteredParts = new ArrayList<>();\n\n        // fix wrong multipart size due to the part padding\n        for (MultipartPart part : parts) {\n\n            // we use part 0 as a placeholder and hide it on azure\n            if (getBlobStoreType().equals(\"azureblob\") &&\n                part.partNumber() == 0) {\n                continue;\n            }\n\n            MultipartPart newPart = MultipartPart.create(\n                part.partNumber(),\n                part.partSize() - Constants.PADDING_BLOCK_SIZE,\n                part.partETag(),\n                part.lastModified()\n            );\n            filteredParts.add(newPart);\n        }\n        return filteredParts;\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n        int partNumber, Payload payload) {\n\n        mpu = filterMultipartUpload(mpu);\n        return delegate().uploadMultipartPart(mpu, partNumber,\n            encryptPayload(payload, partNumber));\n    }\n\n    private MultipartUpload filterMultipartUpload(MultipartUpload mpu) {\n        MutableBlobMetadata mbm = null;\n        if (mpu.blobMetadata() != null) {\n            mbm = new MutableBlobMetadataImpl(mpu.blobMetadata());\n            mbm = setEncryptedSuffix(mbm);\n        }\n\n        String blobName = mpu.blobName();\n        if (!isEncrypted(blobName)) {\n            blobName = blobNameWithSuffix(blobName);\n        }\n\n        return MultipartUpload.create(mpu.containerName(), blobName, mpu.id(),\n            mbm, mpu.putOptions());\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n        List<MultipartPart> parts) {\n\n        MutableBlobMetadata mbm =\n            new 
MutableBlobMetadataImpl(mpu.blobMetadata());\n        String blobName = mpu.blobName();\n\n        // always set .s3enc suffix except on gcp\n        // and blob name starts with multipart upload id\n        if (getBlobStoreType().equals(\"google-cloud-storage\") &&\n            mpu.blobName().startsWith(mpu.id())) {\n            logger.debug(\"skip suffix on gcp\");\n        } else {\n            mbm = setEncryptedSuffix(mbm);\n            if (!isEncrypted(mpu.blobName())) {\n                blobName = blobNameWithSuffix(blobName);\n            }\n        }\n\n        MultipartUpload mpuWithSuffix =\n            MultipartUpload.create(mpu.containerName(),\n                blobName, mpu.id(), mbm, mpu.putOptions());\n\n        // this will only work for non s3 backends like azure and gcp\n        if (multipartRequiresStub()) {\n            long partCount = parts.size();\n\n            // special handling for GCP to sum up all parts\n            if (getBlobStoreType().equals(\"google-cloud-storage\")) {\n                partCount = 0;\n                for (MultipartPart part : parts) {\n                    blobName =\n                        \"%s_%08d\".formatted(\n                            mpu.id(),\n                            part.partNumber());\n                    BlobMetadata metadata =\n                        delegate().blobMetadata(mpu.containerName(), blobName);\n                    if (metadata != null && metadata.getUserMetadata()\n                        .containsKey(Constants.METADATA_ENCRYPTION_PARTS)) {\n                        String partMetaCount = metadata.getUserMetadata()\n                            .get(Constants.METADATA_ENCRYPTION_PARTS);\n                        partCount = partCount + Long.parseLong(partMetaCount);\n                    } else {\n                        partCount++;\n                    }\n                }\n            }\n\n            mpuWithSuffix.blobMetadata().getUserMetadata()\n                
.put(Constants.METADATA_ENCRYPTION_PARTS,\n                    String.valueOf(partCount));\n            mpuWithSuffix.blobMetadata().getUserMetadata()\n                .remove(Constants.METADATA_IS_ENCRYPTED_MULTIPART);\n        }\n\n        String eTag = delegate().completeMultipartUpload(mpuWithSuffix, parts);\n\n        // cleanup mpu placeholder on gcp\n        if (getBlobStoreType().equals(\"google-cloud-storage\")) {\n            delegate().removeBlob(mpu.containerName(),\n                Constants.MPU_FOLDER + mpu.id());\n        }\n\n        return eTag;\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n\n        name = blobNameWithSuffix(container, name);\n        BlobMetadata blobMetadata = delegate().blobMetadata(container, name);\n        if (blobMetadata != null) {\n            // only remove the -s3enc suffix\n            // if the blob is encrypted and not a multipart stub\n            if (isEncrypted(blobMetadata) &&\n                !blobMetadata.getUserMetadata()\n                    .containsKey(Constants.METADATA_IS_ENCRYPTED_MULTIPART)) {\n                blobMetadata = removeEncryptedSuffix(blobMetadata);\n                blobMetadata = calculateBlobSize(blobMetadata);\n            }\n        }\n        return blobMetadata;\n    }\n\n    @Override\n    public long getMaximumMultipartPartSize() {\n        long max = delegate().getMaximumMultipartPartSize();\n        return max - Constants.PADDING_BLOCK_SIZE;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/EventualBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static java.util.Objects.requireNonNull;\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport java.util.Deque;\nimport java.util.List;\nimport java.util.Random;\nimport java.util.concurrent.Callable;\nimport java.util.concurrent.ConcurrentLinkedDeque;\nimport java.util.concurrent.ScheduledExecutorService;\nimport java.util.concurrent.TimeUnit;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.Payload;\n\n/**\n * This class is a BlobStore wrapper which emulates eventual consistency\n * using two blobstores.  It writes objects to one store and reads objects\n * from another.  An asynchronous process copies objects between stores.  
Note\n * that container operations are not eventually consistent.\n */\nfinal class EventualBlobStore extends ForwardingBlobStore {\n    private final BlobStore writeStore;  // read from delegate\n    private final ScheduledExecutorService executorService;\n    private final Deque<Callable<?>> deque = new ConcurrentLinkedDeque<>();\n    private final int delay;\n    private final TimeUnit delayUnit;\n    private final double probability;\n    private final Random random = new Random();\n\n    private EventualBlobStore(BlobStore writeStore, BlobStore readStore,\n            ScheduledExecutorService executorService, int delay,\n            TimeUnit delayUnit, double probability) {\n        super(readStore);\n        this.writeStore = requireNonNull(writeStore);\n        this.executorService = requireNonNull(executorService);\n        checkArgument(delay >= 0, \"Delay must be at least zero, was: %s\",\n                delay);\n        this.delay = delay;\n        this.delayUnit = requireNonNull(delayUnit);\n        checkArgument(probability >= 0.0 && probability <= 1.0,\n                \"Probability must be between 0.0 and 1.0, was: %s\",\n                probability);\n        this.probability = probability;\n    }\n\n    static BlobStore newEventualBlobStore(BlobStore writeStore,\n            BlobStore readStore, ScheduledExecutorService executorService,\n            int delay, TimeUnit delayUnit, double probability) {\n        return new EventualBlobStore(writeStore, readStore, executorService,\n                delay, delayUnit, probability);\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        return delegate().createContainerInLocation(\n                        location, container, options) &&\n                writeStore.createContainerInLocation(\n                        location, container, options);\n    }\n\n    @Override\n    public void 
deleteContainer(String container) {\n        delegate().deleteContainer(container);\n        writeStore.deleteContainer(container);\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        return delegate().deleteContainerIfEmpty(container) &&\n                writeStore.deleteContainerIfEmpty(container);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return putBlob(containerName, blob, PutOptions.NONE);\n    }\n\n    @Override\n    public String putBlob(final String containerName, Blob blob,\n            final PutOptions options) {\n        final String nearName = blob.getMetadata().getName();\n        String nearETag = writeStore.putBlob(containerName, blob, options);\n        schedule(new Callable<String>() {\n                @Override\n                public String call() {\n                    Blob nearBlob = writeStore.getBlob(containerName, nearName);\n                    String farETag = delegate().putBlob(containerName,\n                            nearBlob, options);\n                    return farETag;\n                }\n            });\n        return nearETag;\n    }\n\n    @Override\n    public void removeBlob(final String containerName, final String blobName) {\n        writeStore.removeBlob(containerName, blobName);\n        schedule(new Callable<Void>() {\n                @Override\n                public Void call() {\n                    delegate().removeBlob(containerName, blobName);\n                    return null;\n                }\n            });\n    }\n\n    @Override\n    public void removeBlobs(final String containerName,\n            final Iterable<String> blobNames) {\n        writeStore.removeBlobs(containerName, blobNames);\n        schedule(new Callable<Void>() {\n                @Override\n                public Void call() {\n                    delegate().removeBlobs(containerName, blobNames);\n                    return null;\n              
  }\n            });\n    }\n\n    @Override\n    public String copyBlob(final String fromContainer, final String fromName,\n            final String toContainer, final String toName,\n            final CopyOptions options) {\n        String nearETag = writeStore.copyBlob(fromContainer, fromName,\n                toContainer, toName, options);\n        schedule(new Callable<String>() {\n                @Override\n                public String call() {\n                    return delegate().copyBlob(fromContainer, fromName,\n                            toContainer, toName, options);\n                }\n            });\n        return nearETag;\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        MultipartUpload mpu = delegate().initiateMultipartUpload(container,\n                blobMetadata, options);\n        return mpu;\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        delegate().abortMultipartUpload(mpu);\n    }\n\n    @Override\n    public String completeMultipartUpload(final MultipartUpload mpu,\n            final List<MultipartPart> parts) {\n        schedule(new Callable<String>() {\n                @Override\n                public String call() {\n                    String farETag = delegate().completeMultipartUpload(mpu,\n                            parts);\n                    return farETag;\n                }\n            });\n        return \"\";  // TODO: fake ETag\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n        MultipartPart part = delegate().uploadMultipartPart(mpu, partNumber,\n                payload);\n        return part;\n    }\n\n    @SuppressWarnings(\"FutureReturnValueIgnored\")\n    private void schedule(Callable<?> callable) {\n        if (random.nextDouble() < probability) {\n          
  deque.add(callable);\n            executorService.schedule(new DequeCallable(), delay, delayUnit);\n        }\n    }\n\n    private final class DequeCallable implements Callable<Void> {\n        @Override\n        public Void call() throws Exception {\n            deque.poll().call();\n            return null;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/GlobBlobStoreLocator.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.nio.file.FileSystems;\nimport java.nio.file.PathMatcher;\nimport java.util.Map;\n\nimport com.google.common.collect.Maps;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jspecify.annotations.Nullable;\n\npublic final class GlobBlobStoreLocator implements BlobStoreLocator {\n    private final Map<String, Map.Entry<String, BlobStore>> locator;\n    private final Map<PathMatcher, Map.Entry<String, BlobStore>> globLocator;\n\n    public GlobBlobStoreLocator(\n            Map<String, Map.Entry<String, BlobStore>> locator,\n            Map<PathMatcher, Map.Entry<String, BlobStore>> globLocator) {\n        this.locator = locator;\n        this.globLocator = globLocator;\n    }\n\n    @Override\n    public Map.Entry<String, BlobStore> locateBlobStore(\n            @Nullable String identity, String container, String blob) {\n        Map.Entry<String, BlobStore> locatorEntry =\n                locator.get(identity);\n        Map.Entry<String, BlobStore> globEntry = null;\n        if (container != null) {\n            for (var entry : globLocator.entrySet()) {\n                if (entry.getKey().matches(FileSystems.getDefault()\n                        .getPath(container))) {\n                    globEntry = entry.getValue();\n                }\n            }\n        }\n        if 
(globEntry == null) {\n            if (identity == null) {\n                if (!locator.isEmpty()) {\n                    return locator.entrySet().iterator().next()\n                            .getValue();\n                }\n                return Maps.immutableEntry(null,\n                        globLocator.entrySet().iterator().next().getValue()\n                                .getValue());\n            }\n            return locatorEntry;\n        }\n        if (identity == null) {\n            return Maps.immutableEntry(null, globEntry.getValue());\n        }\n        if (!globEntry.getKey().equals(identity)) {\n            return null;\n        }\n        if (locatorEntry == null) {\n            return null;\n        }\n        return Map.entry(locatorEntry.getKey(), globEntry.getValue());\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/LatencyBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\nimport static java.util.Objects.requireNonNull;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Set;\nimport java.util.concurrent.ExecutorService;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport com.google.common.collect.ImmutableMap;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.ContentMetadata;\nimport 
org.jclouds.io.Payload;\nimport org.jclouds.io.payloads.InputStreamPayload;\n\npublic final class LatencyBlobStore extends ForwardingBlobStore {\n    private static final Pattern PROPERTIES_LATENCY_RE = Pattern.compile(\n            \"^\" + S3ProxyConstants.PROPERTY_LATENCY + \"\\\\.(?<op>.*)\\\\.latency$\");\n    private static final Pattern PROPERTIES_SPEED_RE = Pattern.compile(\n            \"^\" + S3ProxyConstants.PROPERTY_LATENCY + \"\\\\.(?<op>.*)\\\\.speed$\");\n    private static final String OP_ALL = \"*\";\n    private static final String OP_CONTAINER_EXISTS = \"container-exists\";\n    private static final String OP_CREATE_CONTAINER = \"create-container\";\n    private static final String OP_CONTAINER_ACCESS = \"container-access\";\n    private static final String OP_LIST = \"list\";\n    private static final String OP_CLEAR_CONTAINER = \"clear-container\";\n    private static final String OP_DELETE_CONTAINER = \"delete-container\";\n    private static final String OP_DIRECTORY_EXISTS = \"directory-exists\";\n    private static final String OP_CREATE_DIRECTORY = \"create-directory\";\n    private static final String OP_DELETE_DIRECTORY = \"delete-directory\";\n    private static final String OP_BLOB_EXISTS = \"blob-exists\";\n    private static final String OP_PUT_BLOB = \"put\";\n    private static final String OP_COPY_BLOB = \"copy\";\n    private static final String OP_BLOB_METADATA = \"metadata\";\n    private static final String OP_GET_BLOB = \"get\";\n    private static final String OP_REMOVE_BLOB = \"remove\";\n    private static final String OP_BLOB_ACCESS = \"blob-access\";\n    private static final String OP_COUNT_BLOBS = \"count\";\n    private static final String OP_MULTIPART_MESSAGE = \"multipart-message\";\n    private static final String OP_UPLOAD_PART = \"upload-part\";\n    private static final String OP_LIST_MULTIPART = \"list-multipart\";\n    private static final String OP_MULTIPART_PARAM = \"multipart-param\";\n    private static 
final String OP_DOWNLOAD_BLOB = \"download\";\n    private static final String OP_STREAM_BLOB = \"stream\";\n    private final Map<String, Long> latencies;\n    private final Map<String, Long> speeds;\n\n    private LatencyBlobStore(BlobStore blobStore, Map<String, Long> latencies, Map<String, Long> speeds) {\n        super(blobStore);\n        this.latencies = requireNonNull(latencies);\n        for (String op : latencies.keySet()) {\n            checkArgument(latencies.get(op) >= 0, \"Latency must be non negative for %s\", op);\n        }\n        this.speeds = requireNonNull(speeds);\n        for (String op : speeds.keySet()) {\n            checkArgument(speeds.get(op) > 0, \"Speed must be positive for %s\", op);\n        }\n    }\n\n    public static Map<String, Long> parseLatencies(Properties properties) {\n        var latencies = new ImmutableMap.Builder<String, Long>();\n        for (String key : properties.stringPropertyNames()) {\n            Matcher matcher = PROPERTIES_LATENCY_RE.matcher(key);\n            if (!matcher.matches()) {\n                continue;\n            }\n            String op = matcher.group(\"op\");\n            long latency = Long.parseLong(properties.getProperty(key));\n            checkArgument(latency >= 0, \"Latency must be non negative for %s\", op);\n            latencies.put(op, latency);\n        }\n        return latencies.build();\n    }\n\n    public static Map<String, Long> parseSpeeds(Properties properties) {\n        var speeds = new ImmutableMap.Builder<String, Long>();\n        for (String key : properties.stringPropertyNames()) {\n            Matcher matcher = PROPERTIES_SPEED_RE.matcher(key);\n            if (!matcher.matches()) {\n                continue;\n            }\n            String op = matcher.group(\"op\");\n            long speed = Long.parseLong(properties.getProperty(key));\n            checkArgument(speed > 0, \"Speed must be positive for %s\", op);\n            speeds.put(op, speed);\n        }\n   
     return speeds.build();\n    }\n\n    static BlobStore newLatencyBlobStore(BlobStore delegate, Map<String, Long> latencies, Map<String, Long> speeds) {\n        return new LatencyBlobStore(delegate, latencies, speeds);\n    }\n\n    @Override\n    public Set<? extends Location> listAssignableLocations() {\n        simulateLatency(OP_LIST);\n        return super.listAssignableLocations();\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        simulateLatency(OP_LIST);\n        return super.list();\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        simulateLatency(OP_LIST);\n        return super.list(container);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container, ListContainerOptions options) {\n        simulateLatency(OP_LIST);\n        return super.list(container, options);\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        simulateLatency(OP_CONTAINER_EXISTS);\n        return super.containerExists(container);\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location, String container) {\n        simulateLatency(OP_CREATE_CONTAINER);\n        return super.createContainerInLocation(location, container);\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location, String container, CreateContainerOptions createContainerOptions) {\n        simulateLatency(OP_CREATE_CONTAINER);\n        return super.createContainerInLocation(location, container, createContainerOptions);\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        simulateLatency(OP_CONTAINER_ACCESS);\n        return super.getContainerAccess(container);\n    }\n\n    @Override\n    public void setContainerAccess(String container, ContainerAccess containerAccess) {\n        simulateLatency(OP_CONTAINER_ACCESS);\n        
super.setContainerAccess(container, containerAccess);\n    }\n\n    @Override\n    public void clearContainer(String container) {\n        simulateLatency(OP_CLEAR_CONTAINER);\n        super.clearContainer(container);\n    }\n\n    @Override\n    public void clearContainer(String container, ListContainerOptions options) {\n        simulateLatency(OP_CLEAR_CONTAINER);\n        super.clearContainer(container, options);\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        simulateLatency(OP_DELETE_CONTAINER);\n        super.deleteContainer(container);\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        simulateLatency(OP_DELETE_CONTAINER);\n        return super.deleteContainerIfEmpty(container);\n    }\n\n    @Override\n    public boolean directoryExists(String container, String directory) {\n        simulateLatency(OP_DIRECTORY_EXISTS);\n        return super.directoryExists(container, directory);\n    }\n\n    @Override\n    public void createDirectory(String container, String directory) {\n        simulateLatency(OP_CREATE_DIRECTORY);\n        super.createDirectory(container, directory);\n    }\n\n    @Override\n    public void deleteDirectory(String container, String directory) {\n        simulateLatency(OP_DELETE_DIRECTORY);\n        super.deleteDirectory(container, directory);\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        simulateLatency(OP_BLOB_EXISTS);\n        return super.blobExists(container, name);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        simulateLatency(OP_PUT_BLOB);\n        try {\n            InputStream is = blob.getPayload().openStream();\n            Blob newBlob = replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_PUT_BLOB)));\n            return super.putBlob(containerName, newBlob);\n        } catch (IOException e) {\n            throw new 
RuntimeException(e);\n        }\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob, PutOptions putOptions) {\n        simulateLatency(OP_PUT_BLOB);\n        try {\n            InputStream is = blob.getPayload().openStream();\n            Blob newBlob = replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_PUT_BLOB)));\n            return super.putBlob(containerName, newBlob);\n        } catch (IOException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName, String toContainer, String toName, CopyOptions options) {\n        simulateLatency(OP_COPY_BLOB);\n        return super.copyBlob(fromContainer, fromName, toContainer, toName, options);\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n        simulateLatency(OP_BLOB_METADATA);\n        return super.blobMetadata(container, name);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName) {\n        simulateLatency(OP_GET_BLOB);\n        Blob blob = super.getBlob(containerName, blobName);\n        try {\n            InputStream is = blob.getPayload().openStream();\n            return replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_GET_BLOB)));\n        } catch (IOException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName, GetOptions getOptions) {\n        simulateLatency(OP_GET_BLOB);\n        Blob blob = super.getBlob(containerName, blobName, getOptions);\n        try {\n            InputStream is = blob.getPayload().openStream();\n            return replaceStream(blob, new ThrottledInputStream(is, getSpeed(OP_GET_BLOB)));\n        } catch (IOException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    @Override\n    public void removeBlob(String container, String name) {\n 
       simulateLatency(OP_REMOVE_BLOB);\n        super.removeBlob(container, name);\n    }\n\n    @Override\n    public void removeBlobs(String container, Iterable<String> iterable) {\n        simulateLatency(OP_REMOVE_BLOB);\n        super.removeBlobs(container, iterable);\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String name) {\n        simulateLatency(OP_BLOB_ACCESS);\n        return super.getBlobAccess(container, name);\n    }\n\n    @Override\n    public void setBlobAccess(String container, String name, BlobAccess access) {\n        simulateLatency(OP_BLOB_ACCESS);\n        super.setBlobAccess(container, name, access);\n    }\n\n    @Override\n    public long countBlobs(String container) {\n        simulateLatency(OP_COUNT_BLOBS);\n        return super.countBlobs(container);\n    }\n\n    @Override\n    public long countBlobs(String container, ListContainerOptions options) {\n        simulateLatency(OP_COUNT_BLOBS);\n        return super.countBlobs(container, options);\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata, PutOptions options) {\n        simulateLatency(OP_MULTIPART_MESSAGE);\n        return super.initiateMultipartUpload(container, blobMetadata, options);\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        simulateLatency(OP_MULTIPART_MESSAGE);\n        super.abortMultipartUpload(mpu);\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {\n        simulateLatency(OP_MULTIPART_MESSAGE);\n        return super.completeMultipartUpload(mpu, parts);\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) {\n        simulateLatency(OP_UPLOAD_PART);\n        try {\n            InputStream is = payload.openStream();\n            payload = new InputStreamPayload(new 
ThrottledInputStream(is, getSpeed(OP_UPLOAD_PART)));\n        } catch (IOException e) {\n            throw new RuntimeException(e);\n        }\n        return super.uploadMultipartPart(mpu, partNumber, payload);\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        simulateLatency(OP_LIST_MULTIPART);\n        return super.listMultipartUpload(mpu);\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        simulateLatency(OP_LIST_MULTIPART);\n        return super.listMultipartUploads(container);\n    }\n\n    @Override\n    public long getMinimumMultipartPartSize() {\n        simulateLatency(OP_MULTIPART_PARAM);\n        return super.getMinimumMultipartPartSize();\n    }\n\n    @Override\n    public long getMaximumMultipartPartSize() {\n        simulateLatency(OP_MULTIPART_PARAM);\n        return super.getMaximumMultipartPartSize();\n    }\n\n    @Override\n    public int getMaximumNumberOfParts() {\n        simulateLatency(OP_MULTIPART_PARAM);\n        return super.getMaximumNumberOfParts();\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination) {\n        simulateLatency(OP_DOWNLOAD_BLOB);\n        super.downloadBlob(container, name, destination);\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination, ExecutorService executor) {\n        simulateLatency(OP_DOWNLOAD_BLOB);\n        super.downloadBlob(container, name, destination, executor);\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        simulateLatency(OP_STREAM_BLOB);\n        InputStream is = super.streamBlob(container, name);\n        return new ThrottledInputStream(is, getSpeed(OP_STREAM_BLOB));\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name, ExecutorService executor) {\n        simulateLatency(OP_STREAM_BLOB);\n        
InputStream is = super.streamBlob(container, name, executor);\n        return new ThrottledInputStream(is, getSpeed(OP_STREAM_BLOB));\n    }\n\n    private long getLatency(String op) {\n        return latencies.getOrDefault(op, latencies.getOrDefault(OP_ALL, 0L));\n    }\n\n    private Long getSpeed(String op) {\n        return speeds.getOrDefault(op, speeds.getOrDefault(OP_ALL, null));\n    }\n\n    private void simulateLatency(String op) {\n        long latency = getLatency(op);\n        if (latency > 0) {\n            try {\n                Thread.sleep(latency);\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        }\n    }\n\n    private Blob replaceStream(Blob blob, InputStream is) {\n        BlobMetadata blobMeta = blob.getMetadata();\n        ContentMetadata contentMeta = blobMeta.getContentMetadata();\n        Map<String, String> userMetadata = blobMeta.getUserMetadata();\n\n        Blob newBlob = blobBuilder(blobMeta.getName())\n                .type(blobMeta.getType())\n                .tier(blobMeta.getTier())\n                .userMetadata(userMetadata)\n                .payload(is)\n                .cacheControl(contentMeta.getCacheControl())\n                .contentDisposition(contentMeta.getContentDisposition())\n                .contentEncoding(contentMeta.getContentEncoding())\n                .contentLanguage(contentMeta.getContentLanguage())\n                .contentLength(contentMeta.getContentLength())\n                .contentType(contentMeta.getContentType())\n                .build();\n\n        newBlob.getMetadata().setUri(blobMeta.getUri());\n        newBlob.getMetadata().setETag(blobMeta.getETag());\n        newBlob.getMetadata().setLastModified(blobMeta.getLastModified());\n        newBlob.getMetadata().setSize(blobMeta.getSize());\n        newBlob.getMetadata().setPublicUri(blobMeta.getPublicUri());\n        
newBlob.getMetadata().setContainer(blobMeta.getContainer());\n\n        return newBlob;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/Main.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.Console;\nimport java.io.IOException;\nimport java.io.PrintStream;\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.FileSystems;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.PathMatcher;\nimport java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Set;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.TimeUnit;\nimport java.util.regex.Pattern;\n\nimport com.amazonaws.auth.AWSCredentials;\nimport com.amazonaws.auth.AWSCredentialsProvider;\nimport com.amazonaws.auth.AWSSessionCredentials;\nimport com.amazonaws.auth.DefaultAWSCredentialsProviderChain;\nimport com.google.common.base.Strings;\nimport com.google.common.base.Supplier;\nimport com.google.common.collect.ImmutableBiMap;\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.Maps;\nimport com.google.common.io.MoreFiles;\nimport com.google.common.util.concurrent.ThreadFactoryBuilder;\n\nimport org.gaul.modernizer_maven_annotations.SuppressModernizer;\nimport org.jclouds.Constants;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.JcloudsVersion;\nimport 
org.jclouds.aws.domain.SessionCredentials;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.concurrent.DynamicExecutors;\nimport org.jclouds.concurrent.config.ExecutorServiceModule;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.location.reference.LocationConstants;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;\nimport org.jclouds.s3.domain.ObjectMetadata.StorageClass;\nimport org.kohsuke.args4j.CmdLineException;\nimport org.kohsuke.args4j.CmdLineParser;\nimport org.kohsuke.args4j.Option;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic final class Main {\n    private static final Logger logger = LoggerFactory.getLogger(Main.class);\n    private Main() {\n        throw new AssertionError(\"intentionally not implemented\");\n    }\n\n    private static final class Options {\n        @Option(name = \"--properties\",\n                usage = \"S3Proxy configuration (required, multiple allowed)\")\n        private List<Path> properties = new ArrayList<>();\n\n        @Option(name = \"--version\", usage = \"display version\")\n        private boolean version;\n    }\n\n    @SuppressWarnings(\"EqualsIncompatibleType\")\n    public static void main(String[] args) throws Exception {\n        Console console = System.console();\n        if (console == null) {\n            System.setErr(createLoggerErrorPrintStream());\n        }\n\n        var options = new Options();\n        var parser = new CmdLineParser(options);\n        try {\n            parser.parseArgument(args);\n        } catch (CmdLineException cle) {\n            usage(parser);\n        }\n\n        if (options.version) {\n            System.err.println(\n                    Main.class.getPackage().getImplementationVersion());\n            System.exit(0);\n        } else if (options.properties.isEmpty()) {\n            
usage(parser);\n        }\n\n        S3Proxy.Builder s3ProxyBuilder = null;\n        var factory = new ThreadFactoryBuilder()\n                .setNameFormat(\"user thread %d\")\n                .setThreadFactory(Executors.defaultThreadFactory())\n                .build();\n        ExecutorService executorService = DynamicExecutors.newScalingThreadPool(\n                1, 20, 60 * 1000, factory);\n        var locators = ImmutableMap\n                .<String, Map.Entry<String, BlobStore>>builder();\n        var globLocators = ImmutableMap\n                .<PathMatcher, Map.Entry<String, BlobStore>>builder();\n        Set<String> locatorGlobs = new HashSet<>();\n        Set<String> parsedIdentities = new HashSet<>();\n        for (var path : options.properties) {\n            var properties = new Properties();\n            try (var is = Files.newInputStream(path)) {\n                properties.load(is);\n            }\n            properties.putAll(System.getProperties());\n\n            BlobStore blobStore = createBlobStore(properties, executorService);\n            var blobStoreType = blobStore.getContext().unwrap().getProviderMetadata().getId();\n            if (blobStoreType.equals(\"aws-s3\")) {\n                System.err.println(\"WARNING: aws-s3 storage backend deprecated -- please use aws-s3-sdk instead\");\n            } else if (blobStoreType.equals(\"azureblob\")) {\n                System.err.println(\"WARNING: azureblob storage backend deprecated -- please use azureblob-sdk instead\");\n            } else if (blobStoreType.equals(\"filesystem\")) {\n                System.err.println(\"WARNING: filesystem storage backend deprecated -- please use filesystem-nio2 instead\");\n            } else if (blobStoreType.equals(\"google-cloud-storage\")) {\n                System.err.println(\"WARNING: google-cloud-storage storage backend deprecated -- please use google-cloud-storage-sdk instead\");\n            } else if (blobStoreType.equals(\"s3\")) {\n      
          System.err.println(\"WARNING: s3 storage backend deprecated -- please use aws-s3-sdk instead\");\n            } else if (blobStoreType.equals(\"transient\")) {\n                System.err.println(\"WARNING: transient storage backend deprecated -- please use transient-nio2 instead\");\n            }\n\n            blobStore = parseMiddlewareProperties(blobStore, executorService,\n                    properties);\n\n            String s3ProxyAuthorizationString = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_AUTHORIZATION);\n\n            String localIdentity = null;\n            if (AuthenticationType.fromString(s3ProxyAuthorizationString) !=\n                    AuthenticationType.NONE) {\n                localIdentity = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_IDENTITY);\n                String localCredential = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CREDENTIAL);\n                if (parsedIdentities.add(localIdentity)) {\n                    locators.put(localIdentity,\n                            Map.entry(localCredential, blobStore));\n                }\n            }\n            for (String key : properties.stringPropertyNames()) {\n                if (key.startsWith(S3ProxyConstants.PROPERTY_BUCKET_LOCATOR)) {\n                    String bucketLocator = properties.getProperty(key);\n                    if (locatorGlobs.add(bucketLocator)) {\n                        globLocators.put(\n                                FileSystems.getDefault().getPathMatcher(\n                                        \"glob:\" + bucketLocator),\n                                Maps.immutableEntry(localIdentity, blobStore));\n                    } else {\n                        System.err.println(\"Multiple definitions of the \" +\n                                \"bucket locator: \" + bucketLocator);\n                        System.exit(1);\n                    }\n            
    }\n            }\n\n            S3Proxy.Builder s3ProxyBuilder2 = S3Proxy.Builder\n                    .fromProperties(properties)\n                    .blobStore(blobStore);\n\n            if (s3ProxyBuilder != null &&\n                    !s3ProxyBuilder.equals(s3ProxyBuilder2)) {\n                System.err.println(\"Multiple configurations require\" +\n                        \" identical s3proxy properties\");\n                System.exit(1);\n            }\n            s3ProxyBuilder = s3ProxyBuilder2;\n        }\n\n        S3Proxy s3Proxy;\n        try {\n            s3Proxy = s3ProxyBuilder.build();\n        } catch (IllegalArgumentException | IllegalStateException e) {\n            System.err.println(e.getMessage());\n            System.exit(1);\n            throw e;\n        }\n\n        var locator = locators.build();\n        var globLocator = globLocators.build();\n        if (!locator.isEmpty() || !globLocator.isEmpty()) {\n            s3Proxy.setBlobStoreLocator(\n                    new GlobBlobStoreLocator(locator, globLocator));\n        }\n\n        try {\n            s3Proxy.start();\n        } catch (Exception e) {\n            System.err.println(e.getMessage());\n            System.exit(1);\n        }\n    }\n\n    private static BlobStore parseMiddlewareProperties(BlobStore blobStore,\n            ExecutorService executorService, Properties properties)\n            throws IOException {\n        var altProperties = new Properties();\n        for (var entry : properties.entrySet()) {\n            String key = (String) entry.getKey();\n            if (key.startsWith(S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX)) {\n                key = key.substring(\n                        S3ProxyConstants.PROPERTY_ALT_JCLOUDS_PREFIX.length());\n                altProperties.put(key, (String) entry.getValue());\n            }\n        }\n\n        String eventualConsistency = properties.getProperty(\n                
S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY);\n        if (\"true\".equalsIgnoreCase(eventualConsistency)) {\n            BlobStore altBlobStore = createBlobStore(altProperties,\n                    executorService);\n            int delay = Integer.parseInt(properties.getProperty(\n                    S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_DELAY,\n                    \"5\"));\n            double probability = Double.parseDouble(properties.getProperty(\n                    S3ProxyConstants.PROPERTY_EVENTUAL_CONSISTENCY_PROBABILITY,\n                    \"1.0\"));\n            System.err.println(\"Emulating eventual consistency with delay \" +\n                    delay + \" seconds and probability \" + (probability * 100) +\n                    \"%\");\n            blobStore = EventualBlobStore.newEventualBlobStore(\n                    blobStore, altBlobStore,\n                    Executors.newScheduledThreadPool(1),\n                    delay, TimeUnit.SECONDS, probability);\n        }\n\n        String nullBlobStore = properties.getProperty(\n                S3ProxyConstants.PROPERTY_NULL_BLOBSTORE);\n        if (\"true\".equalsIgnoreCase(nullBlobStore)) {\n            System.err.println(\"Using null storage backend\");\n            blobStore = NullBlobStore.newNullBlobStore(blobStore);\n        }\n\n        String readOnlyBlobStore = properties.getProperty(\n                S3ProxyConstants.PROPERTY_READ_ONLY_BLOBSTORE);\n        if (\"true\".equalsIgnoreCase(readOnlyBlobStore)) {\n            System.err.println(\"Using read-only storage backend\");\n            blobStore = ReadOnlyBlobStore.newReadOnlyBlobStore(blobStore);\n        }\n\n        ImmutableBiMap<String, String> aliases = AliasBlobStore.parseAliases(\n                properties);\n        if (!aliases.isEmpty()) {\n            System.err.println(\"Using alias backend\");\n            blobStore = AliasBlobStore.newAliasBlobStore(blobStore, aliases);\n        }\n\n        Map<String, 
String> prefixMap = PrefixBlobStore.parsePrefixes(properties);\n        if (!prefixMap.isEmpty()) {\n            System.err.println(\"Using prefix backend\");\n            blobStore = PrefixBlobStore.newPrefixBlobStore(blobStore,\n                    prefixMap);\n        }\n\n        List<Map.Entry<Pattern, String>> regexs =\n                RegexBlobStore.parseRegexs(properties);\n        if (!regexs.isEmpty()) {\n            System.err.println(\"Using regex backend\");\n            blobStore = RegexBlobStore.newRegexBlobStore(blobStore, regexs);\n        }\n\n        Map<String, Integer> shards =\n                ShardedBlobStore.parseBucketShards(properties);\n        Map<String, String> prefixes =\n                ShardedBlobStore.parsePrefixes(properties);\n        if (!shards.isEmpty()) {\n            System.err.println(\"Using sharded buckets backend\");\n            blobStore = ShardedBlobStore.newShardedBlobStore(blobStore,\n                    shards, prefixes);\n        }\n\n        String encryptedBlobStore = properties.getProperty(\n            S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE);\n        if (\"true\".equalsIgnoreCase(encryptedBlobStore)) {\n            System.err.println(\"Using encrypted storage backend\");\n            blobStore = EncryptedBlobStore.newEncryptedBlobStore(blobStore,\n                properties);\n        }\n\n        var storageClass = properties.getProperty(\n                S3ProxyConstants.PROPERTY_STORAGE_CLASS_BLOBSTORE);\n        if (!Strings.isNullOrEmpty(storageClass)) {\n            System.err.println(\"Using storage class override backend\");\n            var storageClassBlobStore =\n                    StorageClassBlobStore.newStorageClassBlobStore(\n                            blobStore, storageClass);\n            blobStore = storageClassBlobStore;\n            System.err.println(\"Configuration storage class: \" + storageClass);\n            // TODO: This only makes sense for S3 backends.\n            
System.err.println(\"Mapping storage class to: \" +\n                    StorageClass.fromTier(storageClassBlobStore.getTier()));\n        }\n\n        String userMetadataReplacerBlobStore = properties.getProperty(\n                S3ProxyConstants.PROPERTY_USER_METADATA_REPLACER);\n        if (\"true\".equalsIgnoreCase(userMetadataReplacerBlobStore)) {\n            System.err.println(\"Using user metadata replacers storage backend\");\n            String fromChars = properties.getProperty(S3ProxyConstants\n                    .PROPERTY_USER_METADATA_REPLACER_FROM_CHARS);\n            String toChars = properties.getProperty(S3ProxyConstants\n                    .PROPERTY_USER_METADATA_REPLACER_TO_CHARS);\n            blobStore = UserMetadataReplacerBlobStore\n                    .newUserMetadataReplacerBlobStore(\n                            blobStore, fromChars, toChars);\n        }\n\n        Map<String, Long> latencies = LatencyBlobStore.parseLatencies(properties);\n        Map<String, Long> speeds = LatencyBlobStore.parseSpeeds(properties);\n        if (!latencies.isEmpty() || !speeds.isEmpty()) {\n            System.err.println(\"Using latency storage backend\");\n            blobStore = LatencyBlobStore.newLatencyBlobStore(blobStore, latencies, speeds);\n        }\n\n        String noCacheBlobStore = properties.getProperty(\n              S3ProxyConstants.PROPERTY_NO_CACHE_BLOBSTORE);\n        if  (\"true\".equalsIgnoreCase(noCacheBlobStore)) {\n            System.err.println(\"Using no-cache storage backend middleware\");\n            blobStore = NoCacheBlobStore\n                    .newNoCacheBlobStore(blobStore);\n        }\n\n        return blobStore;\n    }\n\n    private static PrintStream createLoggerErrorPrintStream() {\n        return new PrintStream(System.err) {\n            private final StringBuilder builder = new StringBuilder();\n\n            @Override\n            @edu.umd.cs.findbugs.annotations.SuppressFBWarnings(\n                 
   \"SLF4J_SIGN_ONLY_FORMAT\")\n            public void print(final String string) {\n                logger.error(\"{}\", string);\n            }\n\n            @Override\n            public void write(byte[] buf, int off, int len) {\n                for (int i = off; i < off + len; ++i) {\n                    char ch = (char) buf[i];\n                    if (ch == '\\n') {\n                        if (builder.length() != 0) {\n                            print(builder.toString());\n                            builder.setLength(0);\n                        }\n                    } else {\n                        builder.append(ch);\n                    }\n                }\n            }\n        };\n    }\n\n    private static BlobStore createBlobStore(Properties properties,\n            ExecutorService executorService) throws IOException {\n        String provider = properties.getProperty(Constants.PROPERTY_PROVIDER);\n        String identity = properties.getProperty(Constants.PROPERTY_IDENTITY);\n        String credential = properties.getProperty(\n                Constants.PROPERTY_CREDENTIAL);\n        String endpoint = properties.getProperty(Constants.PROPERTY_ENDPOINT);\n        properties.remove(Constants.PROPERTY_ENDPOINT);\n        String region = properties.getProperty(\n                LocationConstants.PROPERTY_REGION);\n\n        if (provider == null) {\n            System.err.println(\n                    \"Properties file must contain: \" +\n                    Constants.PROPERTY_PROVIDER);\n            System.exit(1);\n        }\n\n        if (provider.equals(\"filesystem\") ||\n                provider.equals(\"filesystem-nio2\") ||\n                provider.equals(\"transient\") ||\n                provider.equals(\"transient-nio2\")) {\n            // local blobstores do not require credentials\n            identity = Strings.nullToEmpty(identity);\n            credential = Strings.nullToEmpty(credential);\n        } else if 
(provider.equals(\"google-cloud-storage\") ||\n                provider.equals(\"google-cloud-storage-sdk\")) {\n            if (credential != null && !credential.isEmpty()) {\n                var path = FileSystems.getDefault().getPath(credential);\n                if (Files.exists(path)) {\n                    credential = MoreFiles.asCharSource(path,\n                            StandardCharsets.UTF_8).read();\n                }\n            }\n            identity = Strings.nullToEmpty(identity);\n            credential = Strings.nullToEmpty(credential);\n            properties.remove(Constants.PROPERTY_CREDENTIAL);\n            // We also need to clear the system property, otherwise the\n            // credential will be overridden by the system property.\n            System.clearProperty(Constants.PROPERTY_CREDENTIAL);\n        }\n\n        if (identity == null || credential == null) {\n            System.err.println(\n                    \"Properties file must contain: \" +\n                    Constants.PROPERTY_IDENTITY + \" and \" +\n                    Constants.PROPERTY_CREDENTIAL);\n            System.exit(1);\n        }\n\n        properties.setProperty(Constants.PROPERTY_USER_AGENT,\n                \"s3proxy/%s jclouds/%s java/%s\".formatted(\n                        Main.class.getPackage().getImplementationVersion(),\n                        JcloudsVersion.get(),\n                        System.getProperty(\"java.version\")));\n\n        ContextBuilder builder = ContextBuilder\n                .newBuilder(provider)\n                .modules(List.of(\n                        new SLF4JLoggingModule(),\n                        new ExecutorServiceModule(executorService)))\n                .overrides(properties);\n        if (!Strings.isNullOrEmpty(endpoint)) {\n            builder = builder.endpoint(endpoint);\n        }\n\n        if ((identity.isEmpty() || credential.isEmpty()) && provider.equals(\"aws-s3\")) {\n            @SuppressModernizer\n      
      Supplier<Credentials> credentialsSupplier = new Supplier<Credentials>() {\n                @Override\n                public Credentials get() {\n                    AWSCredentialsProvider authChain = DefaultAWSCredentialsProviderChain.getInstance();\n                    AWSCredentials newCreds = authChain.getCredentials();\n                    Credentials jcloudsCred = null;\n\n                    if (newCreds instanceof AWSSessionCredentials sessionCreds) {\n                        jcloudsCred = SessionCredentials.builder()\n                                .accessKeyId(newCreds.getAWSAccessKeyId())\n                                .secretAccessKey(newCreds.getAWSSecretKey())\n                                .sessionToken(sessionCreds.getSessionToken())\n                                .build();\n                    } else {\n                        jcloudsCred = new Credentials(\n                                newCreds.getAWSAccessKeyId(), newCreds.getAWSSecretKey()\n                        );\n                    }\n\n                    return jcloudsCred;\n                }\n            };\n            builder = builder.credentialsSupplier(credentialsSupplier);\n        } else {\n            builder = builder.credentials(identity, credential);\n        }\n\n        BlobStoreContext context = builder.build(BlobStoreContext.class);\n        BlobStore blobStore;\n        if (context instanceof RegionScopedBlobStoreContext regionContext &&\n                region != null) {\n            blobStore = regionContext.getBlobStore(region);\n        } else {\n            blobStore = context.getBlobStore();\n        }\n        return blobStore;\n    }\n\n    private static void usage(CmdLineParser parser) {\n        System.err.println(\"Usage: s3proxy [options...]\");\n        parser.printUsage(System.err);\n        System.exit(1);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/MetricsHandler.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.IOException;\n\nimport jakarta.servlet.http.HttpServlet;\nimport jakarta.servlet.http.HttpServletRequest;\nimport jakarta.servlet.http.HttpServletResponse;\n\n/** Servlet that serves Prometheus metrics at /metrics endpoint. */\npublic final class MetricsHandler extends HttpServlet {\n    private final S3ProxyMetrics metrics;\n\n    public MetricsHandler(S3ProxyMetrics metrics) {\n        this.metrics = metrics;\n    }\n\n    @Override\n    protected void service(HttpServletRequest request,\n            HttpServletResponse response) throws IOException {\n        response.setContentType(\"text/plain; version=0.0.4; charset=utf-8\");\n        response.setStatus(HttpServletResponse.SC_OK);\n        response.getWriter().write(metrics.scrape());\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/NoCacheBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\n\n/**\n * BlobStore which drops ETag or date-based cache options from object requests.\n * This is useful as jclouds does not fully support the proxying of HTTP 304 responses.\n */\nfinal class NoCacheBlobStore extends ForwardingBlobStore {\n\n    private NoCacheBlobStore(BlobStore blobStore) {\n        super(blobStore);\n    }\n\n    public static BlobStore newNoCacheBlobStore(BlobStore blobStore) {\n        return new NoCacheBlobStore(blobStore);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String name) {\n        return getBlob(containerName, name, new GetOptions());\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String name, GetOptions getOptions) {\n        return super.getBlob(containerName, name, resetCacheHeaders(getOptions));\n    }\n\n    static GetOptions resetCacheHeaders(GetOptions options) {\n        if (options.getIfMatch() != null || options.getIfNoneMatch() != null ||\n            options.getIfModifiedSince() != null ||  options.getIfUnmodifiedSince() != null) {\n              // as there is no exposed method to reset just the cache headers, a 
copy is used\n            GetOptions optionsNoCache = new GetOptions();\n            for (String range : options.getRanges()) {\n                String[] ranges = range.split(\"-\", 2);\n                if (ranges[0].isEmpty()) {\n                    optionsNoCache.tail(Long.parseLong(ranges[1]));\n                } else if (ranges[1].isEmpty()) {\n                    optionsNoCache.startAt(Long.parseLong(ranges[0]));\n                } else {\n                    optionsNoCache.range(Long.parseLong(ranges[0]), Long.parseLong(ranges[1]));\n                }\n            }\n            return optionsNoCache;\n        }\n        return options;\n    }\n\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/NullBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.util.Arrays;\nimport java.util.List;\n\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.hash.HashCode;\nimport com.google.common.io.ByteSource;\nimport com.google.common.primitives.Longs;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.payloads.ByteSourcePayload;\nimport org.jspecify.annotations.Nullable;\n\nfinal class NullBlobStore extends ForwardingBlobStore {\n    private NullBlobStore(BlobStore blobStore) {\n        super(blobStore);\n    }\n\n    static BlobStore newNullBlobStore(BlobStore 
blobStore) {\n        return new NullBlobStore(blobStore);\n    }\n\n    @Override\n    @Nullable\n    public BlobMetadata blobMetadata(String container, String name) {\n        Blob blob = getBlob(container, name);\n        if (blob == null) {\n            return null;\n        }\n        return blob.getMetadata();\n    }\n\n    @Override\n    @Nullable\n    public Blob getBlob(String container, String name) {\n        return getBlob(container, name, GetOptions.NONE);\n    }\n\n    @Override\n    @Nullable\n    public Blob getBlob(String container, String name, GetOptions options) {\n        Blob blob = super.getBlob(container, name, options);\n        if (blob == null) {\n            return null;\n        }\n\n        byte[] array;\n        try (InputStream is = blob.getPayload().openStream()) {\n            array = is.readAllBytes();\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        long length = Longs.fromByteArray(array);\n        var payload = new ByteSourcePayload(\n                new NullByteSource().slice(0, length));\n        payload.setContentMetadata(blob.getPayload().getContentMetadata());\n        payload.getContentMetadata().setContentLength(length);\n        payload.getContentMetadata().setContentMD5((HashCode) null);\n        blob.setPayload(payload);\n        blob.getMetadata().setSize(length);\n        return blob;\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        var builder = ImmutableSet.<StorageMetadata>builder();\n        PageSet<? 
extends StorageMetadata> pageSet = super.list(container);\n        for (StorageMetadata sm : pageSet) {\n            var msm = new MutableStorageMetadataImpl(sm);\n            msm.setSize(0L);\n            builder.add(msm);\n        }\n        return new PageSetImpl<>(builder.build(), pageSet.getNextMarker());\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return putBlob(containerName, blob, PutOptions.NONE);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n            PutOptions options) {\n        long length;\n        try (InputStream is = blob.getPayload().openStream()) {\n            length = is.transferTo(OutputStream.nullOutputStream());\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        byte[] array = Longs.toByteArray(length);\n        var payload = new ByteSourcePayload(\n                ByteSource.wrap(array));\n        payload.setContentMetadata(blob.getPayload().getContentMetadata());\n        payload.getContentMetadata().setContentLength((long) array.length);\n        payload.getContentMetadata().setContentMD5((HashCode) null);\n        blob.setPayload(payload);\n\n        return super.putBlob(containerName, blob, options);\n    }\n\n    @Override\n    public String completeMultipartUpload(final MultipartUpload mpu,\n            final List<MultipartPart> parts) {\n        long length = 0;\n        for (MultipartPart part : parts) {\n            length += part.partSize();\n            super.removeBlob(mpu.containerName(), mpu.id() + \"-\" +\n                    part.partNumber());\n        }\n\n        byte[] array = Longs.toByteArray(length);\n        var payload = new ByteSourcePayload(\n                ByteSource.wrap(array));\n        payload.getContentMetadata().setContentLength((long) array.length);\n\n        super.abortMultipartUpload(mpu);\n\n        MultipartUpload mpu2 = super.initiateMultipartUpload(\n    
            mpu.containerName(), mpu.blobMetadata(), mpu.putOptions());\n\n        MultipartPart part = super.uploadMultipartPart(mpu2, 1, payload);\n\n        return super.completeMultipartUpload(mpu2, List.of(part));\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        for (MultipartPart part : super.listMultipartUpload(mpu)) {\n            super.removeBlob(mpu.containerName(), mpu.id() + \"-\" +\n                    part.partNumber());\n        }\n\n        super.abortMultipartUpload(mpu);\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n        long length;\n        try (InputStream is = payload.openStream()) {\n            length = is.transferTo(OutputStream.nullOutputStream());\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        byte[] array = Longs.toByteArray(length);\n        var newPayload = new ByteSourcePayload(\n                ByteSource.wrap(array));\n        newPayload.setContentMetadata(payload.getContentMetadata());\n        newPayload.getContentMetadata().setContentLength((long) array.length);\n        newPayload.getContentMetadata().setContentMD5((HashCode) null);\n\n        // create a single-part object which contains the logical length which\n        // list and complete will read later\n        Blob blob = blobBuilder(mpu.id() + \"-\" + partNumber)\n                .payload(newPayload)\n                .build();\n        super.putBlob(mpu.containerName(), blob);\n\n        MultipartPart part = super.uploadMultipartPart(mpu, partNumber,\n                newPayload);\n        return MultipartPart.create(part.partNumber(), length, part.partETag(),\n                part.lastModified());\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        var builder = ImmutableList.<MultipartPart>builder();\n        for 
(MultipartPart part : super.listMultipartUpload(mpu)) {\n            // get real blob size from stub blob\n            Blob blob = getBlob(mpu.containerName(),\n                    mpu.id() + \"-\" + part.partNumber());\n            long length = blob.getPayload().getContentMetadata()\n                    .getContentLength();\n            builder.add(MultipartPart.create(part.partNumber(), length,\n                    part.partETag(), part.lastModified()));\n        }\n        return builder.build();\n    }\n\n    private static final class NullByteSource extends ByteSource {\n        @Override\n        public InputStream openStream() throws IOException {\n            return new NullInputStream();\n        }\n    }\n\n    private static final class NullInputStream extends InputStream {\n        private boolean closed;\n\n        @Override\n        public int read() throws IOException {\n            if (closed) {\n                throw new IOException(\"Stream already closed\");\n            }\n            return 0;\n        }\n\n        @Override\n        public int read(byte[] b, int off, int len) throws IOException {\n            if (closed) {\n                throw new IOException(\"Stream already closed\");\n            }\n            Arrays.fill(b, off, off + len, (byte) 0);\n            return len;\n        }\n\n        @Override\n        public void close() throws IOException {\n            super.close();\n            closed = true;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/PrefixBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\nimport static java.util.Objects.requireNonNull;\n\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\n\nimport com.google.common.base.Strings;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableMap;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.io.Payload;\n\n/**\n * Middleware that scopes a virtual bucket to a fixed 
backend prefix.\n */\npublic final class PrefixBlobStore extends ForwardingBlobStore {\n    private final Map<String, String> prefixes;\n\n    private PrefixBlobStore(BlobStore delegate, Map<String, String> prefixes) {\n        super(delegate);\n        this.prefixes = ImmutableMap.copyOf(requireNonNull(prefixes));\n    }\n\n    static BlobStore newPrefixBlobStore(BlobStore delegate,\n            Map<String, String> prefixes) {\n        String blobStoreType = delegate.getContext().unwrap()\n                .getProviderMetadata().getId();\n        if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) {\n            throw new UnsupportedOperationException(\n                    \"Only supports opaque markers\");\n        }\n        return new PrefixBlobStore(delegate, prefixes);\n    }\n\n    public static Map<String, String> parsePrefixes(Properties properties) {\n        Map<String, String> prefixMap = new HashMap<>();\n        for (String key : properties.stringPropertyNames()) {\n            if (!key.startsWith(S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE + \".\")) {\n                continue;\n            }\n            String bucket = key.substring(\n                    S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE.length() + 1);\n            String prefix = properties.getProperty(key);\n            checkArgument(!Strings.isNullOrEmpty(bucket),\n                    \"Prefix property %s must specify a bucket\", key);\n            checkArgument(!Strings.isNullOrEmpty(prefix),\n                    \"Prefix for bucket %s must not be empty\", bucket);\n            checkArgument(prefixMap.put(bucket, prefix) == null,\n                    \"Multiple prefixes configured for bucket %s\", bucket);\n        }\n        return ImmutableMap.copyOf(prefixMap);\n    }\n\n    private boolean hasPrefix(String container) {\n        return this.prefixes.containsKey(container);\n    }\n\n    private String getPrefix(String container) {\n        return this.prefixes.get(container);\n    
}\n\n    private String addPrefix(String container, String name) {\n        if (!hasPrefix(container) || Strings.isNullOrEmpty(name)) {\n            return name;\n        }\n        String prefix = getPrefix(container);\n        if (name.startsWith(prefix)) {\n            return name;\n        }\n        if (prefix.endsWith(\"/\") && name.startsWith(\"/\")) {\n            return prefix + name.substring(1);\n        }\n        return prefix + name;\n    }\n\n    private String trimPrefix(String container, String name) {\n        if (!hasPrefix(container) || Strings.isNullOrEmpty(name)) {\n            return name;\n        }\n        String prefix = getPrefix(container);\n        if (name.startsWith(prefix)) {\n            return name.substring(prefix.length());\n        }\n        return name;\n    }\n\n    private BlobMetadata trimBlobMetadata(String container,\n            BlobMetadata metadata) {\n        if (metadata == null || !hasPrefix(container)) {\n            return metadata;\n        }\n        var mutable = new MutableBlobMetadataImpl(metadata);\n        mutable.setName(trimPrefix(container, metadata.getName()));\n        return mutable;\n    }\n\n    private Blob trimBlob(String container, Blob blob) {\n        if (blob == null || !hasPrefix(container)) {\n            return blob;\n        }\n        blob.getMetadata().setName(\n                trimPrefix(container, blob.getMetadata().getName()));\n        return blob;\n    }\n\n    private MultipartUpload toDelegateMultipartUpload(MultipartUpload upload) {\n        if (upload == null || !hasPrefix(upload.containerName())) {\n            return upload;\n        }\n        var metadata = upload.blobMetadata() == null ? 
null :\n                new MutableBlobMetadataImpl(upload.blobMetadata());\n        if (metadata != null) {\n            metadata.setName(\n                    addPrefix(upload.containerName(), metadata.getName()));\n        }\n        return MultipartUpload.create(upload.containerName(),\n                addPrefix(upload.containerName(), upload.blobName()),\n                upload.id(), metadata, upload.putOptions());\n    }\n\n    private MultipartUpload toClientMultipartUpload(MultipartUpload upload) {\n        if (upload == null || !hasPrefix(upload.containerName())) {\n            return upload;\n        }\n        var metadata = upload.blobMetadata() == null ? null :\n                new MutableBlobMetadataImpl(upload.blobMetadata());\n        if (metadata != null) {\n            metadata.setName(\n                    trimPrefix(upload.containerName(), metadata.getName()));\n        }\n        return MultipartUpload.create(upload.containerName(),\n                trimPrefix(upload.containerName(), upload.blobName()),\n                upload.id(), metadata, upload.putOptions());\n    }\n\n    private ListContainerOptions applyPrefix(String container,\n            ListContainerOptions options) {\n        if (!hasPrefix(container)) {\n            return options;\n        }\n        ListContainerOptions effective = options == null ?\n                new ListContainerOptions() : options.clone();\n        String basePrefix = getPrefix(container);\n        String requestedPrefix = effective.getPrefix();\n        String requestedMarker = effective.getMarker();\n        String requestedDir = effective.getDir();\n\n        if (Strings.isNullOrEmpty(requestedPrefix)) {\n            effective.prefix(basePrefix);\n        } else {\n            effective.prefix(addPrefix(container, requestedPrefix));\n        }\n\n        if (!Strings.isNullOrEmpty(requestedMarker)) {\n            effective.afterMarker(addPrefix(container, requestedMarker));\n        }\n\n        if 
(!Strings.isNullOrEmpty(requestedDir)) {\n            effective.inDirectory(addPrefix(container, requestedDir));\n        }\n\n        return effective;\n    }\n\n    private PageSet<? extends StorageMetadata> trimListing(String container,\n            PageSet<? extends StorageMetadata> listing) {\n        if (!hasPrefix(container)) {\n            return listing;\n        }\n        var builder = ImmutableList.<StorageMetadata>builder();\n        for (StorageMetadata metadata : listing) {\n            if (metadata instanceof BlobMetadata blobMetadata) {\n                var mutable = new MutableBlobMetadataImpl(blobMetadata);\n                mutable.setName(trimPrefix(container, blobMetadata.getName()));\n                builder.add(mutable);\n            } else {\n                var mutable = new MutableStorageMetadataImpl(metadata);\n                mutable.setName(trimPrefix(container, metadata.getName()));\n                builder.add(mutable);\n            }\n        }\n        String nextMarker = listing.getNextMarker();\n        if (nextMarker != null) {\n            nextMarker = trimPrefix(container, nextMarker);\n        }\n        return new PageSetImpl<>(builder.build(), nextMarker);\n    }\n\n    @Override\n    public boolean directoryExists(String container, String directory) {\n        return super.directoryExists(container,\n                addPrefix(container, directory));\n    }\n\n    @Override\n    public void createDirectory(String container, String directory) {\n        super.createDirectory(container, addPrefix(container, directory));\n    }\n\n    @Override\n    public void deleteDirectory(String container, String directory) {\n        super.deleteDirectory(container, addPrefix(container, directory));\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        return super.blobExists(container, addPrefix(container, name));\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, 
String name) {\n        return trimBlobMetadata(container,\n                super.blobMetadata(container, addPrefix(container, name)));\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName) {\n        return trimBlob(containerName,\n                super.getBlob(containerName, addPrefix(containerName,\n                        blobName)));\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName,\n                        GetOptions getOptions) {\n        return trimBlob(containerName,\n                super.getBlob(containerName, addPrefix(containerName,\n                        blobName), getOptions));\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        String originalName = blob.getMetadata().getName();\n        blob.getMetadata().setName(addPrefix(containerName, originalName));\n        try {\n            return super.putBlob(containerName, blob);\n        } finally {\n            blob.getMetadata().setName(originalName);\n        }\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n                          PutOptions options) {\n        String originalName = blob.getMetadata().getName();\n        blob.getMetadata().setName(addPrefix(containerName, originalName));\n        try {\n            return super.putBlob(containerName, blob, options);\n        } finally {\n            blob.getMetadata().setName(originalName);\n        }\n    }\n\n    @Override\n    public void removeBlob(String container, String name) {\n        super.removeBlob(container, addPrefix(container, name));\n    }\n\n    @Override\n    public void removeBlobs(String container, Iterable<String> names) {\n        if (!hasPrefix(container)) {\n            super.removeBlobs(container, names);\n            return;\n        }\n        var builder = ImmutableList.<String>builder();\n        for (String name : names) {\n            
builder.add(addPrefix(container, name));\n        }\n        super.removeBlobs(container, builder.build());\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String name) {\n        return super.getBlobAccess(container, addPrefix(container, name));\n    }\n\n    @Override\n    public void setBlobAccess(String container, String name,\n            BlobAccess access) {\n        super.setBlobAccess(container, addPrefix(container, name), access);\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n            String toContainer, String toName, CopyOptions options) {\n        return super.copyBlob(fromContainer, addPrefix(fromContainer, fromName),\n                toContainer, addPrefix(toContainer, toName), options);\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        if (!hasPrefix(container)) {\n            return super.list(container);\n        }\n        return list(container, new ListContainerOptions());\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(String container,\n            ListContainerOptions options) {\n        if (!hasPrefix(container)) {\n            return super.list(container, options);\n        }\n        var effective = applyPrefix(container, options);\n        return trimListing(container, super.list(container, effective));\n    }\n\n    @Override\n    public void clearContainer(String container) {\n        if (!hasPrefix(container)) {\n            super.clearContainer(container);\n            return;\n        }\n        var options = new ListContainerOptions()\n                .prefix(getPrefix(container))\n                .recursive();\n        super.clearContainer(container, options);\n    }\n\n    @Override\n    public void clearContainer(String container, ListContainerOptions options) {\n        if (!hasPrefix(container)) {\n            super.clearContainer(container, options);\n            return;\n        }\n        super.clearContainer(container, applyPrefix(container, options));\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        var mutable = new MutableBlobMetadataImpl(blobMetadata);\n        mutable.setName(addPrefix(container, blobMetadata.getName()));\n        MultipartUpload upload = super.initiateMultipartUpload(container,\n                mutable, options);\n        return toClientMultipartUpload(upload);\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        super.abortMultipartUpload(toDelegateMultipartUpload(mpu));\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n            List<MultipartPart> parts) {\n        return super.completeMultipartUpload(\n                toDelegateMultipartUpload(mpu), parts);\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n        return 
super.uploadMultipartPart(\n                toDelegateMultipartUpload(mpu), partNumber, payload);\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        return super.listMultipartUpload(toDelegateMultipartUpload(mpu));\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        List<MultipartUpload> uploads =\n                super.listMultipartUploads(container);\n        if (!hasPrefix(container)) {\n            return uploads;\n        }\n        var builder = ImmutableList.<MultipartUpload>builder();\n        for (MultipartUpload upload : uploads) {\n            builder.add(toClientMultipartUpload(upload));\n        }\n        return builder.build();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/PutOptions2.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport com.google.common.util.concurrent.ListeningExecutorService;\n\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jspecify.annotations.Nullable;\n\n/**\n * This class extends jclouds' PutOptions to support conditional put operations via\n * the If-Match and If-None-Match headers.\n */\npublic final class PutOptions2 extends PutOptions {\n    @Nullable\n    private String ifMatch;\n    @Nullable\n    private String ifNoneMatch;\n\n    public PutOptions2() {\n        super();\n    }\n\n    public PutOptions2(PutOptions options) {\n        super(options.isMultipart(), options.getUseCustomExecutor(),\n                options.getCustomExecutor());\n        this.setBlobAccess(options.getBlobAccess());\n\n        if (options instanceof PutOptions2 other) {\n            this.ifMatch = other.ifMatch;\n            this.ifNoneMatch = other.ifNoneMatch;\n        }\n    }\n\n    @Nullable\n    public String getIfMatch() {\n        return ifMatch;\n    }\n\n    public PutOptions2 setIfMatch(@Nullable String etag) {\n        this.ifMatch = etag;\n        return this;\n    }\n\n    @Nullable\n    public String getIfNoneMatch() {\n        return ifNoneMatch;\n    }\n\n    public PutOptions2 setIfNoneMatch(@Nullable String etag) {\n        
this.ifNoneMatch = etag;\n        return this;\n    }\n\n    @Override\n    public PutOptions2 setBlobAccess(BlobAccess blobAccess) {\n        super.setBlobAccess(blobAccess);\n        return this;\n    }\n\n    @Override\n    public PutOptions2 multipart() {\n        super.multipart();\n        return this;\n    }\n\n    @Override\n    public PutOptions2 multipart(boolean val) {\n        super.multipart(val);\n        return this;\n    }\n\n    @Override\n    public PutOptions2 multipart(ListeningExecutorService customExecutor) {\n        super.multipart(customExecutor);\n        return this;\n    }\n\n    @Override\n    public PutOptions2 setCustomExecutor(ListeningExecutorService customExecutor) {\n        super.setCustomExecutor(customExecutor);\n        return this;\n    }\n\n    @Override\n    public String toString() {\n        String s = super.toString();\n        return s.substring(0, s.length() - 1) +\n                \", ifMatch=\" + ifMatch +\n                \", ifNoneMatch=\" + ifNoneMatch + \"]\";\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/Quirks.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.Set;\n\nfinal class Quirks {\n    /** Blobstores which do not support blob-level access control. */\n    static final Set<String> NO_BLOB_ACCESS_CONTROL = Set.of(\n            \"azureblob\",\n            \"azureblob-sdk\",\n            \"b2\",\n            \"google-cloud-storage-sdk\",\n            \"rackspace-cloudfiles-uk\",\n            \"rackspace-cloudfiles-us\",\n            \"openstack-swift\"\n    );\n\n    /** Blobstores which do not support the Cache-Control header. */\n    static final Set<String> NO_CACHE_CONTROL_SUPPORT = Set.of(\n            \"atmos\",\n            \"b2\",\n            \"google-cloud-storage\",\n            \"google-cloud-storage-sdk\",\n            \"rackspace-cloudfiles-uk\",\n            \"rackspace-cloudfiles-us\",\n            \"openstack-swift\"\n    );\n\n    /** Blobstores which do not support the Cache-Control header. */\n    static final Set<String> NO_CONTENT_DISPOSITION = Set.of(\n            \"b2\"\n    );\n\n    /** Blobstores which do not support the Content-Encoding header. */\n    static final Set<String> NO_CONTENT_ENCODING = Set.of(\n            \"b2\",\n            \"google-cloud-storage\",\n            \"google-cloud-storage-sdk\"\n    );\n\n    /** Blobstores which do not support the Content-Language header. 
*/\n    static final Set<String> NO_CONTENT_LANGUAGE = Set.of(\n            \"b2\",\n            \"rackspace-cloudfiles-uk\",\n            \"rackspace-cloudfiles-us\",\n            \"openstack-swift\"\n    );\n\n    /** Blobstores which do not support the If-None-Match header during copy. */\n    static final Set<String> NO_COPY_IF_NONE_MATCH = Set.of(\n            \"openstack-swift\",\n            \"rackspace-cloudfiles-uk\",\n            \"rackspace-cloudfiles-us\"\n    );\n\n    static final Set<String> NO_EXPIRES = Set.of(\n            \"azureblob\",\n            \"azureblob-sdk\"\n    );\n\n    static final Set<String> NO_LIST_MULTIPART_UPLOADS = Set.of(\n            \"atmos\",\n            \"filesystem\",\n            \"google-cloud-storage\",\n            \"openstack-swift\",\n            \"rackspace-cloudfiles-uk\",\n            \"rackspace-cloudfiles-us\",\n            \"transient\"\n    );\n\n    /** Blobstores which do not allow listing zero keys. */\n    static final Set<String> NO_LIST_ZERO_KEYS = Set.of(\n            \"atmos\",\n            \"azureblob\",\n            \"azureblob-sdk\"\n    );\n\n    /**\n     * S3 stores object metadata during initiate multipart while others\n     * require it during complete multipart.  Emulate the former in the latter\n     * by storing and retrieving a stub object.\n     *\n     * Note: azureblob-sdk also uses stubs for multipart uploads but handles\n     * this internally in AzureBlobStore rather than in S3ProxyHandler.\n     */\n    static final Set<String> MULTIPART_REQUIRES_STUB = Set.of(\n            \"azureblob\",\n            \"filesystem\",\n            \"filesystem-nio2\",\n            \"google-cloud-storage\",\n            \"openstack-swift\",\n            \"transient\",\n            \"transient-nio2\"\n    );\n\n    /** Blobstores with opaque ETags. 
*/\n    static final Set<String> OPAQUE_ETAG = Set.of(\n            \"azureblob\",\n            \"azureblob-sdk\",\n            \"b2\",\n            \"google-cloud-storage\",\n            \"google-cloud-storage-sdk\"\n    );\n\n    /** Blobstores with opaque markers. */\n    static final Set<String> OPAQUE_MARKERS = Set.of(\n            \"azureblob\",\n            \"azureblob-sdk\",\n            // S3 marker means one past this token while B2 means this token\n            \"b2\",\n            \"google-cloud-storage\",\n            \"google-cloud-storage-sdk\"\n    );\n\n    private Quirks() {\n        throw new AssertionError(\"Intentionally unimplemented\");\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/ReadOnlyBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.List;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.Payload;\n\n/** This class is a BlobStore wrapper which prevents mutating operations. 
*/\nfinal class ReadOnlyBlobStore extends ForwardingBlobStore {\n    private ReadOnlyBlobStore(BlobStore blobStore) {\n        super(blobStore);\n    }\n\n    static BlobStore newReadOnlyBlobStore(BlobStore blobStore) {\n        return new ReadOnlyBlobStore(blobStore);\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public String putBlob(final String containerName, Blob blob,\n            final PutOptions options) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public void removeBlob(final String containerName, final String blobName) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public void removeBlobs(final String containerName,\n            final Iterable<String> blobNames) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public String copyBlob(final String fromContainer, final String fromName,\n            final String toContainer, final String toName,\n            final CopyOptions options) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) 
{\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public String completeMultipartUpload(final MultipartUpload mpu,\n            final List<MultipartPart> parts) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n        throw new UnsupportedOperationException(\"read-only BlobStore\");\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/RegexBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\nimport static java.util.Objects.requireNonNull;\n\nimport java.io.File;\nimport java.io.InputStream;\nimport java.util.AbstractMap.SimpleEntry;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Map.Entry;\nimport java.util.Properties;\nimport java.util.concurrent.ExecutorService;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n/**\n * This class implements a middleware to apply regex to blob names.\n * The regex are configured as:\n * s3proxy.regex-blobstore.match.&lt;regex name&gt; = &lt;regex match\n * expression&gt;\n * s3proxy.regex-blobstore.replace.&lt;regex name&gt; = &lt;regex replace\n * expression&gt;\n *\n * You can add multiple regex, they will be applied from the beginning to the\n * end,\n * stopping as soon as the first regex matches.\n */\npublic final class 
RegexBlobStore extends ForwardingBlobStore {\n    private static final Logger logger = LoggerFactory.getLogger(\n            RegexBlobStore.class);\n\n    private final List<Entry<Pattern, String>> regexs;\n\n    private RegexBlobStore(BlobStore blobStore,\n            List<Entry<Pattern, String>> regexs) {\n        super(blobStore);\n        this.regexs = requireNonNull(regexs);\n    }\n\n    static BlobStore newRegexBlobStore(BlobStore delegate,\n            List<Entry<Pattern, String>> regexs) {\n        return new RegexBlobStore(delegate, regexs);\n    }\n\n    public static List<Map.Entry<Pattern, String>> parseRegexs(\n            Properties properties) {\n        List<Entry<String, String>> configRegex = new ArrayList<>();\n        List<Entry<Pattern, String>> regexs = new ArrayList<>();\n\n        for (String key : properties.stringPropertyNames()) {\n            if (key.startsWith(S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE)) {\n                String propKey = key.substring(\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE.length() + 1);\n                String value = properties.getProperty(key);\n\n                configRegex.add(new SimpleEntry<>(propKey, value));\n            }\n        }\n\n        for (Entry<String, String> entry : configRegex) {\n            String key = entry.getKey();\n            if (key.startsWith(\n                    S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH)) {\n                String regexName = key.substring(S3ProxyConstants\n                        .PROPERTY_REGEX_BLOBSTORE_MATCH.length() + 1);\n                String regex = entry.getValue();\n                Pattern pattern = Pattern.compile(regex);\n\n                String replace = properties.getProperty(String.join(\n                        \".\", S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE,\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_REPLACE,\n                        regexName));\n\n                checkArgument(\n         
               replace != null,\n                        \"Regex %s has no replace property associated\",\n                        regexName);\n\n                logger.info(\n                        \"Adding new regex with name {} replaces with {} to {}\",\n                        regexName, regex, replace);\n\n                regexs.add(new SimpleEntry<>(pattern, replace));\n            }\n        }\n\n        return List.copyOf(regexs);\n    }\n\n    @Override\n    public boolean directoryExists(String container, String directory) {\n        return super.directoryExists(container, replaceBlobName(directory));\n    }\n\n    @Override\n    public void createDirectory(String container, String directory) {\n        super.createDirectory(container, replaceBlobName(directory));\n    }\n\n    @Override\n    public void deleteDirectory(String container, String directory) {\n        super.deleteDirectory(container, replaceBlobName(directory));\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        return super.blobExists(container, replaceBlobName(name));\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        String name = blob.getMetadata().getName();\n        String newName = replaceBlobName(name);\n        blob.getMetadata().setName(newName);\n\n        logger.debug(\"Renaming blob name from {} to {}\", name, newName);\n\n        return super.putBlob(containerName, blob);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n            PutOptions putOptions) {\n        String name = blob.getMetadata().getName();\n        String newName = replaceBlobName(name);\n        blob.getMetadata().setName(newName);\n\n        logger.debug(\"Renaming blob name from {} to {}\", name, newName);\n\n        return super.putBlob(containerName, blob, putOptions);\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n            String 
toContainer, String toName, CopyOptions options) {\n        return super.copyBlob(fromContainer, replaceBlobName(fromName),\n                toContainer, replaceBlobName(toName), options);\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n        return super.blobMetadata(container, replaceBlobName(name));\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String name) {\n        return super.getBlob(containerName, replaceBlobName(name));\n    }\n\n    @Override\n    public void removeBlob(String container, String name) {\n        super.removeBlob(container, replaceBlobName(name));\n    }\n\n    @Override\n    public void removeBlobs(String container, Iterable<String> iterable) {\n        List<String> blobs = new ArrayList<>();\n        for (String name : iterable) {\n            blobs.add(replaceBlobName(name));\n        }\n        super.removeBlobs(container, blobs);\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String name) {\n        return super.getBlobAccess(container, replaceBlobName(name));\n    }\n\n    @Override\n    public void setBlobAccess(String container, String name,\n            BlobAccess access) {\n        super.setBlobAccess(container, replaceBlobName(name), access);\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination) {\n        super.downloadBlob(container, replaceBlobName(name), destination);\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination,\n            ExecutorService executor) {\n        super.downloadBlob(container, replaceBlobName(name), destination,\n                executor);\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        return super.streamBlob(container, replaceBlobName(name));\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name,\n            
ExecutorService executor) {\n        return super.streamBlob(container, replaceBlobName(name), executor);\n    }\n\n    private String replaceBlobName(String name) {\n        String newName = name;\n\n        for (var entry : this.regexs) {\n            Pattern pattern = entry.getKey();\n            Matcher match = pattern.matcher(name);\n\n            if (match.find()) {\n                return match.replaceAll(entry.getValue());\n            }\n\n        }\n\n        return newName;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3AuthorizationHeader.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.List;\nimport java.util.Map;\n\nimport com.google.common.base.Splitter;\n\nimport org.jspecify.annotations.Nullable;\n\nfinal class S3AuthorizationHeader {\n    private static final Map<String, String> DIGEST_MAP = Map.of(\n            \"SHA256\", \"SHA-256\",\n            \"SHA1\", \"SHA-1\",\n            \"MD5\", \"MD5\");\n    private static final String SIGNATURE_FIELD = \"Signature=\";\n    private static final String CREDENTIAL_FIELD = \"Credential=\";\n\n    private final AuthenticationType authenticationType;\n    @Nullable private final String hmacAlgorithm;\n    @Nullable private final String hashAlgorithm;\n    @Nullable private final String region;\n    @Nullable private final String date;\n    @Nullable private final String service;\n    private final String identity;\n    private final String signature;\n\n    S3AuthorizationHeader(String header) {\n        if (header.startsWith(\"AWS \")) {\n            authenticationType = AuthenticationType.AWS_V2;\n            hmacAlgorithm = null;\n            hashAlgorithm = null;\n            region = null;\n            date = null;\n            service = null;\n            List<String> fields = Splitter.on(' ').splitToList(header);\n            if (fields.size() != 2) {\n                throw new 
IllegalArgumentException(\"Invalid header\");\n            }\n            List<String> identityTuple = Splitter.on(':').splitToList(\n                    fields.get(1));\n            if (identityTuple.size() != 2) {\n                throw new IllegalArgumentException(\"Invalid header\");\n            }\n            identity = identityTuple.get(0);\n            signature = identityTuple.get(1);\n        } else if (header.startsWith(\"AWS4-HMAC\")) {\n            authenticationType = AuthenticationType.AWS_V4;\n            signature = extractSignature(header);\n\n            int credentialIndex = header.indexOf(CREDENTIAL_FIELD);\n            if (credentialIndex < 0) {\n                throw new IllegalArgumentException(\"Invalid header\");\n            }\n            int credentialEnd = header.indexOf(',', credentialIndex);\n            if (credentialEnd < 0) {\n                throw new IllegalArgumentException(\"Invalid header\");\n            }\n            String credential = header.substring(credentialIndex +\n                    CREDENTIAL_FIELD.length(), credentialEnd);\n            List<String> fields = Splitter.on('/').splitToList(credential);\n            if (fields.size() != 5) {\n                throw new IllegalArgumentException(\n                        \"Invalid Credential: \" + credential);\n            }\n            identity = fields.get(0);\n            date = fields.get(1);\n            region = fields.get(2);\n            service = fields.get(3);\n            String awsSignatureVersion = header.substring(\n                    0, header.indexOf(' '));\n            hashAlgorithm = DIGEST_MAP.get(Splitter.on('-').splitToList(\n                    awsSignatureVersion).get(2));\n            hmacAlgorithm = \"Hmac\" + Splitter.on('-').splitToList(\n                    awsSignatureVersion).get(2);\n        } else {\n            throw new IllegalArgumentException(\"Invalid header\");\n        }\n    }\n\n    @Override\n    public String toString() {\n   
     return \"Identity: \" + identity +\n                \"; Signature: \" + signature +\n                \"; HMAC algorithm: \" + hmacAlgorithm +\n                \"; Hash algorithm: \" + hashAlgorithm +\n                \"; region: \" + region +\n                \"; date: \" + date +\n                \"; service \" + service;\n    }\n\n    private static String extractSignature(String header) {\n        int signatureIndex = header.indexOf(SIGNATURE_FIELD);\n        if (signatureIndex < 0) {\n            throw new IllegalArgumentException(\"Invalid signature\");\n        }\n        signatureIndex += SIGNATURE_FIELD.length();\n        int signatureEnd = header.indexOf(',', signatureIndex);\n        if (signatureEnd < 0) {\n            return header.substring(signatureIndex);\n        } else {\n            return header.substring(signatureIndex, signatureEnd);\n        }\n    }\n\n    public AuthenticationType getAuthenticationType() {\n        return authenticationType;\n    }\n\n    public String getHmacAlgorithm() {\n        return hmacAlgorithm;\n    }\n\n    public String getHashAlgorithm() {\n        return hashAlgorithm;\n    }\n\n    public String getRegion() {\n        return region;\n    }\n\n    public String getDate() {\n        return date;\n    }\n\n    public String getService() {\n        return service;\n    }\n\n    public String getIdentity() {\n        return identity;\n    }\n\n    public String getSignature() {\n        return signature;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3ErrorCode.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static java.util.Objects.requireNonNull;\n\nimport com.google.common.base.CaseFormat;\n\nimport jakarta.servlet.http.HttpServletResponse;\n\n/**\n * List of S3 error codes.  Reference:\n * http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html\n */\npublic enum S3ErrorCode {\n    ACCESS_DENIED(HttpServletResponse.SC_FORBIDDEN, \"Forbidden\"),\n    BAD_DIGEST(HttpServletResponse.SC_BAD_REQUEST, \"Bad Request\"),\n    BUCKET_ALREADY_EXISTS(HttpServletResponse.SC_FORBIDDEN,\n            \"The requested bucket name is not available.\" +\n            \" The bucket namespace is shared by all users of the system.\" +\n            \" Please select a different name and try again.\"),\n    BUCKET_ALREADY_OWNED_BY_YOU(HttpServletResponse.SC_CONFLICT,\n            \"Your previous request to create the named bucket\" +\n            \" succeeded and you already own it.\"),\n    BUCKET_NOT_EMPTY(HttpServletResponse.SC_CONFLICT,\n            \"The bucket you tried to delete is not empty\"),\n    ENTITY_TOO_LARGE(HttpServletResponse.SC_BAD_REQUEST,\n            \"Your proposed upload exceeds the maximum allowed object size.\"),\n    ENTITY_TOO_SMALL(HttpServletResponse.SC_BAD_REQUEST,\n            \"Your proposed upload is smaller than the minimum allowed object\" +\n            \" size. 
Each part must be at least 5 MB in size, except the last\" +\n            \" part.\"),\n    INTERNAL_ERROR(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,\n            \"An internal error occurred. Try again.\"),\n    INVALID_ACCESS_KEY_ID(HttpServletResponse.SC_FORBIDDEN, \"Forbidden\"),\n    INVALID_ARGUMENT(HttpServletResponse.SC_BAD_REQUEST, \"Bad Request\"),\n    INVALID_BUCKET_NAME(HttpServletResponse.SC_BAD_REQUEST,\n            \"The specified bucket is not valid.\"),\n    INVALID_CORS_ORIGIN(HttpServletResponse.SC_BAD_REQUEST,\n            \"Insufficient information. Origin request header needed.\"),\n    INVALID_CORS_METHOD(HttpServletResponse.SC_BAD_REQUEST,\n            \"The specified Access-Control-Request-Method is not valid.\"),\n    INVALID_DIGEST(HttpServletResponse.SC_BAD_REQUEST, \"Bad Request\"),\n    INVALID_LOCATION_CONSTRAINT(HttpServletResponse.SC_BAD_REQUEST,\n            \"The specified location constraint is not valid. For\" +\n            \" more information about Regions, see How to Select\" +\n            \" a Region for Your Buckets.\"),\n    INVALID_RANGE(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE,\n            \"The requested range is not satisfiable\"),\n    INVALID_PART(HttpServletResponse.SC_BAD_REQUEST,\n            \"One or more of the specified parts could not be found.\" +\n            \"  The part may not have been uploaded, or the specified entity\" +\n            \" tag may not match the part's entity tag.\"),\n    INVALID_PART_ORDER(HttpServletResponse.SC_BAD_REQUEST,\n            \"The list of parts must be specified in ascending\" +\n            \" PartNumber order.\"),\n    INVALID_REQUEST(HttpServletResponse.SC_BAD_REQUEST, \"Bad Request\"),\n    MALFORMED_X_M_L(HttpServletResponse.SC_BAD_REQUEST,\n            \"The XML you provided was not well-formed or did not validate\" +\n            \" against our published schema.\"),\n    MAX_MESSAGE_LENGTH_EXCEEDED(HttpServletResponse.SC_BAD_REQUEST,\n            
\"Your request was too big.\"),\n    METHOD_NOT_ALLOWED(HttpServletResponse.SC_METHOD_NOT_ALLOWED,\n            \"Method Not Allowed\"),\n    MISSING_CONTENT_LENGTH(HttpServletResponse.SC_LENGTH_REQUIRED,\n            \"Length Required\"),\n    NO_SUCH_BUCKET(HttpServletResponse.SC_NOT_FOUND,\n            \"The specified bucket does not exist\"),\n    NO_SUCH_KEY(HttpServletResponse.SC_NOT_FOUND,\n            \"The specified key does not exist.\"),\n    NO_SUCH_POLICY(HttpServletResponse.SC_NOT_FOUND,\n            \"The specified bucket does not have a bucket policy.\"),\n    NO_SUCH_UPLOAD(HttpServletResponse.SC_NOT_FOUND, \"Not Found\"),\n    NOT_IMPLEMENTED(HttpServletResponse.SC_NOT_IMPLEMENTED,\n            \"A header you provided implies functionality that is not\" +\n            \" implemented.\"),\n    PRECONDITION_FAILED(HttpServletResponse.SC_PRECONDITION_FAILED,\n            \"At least one of the preconditions you specified did not hold.\"),\n    REQUEST_TIME_TOO_SKEWED(HttpServletResponse.SC_FORBIDDEN, \"Forbidden\"),\n    REQUEST_TIMEOUT(HttpServletResponse.SC_BAD_REQUEST, \"Bad Request\"),\n    SIGNATURE_DOES_NOT_MATCH(HttpServletResponse.SC_FORBIDDEN, \"Forbidden\"),\n    X_AMZ_CONTENT_S_H_A_256_MISMATCH(HttpServletResponse.SC_BAD_REQUEST,\n            \"The provided 'x-amz-content-sha256' header does not match what\" +\n            \" was computed.\");\n\n    private final String errorCode;\n    private final int httpStatusCode;\n    private final String message;\n\n    S3ErrorCode(int httpStatusCode, String message) {\n        this.errorCode = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL,\n                name());\n        this.httpStatusCode = httpStatusCode;\n        this.message = requireNonNull(message);\n    }\n\n    String getErrorCode() {\n        return errorCode;\n    }\n\n    int getHttpStatusCode() {\n        return httpStatusCode;\n    }\n\n    String getMessage() {\n        return message;\n    }\n\n    @Override\n    public 
String toString() {\n        return getHttpStatusCode() + \" \" + getErrorCode() + \" \" + getMessage();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3Exception.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static java.util.Objects.requireNonNull;\n\nimport java.util.Map;\n\n@SuppressWarnings(\"serial\")\npublic final class S3Exception extends Exception {\n    private final S3ErrorCode error;\n    private final Map<String, String> elements;\n\n    S3Exception(S3ErrorCode error) {\n        this(error, error.getMessage(), (Throwable) null, Map.of());\n    }\n\n    S3Exception(S3ErrorCode error, String message) {\n        this(error, message, (Throwable) null, Map.of());\n    }\n\n    S3Exception(S3ErrorCode error, Throwable cause) {\n        this(error, error.getMessage(), cause, Map.of());\n    }\n\n    S3Exception(S3ErrorCode error, String message, Throwable cause) {\n        this(error, message, cause, Map.of());\n    }\n\n    S3Exception(S3ErrorCode error, String message, Throwable cause,\n                Map<String, String> elements) {\n        super(requireNonNull(message), cause);\n        this.error = requireNonNull(error);\n        this.elements = Map.copyOf(elements);\n    }\n\n    S3ErrorCode getError() {\n        return error;\n    }\n\n    Map<String, String> getElements() {\n        return elements;\n    }\n\n    @Override\n    public String getMessage() {\n        var builder = new StringBuilder().append(super.getMessage());\n        if (!elements.isEmpty()) {\n            
builder.append(\" \").append(elements);\n        }\n        return builder.toString();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3Operation.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\n/** Enumeration of S3 operations for metrics tracking. */\npublic enum S3Operation {\n    LIST_BUCKETS(\"ListBuckets\"),\n    LIST_OBJECTS_V2(\"ListObjectsV2\"),\n    GET_OBJECT(\"GetObject\"),\n    PUT_OBJECT(\"PutObject\"),\n    DELETE_OBJECT(\"DeleteObject\"),\n    DELETE_OBJECTS(\"DeleteObjects\"),\n    CREATE_BUCKET(\"CreateBucket\"),\n    DELETE_BUCKET(\"DeleteBucket\"),\n    HEAD_BUCKET(\"HeadBucket\"),\n    HEAD_OBJECT(\"HeadObject\"),\n    COPY_OBJECT(\"CopyObject\"),\n    CREATE_MULTIPART_UPLOAD(\"CreateMultipartUpload\"),\n    UPLOAD_PART(\"UploadPart\"),\n    UPLOAD_PART_COPY(\"UploadPartCopy\"),\n    COMPLETE_MULTIPART_UPLOAD(\"CompleteMultipartUpload\"),\n    ABORT_MULTIPART_UPLOAD(\"AbortMultipartUpload\"),\n    LIST_MULTIPART_UPLOADS(\"ListMultipartUploads\"),\n    LIST_PARTS(\"ListParts\"),\n    GET_OBJECT_ACL(\"GetObjectAcl\"),\n    PUT_OBJECT_ACL(\"PutObjectAcl\"),\n    GET_BUCKET_ACL(\"GetBucketAcl\"),\n    PUT_BUCKET_ACL(\"PutBucketAcl\"),\n    GET_BUCKET_LOCATION(\"GetBucketLocation\"),\n    GET_BUCKET_POLICY(\"GetBucketPolicy\"),\n    OPTIONS_OBJECT(\"OptionsObject\"),\n    UNKNOWN(\"Unknown\");\n\n    private final String value;\n\n    S3Operation(String value) {\n        this.value = value;\n    }\n\n    public String getValue() {\n        return value;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3Proxy.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static java.util.Objects.requireNonNull;\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport java.net.URI;\nimport java.net.URISyntaxException;\nimport java.util.Collection;\nimport java.util.Objects;\nimport java.util.Properties;\n\nimport javax.net.ssl.SSLContext;\n\nimport com.google.common.base.Joiner;\nimport com.google.common.base.Splitter;\nimport com.google.common.base.Strings;\nimport com.google.common.collect.Lists;\n\nimport org.eclipse.jetty.ee10.servlet.ServletContextHandler;\nimport org.eclipse.jetty.ee10.servlet.ServletHolder;\nimport org.eclipse.jetty.http.HttpCompliance;\nimport org.eclipse.jetty.http.UriCompliance;\nimport org.eclipse.jetty.server.HttpConfiguration;\nimport org.eclipse.jetty.server.HttpConnectionFactory;\nimport org.eclipse.jetty.server.SecureRequestCustomizer;\nimport org.eclipse.jetty.server.Server;\nimport org.eclipse.jetty.server.ServerConnector;\nimport org.eclipse.jetty.util.ssl.SslContextFactory;\nimport org.eclipse.jetty.util.thread.QueuedThreadPool;\nimport org.jclouds.blobstore.BlobStore;\n\n/**\n * S3Proxy translates S3 HTTP operations into jclouds provider-agnostic\n * operations.  
This allows applications using the S3 API to interface with any\n * provider that jclouds supports, e.g., EMC Atmos, Microsoft Azure,\n * OpenStack Swift.\n */\npublic final class S3Proxy {\n    private final Server server;\n    private final S3ProxyHandlerJetty handler;\n    private final S3ProxyMetrics metrics;\n    private final boolean listenHTTP;\n    private final boolean listenHTTPS;\n\n    S3Proxy(Builder builder) {\n        checkArgument(builder.endpoint != null ||\n                        builder.secureEndpoint != null,\n                \"Must provide endpoint or secure-endpoint\");\n        if (builder.endpoint != null) {\n            checkArgument(builder.endpoint.getPath().isEmpty(),\n                    \"endpoint path must be empty, was: %s\",\n                    builder.endpoint.getPath());\n        }\n        if (builder.secureEndpoint != null) {\n            checkArgument(builder.secureEndpoint.getPath().isEmpty(),\n                    \"secure-endpoint path must be empty, was: %s\",\n                    builder.secureEndpoint.getPath());\n            if (builder.sslContext == null) {\n                requireNonNull(builder.keyStorePath,\n                        \"Must provide keyStorePath with HTTPS endpoint\");\n                requireNonNull(builder.keyStorePassword,\n                        \"Must provide keyStorePassword with HTTPS endpoint\");\n            }\n        }\n        checkArgument(Strings.isNullOrEmpty(builder.identity) ^\n                !Strings.isNullOrEmpty(builder.credential),\n                \"Must provide both identity and credential\");\n\n        var pool = new QueuedThreadPool(builder.jettyMaxThreads);\n        pool.setName(\"S3Proxy-Jetty\");\n        server = new Server(pool);\n\n        var httpConfiguration = new HttpConfiguration();\n        httpConfiguration.setHttpCompliance(HttpCompliance.LEGACY);\n        httpConfiguration.setUriCompliance(UriCompliance.LEGACY);\n\n        var src = new 
SecureRequestCustomizer();\n        src.setSniHostCheck(false);\n        httpConfiguration.addCustomizer(src);\n        HttpConnectionFactory httpConnectionFactory =\n                new HttpConnectionFactory(httpConfiguration);\n        ServerConnector connector;\n        if (builder.endpoint != null) {\n            connector = new ServerConnector(server, httpConnectionFactory);\n            connector.setHost(builder.endpoint.getHost());\n            connector.setPort(builder.endpoint.getPort());\n            server.addConnector(connector);\n            listenHTTP = true;\n        } else {\n            listenHTTP = false;\n        }\n\n        if (builder.secureEndpoint != null) {\n            SslContextFactory.Server sslContextFactory =\n                new SslContextFactory.Server();\n            if (builder.sslContext != null) {\n                sslContextFactory.setSslContext(builder.sslContext);\n            } else {\n                sslContextFactory.setKeyStorePath(builder.keyStorePath);\n                sslContextFactory.setKeyStorePassword(builder.keyStorePassword);\n            }\n            connector = new ServerConnector(server, sslContextFactory,\n                    httpConnectionFactory);\n            connector.setHost(builder.secureEndpoint.getHost());\n            connector.setPort(builder.secureEndpoint.getPort());\n            server.addConnector(connector);\n            listenHTTPS = true;\n        } else {\n            listenHTTPS = false;\n        }\n        if (builder.metricsEnabled) {\n            this.metrics = new S3ProxyMetrics(\n                    builder.metricsHost, builder.metricsPort);\n        } else {\n            this.metrics = null;\n        }\n\n        handler = new S3ProxyHandlerJetty(builder.blobStore,\n                builder.authenticationType, builder.identity,\n                builder.credential, builder.virtualHost,\n                builder.maxSinglePartObjectSize,\n                
builder.v4MaxNonChunkedRequestSize,\n                builder.v4MaxChunkSize,\n                builder.ignoreUnknownHeaders, builder.corsRules,\n                builder.servicePath, builder.maximumTimeSkew, metrics);\n\n        var context = new ServletContextHandler();\n        if (builder.servicePath != null && !builder.servicePath.isEmpty()) {\n            context.setContextPath(builder.servicePath);\n        }\n        if (metrics != null) {\n            context.addServlet(new ServletHolder(\n                    new MetricsHandler(metrics)), \"/metrics\");\n        }\n        context.addServlet(new ServletHolder(handler), \"/*\");\n        server.setHandler(context);\n    }\n\n    public static final class Builder {\n        private BlobStore blobStore;\n        private URI endpoint;\n        private URI secureEndpoint;\n        private String servicePath;\n        private AuthenticationType authenticationType =\n                AuthenticationType.NONE;\n        private String identity;\n        private String credential;\n        private SSLContext sslContext;\n        private String keyStorePath;\n        private String keyStorePassword;\n        private String virtualHost;\n        private long maxSinglePartObjectSize = 5L * 1024 * 1024 * 1024;\n        private long v4MaxNonChunkedRequestSize = 128 * 1024 * 1024;\n        private int v4MaxChunkSize = 16 * 1024 * 1024;\n        private boolean ignoreUnknownHeaders;\n        private CrossOriginResourceSharing corsRules;\n        private int jettyMaxThreads = 200;  // sourced from QueuedThreadPool()\n        private int maximumTimeSkew = 15 * 60;\n        private boolean metricsEnabled;\n        private int metricsPort = S3ProxyMetrics.DEFAULT_METRICS_PORT;\n        private String metricsHost = S3ProxyMetrics.DEFAULT_METRICS_HOST;\n\n        Builder() {\n        }\n\n        public S3Proxy build() {\n            return new S3Proxy(this);\n        }\n\n        public static Builder fromProperties(Properties 
properties)\n                throws URISyntaxException {\n            var builder = new Builder();\n\n            String endpoint = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_ENDPOINT);\n            String secureEndpoint = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_SECURE_ENDPOINT);\n            boolean hasEndpoint = !Strings.isNullOrEmpty(endpoint);\n            boolean hasSecureEndpoint = !Strings.isNullOrEmpty(secureEndpoint);\n            if (!hasEndpoint && !hasSecureEndpoint) {\n                throw new IllegalArgumentException(\n                        \"Properties file must contain: \" +\n                        S3ProxyConstants.PROPERTY_ENDPOINT + \" or \" +\n                        S3ProxyConstants.PROPERTY_SECURE_ENDPOINT);\n            }\n            if (hasEndpoint) {\n                builder.endpoint(new URI(endpoint));\n            }\n            if (hasSecureEndpoint) {\n                builder.secureEndpoint(new URI(secureEndpoint));\n            }\n\n            String authorizationString = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_AUTHORIZATION);\n            if (authorizationString == null) {\n                throw new IllegalArgumentException(\n                        \"Properties file must contain: \" +\n                        S3ProxyConstants.PROPERTY_AUTHORIZATION);\n            }\n\n            AuthenticationType authorization =\n                    AuthenticationType.fromString(authorizationString);\n            String localIdentity = null;\n            String localCredential = null;\n            switch (authorization) {\n            case AWS_V2:\n            case AWS_V4:\n            case AWS_V2_OR_V4:\n                localIdentity = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_IDENTITY);\n                localCredential = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CREDENTIAL);\n             
   if (localIdentity == null || localCredential == null) {\n                    throw new IllegalArgumentException(\"Must specify both \" +\n                            S3ProxyConstants.PROPERTY_IDENTITY + \" and \" +\n                            S3ProxyConstants.PROPERTY_CREDENTIAL +\n                            \" when using authentication\");\n                }\n                break;\n            case NONE:\n                break;\n            default:\n                throw new IllegalArgumentException(\n                        S3ProxyConstants.PROPERTY_AUTHORIZATION +\n                        \" invalid value, was: \" + authorization);\n            }\n\n            if (localIdentity != null || localCredential != null) {\n                builder.awsAuthentication(authorization, localIdentity,\n                        localCredential);\n            }\n\n            String servicePath = Strings.nullToEmpty(properties.getProperty(\n                    S3ProxyConstants.PROPERTY_SERVICE_PATH));\n            if (servicePath != null) {\n                builder.servicePath(servicePath);\n            }\n\n            String keyStorePath = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_KEYSTORE_PATH);\n            String keyStorePassword = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_KEYSTORE_PASSWORD);\n            if (keyStorePath != null || keyStorePassword != null) {\n                builder.keyStore(keyStorePath, keyStorePassword);\n            }\n\n            String virtualHost = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_VIRTUAL_HOST);\n            if (!Strings.isNullOrEmpty(virtualHost)) {\n                builder.virtualHost(virtualHost);\n            }\n\n            String maxSinglePartObjectSize = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_MAX_SINGLE_PART_OBJECT_SIZE);\n            if (maxSinglePartObjectSize != null) {\n                
builder.maxSinglePartObjectSize(Long.parseLong(\n                        maxSinglePartObjectSize));\n            }\n\n            String v4MaxNonChunkedRequestSize = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_V4_MAX_NON_CHUNKED_REQUEST_SIZE);\n            if (v4MaxNonChunkedRequestSize != null) {\n                builder.v4MaxNonChunkedRequestSize(Long.parseLong(\n                        v4MaxNonChunkedRequestSize));\n            }\n\n            String v4MaxChunkSize = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_V4_MAX_CHUNK_SIZE);\n            if (v4MaxChunkSize != null) {\n                builder.v4MaxChunkSize(Integer.parseInt(v4MaxChunkSize));\n            }\n\n            String ignoreUnknownHeaders = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_IGNORE_UNKNOWN_HEADERS);\n            if (!Strings.isNullOrEmpty(ignoreUnknownHeaders)) {\n                builder.ignoreUnknownHeaders(Boolean.parseBoolean(\n                        ignoreUnknownHeaders));\n            }\n\n            String corsAllowAll = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_CORS_ALLOW_ALL);\n            if (!Strings.isNullOrEmpty(corsAllowAll) && Boolean.parseBoolean(\n                         corsAllowAll)) {\n                builder.corsRules(new CrossOriginResourceSharing());\n            } else {\n                String corsAllowOrigins = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CORS_ALLOW_ORIGINS, \"\");\n                String corsAllowMethods = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CORS_ALLOW_METHODS, \"\");\n                String corsAllowHeaders = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CORS_ALLOW_HEADERS, \"\");\n                String corsExposedHeaders = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CORS_EXPOSED_HEADERS, \"\");\n           
     String allowCredentials = properties.getProperty(\n                        S3ProxyConstants.PROPERTY_CORS_ALLOW_CREDENTIAL, \"\");\n\n                Splitter splitter = Splitter.on(\" \").trimResults()\n                        .omitEmptyStrings();\n\n                //Validate configured methods\n                Collection<String> allowedMethods = Lists.newArrayList(\n                        splitter.split(corsAllowMethods));\n                allowedMethods.removeAll(\n                        CrossOriginResourceSharing.SUPPORTED_METHODS);\n                if (!allowedMethods.isEmpty()) {\n                    throw new IllegalArgumentException(\n                        S3ProxyConstants.PROPERTY_CORS_ALLOW_METHODS +\n                        \" contains not supported values: \" + Joiner.on(\" \")\n                        .join(allowedMethods));\n                }\n\n                builder.corsRules(new CrossOriginResourceSharing(\n                        splitter.splitToList(corsAllowOrigins),\n                        splitter.splitToList(corsAllowMethods),\n                        splitter.splitToList(corsAllowHeaders),\n                        splitter.splitToList(corsExposedHeaders),\n                        allowCredentials));\n            }\n\n            String jettyMaxThreads = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_JETTY_MAX_THREADS);\n            if (jettyMaxThreads != null) {\n                builder.jettyMaxThreads(Integer.parseInt(jettyMaxThreads));\n            }\n\n            String maximumTimeSkew = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_MAXIMUM_TIME_SKEW);\n            if (maximumTimeSkew != null && !maximumTimeSkew.isBlank()) {\n                builder.maximumTimeSkew(Integer.parseInt(maximumTimeSkew));\n            }\n\n            String metricsEnabled = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_METRICS_ENABLED);\n            if 
(!Strings.isNullOrEmpty(metricsEnabled)) {\n                builder.metricsEnabled(Boolean.parseBoolean(metricsEnabled));\n            }\n\n            String metricsPort = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_METRICS_PORT);\n            if (!Strings.isNullOrEmpty(metricsPort)) {\n                builder.metricsPort(Integer.parseInt(metricsPort));\n            }\n\n            String metricsHost = properties.getProperty(\n                    S3ProxyConstants.PROPERTY_METRICS_HOST);\n            if (!Strings.isNullOrEmpty(metricsHost)) {\n                builder.metricsHost(metricsHost);\n            }\n\n            return builder;\n        }\n\n        public Builder blobStore(BlobStore blobStore) {\n            this.blobStore = requireNonNull(blobStore);\n            return this;\n        }\n\n        public Builder endpoint(URI endpoint) {\n            this.endpoint = requireNonNull(endpoint);\n            return this;\n        }\n\n        public Builder secureEndpoint(URI secureEndpoint) {\n            this.secureEndpoint = requireNonNull(secureEndpoint);\n            return this;\n        }\n\n        public Builder awsAuthentication(AuthenticationType authenticationType,\n                String identity, String credential) {\n            this.authenticationType = authenticationType;\n            if (!AuthenticationType.NONE.equals(authenticationType)) {\n                this.identity = requireNonNull(identity);\n                this.credential = requireNonNull(credential);\n            }\n            return this;\n        }\n\n        public Builder sslContext(SSLContext sslContext) {\n            this.sslContext = requireNonNull(sslContext);\n            this.keyStorePath = null;\n            this.keyStorePassword = null;\n            return this;\n        }\n\n        public Builder keyStore(String keyStorePath, String keyStorePassword) {\n            this.keyStorePath = requireNonNull(keyStorePath);\n            
this.keyStorePassword = requireNonNull(keyStorePassword);\n            this.sslContext = null;\n            return this;\n        }\n\n        public Builder virtualHost(String virtualHost) {\n            this.virtualHost = requireNonNull(virtualHost);\n            return this;\n        }\n\n        public Builder maxSinglePartObjectSize(long maxSinglePartObjectSize) {\n            if (maxSinglePartObjectSize <= 0) {\n                throw new IllegalArgumentException(\n                        \"must be greater than zero, was: \" +\n                        maxSinglePartObjectSize);\n            }\n            this.maxSinglePartObjectSize = maxSinglePartObjectSize;\n            return this;\n        }\n\n        public Builder v4MaxNonChunkedRequestSize(\n                long v4MaxNonChunkedRequestSize) {\n            if (v4MaxNonChunkedRequestSize <= 0) {\n                throw new IllegalArgumentException(\n                        \"must be greater than zero, was: \" +\n                        v4MaxNonChunkedRequestSize);\n            }\n            this.v4MaxNonChunkedRequestSize = v4MaxNonChunkedRequestSize;\n            return this;\n        }\n\n        public Builder v4MaxChunkSize(int v4MaxChunkSize) {\n            if (v4MaxChunkSize <= 0) {\n                throw new IllegalArgumentException(\n                        \"must be greater than zero, was: \" +\n                        v4MaxChunkSize);\n            }\n            this.v4MaxChunkSize = v4MaxChunkSize;\n            return this;\n        }\n\n        public Builder ignoreUnknownHeaders(boolean ignoreUnknownHeaders) {\n            this.ignoreUnknownHeaders = ignoreUnknownHeaders;\n            return this;\n        }\n\n        public Builder corsRules(CrossOriginResourceSharing corsRules) {\n            this.corsRules = corsRules;\n            return this;\n        }\n\n        public Builder jettyMaxThreads(int jettyMaxThreads) {\n            this.jettyMaxThreads = jettyMaxThreads;\n            
return this;\n        }\n\n        public Builder maximumTimeSkew(int maximumTimeSkew) {\n            this.maximumTimeSkew = maximumTimeSkew;\n            return this;\n        }\n\n        public Builder metricsEnabled(boolean metricsEnabled) {\n            this.metricsEnabled = metricsEnabled;\n            return this;\n        }\n\n        public Builder metricsPort(int metricsPort) {\n            this.metricsPort = metricsPort;\n            return this;\n        }\n\n        public Builder metricsHost(String metricsHost) {\n            this.metricsHost = requireNonNull(metricsHost);\n            return this;\n        }\n\n        public Builder servicePath(String s3ProxyServicePath) {\n            String path = Strings.nullToEmpty(s3ProxyServicePath);\n\n            if (!path.isEmpty()) {\n                if (!path.startsWith(\"/\")) {\n                    path = \"/\" + path;\n                }\n            }\n\n            this.servicePath = path;\n\n            return this;\n        }\n\n        public URI getEndpoint() {\n            return endpoint;\n        }\n\n        public URI getSecureEndpoint() {\n            return secureEndpoint;\n        }\n\n        public String getServicePath() {\n            return servicePath;\n        }\n\n        public String getIdentity() {\n            return identity;\n        }\n\n        public String getCredential() {\n            return credential;\n        }\n\n        @Override\n        public boolean equals(Object object) {\n            if (this == object) {\n                return true;\n            } else if (!(object instanceof S3Proxy.Builder)) {\n                return false;\n            }\n            S3Proxy.Builder that = (S3Proxy.Builder) object;\n            // do not check credentials or storage backend fields\n            return Objects.equals(this.endpoint, that.endpoint) &&\n                    Objects.equals(this.secureEndpoint, that.secureEndpoint) &&\n                    
Objects.equals(this.sslContext, that.sslContext) &&\n                    Objects.equals(this.keyStorePath, that.keyStorePath) &&\n                    Objects.equals(this.keyStorePassword,\n                            that.keyStorePassword) &&\n                    Objects.equals(this.virtualHost, that.virtualHost) &&\n                    Objects.equals(this.servicePath, that.servicePath) &&\n                    this.maxSinglePartObjectSize ==\n                            that.maxSinglePartObjectSize &&\n                    this.v4MaxNonChunkedRequestSize ==\n                            that.v4MaxNonChunkedRequestSize &&\n                    this.v4MaxChunkSize == that.v4MaxChunkSize &&\n                    this.ignoreUnknownHeaders == that.ignoreUnknownHeaders &&\n                    this.corsRules.equals(that.corsRules);\n        }\n\n        @Override\n        public int hashCode() {\n            return Objects.hash(endpoint, secureEndpoint, sslContext,\n                    keyStorePath, keyStorePassword, virtualHost, servicePath,\n                    maxSinglePartObjectSize, v4MaxNonChunkedRequestSize,\n                    v4MaxChunkSize, ignoreUnknownHeaders, corsRules);\n        }\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    public void start() throws Exception {\n        server.start();\n    }\n\n    public void stop() throws Exception {\n        server.stop();\n        if (metrics != null) {\n            metrics.close();\n        }\n    }\n\n    public int getPort() {\n        if (listenHTTP) {\n            return ((ServerConnector) server.getConnectors()[0]).getLocalPort();\n        } else {\n            return -1;\n        }\n    }\n\n    public int getSecurePort() {\n        if (listenHTTPS) {\n            ServerConnector connector;\n            if (listenHTTP) {\n                connector = (ServerConnector) server.getConnectors()[1];\n            } else {\n                connector = (ServerConnector) 
server.getConnectors()[0];\n            }\n            return connector.getLocalPort();\n        }\n\n        return -1;\n    }\n\n    public String getState() {\n        return server.getState();\n    }\n\n    public void setBlobStoreLocator(BlobStoreLocator lookup) {\n        handler.getHandler().setBlobStoreLocator(lookup);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3ProxyConstants.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\npublic final class S3ProxyConstants {\n    public static final String PROPERTY_ENDPOINT =\n            \"s3proxy.endpoint\";\n    public static final String PROPERTY_SECURE_ENDPOINT =\n            \"s3proxy.secure-endpoint\";\n    public static final String PROPERTY_AUTHORIZATION =\n            \"s3proxy.authorization\";\n    public static final String PROPERTY_IDENTITY =\n            \"s3proxy.identity\";\n    /**\n     * Path to prepend to all requests, e.g.,\n     * https://endpoint/service-path/object.\n     */\n    public static final String PROPERTY_SERVICE_PATH =\n            \"s3proxy.service-path\";\n    /** When true, include \"Access-Control-Allow-Origin: *\" in all responses. 
*/\n    public static final String PROPERTY_CORS_ALLOW_ALL =\n            \"s3proxy.cors-allow-all\";\n    public static final String PROPERTY_CORS_ALLOW_ORIGINS =\n            \"s3proxy.cors-allow-origins\";\n    public static final String PROPERTY_CORS_ALLOW_METHODS =\n            \"s3proxy.cors-allow-methods\";\n    public static final String PROPERTY_CORS_ALLOW_HEADERS =\n            \"s3proxy.cors-allow-headers\";\n    public static final String PROPERTY_CORS_EXPOSED_HEADERS =\n            \"s3proxy.cors-exposed-headers\";\n    public static final String PROPERTY_CORS_ALLOW_CREDENTIAL =\n            \"s3proxy.cors-allow-credential\";\n    public static final String PROPERTY_CREDENTIAL =\n            \"s3proxy.credential\";\n    public static final String PROPERTY_IGNORE_UNKNOWN_HEADERS =\n            \"s3proxy.ignore-unknown-headers\";\n    public static final String PROPERTY_KEYSTORE_PATH =\n            \"s3proxy.keystore-path\";\n    public static final String PROPERTY_KEYSTORE_PASSWORD =\n            \"s3proxy.keystore-password\";\n    public static final String PROPERTY_JETTY_MAX_THREADS =\n            \"s3proxy.jetty.max-threads\";\n\n    /** Request attributes. */\n    public static final String ATTRIBUTE_QUERY_ENCODING = \"queryEncoding\";\n\n    /**\n     * Configure servicing of virtual host buckets.  Setting to localhost:8080\n     * allows bucket-in-hostname requests, e.g., bucketname.localhost:8080.\n     * This mode requires configuring DNS to forward all requests to the\n     * S3Proxy host.\n     */\n    public static final String PROPERTY_VIRTUAL_HOST =\n            \"s3proxy.virtual-host\";\n    public static final String PROPERTY_MAX_SINGLE_PART_OBJECT_SIZE =\n            \"s3proxy.max-single-part-object-size\";\n    public static final String PROPERTY_V4_MAX_NON_CHUNKED_REQUEST_SIZE =\n            \"s3proxy.v4-max-non-chunked-request-size\";\n    /** Maximum size of a single chunk in an aws-chunked transfer encoding. 
*/\n    public static final String PROPERTY_V4_MAX_CHUNK_SIZE =\n            \"s3proxy.v4-max-chunk-size\";\n    /** Used to locate blobstores by specified bucket names. Each property\n     * file should contain a list of buckets associated with it, e.g.\n     *     s3proxy.bucket-locator.1 = data\n     *     s3proxy.bucket-locator.2 = metadata\n     *     s3proxy.bucket-locator.3 = other\n     * When a request is made for the specified bucket, the backend defined\n     * in that properties file is used. This allows using the same\n     * credentials in multiple properties file and select the backend based\n     * on the bucket names.\n     */\n    public static final String PROPERTY_BUCKET_LOCATOR =\n            \"s3proxy.bucket-locator\";\n    /** When true, model eventual consistency using two storage backends. */\n    public static final String PROPERTY_EVENTUAL_CONSISTENCY =\n            \"s3proxy.eventual-consistency\";\n    /**\n     * Minimum delay, in seconds, when propagating modifications from the\n     * write backend to the read backend.\n     */\n    public static final String PROPERTY_EVENTUAL_CONSISTENCY_DELAY =\n            \"s3proxy.eventual-consistency.delay\";\n    /** Probability of eventual consistency, between 0.0 and 1.0. */\n    public static final String PROPERTY_EVENTUAL_CONSISTENCY_PROBABILITY =\n            \"s3proxy.eventual-consistency.probability\";\n    /** Alias a backend bucket to an alternate name. */\n    public static final String PROPERTY_ALIAS_BLOBSTORE =\n            \"s3proxy.alias-blobstore\";\n    /** Scope bucket operations to a specific object prefix. */\n    public static final String PROPERTY_PREFIX_BLOBSTORE =\n            \"s3proxy.prefix-blobstore\";\n    /** Alias a backend bucket to an alternate name. 
*/\n    public static final String PROPERTY_REGEX_BLOBSTORE =\n            \"s3proxy.regex-blobstore\";\n    public static final String PROPERTY_REGEX_BLOBSTORE_MATCH =\n            \"match\";\n    public static final String PROPERTY_REGEX_BLOBSTORE_REPLACE =\n            \"replace\";\n    /** Discard object data. */\n    public static final String PROPERTY_NULL_BLOBSTORE =\n            \"s3proxy.null-blobstore\";\n    /** Prevent mutations. */\n    public static final String PROPERTY_READ_ONLY_BLOBSTORE =\n            \"s3proxy.read-only-blobstore\";\n    /** Shard objects across a specified number of buckets. */\n    public static final String PROPERTY_SHARDED_BLOBSTORE =\n            \"s3proxy.sharded-blobstore\";\n    /** Override tier when creating blobs. */\n    public static final String PROPERTY_STORAGE_CLASS_BLOBSTORE =\n            \"s3proxy.storage-class-blobstore\";\n\n    /** Maximum time skew allowed in signed requests. */\n    public static final String PROPERTY_MAXIMUM_TIME_SKEW =\n            \"s3proxy.maximum-timeskew\";\n\n    public static final String PROPERTY_ENCRYPTED_BLOBSTORE =\n            \"s3proxy.encrypted-blobstore\";\n    public static final String PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD =\n            \"s3proxy.encrypted-blobstore-password\";\n    public static final String PROPERTY_ENCRYPTED_BLOBSTORE_SALT =\n            \"s3proxy.encrypted-blobstore-salt\";\n\n    public static final String PROPERTY_USER_METADATA_REPLACER =\n            \"s3proxy.user-metadata-replacer-blobstore\";\n    public static final String PROPERTY_USER_METADATA_REPLACER_FROM_CHARS =\n            \"s3proxy.user-metadata-replacer-blobstore.from-chars\";\n    public static final String PROPERTY_USER_METADATA_REPLACER_TO_CHARS =\n            \"s3proxy.user-metadata-replacer-blobstore.to-chars\";\n\n    public static final String PROPERTY_LATENCY =\n            \"s3proxy.latency-blobstore\";\n\n    public static final String PROPERTY_NO_CACHE_BLOBSTORE =\n         
   \"s3proxy.no-cache-blobstore\";\n\n    /** Enable Prometheus metrics endpoint at /metrics. */\n    public static final String PROPERTY_METRICS_ENABLED =\n            \"s3proxy.metrics.enabled\";\n\n    public static final String PROPERTY_METRICS_PORT =\n            \"s3proxy.metrics.port\";\n    public static final String PROPERTY_METRICS_HOST =\n            \"s3proxy.metrics.host\";\n\n    static final String PROPERTY_ALT_JCLOUDS_PREFIX = \"alt.\";\n\n    private S3ProxyConstants() {\n        throw new AssertionError(\"Cannot instantiate utility constructor\");\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3ProxyHandler.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.FilterInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.io.PrintWriter;\nimport java.io.PushbackInputStream;\nimport java.io.Writer;\nimport java.net.URLDecoder;\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.AccessDeniedException;\nimport java.security.InvalidKeyException;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\nimport java.text.ParseException;\nimport java.text.SimpleDateFormat;\nimport java.time.Instant;\nimport java.util.ArrayList;\nimport java.util.Base64;\nimport java.util.Collection;\nimport java.util.Collections;\nimport java.util.Date;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Optional;\nimport java.util.Properties;\nimport java.util.Set;\nimport java.util.SortedMap;\nimport java.util.TimeZone;\nimport java.util.TreeMap;\nimport java.util.TreeSet;\nimport java.util.concurrent.ThreadLocalRandom;\nimport java.util.concurrent.TimeUnit;\nimport java.util.concurrent.atomic.AtomicReference;\nimport java.util.stream.Collectors;\n\nimport javax.crypto.Mac;\nimport javax.crypto.spec.SecretKeySpec;\nimport javax.xml.stream.XMLOutputFactory;\nimport javax.xml.stream.XMLStreamException;\nimport 
javax.xml.stream.XMLStreamWriter;\n\nimport com.fasterxml.jackson.core.JsonParseException;\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.dataformat.xml.XmlMapper;\nimport com.google.common.base.CharMatcher;\nimport com.google.common.base.Splitter;\nimport com.google.common.base.Strings;\nimport com.google.common.cache.Cache;\nimport com.google.common.cache.CacheBuilder;\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.Maps;\nimport com.google.common.collect.Streams;\nimport com.google.common.escape.Escaper;\nimport com.google.common.hash.HashCode;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\nimport com.google.common.hash.HashingInputStream;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.io.ByteSource;\nimport com.google.common.io.ByteStreams;\nimport com.google.common.net.HostAndPort;\nimport com.google.common.net.HttpHeaders;\nimport com.google.common.net.PercentEscaper;\n\nimport jakarta.servlet.http.HttpServletRequest;\nimport jakarta.servlet.http.HttpServletResponse;\n\nimport org.eclipse.jetty.http.MultiPartFormData;\nimport org.eclipse.jetty.io.content.InputStreamContentSource;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobBuilder;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.domain.internal.MutableBlobMetadataImpl;\nimport 
org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.io.ContentMetadataBuilder;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.rest.AuthorizationException;\nimport org.jclouds.s3.domain.ObjectMetadata.StorageClass;\nimport org.jspecify.annotations.Nullable;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n/** HTTP server-independent handler for S3 requests. */\npublic class S3ProxyHandler {\n    private static final Logger logger = LoggerFactory.getLogger(\n            S3ProxyHandler.class);\n\n    public static final class RequestContext {\n        private S3Operation operation;\n        private String bucket;\n\n        public S3Operation getOperation() {\n            return operation;\n        }\n\n        public void setOperation(S3Operation operation) {\n            this.operation = operation;\n        }\n\n        public String getBucket() {\n            return bucket;\n        }\n\n        public void setBucket(String bucket) {\n            this.bucket = bucket;\n        }\n    }\n    private static final String AWS_XMLNS =\n            \"http://s3.amazonaws.com/doc/2006-03-01/\";\n    // TODO: support configurable metadata prefix\n    private static final String USER_METADATA_PREFIX = \"x-amz-meta-\";\n    // TODO: fake owner\n    private static final String FAKE_OWNER_ID =\n            \"75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a\";\n    private static final String FAKE_OWNER_DISPLAY_NAME =\n            \"CustomersName@amazon.com\";\n    private static final String FAKE_INITIATOR_ID =\n            \"arn:aws:iam::111122223333:\" +\n            
\"user/some-user-11116a31-17b5-4fb7-9df5-b288870f11xx\";\n    private static final String FAKE_INITIATOR_DISPLAY_NAME =\n            \"umat-user-11116a31-17b5-4fb7-9df5-b288870f11xx\";\n    private static final CharMatcher VALID_BUCKET_FIRST_CHAR =\n            CharMatcher.inRange('a', 'z')\n                    .or(CharMatcher.inRange('A', 'Z'))\n                    .or(CharMatcher.inRange('0', '9'));\n    private static final CharMatcher VALID_BUCKET =\n            VALID_BUCKET_FIRST_CHAR\n                    .or(CharMatcher.is('.'))\n                    .or(CharMatcher.is('_'))\n                    .or(CharMatcher.is('-'));\n    private static final long MAX_MULTIPART_COPY_SIZE =\n            5L * 1024L * 1024L * 1024L;\n    private static final Set<String> UNSUPPORTED_PARAMETERS = Set.of(\n            \"accelerate\",\n            \"analytics\",\n            \"cors\",\n            \"inventory\",\n            \"lifecycle\",\n            \"logging\",\n            \"metrics\",\n            \"notification\",\n            \"replication\",\n            \"requestPayment\",\n            \"restore\",\n            \"tagging\",\n            \"torrent\",\n            \"versioning\",\n            \"versions\",\n            \"website\"\n    );\n    /** All supported x-amz- headers, except for x-amz-meta- user metadata. 
*/\n    private static final Set<String> SUPPORTED_X_AMZ_HEADERS = Set.of(\n            AwsHttpHeaders.ACL,\n            AwsHttpHeaders.API_VERSION,\n            AwsHttpHeaders.CHECKSUM_ALGORITHM,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_CRC32,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_CRC32C,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_CRC64NVME,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_MODE,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_SHA1,  // TODO: ignoring header\n            AwsHttpHeaders.CHECKSUM_SHA256,  // TODO: ignoring header\n            AwsHttpHeaders.CONTENT_SHA256,\n            AwsHttpHeaders.COPY_SOURCE,\n            AwsHttpHeaders.COPY_SOURCE_IF_MATCH,\n            AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE,\n            AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH,\n            AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE,\n            AwsHttpHeaders.COPY_SOURCE_RANGE,\n            AwsHttpHeaders.DATE,\n            AwsHttpHeaders.DECODED_CONTENT_LENGTH,\n            AwsHttpHeaders.METADATA_DIRECTIVE,\n            AwsHttpHeaders.SDK_CHECKSUM_ALGORITHM,  // TODO: ignoring header\n            AwsHttpHeaders.STORAGE_CLASS,\n            AwsHttpHeaders.TRAILER,\n            AwsHttpHeaders.TRANSFER_ENCODING,  // TODO: ignoring header\n            AwsHttpHeaders.USER_AGENT\n    );\n    private static final Set<String> CANNED_ACLS = Set.of(\n            \"private\",\n            \"public-read\",\n            \"public-read-write\",\n            \"authenticated-read\",\n            \"bucket-owner-read\",\n            \"bucket-owner-full-control\",\n            \"log-delivery-write\"\n    );\n    private static final String XML_CONTENT_TYPE = \"application/xml\";\n    private static final String UTF_8 = \"UTF-8\";\n    /** URLEncoder escapes / which we do not want. 
*/\n    private static final Escaper urlEscaper = new PercentEscaper(\n            \"*-./_\", /*plusForSpace=*/ false);\n    @SuppressWarnings(\"deprecation\")\n    private static final HashFunction MD5 = Hashing.md5();\n    private static final ObjectMapper JSON_MAPPER = new ObjectMapper();\n    private static final Instant LAUNCH_TIME = Instant.now();\n    private static final String GIT_HASH = loadGitHash();\n\n    private final boolean anonymousIdentity;\n    private final AuthenticationType authenticationType;\n    private final Optional<String> virtualHost;\n    private final long maxSinglePartObjectSize;\n    private final long v4MaxNonChunkedRequestSize;\n    private final int v4MaxChunkSize;\n    private final boolean ignoreUnknownHeaders;\n    private final CrossOriginResourceSharing corsRules;\n    private final String servicePath;\n    private final int maximumTimeSkew;\n    private final XmlMapper mapper = new XmlMapper();\n    private final XMLOutputFactory xmlOutputFactory =\n            XMLOutputFactory.newInstance();\n    private BlobStoreLocator blobStoreLocator;\n    // TODO: hack to allow per-request anonymous access\n    private final BlobStore defaultBlobStore;\n    /**\n     * S3 supports arbitrary keys for the marker while some blobstores only\n     * support opaque markers.  
Emulate the common case for these by mapping\n     * the last key from a listing to the corresponding previously returned\n     * marker.\n     */\n    private final Cache<Map.Entry<String, String>, String> lastKeyToMarker =\n            CacheBuilder.newBuilder()\n            .maximumSize(10000)\n            .expireAfterWrite(10, TimeUnit.MINUTES)\n            .build();\n\n    public S3ProxyHandler(final BlobStore blobStore,\n            AuthenticationType authenticationType, final String identity,\n            final String credential, @Nullable String virtualHost,\n            long maxSinglePartObjectSize, long v4MaxNonChunkedRequestSize,\n            int v4MaxChunkSize,\n            boolean ignoreUnknownHeaders,\n            @Nullable CrossOriginResourceSharing corsRules,\n            final String servicePath, int maximumTimeSkew) {\n        if (corsRules != null) {\n            this.corsRules = corsRules;\n        } else {\n            this.corsRules = new CrossOriginResourceSharing();\n        }\n        if (authenticationType != AuthenticationType.NONE) {\n            anonymousIdentity = false;\n            blobStoreLocator = new BlobStoreLocator() {\n                @Override\n                public Map.@Nullable Entry<String, BlobStore> locateBlobStore(\n                        String identityArg, String container, String blob) {\n                    if (!identity.equals(identityArg)) {\n                        return null;\n                    }\n                    return Map.entry(credential, blobStore);\n                }\n            };\n        } else {\n            anonymousIdentity = true;\n            final Map.Entry<String, BlobStore> anonymousBlobStore =\n                    Maps.immutableEntry(null, blobStore);\n            blobStoreLocator = new BlobStoreLocator() {\n                @Override\n                public Map.Entry<String, BlobStore> locateBlobStore(\n                        String identityArg, String container, String blob) {\n       
             return anonymousBlobStore;\n                }\n            };\n        }\n        this.authenticationType = authenticationType;\n        this.virtualHost = Optional.ofNullable(virtualHost);\n        this.maxSinglePartObjectSize = maxSinglePartObjectSize;\n        this.v4MaxNonChunkedRequestSize = v4MaxNonChunkedRequestSize;\n        this.v4MaxChunkSize = v4MaxChunkSize;\n        this.ignoreUnknownHeaders = ignoreUnknownHeaders;\n        this.defaultBlobStore = blobStore;\n        xmlOutputFactory.setProperty(\"javax.xml.stream.isRepairingNamespaces\", false);\n        this.servicePath = Strings.nullToEmpty(servicePath);\n        this.maximumTimeSkew = maximumTimeSkew;\n    }\n\n    private static String getBlobStoreType(BlobStore blobStore) {\n        return blobStore.getContext().unwrap().getProviderMetadata().getId();\n    }\n\n    private static boolean isValidContainer(String containerName) {\n        if (containerName == null ||\n                containerName.length() < 3 || containerName.length() > 255 ||\n                containerName.startsWith(\".\") || containerName.endsWith(\".\") ||\n                validateIpAddress(containerName) ||\n                !VALID_BUCKET_FIRST_CHAR.matches(containerName.charAt(0)) ||\n                !VALID_BUCKET.matchesAllOf(containerName)) {\n            return false;\n        }\n        return true;\n    }\n\n    public final void doHandle(HttpServletRequest baseRequest,\n            HttpServletRequest request, HttpServletResponse response,\n            InputStream is, @Nullable RequestContext ctx)\n            throws IOException, S3Exception {\n        String method = request.getMethod();\n        String uri = request.getRequestURI();\n        String originalUri = request.getRequestURI();\n\n        String healthzUri = servicePath.isEmpty() ? 
\"/healthz\" :\n                servicePath + \"/healthz\";\n        if (healthzUri.equals(uri) && \"GET\".equalsIgnoreCase(method)) {\n            handleStatuszRequest(response);\n            return;\n        }\n\n        if (!this.servicePath.isEmpty()) {\n            if (uri.length() > this.servicePath.length()) {\n                uri = uri.substring(this.servicePath.length());\n            }\n        }\n\n        logger.debug(\"request: {}\", request);\n        String hostHeader = request.getHeader(HttpHeaders.HOST);\n        if (hostHeader != null && virtualHost.isPresent()) {\n            hostHeader = HostAndPort.fromString(hostHeader).getHost();\n            String virtualHostSuffix = \".\" + virtualHost.orElseThrow();\n            if (!hostHeader.equals(virtualHost.orElseThrow())) {\n                if (hostHeader.endsWith(virtualHostSuffix)) {\n                    String bucket = hostHeader.substring(0,\n                            hostHeader.length() - virtualHostSuffix.length());\n                    uri = \"/\" + bucket + uri;\n                } else {\n                    String bucket = hostHeader.toLowerCase();\n                    uri = \"/\" + bucket + uri;\n                }\n            }\n        }\n\n        response.addHeader(AwsHttpHeaders.REQUEST_ID, generateRequestId());\n\n        boolean hasDateHeader = false;\n        boolean hasXAmzDateHeader = false;\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            for (String headerValue : Collections.list(request.getHeaders(\n                    headerName))) {\n                logger.trace(\"header: {}: {}\", headerName,\n                        Strings.nullToEmpty(headerValue));\n            }\n            if (headerName.equalsIgnoreCase(HttpHeaders.DATE)) {\n                hasDateHeader = true;\n            } else if (headerName.equalsIgnoreCase(AwsHttpHeaders.DATE)) {\n                if (!Strings.isNullOrEmpty(request.getHeader(\n                      
  AwsHttpHeaders.DATE))) {\n                    hasXAmzDateHeader = true;\n                }\n            }\n        }\n        boolean haveBothDateHeader = false;\n        if (hasDateHeader && hasXAmzDateHeader) {\n            haveBothDateHeader = true;\n        }\n\n        // when access information is not provided in request header,\n        // treat it as anonymous, return all public accessible information\n        if (!anonymousIdentity &&\n                (method.equals(\"GET\") || method.equals(\"HEAD\") ||\n                method.equals(\"POST\") || method.equals(\"OPTIONS\")) &&\n                request.getHeader(HttpHeaders.AUTHORIZATION) == null &&\n                // v2 or /v4\n                request.getParameter(\"X-Amz-Algorithm\") == null && // v4 query\n                request.getParameter(\"AWSAccessKeyId\") == null &&  // v2 query\n                defaultBlobStore != null) {\n            doHandleAnonymous(request, response, is, uri, defaultBlobStore,\n                    ctx);\n            return;\n        }\n\n        // should according the AWSAccessKeyId=  Signature  or auth header nil\n        if (!anonymousIdentity && !hasDateHeader && !hasXAmzDateHeader &&\n                request.getParameter(\"X-Amz-Date\") == null &&\n                request.getParameter(\"Expires\") == null) {\n            throw new S3Exception(S3ErrorCode.ACCESS_DENIED,\n                    \"AWS authentication requires a valid Date or\" +\n                    \" x-amz-date header\");\n        }\n\n        BlobStore blobStore;\n        String requestIdentity = null;\n        String headerAuthorization = request.getHeader(\n                HttpHeaders.AUTHORIZATION);\n        S3AuthorizationHeader authHeader = null;\n        boolean presignedUrl = false;\n\n        if (!anonymousIdentity) {\n            if (Strings.isNullOrEmpty(headerAuthorization)) {\n                String algorithm = request.getParameter(\"X-Amz-Algorithm\");\n                if (algorithm == null) 
{ //v2 query\n                    String identity = request.getParameter(\"AWSAccessKeyId\");\n                    String signature = request.getParameter(\"Signature\");\n                    if (identity == null || signature == null) {\n                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                    }\n                    headerAuthorization = \"AWS \" + identity + \":\" + signature;\n                    presignedUrl = true;\n                } else if (algorithm.equals(\"AWS4-HMAC-SHA256\")) { //v4 query\n                    String credential = request.getParameter(\n                            \"X-Amz-Credential\");\n                    String signedHeaders = request.getParameter(\n                            \"X-Amz-SignedHeaders\");\n                    String signature = request.getParameter(\n                            \"X-Amz-Signature\");\n                    if (credential == null || signedHeaders == null ||\n                            signature == null) {\n                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                    }\n                    headerAuthorization = \"AWS4-HMAC-SHA256\" +\n                            \" Credential=\" + credential +\n                            \", requestSignedHeaders=\" + signedHeaders +\n                            \", Signature=\" + signature;\n                    presignedUrl = true;\n                } else {\n                    throw new IllegalArgumentException(\"unknown algorithm: \" +\n                            algorithm);\n                }\n            }\n\n            try {\n                authHeader = new S3AuthorizationHeader(headerAuthorization);\n                //whether v2 or v4 (normal header and query)\n            } catch (IllegalArgumentException iae) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, iae);\n            }\n            requestIdentity = authHeader.getIdentity();\n        }\n\n        long dateSkew = 0; 
//date for timeskew check\n\n        //v2 GET /s3proxy-1080747708/foo?AWSAccessKeyId=local-identity&Expires=\n        //1510322602&Signature=UTyfHY1b1Wgr5BFEn9dpPlWdtFE%3D)\n        //have no date\n\n        if (!anonymousIdentity) {\n            boolean haveDate = true;\n\n            AuthenticationType finalAuthType = null;\n            if (authHeader.getAuthenticationType() ==\n                    AuthenticationType.AWS_V2 &&\n                    (authenticationType == AuthenticationType.AWS_V2 ||\n                    authenticationType == AuthenticationType.AWS_V2_OR_V4)) {\n                finalAuthType = AuthenticationType.AWS_V2;\n            } else if (\n                authHeader.getAuthenticationType() ==\n                        AuthenticationType.AWS_V4 &&\n                        (authenticationType == AuthenticationType.AWS_V4 ||\n                    authenticationType == AuthenticationType.AWS_V2_OR_V4)) {\n                finalAuthType = AuthenticationType.AWS_V4;\n            } else if (authenticationType != AuthenticationType.NONE) {\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n\n            if (hasXAmzDateHeader) { //format diff between v2 and v4\n                if (finalAuthType == AuthenticationType.AWS_V2) {\n                    dateSkew = request.getDateHeader(AwsHttpHeaders.DATE);\n                    dateSkew /= 1000;\n                    //case sensitive?\n                } else if (finalAuthType == AuthenticationType.AWS_V4) {\n                    dateSkew = parseIso8601(request.getHeader(\n                            AwsHttpHeaders.DATE));\n                }\n            } else if (hasDateHeader) {\n                try {\n                    dateSkew = request.getDateHeader(HttpHeaders.DATE);\n                    dateSkew /= 1000;\n                } catch (IllegalArgumentException iae) {\n                    try {\n                        dateSkew = parseIso8601(request.getHeader(\n                  
              HttpHeaders.DATE));\n                    } catch (IllegalArgumentException iae2) {\n                        throw new S3Exception(S3ErrorCode.ACCESS_DENIED, iae);\n                    }\n                }\n            } else {\n                haveDate = false;\n            }\n            if (haveDate) {\n                isTimeSkewed(dateSkew, presignedUrl);\n            }\n        }\n\n        String[] path = uri.split(\"/\", 3);\n        for (int i = 0; i < path.length; i++) {\n            path[i] = URLDecoder.decode(path[i], StandardCharsets.UTF_8);\n        }\n\n        for (String parameter : Collections.list(\n                request.getParameterNames())) {\n            if (UNSUPPORTED_PARAMETERS.contains(parameter)) {\n                logger.error(\"Unknown parameters {} with URI {}\",\n                        parameter, request.getRequestURI());\n                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n            }\n        }\n\n        // emit NotImplemented for unknown x-amz- headers\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            headerName = headerName.toLowerCase();\n            if (ignoreUnknownHeaders) {\n                continue;\n            }\n            if (!headerName.startsWith(\"x-amz-\")) {\n                continue;\n            }\n            if (headerName.startsWith(USER_METADATA_PREFIX)) {\n                continue;\n            }\n            if (!SUPPORTED_X_AMZ_HEADERS.contains(headerName)) {\n                logger.error(\"Unknown header {} with URI {}\",\n                        headerName, request.getRequestURI());\n                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n            }\n        }\n\n        Map.Entry<String, BlobStore> provider =\n                blobStoreLocator.locateBlobStore(\n                        requestIdentity, path.length > 1 ? path[1] : null,\n                        path.length > 2 ? 
path[2] : null);\n        if (anonymousIdentity) {\n            blobStore = provider.getValue();\n            String contentSha256 = request.getHeader(\n                    AwsHttpHeaders.CONTENT_SHA256);\n            if (\"STREAMING-AWS4-HMAC-SHA256-PAYLOAD\".equals(contentSha256)) {\n                is = new ChunkedInputStream(is, v4MaxChunkSize);\n            } else if (\"STREAMING-UNSIGNED-PAYLOAD-TRAILER\".equals(contentSha256)) {\n                is = new ChunkedInputStream(is, v4MaxChunkSize, request.getHeader(AwsHttpHeaders.TRAILER));\n            }\n        } else if (requestIdentity == null) {\n            throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n        } else {\n            if (provider == null) {\n                throw new S3Exception(S3ErrorCode.INVALID_ACCESS_KEY_ID);\n            }\n\n            String credential = provider.getKey();\n            blobStore = provider.getValue();\n\n            String expiresString = request.getParameter(\"Expires\");\n            if (expiresString != null) { // v2 query\n                long expires = Long.parseLong(expiresString);\n                long nowSeconds = System.currentTimeMillis() / 1000;\n                if (nowSeconds >= expires) {\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED,\n                            \"Request has expired\");\n                }\n                if (expires - nowSeconds > TimeUnit.DAYS.toSeconds(365)) {\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                }\n            }\n\n            String dateString = request.getParameter(\"X-Amz-Date\");\n            //from para v4 query\n            expiresString = request.getParameter(\"X-Amz-Expires\");\n            if (dateString != null && expiresString != null) { //v4 query\n                long date = parseIso8601(dateString);\n                long expires = Long.parseLong(expiresString);\n                long nowSeconds = System.currentTimeMillis() / 1000;\n         
       if (nowSeconds >= date + expires) {\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED,\n                            \"Request has expired\");\n                }\n                if (expires > TimeUnit.DAYS.toSeconds(7)) {\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                }\n            }\n            // The aim ?\n            switch (authHeader.getAuthenticationType()) {\n            case AWS_V2:\n                switch (authenticationType) {\n                case AWS_V2:\n                case AWS_V2_OR_V4:\n                case NONE:\n                    break;\n                default:\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                }\n                break;\n            case AWS_V4:\n                switch (authenticationType) {\n                case AWS_V4:\n                case AWS_V2_OR_V4:\n                case NONE:\n                    break;\n                default:\n                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n                }\n                break;\n            case NONE:\n                break;\n            default:\n                throw new IllegalArgumentException(\"Unhandled type: \" +\n                        authHeader.getAuthenticationType());\n            }\n\n            String expectedSignature = null;\n\n            if (authHeader.getHmacAlgorithm() == null) { //v2\n                // When presigned url is generated, it doesn't consider\n                // service path\n                String uriForSigning = presignedUrl ? 
uri : this.servicePath +\n                        uri;\n                expectedSignature = AwsSignature.createAuthorizationSignature(\n                        request, uriForSigning, credential, presignedUrl,\n                        haveBothDateHeader);\n            } else {\n                String contentSha256 = request.getHeader(\n                        AwsHttpHeaders.CONTENT_SHA256);\n                try {\n                    byte[] payload;\n                    if (request.getParameter(\"X-Amz-Algorithm\") != null) {\n                        payload = new byte[0];\n                    } else if (\"STREAMING-AWS4-HMAC-SHA256-PAYLOAD\".equals(\n                            contentSha256)) {\n                        payload = new byte[0];\n                        // ChunkedInputStream constructed below after deriving\n                        // the signing key so per-chunk signatures can be\n                        // verified.\n                    } else if (\"STREAMING-UNSIGNED-PAYLOAD-TRAILER\".equals(contentSha256)) {\n                        payload = new byte[0];\n                        is = new ChunkedInputStream(is, v4MaxChunkSize, request.getHeader(AwsHttpHeaders.TRAILER));\n                    } else if (\"UNSIGNED-PAYLOAD\".equals(contentSha256)) {\n                        payload = new byte[0];\n                    } else {\n                        // buffer the entire stream to calculate digest\n                        // why input stream read contentlength of header?\n                        payload = ByteStreams.limit(is, v4MaxNonChunkedRequestSize + 1)\n                                .readAllBytes();\n                        if (payload.length == v4MaxNonChunkedRequestSize + 1) {\n                            throw new S3Exception(\n                                    S3ErrorCode.MAX_MESSAGE_LENGTH_EXCEEDED);\n                        }\n\n                        // maybe we should check this when signing,\n                        // a lot of 
dup code with aws sign code.\n                        MessageDigest md = MessageDigest.getInstance(\n                            authHeader.getHashAlgorithm());\n                        byte[] hash = md.digest(payload);\n                        if  (!contentSha256.equals(\n                              BaseEncoding.base16().lowerCase()\n                              .encode(hash))) {\n                            throw new S3Exception(\n                                    S3ErrorCode\n                                    .X_AMZ_CONTENT_S_H_A_256_MISMATCH);\n                        }\n                        is = new ByteArrayInputStream(payload);\n                    }\n\n                    String uriForSigning = presignedUrl ? originalUri :\n                            this.servicePath + originalUri;\n                    expectedSignature = AwsSignature\n                            .createAuthorizationSignatureV4(// v4 sign\n                            baseRequest, authHeader, payload, uriForSigning,\n                            credential);\n                    if (\"STREAMING-AWS4-HMAC-SHA256-PAYLOAD\".equals(\n                            contentSha256)) {\n                        byte[] signingKey = AwsSignature.deriveSigningKeyV4(\n                                authHeader, credential);\n                        String scope = authHeader.getDate() + \"/\" +\n                                authHeader.getRegion() + \"/\" +\n                                authHeader.getService() + \"/aws4_request\";\n                        String timestamp = request.getHeader(\n                                AwsHttpHeaders.DATE);\n                        if (timestamp == null) {\n                            timestamp = request.getParameter(\"X-Amz-Date\");\n                        }\n                        is = new ChunkedInputStream(is, v4MaxChunkSize,\n                                expectedSignature, signingKey,\n                                
authHeader.getHmacAlgorithm(), timestamp,\n                                scope);\n                    }\n                } catch (InvalidKeyException | NoSuchAlgorithmException e) {\n                    throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, e);\n                }\n            }\n\n            // AWS does not check signatures with OPTIONS verb\n            if (!method.equals(\"OPTIONS\") && !constantTimeEquals(\n                    expectedSignature, authHeader.getSignature())) {\n                throw new S3Exception(S3ErrorCode.SIGNATURE_DOES_NOT_MATCH);\n            }\n        }\n\n        // Validate container name\n        if (!uri.equals(\"/\") && !isValidContainer(path[1])) {\n            if (method.equals(\"PUT\") &&\n                    (path.length <= 2 || path[2].isEmpty()) &&\n                    !\"\".equals(request.getParameter(\"acl\")))  {\n                throw new S3Exception(S3ErrorCode.INVALID_BUCKET_NAME);\n            } else {\n                throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);\n            }\n        }\n\n        String uploadId = request.getParameter(\"uploadId\");\n\n        if (ctx != null && path.length > 1 && !path[1].isEmpty()) {\n            ctx.setBucket(path[1]);\n        }\n\n        switch (method) {\n        case \"DELETE\":\n            if (path.length <= 2 || path[2].isEmpty()) {\n                setOperation(ctx, S3Operation.DELETE_BUCKET);\n                handleContainerDelete(request, response, blobStore, path[1]);\n                return;\n            } else if (uploadId != null) {\n                setOperation(ctx, S3Operation.ABORT_MULTIPART_UPLOAD);\n                handleAbortMultipartUpload(request, response, blobStore,\n                        path[1], path[2], uploadId);\n                return;\n            } else {\n                setOperation(ctx, S3Operation.DELETE_OBJECT);\n                handleBlobRemove(request, response, blobStore, path[1],\n                        path[2]);\n 
               return;\n            }\n        case \"GET\":\n            if (uri.equals(\"/\")) {\n                setOperation(ctx, S3Operation.LIST_BUCKETS);\n                handleContainerList(request, response, blobStore);\n                return;\n            } else if (path.length <= 2 || path[2].isEmpty()) {\n                if (request.getParameter(\"acl\") != null) {\n                    setOperation(ctx, S3Operation.GET_BUCKET_ACL);\n                    handleGetContainerAcl(request, response, blobStore,\n                            path[1]);\n                    return;\n                } else if (request.getParameter(\"location\") != null) {\n                    setOperation(ctx, S3Operation.GET_BUCKET_LOCATION);\n                    handleContainerLocation(request, response);\n                    return;\n                } else if (request.getParameter(\"policy\") != null) {\n                    setOperation(ctx, S3Operation.GET_BUCKET_POLICY);\n                    handleBucketPolicy(blobStore, path[1]);\n                    return;\n                } else if (request.getParameter(\"uploads\") != null) {\n                    setOperation(ctx, S3Operation.LIST_MULTIPART_UPLOADS);\n                    handleListMultipartUploads(request, response, blobStore,\n                            path[1]);\n                    return;\n                }\n                setOperation(ctx, S3Operation.LIST_OBJECTS_V2);\n                handleBlobList(request, response, blobStore, path[1]);\n                return;\n            } else {\n                if (request.getParameter(\"acl\") != null) {\n                    setOperation(ctx, S3Operation.GET_OBJECT_ACL);\n                    handleGetBlobAcl(request, response, blobStore, path[1],\n                            path[2]);\n                    return;\n                } else if (uploadId != null) {\n                    setOperation(ctx, S3Operation.LIST_PARTS);\n                    handleListParts(request, 
response, blobStore, path[1],\n                            path[2], uploadId);\n                    return;\n                }\n                setOperation(ctx, S3Operation.GET_OBJECT);\n                handleGetBlob(request, response, blobStore, path[1],\n                        path[2]);\n                return;\n            }\n        case \"HEAD\":\n            if (path.length <= 2 || path[2].isEmpty()) {\n                setOperation(ctx, S3Operation.HEAD_BUCKET);\n                handleContainerExists(request, response, blobStore, path[1]);\n                return;\n            } else {\n                setOperation(ctx, S3Operation.HEAD_OBJECT);\n                handleBlobMetadata(request, response, blobStore, path[1],\n                        path[2]);\n                return;\n            }\n        case \"POST\":\n            if (request.getParameter(\"delete\") != null) {\n                setOperation(ctx, S3Operation.DELETE_OBJECTS);\n                handleMultiBlobRemove(request, response, is, blobStore,\n                        path[1]);\n                return;\n            } else if (request.getParameter(\"uploads\") != null) {\n                setOperation(ctx, S3Operation.CREATE_MULTIPART_UPLOAD);\n                handleInitiateMultipartUpload(request, response, blobStore,\n                        path[1], path[2]);\n                return;\n            } else if (uploadId != null &&\n                    request.getParameter(\"partNumber\") == null) {\n                setOperation(ctx, S3Operation.COMPLETE_MULTIPART_UPLOAD);\n                handleCompleteMultipartUpload(request, response, is, blobStore,\n                        path[1], path[2], uploadId);\n                return;\n            }\n            break;\n        case \"PUT\":\n            if (path.length <= 2 || path[2].isEmpty()) {\n                if (request.getParameter(\"acl\") != null) {\n                    setOperation(ctx, S3Operation.PUT_BUCKET_ACL);\n                    
handleSetContainerAcl(request, response, is, blobStore,\n                            path[1]);\n                    return;\n                }\n                setOperation(ctx, S3Operation.CREATE_BUCKET);\n                handleContainerCreate(request, response, is, blobStore,\n                        path[1]);\n                return;\n            } else if (uploadId != null) {\n                if (request.getHeader(AwsHttpHeaders.COPY_SOURCE) != null) {\n                    setOperation(ctx, S3Operation.UPLOAD_PART_COPY);\n                    handleCopyPart(request, response, blobStore, path[1],\n                            path[2], uploadId);\n                } else {\n                    setOperation(ctx, S3Operation.UPLOAD_PART);\n                    handleUploadPart(request, response, is, blobStore, path[1],\n                            path[2], uploadId);\n                }\n                return;\n            } else if (request.getHeader(AwsHttpHeaders.COPY_SOURCE) != null) {\n                setOperation(ctx, S3Operation.COPY_OBJECT);\n                handleCopyBlob(request, response, is, blobStore, path[1],\n                        path[2]);\n                return;\n            } else {\n                if (request.getParameter(\"acl\") != null) {\n                    setOperation(ctx, S3Operation.PUT_OBJECT_ACL);\n                    handleSetBlobAcl(request, response, is, blobStore, path[1],\n                            path[2]);\n                    return;\n                }\n                setOperation(ctx, S3Operation.PUT_OBJECT);\n                handlePutBlob(request, response, is, blobStore, path[1],\n                        path[2]);\n                return;\n            }\n        case \"OPTIONS\":\n            setOperation(ctx, S3Operation.OPTIONS_OBJECT);\n            handleOptionsBlob(request, response, blobStore, path[1]);\n            return;\n        default:\n            break;\n        }\n        setOperation(ctx, 
S3Operation.UNKNOWN);
        logger.error("Unknown method {} with URI {}",
                method, request.getRequestURI());
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    }

    /**
     * Records the S3 operation on the request context when one is present.
     * No-op when ctx is null, so call sites need not null-check.
     */
    private static void setOperation(@Nullable RequestContext ctx,
            S3Operation operation) {
        if (ctx != null) {
            ctx.setOperation(operation);
        }
    }

    /**
     * Returns whether the given blob may be read anonymously.  Backends
     * without per-blob ACLs (Quirks.NO_BLOB_ACCESS_CONTROL) fall back to the
     * container-level access setting.
     *
     * @throws S3Exception NO_SUCH_BUCKET when the container is missing,
     *         NO_SUCH_KEY when the blob is missing
     */
    private static boolean checkPublicAccess(BlobStore blobStore,
            String containerName, String blobName) throws S3Exception {
        String blobStoreType = getBlobStoreType(blobStore);
        try {
            if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {
                // no blob-level ACL support: consult the container ACL
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                return access == ContainerAccess.PUBLIC_READ;
            }
            BlobAccess access = blobStore.getBlobAccess(containerName,
                    blobName);
            return access == BlobAccess.PUBLIC_READ;
        } catch (ContainerNotFoundException e) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET, e);
        } catch (KeyNotFoundException e) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY, e);
        }
    }

    /**
     * Dispatches an unauthenticated (anonymous) request.  Only listing and
     * reading of public-read containers/blobs is permitted; anything else
     * yields ACCESS_DENIED, and unrecognized methods yield NOT_IMPLEMENTED.
     * Note: setOperation is invoked before each throw so the context records
     * the attempted operation even on failure.
     */
    private void doHandleAnonymous(HttpServletRequest request,
            HttpServletResponse response, InputStream is, String uri,
            BlobStore blobStore, @Nullable RequestContext ctx)
            throws IOException, S3Exception {
        String method = request.getMethod();
        // split into at most 3 parts: path[1] = bucket, path[2] = blob key
        String[] path = uri.split("/", 3);

        if (ctx != null && path.length > 1 && !path[1].isEmpty()) {
            ctx.setBucket(path[1]);
        }

        switch (method) {
        case "GET":
            if (uri.equals("/")) {
                // anonymous callers may not list all buckets
                setOperation(ctx, S3Operation.LIST_BUCKETS);
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            } else if (path.length <= 2 || path[2].isEmpty()) {
                String containerName = path[1];
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                if (access == ContainerAccess.PRIVATE) {
                    setOperation(ctx, S3Operation.LIST_OBJECTS_V2);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.LIST_OBJECTS_V2);
                handleBlobList(request, response, blobStore, containerName);
                return;
            } else {
                String containerName = path[1];
                String blobName = path[2];
                if (!checkPublicAccess(blobStore, containerName, blobName)) {
                    setOperation(ctx, S3Operation.GET_OBJECT);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.GET_OBJECT);
                handleGetBlob(request, response, blobStore, containerName,
                        blobName);
                return;
            }
        case "HEAD":
            if (path.length <= 2 || path[2].isEmpty()) {
                String containerName = path[1];
                ContainerAccess access = blobStore.getContainerAccess(
                        containerName);
                if (access == ContainerAccess.PRIVATE) {
                    setOperation(ctx, S3Operation.HEAD_BUCKET);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.HEAD_BUCKET);
                if (!blobStore.containerExists(containerName)) {
                    throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
                }
            } else {
                String containerName = path[1];
                String blobName = path[2];
                if (!checkPublicAccess(blobStore, containerName, blobName)) {
                    setOperation(ctx, S3Operation.HEAD_OBJECT);
                    throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
                }
                setOperation(ctx, S3Operation.HEAD_OBJECT);
                handleBlobMetadata(request, response, blobStore, containerName,
                        blobName);
            }
            return;
        case "POST":
            if (path.length <= 2 || path[2].isEmpty()) {
                // POST to a bucket delegates to handlePostBlob
                setOperation(ctx, S3Operation.PUT_OBJECT);
                handlePostBlob(request, response, is, blobStore, path[1]);
                return;
            }
            break;
        case "OPTIONS":
            if (uri.equals("/")) {
                setOperation(ctx, S3Operation.OPTIONS_OBJECT);
                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);
            } else {
                String containerName = path[1];
                setOperation(ctx, S3Operation.OPTIONS_OBJECT);
                handleOptionsBlob(request, response, blobStore, containerName);
                return;
            }
        default:
            break;
        }
        setOperation(ctx, S3Operation.UNKNOWN);
        logger.error("Unknown method {} with URI {}",
                method, request.getRequestURI());
        throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
    }

    /**
     * GET ?acl on a bucket: emits a synthetic AccessControlPolicy XML
     * document granting FULL_CONTROL to the fake owner and, when the
     * container is public-read, READ to the AllUsers group.
     */
    private void handleGetContainerAcl(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        if (!blobStore.containerExists(containerName)) {
            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);
        }
        ContainerAccess access = blobStore.getContainerAccess(containerName);

        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("AccessControlPolicy");
            xml.writeDefaultNamespace(AWS_XMLNS);

            writeOwnerStanza(xml);

            xml.writeStartElement("AccessControlList");

            // first grant: fake owner always has FULL_CONTROL
            xml.writeStartElement("Grant");

            xml.writeStartElement("Grantee");
            xml.writeNamespace("xsi",
                    "http://www.w3.org/2001/XMLSchema-instance");
            xml.writeAttribute("xsi:type", "CanonicalUser");

            writeSimpleElement(xml, "ID", FAKE_OWNER_ID);
            writeSimpleElement(xml, "DisplayName",
                    FAKE_OWNER_DISPLAY_NAME);

            xml.writeEndElement();

            writeSimpleElement(xml, "Permission", "FULL_CONTROL");

            xml.writeEndElement();

            // second grant, only for public-read: AllUsers group gets READ
            if (access == ContainerAccess.PUBLIC_READ) {
                xml.writeStartElement("Grant");

                xml.writeStartElement("Grantee");
                xml.writeNamespace("xsi",
                        "http://www.w3.org/2001/XMLSchema-instance");
                xml.writeAttribute("xsi:type", "Group");

                writeSimpleElement(xml, "URI",
                        "http://acs.amazonaws.com/groups/global/AllUsers");

                xml.writeEndElement();

                writeSimpleElement(xml, "Permission", "READ");

                xml.writeEndElement();
            }

            xml.writeEndElement();

            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * PUT ?acl on a bucket: maps the x-amz-acl canned ACL header, or a
     * non-empty AccessControlPolicy request body (which takes precedence),
     * onto jclouds ContainerAccess.  Only "private" and "public-read" are
     * supported; other known canned ACLs yield NOT_IMPLEMENTED and unknown
     * values a 400 response.
     */
    private void handleSetContainerAcl(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName) throws IOException, S3Exception {
        ContainerAccess access;

        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
            access = ContainerAccess.PRIVATE;
        } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
            access = ContainerAccess.PUBLIC_READ;
        } else if (CANNED_ACLS.contains(cannedAcl)) {
            // recognized canned ACL with no jclouds equivalent
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        } else {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }

        // peek one byte to detect an empty body; a non-empty body carries an
        // AccessControlPolicy that overrides the header-derived access
        var pis = new PushbackInputStream(is);
        int ch = pis.read();
        if (ch != -1) {
            pis.unread(ch);
            AccessControlPolicy policy = mapper.readValue(
                    pis, AccessControlPolicy.class);
            String accessString = mapXmlAclsToCannedPolicy(policy);
            if (accessString.equals("private")) {
                access = ContainerAccess.PRIVATE;
            } else if (accessString.equals("public-read")) {
                access = ContainerAccess.PUBLIC_READ;
            } else {
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }

        blobStore.setContainerAccess(containerName, access);
        addCorsResponseHeader(request, response);
    }

    /**
     * GET ?acl on an object: emits a synthetic AccessControlPolicy XML
     * document granting FULL_CONTROL to the fake owner and, when the blob is
     * public-read, READ to the AllUsers group.
     */
    private void handleGetBlobAcl(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore,
            String containerName, String blobName) throws IOException {
        BlobAccess access = blobStore.getBlobAccess(containerName, blobName);

        response.setCharacterEncoding(UTF_8);
        addCorsResponseHeader(request, response);
        try (Writer writer = response.getWriter()) {
            response.setContentType(XML_CONTENT_TYPE);
            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(
                    writer);
            xml.writeStartDocument();
            xml.writeStartElement("AccessControlPolicy");
            xml.writeDefaultNamespace(AWS_XMLNS);

            writeOwnerStanza(xml);

            xml.writeStartElement("AccessControlList");

            // first grant: fake owner always has FULL_CONTROL
            xml.writeStartElement("Grant");

            xml.writeStartElement("Grantee");
            xml.writeNamespace("xsi",
                    "http://www.w3.org/2001/XMLSchema-instance");
            xml.writeAttribute("xsi:type", "CanonicalUser");

            writeSimpleElement(xml, "ID", FAKE_OWNER_ID);
            writeSimpleElement(xml, "DisplayName",
                    FAKE_OWNER_DISPLAY_NAME);

            xml.writeEndElement();

            writeSimpleElement(xml, "Permission", "FULL_CONTROL");

            xml.writeEndElement();

            // second grant, only for public-read blobs
            if (access == BlobAccess.PUBLIC_READ) {
                xml.writeStartElement("Grant");

                xml.writeStartElement("Grantee");
                xml.writeNamespace("xsi",
                        "http://www.w3.org/2001/XMLSchema-instance");
                xml.writeAttribute("xsi:type", "Group");

                writeSimpleElement(xml, "URI",
                        "http://acs.amazonaws.com/groups/global/AllUsers");

                xml.writeEndElement();

                writeSimpleElement(xml, "Permission", "READ");

                xml.writeEndElement();
            }

            xml.writeEndElement();

            xml.writeEndElement();
            xml.flush();
        } catch (XMLStreamException xse) {
            throw new IOException(xse);
        }
    }

    /**
     * PUT ?acl on an object: maps the x-amz-acl canned ACL header, or a
     * non-empty AccessControlPolicy request body (which takes precedence),
     * onto jclouds BlobAccess.
     */
    private void handleSetBlobAcl(HttpServletRequest request,
            HttpServletResponse response, InputStream is, BlobStore blobStore,
            String containerName, String blobName)
            throws IOException, S3Exception {
        BlobAccess access;

        String cannedAcl =
request.getHeader(AwsHttpHeaders.ACL);
        if (cannedAcl == null || "private".equalsIgnoreCase(cannedAcl)) {
            access = BlobAccess.PRIVATE;
        } else if ("public-read".equalsIgnoreCase(cannedAcl)) {
            access = BlobAccess.PUBLIC_READ;
        } else if (CANNED_ACLS.contains(cannedAcl)) {
            // recognized canned ACL with no jclouds equivalent
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        } else {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }

        // peek one byte to detect an empty body; a non-empty body carries an
        // AccessControlPolicy that overrides the header-derived access
        var pis = new PushbackInputStream(is);
        int ch = pis.read();
        if (ch != -1) {
            pis.unread(ch);
            AccessControlPolicy policy = mapper.readValue(
                    pis, AccessControlPolicy.class);
            String accessString = mapXmlAclsToCannedPolicy(policy);
            if (accessString.equals("private")) {
                access = BlobAccess.PRIVATE;
            } else if (accessString.equals("public-read")) {
                access = BlobAccess.PUBLIC_READ;
            } else {
                throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
            }
        }

        blobStore.setBlobAccess(containerName, blobName, access);
        addCorsResponseHeader(request, response);
    }

    /**
     * Maps XML ACLs to a canned policy ("private" or "public-read") if an
     * exact transformation exists.
     *
     * @throws S3Exception NOT_IMPLEMENTED when the policy names a different
     *         owner, contains any grant other than owner FULL_CONTROL or
     *         AllUsers READ, or lacks the owner FULL_CONTROL grant
     */
    private static String mapXmlAclsToCannedPolicy(
            AccessControlPolicy policy) throws S3Exception {
        if (!policy.owner.id.equals(FAKE_OWNER_ID)) {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }

        boolean ownerFullControl = false;
        boolean allUsersRead = false;
        if (policy.aclList != null) {
            for (AccessControlPolicy.AccessControlList.Grant grant :
                    policy.aclList.grants) {
                if (grant.grantee.type.equals("CanonicalUser") &&
                        grant.grantee.id.equals(FAKE_OWNER_ID) &&
                        grant.permission.equals("FULL_CONTROL")) {
                    ownerFullControl = true;
                } else if (grant.grantee.type.equals("Group") &&
                        grant.grantee.uri.equals("http://acs.amazonaws.com/" +
                                "groups/global/AllUsers") &&
                        grant.permission.equals("READ")) {
                    allUsersRead = true;
                } else {
                    // any other grant has no canned-ACL equivalent
                    throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
                }
            }
        }

        if (ownerFullControl) {
            if (allUsersRead) {
                return "public-read";
            }
            return "private";
        } else {
            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);
        }
    }

    private void handleContainerList(HttpServletRequest request,
            HttpServletResponse response, BlobStore blobStore)
            throws IOException {
        PageSet<?
extends StorageMetadata> buckets = blobStore.list();\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"ListAllMyBucketsResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            writeOwnerStanza(xml);\n\n            xml.writeStartElement(\"Buckets\");\n            for (StorageMetadata metadata : buckets) {\n                xml.writeStartElement(\"Bucket\");\n\n                writeSimpleElement(xml, \"Name\", metadata.getName());\n\n                Date creationDate = metadata.getCreationDate();\n                if (creationDate == null) {\n                    // Some providers, e.g., Swift, do not provide container\n                    // creation date.  
Emit a bogus one to satisfy clients like\n                    // s3cmd which require one.\n                    creationDate = new Date(0);\n                }\n                writeSimpleElement(xml, \"CreationDate\",\n                        blobStore.getContext().utils().date()\n                                .iso8601DateFormat(creationDate).trim());\n\n                xml.writeEndElement();\n            }\n            xml.writeEndElement();\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleContainerLocation(HttpServletRequest request,\n            HttpServletResponse response) throws IOException {\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            // TODO: using us-standard semantics but could emit actual location\n            xml.writeStartElement(\"LocationConstraint\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private static void handleBucketPolicy(BlobStore blobStore,\n            String containerName) throws S3Exception {\n        if (!blobStore.containerExists(containerName)) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);\n        }\n        throw new S3Exception(S3ErrorCode.NO_SUCH_POLICY);\n    }\n\n    private void handleListMultipartUploads(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String container) throws IOException, S3Exception {\n        if 
(request.getParameter(\"delimiter\") != null ||\n                request.getParameter(\"max-uploads\") != null ||\n                request.getParameter(\"key-marker\") != null ||\n                request.getParameter(\"upload-id-marker\") != null) {\n            throw new UnsupportedOperationException();\n        }\n\n        String encodingType = request.getParameter(\"encoding-type\");\n        String prefix = request.getParameter(\"prefix\");\n\n        List<MultipartUpload> uploads = blobStore.listMultipartUploads(\n                container);\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"ListMultipartUploadsResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            writeSimpleElement(xml, \"Bucket\", container);\n\n            // TODO: bogus values\n            xml.writeEmptyElement(\"KeyMarker\");\n            xml.writeEmptyElement(\"UploadIdMarker\");\n            xml.writeEmptyElement(\"NextKeyMarker\");\n            xml.writeEmptyElement(\"NextUploadIdMarker\");\n            xml.writeEmptyElement(\"Delimiter\");\n\n            if (Strings.isNullOrEmpty(prefix)) {\n                xml.writeEmptyElement(\"Prefix\");\n            } else {\n                writeSimpleElement(xml, \"Prefix\", encodeBlob(\n                        encodingType, prefix));\n            }\n\n            writeSimpleElement(xml, \"MaxUploads\", \"1000\");\n            writeSimpleElement(xml, \"IsTruncated\", \"false\");\n\n            for (MultipartUpload upload : uploads) {\n                if (prefix != null &&\n                    !upload.blobName().startsWith(prefix)) {\n                    continue;\n                }\n\n   
             xml.writeStartElement(\"Upload\");\n\n                writeSimpleElement(xml, \"Key\", upload.blobName());\n                writeSimpleElement(xml, \"UploadId\", upload.id());\n                writeInitiatorStanza(xml);\n                writeOwnerStanza(xml);\n                // TODO: bogus value\n                writeSimpleElement(xml, \"StorageClass\", \"STANDARD\");\n\n                // TODO: bogus value\n                writeSimpleElement(xml, \"Initiated\",\n                        blobStore.getContext().utils().date()\n                                .iso8601DateFormat(new Date()));\n\n                xml.writeEndElement();\n            }\n\n            // TODO: CommonPrefixes\n\n            xml.writeEndElement();\n\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleContainerExists(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName) throws IOException, S3Exception {\n        if (!blobStore.containerExists(containerName)) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);\n        }\n        addCorsResponseHeader(request, response);\n    }\n\n    private void handleContainerCreate(HttpServletRequest request,\n            HttpServletResponse response, InputStream is, BlobStore blobStore,\n            String containerName) throws IOException, S3Exception {\n        if (containerName.isEmpty()) {\n            throw new S3Exception(S3ErrorCode.METHOD_NOT_ALLOWED);\n        }\n\n        String contentLengthString = request.getHeader(\n                HttpHeaders.CONTENT_LENGTH);\n        if (contentLengthString != null) {\n            long contentLength;\n            try {\n                contentLength = Long.parseLong(contentLengthString);\n            } catch (NumberFormatException nfe) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, 
nfe);\n            }\n            if (contentLength < 0) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n            }\n        }\n\n        String locationString;\n        try (PushbackInputStream pis = new PushbackInputStream(is)) {\n            int ch = pis.read();\n            if (ch == -1) {\n                // handle empty bodies\n                locationString = null;\n            } else {\n                pis.unread(ch);\n                CreateBucketRequest cbr = mapper.readValue(\n                        pis, CreateBucketRequest.class);\n                locationString = cbr.locationConstraint;\n            }\n        }\n\n        Location location = null;\n        if (locationString != null) {\n            for (Location loc : blobStore.listAssignableLocations()) {\n                if (loc.getId().equalsIgnoreCase(locationString)) {\n                    location = loc;\n                    break;\n                }\n            }\n            if (location == null) {\n                throw new S3Exception(S3ErrorCode.INVALID_LOCATION_CONSTRAINT);\n            }\n        }\n        logger.debug(\"Creating bucket with location: {}\", location);\n\n        var options = new CreateContainerOptions();\n        String acl = request.getHeader(AwsHttpHeaders.ACL);\n        if (\"public-read\".equalsIgnoreCase(acl)) {\n            options.publicRead();\n        }\n\n        boolean created;\n        try {\n            created = blobStore.createContainerInLocation(location,\n                    containerName, options);\n        } catch (AuthorizationException ae) {\n            if (ae.getCause() instanceof AccessDeniedException) {\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED,\n                        \"Could not create bucket\", ae);\n            }\n            throw new S3Exception(S3ErrorCode.BUCKET_ALREADY_EXISTS, ae);\n        }\n        if (!created) {\n            throw new 
S3Exception(S3ErrorCode.BUCKET_ALREADY_OWNED_BY_YOU,\n                    S3ErrorCode.BUCKET_ALREADY_OWNED_BY_YOU.getMessage(),\n                    null, Map.of(\"BucketName\", containerName));\n        }\n\n        response.addHeader(HttpHeaders.LOCATION, \"/\" + containerName);\n        addCorsResponseHeader(request, response);\n    }\n\n    private void handleContainerDelete(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName) throws IOException, S3Exception {\n        if (!blobStore.containerExists(containerName)) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_BUCKET);\n        }\n\n        String blobStoreType = getBlobStoreType(blobStore);\n        if (blobStoreType.equals(\"b2\")) {\n            // S3 allows deleting a container with in-progress MPU while B2 does\n            // not.  Explicitly cancel uploads for B2.\n            for (MultipartUpload mpu : blobStore.listMultipartUploads(\n                    containerName)) {\n                blobStore.abortMultipartUpload(mpu);\n            }\n        }\n\n        if (!blobStore.deleteContainerIfEmpty(containerName)) {\n            throw new S3Exception(S3ErrorCode.BUCKET_NOT_EMPTY);\n        }\n\n        addCorsResponseHeader(request, response);\n        response.setStatus(HttpServletResponse.SC_NO_CONTENT);\n    }\n\n    private void handleBlobList(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName) throws IOException, S3Exception {\n        String blobStoreType = getBlobStoreType(blobStore);\n        var options = new ListContainerOptions();\n        String encodingType = request.getParameter(\"encoding-type\");\n        String delimiter = request.getParameter(\"delimiter\");\n        if (delimiter != null) {\n            options.delimiter(delimiter);\n        } else {\n            options.recursive();\n        }\n        String prefix = 
request.getParameter(\"prefix\");\n        if (prefix != null && !prefix.isEmpty()) {\n            options.prefix(prefix);\n        }\n\n        boolean isListV2 = false;\n        String marker;\n        String listType = request.getParameter(\"list-type\");\n        String continuationToken = request.getParameter(\"continuation-token\");\n        String startAfter = request.getParameter(\"start-after\");\n        if (listType == null) {\n            marker = request.getParameter(\"marker\");\n        } else if (listType.equals(\"2\")) {\n            isListV2 = true;\n            if (continuationToken != null && startAfter != null) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n            }\n            if (continuationToken != null) {\n                marker = continuationToken;\n            } else {\n                marker = startAfter;\n            }\n        } else {\n            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n        }\n        if (marker != null) {\n            if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) {\n                String realMarker = lastKeyToMarker.getIfPresent(\n                        Map.entry(containerName, marker));\n                if (realMarker != null) {\n                    marker = realMarker;\n                }\n            }\n            options.afterMarker(marker);\n        }\n\n        boolean fetchOwner = !isListV2 ||\n                \"true\".equals(request.getParameter(\"fetch-owner\"));\n\n        int maxKeys = 1000;\n        String maxKeysString = request.getParameter(\"max-keys\");\n        if (maxKeysString != null) {\n            try {\n                maxKeys = Integer.parseInt(maxKeysString);\n            } catch (NumberFormatException nfe) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);\n            }\n            if (maxKeys > 1000) {\n                maxKeys = 1000;\n            }\n        }\n        options.maxResults(maxKeys);\n\n       
 PageSet<? extends StorageMetadata> set = blobStore.list(containerName,\n                options);\n\n        addCorsResponseHeader(request, response);\n\n        response.setCharacterEncoding(UTF_8);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"ListBucketResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            writeSimpleElement(xml, \"Name\", containerName);\n\n            if (prefix == null) {\n                xml.writeEmptyElement(\"Prefix\");\n            } else {\n                writeSimpleElement(xml, \"Prefix\", encodeBlob(\n                        encodingType, prefix));\n            }\n\n            if (isListV2) {\n                writeSimpleElement(xml, \"KeyCount\", String.valueOf(set.size()));\n            }\n            writeSimpleElement(xml, \"MaxKeys\", String.valueOf(maxKeys));\n\n            if (!isListV2) {\n                if (marker == null) {\n                    xml.writeEmptyElement(\"Marker\");\n                } else {\n                    writeSimpleElement(xml, \"Marker\", encodeBlob(\n                            encodingType, marker));\n                }\n            } else {\n                if (continuationToken == null) {\n                    xml.writeEmptyElement(\"ContinuationToken\");\n                } else {\n                    writeSimpleElement(xml, \"ContinuationToken\", encodeBlob(\n                            encodingType, continuationToken));\n                }\n                if (startAfter == null) {\n                    xml.writeEmptyElement(\"StartAfter\");\n                } else {\n                    writeSimpleElement(xml, \"StartAfter\", encodeBlob(\n                            encodingType, startAfter));\n                }\n            }\n\n       
     if (!Strings.isNullOrEmpty(delimiter)) {\n                writeSimpleElement(xml, \"Delimiter\", encodeBlob(\n                        encodingType, delimiter));\n            }\n\n            if (encodingType != null && encodingType.equals(\"url\")) {\n                writeSimpleElement(xml, \"EncodingType\", encodingType);\n            }\n\n            String nextMarker = set.getNextMarker();\n            if (nextMarker != null) {\n                writeSimpleElement(xml, \"IsTruncated\", \"true\");\n                writeSimpleElement(xml,\n                        isListV2 ? \"NextContinuationToken\" : \"NextMarker\",\n                        encodeBlob(encodingType, nextMarker));\n                if (Quirks.OPAQUE_MARKERS.contains(blobStoreType)) {\n                    StorageMetadata sm = Streams.findLast(\n                            set.stream()).orElse(null);\n                    if (sm != null) {\n                        lastKeyToMarker.put(Map.entry(\n                                containerName,\n                                encodeBlob(encodingType, nextMarker)),\n                                nextMarker);\n                    }\n                }\n            } else {\n                writeSimpleElement(xml, \"IsTruncated\", \"false\");\n            }\n\n            Set<String> commonPrefixes = new TreeSet<>();\n            for (StorageMetadata metadata : set) {\n                switch (metadata.getType()) {\n                case FOLDER:\n                    // fallthrough\n                case RELATIVE_PATH:\n                    if (delimiter != null) {\n                        commonPrefixes.add(metadata.getName());\n                        continue;\n                    }\n                    break;\n                default:\n                    break;\n                }\n\n                xml.writeStartElement(\"Contents\");\n\n                writeSimpleElement(xml, \"Key\", encodeBlob(encodingType,\n                        
metadata.getName()));\n\n                Date lastModified = metadata.getLastModified();\n                if (lastModified != null) {\n                    writeSimpleElement(xml, \"LastModified\",\n                            formatDate(lastModified));\n                }\n\n                String eTag = metadata.getETag();\n                if (eTag != null) {\n                    writeSimpleElement(xml, \"ETag\", maybeQuoteETag(eTag));\n                }\n\n                Long size = metadata.getSize();\n                if (size != null) {\n                    writeSimpleElement(xml, \"Size\", String.valueOf(size));\n                }\n\n                Tier tier = metadata.getTier();\n                if (tier != null) {\n                    writeSimpleElement(xml, \"StorageClass\",\n                            StorageClass.fromTier(tier).toString());\n                }\n\n                if (fetchOwner) {\n                    writeOwnerStanza(xml);\n                }\n\n                xml.writeEndElement();\n            }\n\n            for (String commonPrefix : commonPrefixes) {\n                xml.writeStartElement(\"CommonPrefixes\");\n\n                writeSimpleElement(xml, \"Prefix\", encodeBlob(encodingType,\n                        commonPrefix));\n\n                xml.writeEndElement();\n            }\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleBlobRemove(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String blobName)\n            throws IOException, S3Exception {\n        blobStore.removeBlob(containerName, blobName);\n        addCorsResponseHeader(request, response);\n        response.sendError(HttpServletResponse.SC_NO_CONTENT);\n    }\n\n    private void handleMultiBlobRemove(HttpServletRequest request,\n            
HttpServletResponse response, InputStream is,\n            BlobStore blobStore, String containerName)\n            throws IOException, S3Exception {\n        String contentMD5String = request.getHeader(HttpHeaders.CONTENT_MD5);\n        if (contentMD5String == null) {\n            throw new S3Exception(S3ErrorCode.INVALID_REQUEST,\n                    \"Missing required header for this request: Content-Md5\");\n        }\n        HashCode expected;\n        try {\n            expected = HashCode.fromBytes(\n                    Base64.getDecoder().decode(contentMD5String));\n        } catch (IllegalArgumentException iae) {\n            throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae);\n        }\n        if (expected.bits() != MD5.bits()) {\n            throw new S3Exception(S3ErrorCode.INVALID_DIGEST);\n        }\n        byte[] body = is.readAllBytes();\n        HashCode actual = MD5.hashBytes(body);\n        if (!expected.equals(actual)) {\n            throw new S3Exception(S3ErrorCode.BAD_DIGEST);\n        }\n        DeleteMultipleObjectsRequest dmor = mapper.readValue(\n                body, DeleteMultipleObjectsRequest.class);\n        if (dmor.objects == null) {\n            throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L);\n        }\n\n        if (dmor.objects.size() > 1_000) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n        }\n\n        Collection<String> blobNames = new ArrayList<>();\n        for (DeleteMultipleObjectsRequest.S3Object s3Object :\n                dmor.objects) {\n            blobNames.add(s3Object.key);\n        }\n\n        blobStore.removeBlobs(containerName, blobNames);\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            
xml.writeStartDocument();\n            xml.writeStartElement(\"DeleteResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            if (!dmor.quiet) {\n                for (String blobName : blobNames) {\n                    xml.writeStartElement(\"Deleted\");\n\n                    writeSimpleElement(xml, \"Key\", blobName);\n\n                    xml.writeEndElement();\n                }\n            }\n\n            // TODO: emit error stanza\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleBlobMetadata(HttpServletRequest request,\n            HttpServletResponse response,\n            BlobStore blobStore, String containerName,\n            String blobName) throws IOException, S3Exception {\n        BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName);\n        if (metadata == null) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);\n        }\n\n        // BlobStore.blobMetadata does not support GetOptions so we emulate\n        // conditional requests.\n        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);\n        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);\n        long ifModifiedSince = request.getDateHeader(\n                HttpHeaders.IF_MODIFIED_SINCE);\n        long ifUnmodifiedSince = request.getDateHeader(\n                HttpHeaders.IF_UNMODIFIED_SINCE);\n\n        String eTag = metadata.getETag();\n        if (eTag != null) {\n            eTag = maybeQuoteETag(eTag);\n            if (ifMatch != null && !ifMatch.equals(eTag)) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            if (ifNoneMatch != null && ifNoneMatch.equals(eTag)) {\n                response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);\n                return;\n            }\n        }\n\n        Date 
lastModified = metadata.getLastModified();\n        if (lastModified != null) {\n            if (ifModifiedSince != -1 && lastModified.compareTo(\n                    new Date(ifModifiedSince)) <= 0) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            if (ifUnmodifiedSince != -1 && lastModified.compareTo(\n                    new Date(ifUnmodifiedSince)) >= 0) {\n                response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);\n                return;\n            }\n        }\n\n        response.setStatus(HttpServletResponse.SC_OK);\n        addMetadataToResponse(request, response, metadata);\n        addCorsResponseHeader(request, response);\n    }\n\n    private void handleOptionsBlob(HttpServletRequest request,\n            HttpServletResponse response,\n            BlobStore blobStore,\n            String containerName) throws IOException, S3Exception {\n        if (!blobStore.containerExists(containerName)) {\n            // Don't leak internal information, although authenticated\n            throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n        }\n\n        String corsOrigin = request.getHeader(HttpHeaders.ORIGIN);\n        if (Strings.isNullOrEmpty(corsOrigin)) {\n            throw new S3Exception(S3ErrorCode.INVALID_CORS_ORIGIN);\n        }\n        if (!corsRules.isOriginAllowed(corsOrigin)) {\n            throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n        }\n\n        String corsMethod = request.getHeader(\n                HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD);\n        if (!corsRules.isMethodAllowed(corsMethod)) {\n            throw new S3Exception(S3ErrorCode.INVALID_CORS_METHOD);\n        }\n\n        String corsHeaders = request.getHeader(\n                HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS);\n        if (!Strings.isNullOrEmpty(corsHeaders)) {\n            if (corsRules.isEveryHeaderAllowed(corsHeaders)) {\n                
response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS,\n                        corsHeaders);\n            } else {\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n        }\n\n        response.addHeader(HttpHeaders.VARY, HttpHeaders.ORIGIN);\n        response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN,\n                corsRules.getAllowedOrigin(corsOrigin));\n        response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS,\n                corsRules.getAllowedMethods());\n\n        String exposedHeaders = corsRules.getExposedHeaders();\n        if (!Strings.isNullOrEmpty(exposedHeaders)) {\n            response.addHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS,\n                exposedHeaders);\n        }\n\n        response.setStatus(HttpServletResponse.SC_OK);\n    }\n\n    private void handleGetBlob(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String blobName)\n            throws IOException, S3Exception {\n        int status = HttpServletResponse.SC_OK;\n        var options = new GetOptions();\n\n        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);\n        if (ifMatch != null) {\n            options.ifETagMatches(ifMatch);\n        }\n\n        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);\n        if (ifNoneMatch != null) {\n            options.ifETagDoesntMatch(ifNoneMatch);\n        }\n\n        long ifModifiedSince = request.getDateHeader(\n                HttpHeaders.IF_MODIFIED_SINCE);\n        if (ifModifiedSince != -1) {\n            options.ifModifiedSince(new Date(ifModifiedSince));\n        }\n\n        long ifUnmodifiedSince = request.getDateHeader(\n                HttpHeaders.IF_UNMODIFIED_SINCE);\n        if (ifUnmodifiedSince != -1) {\n            options.ifUnmodifiedSince(new Date(ifUnmodifiedSince));\n        }\n\n        String range = 
request.getHeader(HttpHeaders.RANGE);\n        if (range != null && range.startsWith(\"bytes=\") &&\n                // ignore multiple ranges\n                range.indexOf(',') == -1) {\n            range = range.substring(\"bytes=\".length());\n            String[] ranges = range.split(\"-\", 2);\n            if (ranges[0].isEmpty()) {\n                options.tail(Long.parseLong(ranges[1]));\n            } else if (ranges[1].isEmpty()) {\n                options.startAt(Long.parseLong(ranges[0]));\n            } else {\n                options.range(Long.parseLong(ranges[0]),\n                        Long.parseLong(ranges[1]));\n            }\n            status = HttpServletResponse.SC_PARTIAL_CONTENT;\n        }\n\n        Blob blob = blobStore.getBlob(containerName, blobName, options);\n        if (blob == null) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);\n        }\n\n        response.setStatus(status);\n\n        addCorsResponseHeader(request, response);\n\n        addMetadataToResponse(request, response, blob.getMetadata());\n\n        // TODO: handles only a single range due to jclouds limitations\n        var headers = new CaseInsensitiveImmutableMultimap(\n                blob.getAllHeaders());\n        Collection<String> contentRanges =\n                headers.get(HttpHeaders.CONTENT_RANGE);\n        if (!contentRanges.isEmpty()) {\n            response.addHeader(HttpHeaders.CONTENT_RANGE,\n                    contentRanges.iterator().next());\n            response.addHeader(HttpHeaders.ACCEPT_RANGES,\n                    \"bytes\");\n        }\n\n        try (InputStream is = blob.getPayload().openStream();\n             OutputStream os = response.getOutputStream()) {\n            is.transferTo(os);\n            os.flush();\n        }\n    }\n\n    private void handleCopyBlob(HttpServletRequest request,\n            HttpServletResponse response, InputStream is, BlobStore blobStore,\n            String destContainerName, String 
destBlobName)\n            throws IOException, S3Exception {\n        String copySourceHeader = request.getHeader(AwsHttpHeaders.COPY_SOURCE);\n        copySourceHeader = URLDecoder.decode(\n                copySourceHeader, StandardCharsets.UTF_8);\n        if (copySourceHeader.startsWith(\"/\")) {\n            // Some clients like boto do not include the leading slash\n            copySourceHeader = copySourceHeader.substring(1);\n        }\n        String[] path = copySourceHeader.split(\"/\", 2);\n        if (path.length != 2) {\n            throw new S3Exception(S3ErrorCode.INVALID_REQUEST);\n        }\n        String sourceContainerName = path[0];\n        String sourceBlobName = path[1];\n        boolean replaceMetadata = \"REPLACE\".equalsIgnoreCase(request.getHeader(\n                AwsHttpHeaders.METADATA_DIRECTIVE));\n\n        if (sourceContainerName.equals(destContainerName) &&\n                sourceBlobName.equals(destBlobName) &&\n                !replaceMetadata) {\n            throw new S3Exception(S3ErrorCode.INVALID_REQUEST);\n        }\n\n        CopyOptions.Builder options = CopyOptions.builder();\n\n        String ifMatch = request.getHeader(AwsHttpHeaders.COPY_SOURCE_IF_MATCH);\n        if (ifMatch != null) {\n            options.ifMatch(ifMatch);\n        }\n        String ifNoneMatch = request.getHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH);\n        if (ifNoneMatch != null) {\n            options.ifNoneMatch(ifNoneMatch);\n        }\n        long ifModifiedSince = request.getDateHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE);\n        if (ifModifiedSince != -1) {\n            options.ifModifiedSince(new Date(ifModifiedSince));\n        }\n        long ifUnmodifiedSince = request.getDateHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE);\n        if (ifUnmodifiedSince != -1) {\n            options.ifUnmodifiedSince(new Date(ifUnmodifiedSince));\n        }\n\n        if 
(replaceMetadata) {\n            ContentMetadataBuilder contentMetadata =\n                    ContentMetadataBuilder.create();\n            var userMetadata = ImmutableMap.<String, String>builder();\n            for (String headerName : Collections.list(\n                    request.getHeaderNames())) {\n                String headerValue = Strings.nullToEmpty(request.getHeader(\n                        headerName));\n                if (headerName.equalsIgnoreCase(\n                        HttpHeaders.CACHE_CONTROL)) {\n                    contentMetadata.cacheControl(headerValue);\n                } else if (headerName.equalsIgnoreCase(\n                        HttpHeaders.CONTENT_DISPOSITION)) {\n                    contentMetadata.contentDisposition(headerValue);\n                } else if (headerName.equalsIgnoreCase(\n                        HttpHeaders.CONTENT_ENCODING)) {\n                    contentMetadata.contentEncoding(headerValue);\n                } else if (headerName.equalsIgnoreCase(\n                        HttpHeaders.CONTENT_LANGUAGE)) {\n                    contentMetadata.contentLanguage(headerValue);\n                } else if (headerName.equalsIgnoreCase(\n                        HttpHeaders.CONTENT_TYPE)) {\n                    contentMetadata.contentType(headerValue);\n                } else if (startsWithIgnoreCase(headerName,\n                        USER_METADATA_PREFIX)) {\n                    userMetadata.put(\n                            headerName.substring(USER_METADATA_PREFIX.length()),\n                            headerValue);\n                }\n                // TODO: Expires\n            }\n            options.contentMetadata(contentMetadata.build());\n            options.userMetadata(userMetadata.build());\n        }\n\n        String eTag;\n        try {\n            eTag = blobStore.copyBlob(\n                    sourceContainerName, sourceBlobName,\n                    destContainerName, destBlobName, 
options.build());\n        } catch (KeyNotFoundException knfe) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY, knfe);\n        }\n\n        // TODO: jclouds should include this in CopyOptions\n        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);\n        if (cannedAcl != null && !cannedAcl.equalsIgnoreCase(\"private\")) {\n            handleSetBlobAcl(request, response, is, blobStore,\n                    destContainerName, destBlobName);\n        }\n\n        BlobMetadata blobMetadata = blobStore.blobMetadata(destContainerName,\n                destBlobName);\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"CopyObjectResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            var lastModified = blobMetadata.getLastModified();\n            if (lastModified != null) {\n                writeSimpleElement(xml, \"LastModified\",\n                        formatDate(lastModified));\n            }\n\n            writeSimpleElement(xml, \"ETag\", maybeQuoteETag(eTag));\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handlePutBlob(HttpServletRequest request,\n            HttpServletResponse response, InputStream is, BlobStore blobStore,\n            String containerName, String blobName)\n            throws IOException, S3Exception {\n        // Flag headers present since HttpServletResponse.getHeader returns\n        // null for empty headers values.\n        String contentLengthString = null;\n        String decodedContentLengthString = null;\n        String 
contentMD5String = null;\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            String headerValue = Strings.nullToEmpty(request.getHeader(\n                    headerName));\n            if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH)) {\n                contentLengthString = headerValue;\n            } else if (headerName.equalsIgnoreCase(\n                    AwsHttpHeaders.DECODED_CONTENT_LENGTH)) {\n                decodedContentLengthString = headerValue;\n            } else if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_MD5)) {\n                contentMD5String = headerValue;\n            }\n        }\n        if (decodedContentLengthString != null) {\n            contentLengthString = decodedContentLengthString;\n        }\n\n        HashCode contentMD5 = null;\n        if (contentMD5String != null) {\n            try {\n                contentMD5 = HashCode.fromBytes(\n                        Base64.getDecoder().decode(contentMD5String));\n            } catch (IllegalArgumentException iae) {\n                throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae);\n            }\n            if (contentMD5.bits() != MD5.bits()) {\n                throw new S3Exception(S3ErrorCode.INVALID_DIGEST);\n            }\n        }\n\n        if (contentLengthString == null) {\n            throw new S3Exception(S3ErrorCode.MISSING_CONTENT_LENGTH);\n        }\n        long contentLength;\n        try {\n            contentLength = Long.parseLong(contentLengthString);\n        } catch (NumberFormatException nfe) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);\n        }\n        if (contentLength < 0) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n        }\n        if (contentLength > maxSinglePartObjectSize) {\n            throw new S3Exception(S3ErrorCode.ENTITY_TOO_LARGE);\n        }\n        if (decodedContentLengthString != null) {\n            is = 
ByteStreams.limit(is, contentLength);\n        }\n\n        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);\n        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);\n        String blobStoreType = getBlobStoreType(blobStore);\n\n        // Azure only supports If-None-Match: *, not If-Match: *\n        // Handle If-Match: * manually for the azureblob-sdk provider.\n        // Note: this is a non-atomic operation (HEAD then PUT).\n        if (ifMatch != null && ifMatch.equals(\"*\") &&\n                blobStoreType.equals(\"azureblob-sdk\")) {\n            BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName);\n            if (metadata == null) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            ifMatch = null;\n        }\n\n        // Providers that support native conditional writes\n        boolean supportsNativeConditionalWrites =\n                blobStoreType.equals(\"azureblob-sdk\") ||\n                blobStoreType.equals(\"aws-s3-sdk\") ||\n                blobStoreType.equals(\"google-cloud-storage-sdk\");\n\n        // Emulate conditional put for backends without native support.\n        // Note: this is a non-atomic operation (HEAD then PUT).\n        if ((ifMatch != null || ifNoneMatch != null) &&\n                !supportsNativeConditionalWrites) {\n            BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName);\n            if (ifMatch != null) {\n                if (ifMatch.equals(\"*\")) {\n                    if (metadata == null) {\n                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n                    }\n                } else {\n                    if (metadata == null) {\n                        throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);\n                    }\n                    String eTag = metadata.getETag();\n                    if (eTag != null) {\n                        eTag = 
maybeQuoteETag(eTag);\n                        if (!equalsIgnoringSurroundingQuotes(ifMatch, eTag)) {\n                            throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n                        }\n                    } else {\n                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n                    }\n                }\n            }\n\n            if (ifNoneMatch != null) {\n                if (ifNoneMatch.equals(\"*\")) {\n                    if (metadata != null) {\n                        throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n                    }\n                } else if (metadata != null) {\n                    String eTag = metadata.getETag();\n                    if (eTag != null) {\n                        eTag = maybeQuoteETag(eTag);\n                        if (equalsIgnoringSurroundingQuotes(ifNoneMatch, eTag)) {\n                            throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n                        }\n                    }\n                }\n            }\n        }\n\n        BlobAccess access;\n        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);\n        if (cannedAcl == null || cannedAcl.equalsIgnoreCase(\"private\")) {\n            access = BlobAccess.PRIVATE;\n        } else if (cannedAcl.equalsIgnoreCase(\"public-read\")) {\n            access = BlobAccess.PUBLIC_READ;\n        } else if (CANNED_ACLS.contains(cannedAcl)) {\n            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n        } else {\n            response.sendError(HttpServletResponse.SC_BAD_REQUEST);\n            return;\n        }\n\n        var options = new PutOptions2()\n                .setBlobAccess(access)\n                .setIfMatch(ifMatch)\n                .setIfNoneMatch(ifNoneMatch);\n        if (blobStoreType.equals(\"azureblob\") &&\n                contentLength > 256 * 1024 * 1024) {\n            options.multipart(true);\n        }\n\n        String 
eTag;\n        BlobBuilder.PayloadBlobBuilder builder = blobStore\n                .blobBuilder(blobName)\n                .payload(is)\n                .contentLength(contentLength);\n\n        String storageClass = request.getHeader(AwsHttpHeaders.STORAGE_CLASS);\n        if (storageClass == null || storageClass.equalsIgnoreCase(\"STANDARD\")) {\n            // defaults to STANDARD\n        } else {\n            builder.tier(StorageClass.valueOf(storageClass).toTier());\n        }\n\n        addContentMetadataFromHttpRequest(builder, request);\n        if (contentMD5 != null) {\n            builder = builder.contentMD5(contentMD5);\n        }\n\n        eTag = blobStore.putBlob(containerName, builder.build(),\n                options);\n\n        addCorsResponseHeader(request, response);\n\n        response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(eTag));\n    }\n\n    private void handleStatuszRequest(HttpServletResponse response)\n            throws IOException {\n        response.setStatus(HttpServletResponse.SC_OK);\n        response.setContentType(\"application/json\");\n        response.setCharacterEncoding(UTF_8);\n\n        Map<String, String> body = ImmutableMap.of(\n                \"status\", \"OK\",\n                \"gitHash\", GIT_HASH,\n                \"launchTime\", LAUNCH_TIME.toString(),\n                \"currentTime\", Instant.now().toString());\n\n        try (PrintWriter writer = response.getWriter()) {\n            JSON_MAPPER.writeValue(writer, body);\n        }\n    }\n\n    private static String loadGitHash() {\n        try (InputStream stream = S3ProxyHandler.class.getClassLoader()\n                .getResourceAsStream(\"git.properties\")) {\n            if (stream == null) {\n                return \"unknown\";\n            }\n            Properties properties = new Properties();\n            properties.load(stream);\n            String hash = properties.getProperty(\"git.commit.id.abbrev\");\n            if (hash == null) {\n       
         hash = properties.getProperty(\"git.commit.id\", \"unknown\");\n            }\n            return hash;\n        } catch (IOException ioe) {\n            logger.debug(\"Unable to load git.properties\", ioe);\n            return \"unknown\";\n        }\n    }\n\n    private void handlePostBlob(HttpServletRequest request,\n            HttpServletResponse response, InputStream is, BlobStore blobStore,\n            String containerName)\n            throws IOException, S3Exception {\n        String boundaryHeader = request.getHeader(HttpHeaders.CONTENT_TYPE);\n        if (boundaryHeader == null ||\n                !boundaryHeader.startsWith(\"multipart/form-data; boundary=\")) {\n            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);\n            return;\n        }\n        String boundary =\n                boundaryHeader.substring(boundaryHeader.indexOf('=') + 1);\n\n        String blobName = null;\n        String contentType = null;\n        String identity = null;\n        // TODO: handle policy\n        byte[] policy = null;\n        String signature = null;\n        String algorithm = null;\n        byte[] payload = null;\n        var parser = new MultiPartFormData.Parser(boundary);\n        parser.setFilesDirectory(java.nio.file.Path.of(\n                System.getProperty(\"java.io.tmpdir\")));\n        MultiPartFormData.Parts parts = parser.parse(\n                new InputStreamContentSource(is)).join();\n        try {\n            for (var part : parts) {\n                var header = part.getName();\n                if (header.equalsIgnoreCase(\"acl\")) {\n                    // TODO: acl\n                } else if (header.equalsIgnoreCase(\"AWSAccessKeyId\") ||\n                        header.equalsIgnoreCase(\"X-Amz-Credential\")) {\n                    identity = part.getContentAsString(\n                            StandardCharsets.UTF_8);\n                } else if (header.equalsIgnoreCase(\"Content-Type\")) {\n                    
contentType = part.getContentAsString(\n                            StandardCharsets.UTF_8);\n                } else if (header.equalsIgnoreCase(\"file\")) {\n                    // TODO: buffers entire payload\n                    payload = part.getContentAsString(\n                            StandardCharsets.ISO_8859_1)\n                            .getBytes(StandardCharsets.ISO_8859_1);\n                } else if (header.equalsIgnoreCase(\"key\")) {\n                    blobName = part.getContentAsString(\n                            StandardCharsets.UTF_8);\n                } else if (header.equalsIgnoreCase(\"policy\")) {\n                    policy = part.getContentAsString(\n                            StandardCharsets.ISO_8859_1)\n                            .getBytes(StandardCharsets.ISO_8859_1);\n                } else if (header.equalsIgnoreCase(\"signature\") ||\n                        header.equalsIgnoreCase(\"X-Amz-Signature\")) {\n                    signature = part.getContentAsString(\n                            StandardCharsets.UTF_8);\n                } else if (header.equalsIgnoreCase(\"X-Amz-Algorithm\")) {\n                    algorithm = part.getContentAsString(\n                            StandardCharsets.UTF_8);\n                }\n            }\n        } finally {\n            parts.close();\n        }\n\n        if (blobName == null || policy == null) {\n            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);\n            return;\n        }\n\n        String headerAuthorization = null;\n        S3AuthorizationHeader authHeader = null;\n        boolean signatureVersion4;\n        if (algorithm == null) {\n            if (identity == null || signature == null) {\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n            signatureVersion4 = false;\n            headerAuthorization = \"AWS \" + identity + \":\" + signature;\n        } else if (algorithm.equals(\"AWS4-HMAC-SHA256\")) {\n      
      if (identity == null || signature == null) {\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n            signatureVersion4 = true;\n            headerAuthorization = \"AWS4-HMAC-SHA256\" +\n                    \" Credential=\" + identity +\n                    \", Signature=\" + signature;\n        } else {\n            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);\n            return;\n        }\n\n        try {\n            authHeader = new S3AuthorizationHeader(headerAuthorization);\n        } catch (IllegalArgumentException iae) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, iae);\n        }\n\n        switch (authHeader.getAuthenticationType()) {\n        case AWS_V2:\n            switch (authenticationType) {\n            case AWS_V2:\n            case AWS_V2_OR_V4:\n            case NONE:\n                break;\n            default:\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n            break;\n        case AWS_V4:\n            switch (authenticationType) {\n            case AWS_V4:\n            case AWS_V2_OR_V4:\n            case NONE:\n                break;\n            default:\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n            break;\n        case NONE:\n            break;\n        default:\n            throw new IllegalArgumentException(\"Unhandled type: \" +\n                    authHeader.getAuthenticationType());\n        }\n\n        Map.Entry<String, BlobStore> provider =\n                blobStoreLocator.locateBlobStore(authHeader.getIdentity(), null,\n                        null);\n        if (provider == null) {\n            response.setStatus(HttpServletResponse.SC_FORBIDDEN);\n            return;\n        }\n        String credential = provider.getKey();\n\n        if (signatureVersion4) {\n            byte[] kSecret = (\"AWS4\" + credential).getBytes(\n                    
StandardCharsets.UTF_8);\n            byte[] kDate = hmac(\"HmacSHA256\",\n                    authHeader.getDate().getBytes(StandardCharsets.UTF_8),\n                    kSecret);\n            byte[] kRegion = hmac(\"HmacSHA256\",\n                    authHeader.getRegion().getBytes(StandardCharsets.UTF_8),\n                    kDate);\n            byte[] kService = hmac(\"HmacSHA256\",\n                    authHeader.getService().getBytes(StandardCharsets.UTF_8),\n                    kRegion);\n            byte[] kSigning = hmac(\"HmacSHA256\",\n                    \"aws4_request\".getBytes(StandardCharsets.UTF_8), kService);\n            String expectedSignature = BaseEncoding.base16().lowerCase().encode(\n                    hmac(\"HmacSHA256\", policy, kSigning));\n            if (!constantTimeEquals(signature, expectedSignature)) {\n                response.setStatus(HttpServletResponse.SC_FORBIDDEN);\n                return;\n            }\n        } else {\n            String expectedSignature = Base64.getEncoder().encodeToString(\n                    hmac(\"HmacSHA1\", policy,\n                            credential.getBytes(StandardCharsets.UTF_8)));\n            if (!constantTimeEquals(signature, expectedSignature)) {\n                response.setStatus(HttpServletResponse.SC_FORBIDDEN);\n                return;\n            }\n        }\n\n        BlobBuilder.PayloadBlobBuilder builder = blobStore\n                .blobBuilder(blobName)\n                .payload(payload);\n        if (contentType != null) {\n            builder.contentType(contentType);\n        }\n        Blob blob = builder.build();\n        blobStore.putBlob(containerName, blob);\n\n        response.setStatus(HttpServletResponse.SC_NO_CONTENT);\n\n        addCorsResponseHeader(request, response);\n    }\n\n    private void handleInitiateMultipartUpload(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String 
blobName)\n            throws IOException, S3Exception {\n        ByteSource payload = ByteSource.empty();\n        BlobBuilder.PayloadBlobBuilder builder = blobStore\n                .blobBuilder(blobName)\n                .payload(payload);\n        addContentMetadataFromHttpRequest(builder, request);\n        builder.contentLength(payload.size());\n\n        String storageClass = request.getHeader(AwsHttpHeaders.STORAGE_CLASS);\n        if (storageClass == null || storageClass.equalsIgnoreCase(\"STANDARD\")) {\n            // defaults to STANDARD\n        } else {\n            builder.tier(StorageClass.valueOf(storageClass).toTier());\n        }\n\n        String ifMatch = request.getHeader(HttpHeaders.IF_MATCH);\n        String ifNoneMatch = request.getHeader(HttpHeaders.IF_NONE_MATCH);\n        String blobStoreType = getBlobStoreType(blobStore);\n\n        // Azure only supports If-None-Match: *, not If-Match: *\n        // Handle If-Match: * manually for the azureblob-sdk provider.\n        // Note: this is a non-atomic operation (HEAD then PUT).\n        if (ifMatch != null && ifMatch.equals(\"*\") &&\n                blobStoreType.equals(\"azureblob-sdk\")) {\n            BlobMetadata metadata = blobStore.blobMetadata(containerName, blobName);\n            if (metadata == null) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            ifMatch = null;\n        }\n\n        BlobAccess access;\n        String cannedAcl = request.getHeader(AwsHttpHeaders.ACL);\n        if (cannedAcl == null || cannedAcl.equalsIgnoreCase(\"private\")) {\n            access = BlobAccess.PRIVATE;\n        } else if (cannedAcl.equalsIgnoreCase(\"public-read\")) {\n            access = BlobAccess.PUBLIC_READ;\n        } else if (CANNED_ACLS.contains(cannedAcl)) {\n            throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n        } else {\n            response.sendError(HttpServletResponse.SC_BAD_REQUEST);\n            return;\n     
   }\n\n        var options = new PutOptions2()\n                .setBlobAccess(access)\n                .setIfMatch(ifMatch)\n                .setIfNoneMatch(ifNoneMatch);\n\n        MultipartUpload mpu = blobStore.initiateMultipartUpload(containerName,\n                builder.build().getMetadata(), options);\n\n        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(\n                blobStore))) {\n            blobStore.putBlob(containerName, builder.name(mpu.id()).build(),\n                    options);\n        }\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"InitiateMultipartUploadResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            writeSimpleElement(xml, \"Bucket\", containerName);\n            writeSimpleElement(xml, \"Key\", blobName);\n            writeSimpleElement(xml, \"UploadId\", mpu.id());\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleCompleteMultipartUpload(HttpServletRequest request,\n            HttpServletResponse response, InputStream is,\n            final BlobStore blobStore, String containerName, String blobName,\n            String uploadId) throws IOException, S3Exception {\n        BlobMetadata metadata;\n        PutOptions options;\n        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(\n                blobStore))) {\n            metadata = blobStore.blobMetadata(containerName, uploadId);\n            BlobAccess access = blobStore.getBlobAccess(containerName,\n                    uploadId);\n           
 options = new PutOptions().setBlobAccess(access);\n        } else {\n            metadata = new MutableBlobMetadataImpl();\n            options = new PutOptions();\n        }\n        final MultipartUpload mpu = MultipartUpload.create(containerName,\n                blobName, uploadId, metadata, options);\n\n        final List<MultipartPart> parts = new ArrayList<>();\n        String blobStoreType = getBlobStoreType(blobStore);\n        if (blobStoreType.equals(\"azureblob\")) {\n            for (MultipartPart part : blobStore.listMultipartUpload(mpu)) {\n                parts.add(part);\n            }\n        } else if (blobStoreType.equals(\"azureblob-sdk\") ||\n                blobStoreType.equals(\"google-cloud-storage-sdk\")) {\n            var partsByListing =\n                blobStore.listMultipartUpload(mpu).stream().collect(\n                        Collectors.toMap(\n                                part -> part.partNumber(),\n                                part -> part));\n            CompleteMultipartUploadRequest cmu;\n            try {\n                cmu = mapper.readValue(\n                        is, CompleteMultipartUploadRequest.class);\n            } catch (JsonParseException jpe) {\n                throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L, jpe);\n            }\n\n            if (cmu.parts != null) {\n                //  sort by part number and deduplicate (last occurrence wins)\n                SortedMap<Integer, MultipartPart> partsMap = new TreeMap<>();\n                for (CompleteMultipartUploadRequest.Part part : cmu.parts) {\n                    if (part.partNumber <= 0) {\n                        throw new S3Exception(S3ErrorCode.INVALID_PART_ORDER,\n                                \"Part numbers must be positive integers.\");\n                    }\n                    MultipartPart uploadedPart = partsByListing.get(part.partNumber);\n                    if (uploadedPart == null) {\n                        throw new 
S3Exception(S3ErrorCode.INVALID_PART);\n                    }\n                    partsMap.put(part.partNumber, uploadedPart);\n                }\n                parts.addAll(partsMap.values());\n            }\n        } else if (blobStoreType.equals(\"google-cloud-storage\")) {\n            // GCS only supports 32 parts but we can support up to 1024 by\n            // recursively combining objects.\n            for (int partNumber = 1;; ++partNumber) {\n                MultipartUpload mpu2 = MultipartUpload.create(\n                        containerName,\n                        \"%s_%08d\".formatted(mpu.id(), partNumber),\n                        \"%s_%08d\".formatted(mpu.id(), partNumber),\n                        metadata, options);\n                List<MultipartPart> subParts = blobStore.listMultipartUpload(\n                        mpu2);\n                if (subParts.isEmpty()) {\n                    break;\n                }\n                long partSize = 0;\n                for (MultipartPart part : subParts) {\n                    partSize += part.partSize();\n                }\n                String eTag = blobStore.completeMultipartUpload(mpu2, subParts);\n                parts.add(MultipartPart.create(\n                        partNumber, partSize, eTag, /*lastModified=*/ null));\n            }\n        } else {\n            // List parts to get part sizes and to map multiple Azure parts\n            // into single parts.\n            var partsByListing =\n                blobStore.listMultipartUpload(mpu).stream().collect(\n                        Collectors.toMap(\n                                part -> part.partNumber(),\n                                part -> part));\n            CompleteMultipartUploadRequest cmu;\n            try {\n                cmu = mapper.readValue(\n                        is, CompleteMultipartUploadRequest.class);\n            } catch (JsonParseException jpe) {\n                throw new 
S3Exception(S3ErrorCode.MALFORMED_X_M_L, jpe);\n            }\n\n            // use TreeMap to sort by part number and deduplicate (last wins)\n            SortedMap<Integer, String> requestParts = new TreeMap<>();\n            if (cmu.parts != null) {\n                for (CompleteMultipartUploadRequest.Part part : cmu.parts) {\n                    if (part.partNumber <= 0) {\n                        throw new S3Exception(S3ErrorCode.INVALID_PART_ORDER,\n                                \"Part numbers must be positive integers.\");\n                    }\n                    requestParts.put(part.partNumber, part.eTag);\n                }\n            }\n\n            for (var it = requestParts.entrySet().iterator(); it.hasNext();) {\n                var entry = it.next();\n                MultipartPart part = partsByListing.get(entry.getKey());\n                if (part == null) {\n                    throw new S3Exception(S3ErrorCode.INVALID_PART);\n                }\n                long partSize = part.partSize();\n                if (it.hasNext() && partSize != -1 &&\n                        (partSize < 5 * 1024 * 1024 || partSize <\n                                blobStore.getMinimumMultipartPartSize())) {\n                    throw new S3Exception(S3ErrorCode.ENTITY_TOO_SMALL);\n                }\n                if (part.partETag() != null &&\n                        !equalsIgnoringSurroundingQuotes(part.partETag(),\n                                entry.getValue())) {\n                    throw new S3Exception(S3ErrorCode.INVALID_PART);\n                }\n                parts.add(MultipartPart.create(entry.getKey(),\n                        partSize, part.partETag(), part.lastModified()));\n            }\n        }\n\n        if (parts.isEmpty()) {\n            // Amazon requires at least one part\n            throw new S3Exception(S3ErrorCode.MALFORMED_X_M_L);\n        }\n\n        response.setCharacterEncoding(UTF_8);\n        
addCorsResponseHeader(request, response);\n        try (PrintWriter writer = response.getWriter()) {\n            response.setStatus(HttpServletResponse.SC_OK);\n            response.setContentType(XML_CONTENT_TYPE);\n\n            // Launch async thread to allow main thread to emit newlines to\n            // the client while completeMultipartUpload processes.\n            final AtomicReference<String> eTag = new AtomicReference<>();\n            final AtomicReference<RuntimeException> exception =\n                    new AtomicReference<>();\n            var thread = new Thread() {\n                @Override\n                public void run() {\n                    try {\n                        eTag.set(blobStore.completeMultipartUpload(mpu, parts));\n                    } catch (RuntimeException re) {\n                        exception.set(re);\n                    }\n                }\n            };\n            thread.start();\n\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"CompleteMultipartUploadResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n            xml.flush();\n\n            while (thread.isAlive()) {\n                try {\n                    thread.join(1000);\n                } catch (InterruptedException ie) {\n                    // ignore\n                }\n                writer.write(\"\\n\");\n                writer.flush();\n            }\n\n            if (exception.get() != null) {\n                throw exception.get();\n            }\n\n            if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(\n                    blobStore))) {\n                blobStore.removeBlob(containerName, uploadId);\n            }\n\n            // TODO: bogus value\n            writeSimpleElement(xml, \"Location\",\n                    \"http://Example-Bucket.s3.amazonaws.com/\" + blobName);\n\n 
           writeSimpleElement(xml, \"Bucket\", containerName);\n            writeSimpleElement(xml, \"Key\", blobName);\n\n            if (eTag.get() != null) {\n                writeSimpleElement(xml, \"ETag\", maybeQuoteETag(eTag.get()));\n            }\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleAbortMultipartUpload(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String blobName,\n            String uploadId) throws IOException, S3Exception {\n        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(\n                blobStore))) {\n            if (!blobStore.blobExists(containerName, uploadId)) {\n                throw new S3Exception(S3ErrorCode.NO_SUCH_UPLOAD);\n            }\n\n            blobStore.removeBlob(containerName, uploadId);\n        }\n\n        addCorsResponseHeader(request, response);\n\n        // TODO: how to reconstruct original mpu?\n        MultipartUpload mpu = MultipartUpload.create(containerName,\n                blobName, uploadId, createFakeBlobMetadata(blobStore),\n                new PutOptions());\n        try {\n            blobStore.abortMultipartUpload(mpu);\n        } catch (KeyNotFoundException knfe) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_UPLOAD, knfe);\n        }\n        response.sendError(HttpServletResponse.SC_NO_CONTENT);\n    }\n\n    private void handleListParts(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String blobName, String uploadId)\n            throws IOException, S3Exception {\n        // support only the no-op zero case\n        String partNumberMarker = request.getParameter(\"part-number-marker\");\n        if (partNumberMarker != null && !partNumberMarker.equals(\"0\")) {\n  
          throw new S3Exception(S3ErrorCode.NOT_IMPLEMENTED);\n        }\n\n        // TODO: how to reconstruct original mpu?\n        MultipartUpload mpu = MultipartUpload.create(containerName,\n                blobName, uploadId, createFakeBlobMetadata(blobStore),\n                new PutOptions());\n\n        List<MultipartPart> parts;\n        var blobStoreType = getBlobStoreType(blobStore);\n        if (blobStoreType.equals(\"azureblob\")) {\n            // map Azure subparts back into S3 parts\n            SortedMap<Integer, Long> map = new TreeMap<>();\n            for (MultipartPart part : blobStore.listMultipartUpload(mpu)) {\n                int virtualPartNumber = part.partNumber() / 10_000;\n                Long size = map.get(virtualPartNumber);\n                map.put(virtualPartNumber,\n                        (size == null ? 0L : (long) size) + part.partSize());\n            }\n            parts = new ArrayList<>();\n            for (var entry : map.entrySet()) {\n                String eTag = \"\";  // TODO: bogus value\n                Date lastModified = null;  // TODO: bogus value\n                parts.add(MultipartPart.create(entry.getKey(),\n                        entry.getValue(), eTag, lastModified));\n            }\n        } else {\n            parts = blobStore.listMultipartUpload(mpu);\n        }\n\n        String encodingType = request.getParameter(\"encoding-type\");\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"ListPartsResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            if (encodingType != null && encodingType.equals(\"url\")) {\n                
writeSimpleElement(xml, \"EncodingType\", encodingType);\n            }\n\n            writeSimpleElement(xml, \"Bucket\", containerName);\n            writeSimpleElement(xml, \"Key\", encodeBlob(\n                    encodingType, blobName));\n            writeSimpleElement(xml, \"UploadId\", uploadId);\n            writeInitiatorStanza(xml);\n            writeOwnerStanza(xml);\n            // TODO: bogus value\n            writeSimpleElement(xml, \"StorageClass\", \"STANDARD\");\n\n            // TODO: pagination\n/*\n            writeSimpleElement(xml, \"PartNumberMarker\", \"1\");\n            writeSimpleElement(xml, \"NextPartNumberMarker\", \"3\");\n            writeSimpleElement(xml, \"MaxParts\", \"2\");\n            writeSimpleElement(xml, \"IsTruncated\", \"true\");\n*/\n\n            for (MultipartPart part : parts) {\n                xml.writeStartElement(\"Part\");\n\n                writeSimpleElement(xml, \"PartNumber\", String.valueOf(\n                        part.partNumber()));\n\n                Date lastModified = part.lastModified();\n                if (lastModified != null) {\n                    writeSimpleElement(xml, \"LastModified\",\n                            formatDate(lastModified));\n                }\n\n                String eTag = part.partETag();\n                if (eTag != null) {\n                    writeSimpleElement(xml, \"ETag\", maybeQuoteETag(eTag));\n                }\n\n                writeSimpleElement(xml, \"Size\", String.valueOf(\n                        part.partSize()));\n\n                xml.writeEndElement();\n            }\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleCopyPart(HttpServletRequest request,\n            HttpServletResponse response, BlobStore blobStore,\n            String containerName, String blobName, String uploadId)\n            throws 
IOException, S3Exception {\n        // TODO: duplicated from handlePutBlob\n        String copySourceHeader = request.getHeader(AwsHttpHeaders.COPY_SOURCE);\n        copySourceHeader = URLDecoder.decode(\n                copySourceHeader, StandardCharsets.UTF_8);\n        if (copySourceHeader.startsWith(\"/\")) {\n            // Some clients like boto do not include the leading slash\n            copySourceHeader = copySourceHeader.substring(1);\n        }\n        String[] path = copySourceHeader.split(\"/\", 2);\n        if (path.length != 2) {\n            throw new S3Exception(S3ErrorCode.INVALID_REQUEST);\n        }\n        String sourceContainerName = path[0];\n        String sourceBlobName = path[1];\n\n        var options = new GetOptions();\n        String range = request.getHeader(AwsHttpHeaders.COPY_SOURCE_RANGE);\n        long expectedSize = -1;\n        if (range != null) {\n            if (!range.startsWith(\"bytes=\") || range.indexOf(',') != -1 ||\n                range.indexOf('-') == -1) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"The x-amz-copy-source-range value must be of the form \" +\n                    \"bytes=first-last where first and last are the \" +\n                    \"zero-based offsets of the first and last bytes to copy\");\n            }\n            try {\n                range = range.substring(\"bytes=\".length());\n                String[] ranges = range.split(\"-\", 2);\n                if (ranges[0].isEmpty()) {\n                    options.tail(Long.parseLong(ranges[1]));\n                } else if (ranges[1].isEmpty()) {\n                    options.startAt(Long.parseLong(ranges[0]));\n                } else {\n                    long start = Long.parseLong(ranges[0]);\n                    long end = Long.parseLong(ranges[1]);\n                    if (end < start) {\n                        throw new S3Exception(S3ErrorCode.INVALID_RANGE);\n                    }\n       
             expectedSize = end - start + 1;\n                    if (expectedSize > MAX_MULTIPART_COPY_SIZE) {\n                        throw new S3Exception(S3ErrorCode.INVALID_REQUEST,\n                                \"The specified copy source is larger than\" +\n                                \" the maximum allowable size for a copy\" +\n                                \" source: \" + MAX_MULTIPART_COPY_SIZE);\n                    }\n                    options.range(start, end);\n                }\n            } catch (NumberFormatException nfe) {\n                throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"The x-amz-copy-source-range value must be of the form \" +\n                    \"bytes=first-last where first and last are the \" +\n                    \"zero-based offsets of the first and last bytes to copy\",\n                    nfe);\n            }\n        }\n\n        String partNumberString = request.getParameter(\"partNumber\");\n        if (partNumberString == null) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n        }\n        int partNumber;\n        try {\n            partNumber = Integer.parseInt(partNumberString);\n        } catch (NumberFormatException nfe) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"Part number must be an integer between 1 and 10000\" +\n                    \", inclusive\", nfe, Map.of(\n                            \"ArgumentName\", \"partNumber\",\n                            \"ArgumentValue\", partNumberString));\n        }\n        if (partNumber < 1 || partNumber > 10_000) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"Part number must be an integer between 1 and 10000\" +\n                    \", inclusive\", (Throwable) null, Map.of(\n                            \"ArgumentName\", \"partNumber\",\n                            \"ArgumentValue\", partNumberString));\n        
}\n\n        // GCS only supports 32 parts so partition MPU into 32-part chunks.\n        String blobStoreType = getBlobStoreType(blobStore);\n        if (blobStoreType.equals(\"google-cloud-storage\")) {\n            // fix up 1-based part numbers\n            uploadId = \"%s_%08d\".formatted(\n                    uploadId, ((partNumber - 1) / 32) + 1);\n            partNumber = ((partNumber - 1) % 32) + 1;\n        }\n\n        // TODO: how to reconstruct original mpu?\n        MultipartUpload mpu = MultipartUpload.create(containerName,\n                blobName, uploadId, createFakeBlobMetadata(blobStore),\n                new PutOptions());\n\n        // TODO: Blob can leak on precondition failures.\n        Blob blob = blobStore.getBlob(sourceContainerName, sourceBlobName,\n                options);\n        if (blob == null) {\n            throw new S3Exception(S3ErrorCode.NO_SUCH_KEY);\n        }\n\n        BlobMetadata blobMetadata = blob.getMetadata();\n        // HTTP GET allow overlong ranges but S3 CopyPart does not\n        if (expectedSize != -1 && blobMetadata.getSize() < expectedSize) {\n            throw new S3Exception(S3ErrorCode.INVALID_RANGE);\n        }\n\n        String ifMatch = request.getHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_MATCH);\n        String ifNoneMatch = request.getHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_NONE_MATCH);\n        long ifModifiedSince = request.getDateHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_MODIFIED_SINCE);\n        long ifUnmodifiedSince = request.getDateHeader(\n                AwsHttpHeaders.COPY_SOURCE_IF_UNMODIFIED_SINCE);\n        String eTag = blobMetadata.getETag();\n        if (eTag != null) {\n            eTag = maybeQuoteETag(eTag);\n            if (ifMatch != null && !ifMatch.equals(eTag)) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            if (ifNoneMatch != null && ifNoneMatch.equals(eTag)) {\n                
throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n        }\n\n        Date lastModified = blobMetadata.getLastModified();\n        if (lastModified != null) {\n            if (ifModifiedSince != -1 && lastModified.compareTo(\n                    new Date(ifModifiedSince)) <= 0) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n            if (ifUnmodifiedSince != -1 && lastModified.compareTo(\n                    new Date(ifUnmodifiedSince)) >= 0) {\n                throw new S3Exception(S3ErrorCode.PRECONDITION_FAILED);\n            }\n        }\n\n        long contentLength =\n                blobMetadata.getContentMetadata().getContentLength();\n\n        try (InputStream is = blob.getPayload().openStream()) {\n            if (blobStoreType.equals(\"azureblob\")) {\n                // Azure has a smaller maximum part size than S3.  Split a\n                // single S3 part multiple Azure parts.\n                long azureMaximumMultipartPartSize =\n                        blobStore.getMaximumMultipartPartSize();\n                var his = new HashingInputStream(MD5, is);\n                int subPartNumber = 0;\n                for (long offset = 0; offset < contentLength;\n                        offset += azureMaximumMultipartPartSize,\n                        ++subPartNumber) {\n                    Payload payload = Payloads.newInputStreamPayload(\n                            new UncloseableInputStream(ByteStreams.limit(his,\n                                    azureMaximumMultipartPartSize)));\n                    payload.getContentMetadata().setContentLength(\n                            Math.min(azureMaximumMultipartPartSize,\n                                    contentLength - offset));\n                    blobStore.uploadMultipartPart(mpu,\n                            10_000 * partNumber + subPartNumber, payload);\n                }\n                eTag = 
BaseEncoding.base16().lowerCase().encode(\n                        his.hash().asBytes());\n            } else {\n                Payload payload = Payloads.newInputStreamPayload(is);\n                payload.getContentMetadata().setContentLength(contentLength);\n\n                MultipartPart part = blobStore.uploadMultipartPart(mpu,\n                        partNumber, payload);\n                eTag = part.partETag();\n            }\n        }\n\n        response.setCharacterEncoding(UTF_8);\n        addCorsResponseHeader(request, response);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"CopyObjectResult\");\n            xml.writeDefaultNamespace(AWS_XMLNS);\n\n            if (lastModified != null) {\n                writeSimpleElement(xml, \"LastModified\",\n                        formatDate(lastModified));\n            }\n            if (eTag != null) {\n                writeSimpleElement(xml, \"ETag\", maybeQuoteETag(eTag));\n            }\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void handleUploadPart(HttpServletRequest request,\n            HttpServletResponse response, InputStream is, BlobStore blobStore,\n            String containerName, String blobName, String uploadId)\n            throws IOException, S3Exception {\n        // TODO: duplicated from handlePutBlob\n        String contentLengthString = null;\n        String decodedContentLengthString = null;\n        String contentMD5String = null;\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            String headerValue = Strings.nullToEmpty(request.getHeader(\n                    
headerName));\n            if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH)) {\n                contentLengthString = headerValue;\n            } else if (headerName.equalsIgnoreCase(\n                    AwsHttpHeaders.DECODED_CONTENT_LENGTH)) {\n                decodedContentLengthString = headerValue;\n            } else if (headerName.equalsIgnoreCase(HttpHeaders.CONTENT_MD5)) {\n                contentMD5String = headerValue;\n            }\n        }\n        if (decodedContentLengthString != null) {\n            contentLengthString = decodedContentLengthString;\n        }\n\n        HashCode contentMD5 = null;\n        if (contentMD5String != null) {\n            try {\n                contentMD5 = HashCode.fromBytes(\n                        Base64.getDecoder().decode(contentMD5String));\n            } catch (IllegalArgumentException iae) {\n                throw new S3Exception(S3ErrorCode.INVALID_DIGEST, iae);\n            }\n            if (contentMD5.bits() != MD5.bits()) {\n                throw new S3Exception(S3ErrorCode.INVALID_DIGEST);\n            }\n        }\n\n        if (contentLengthString == null) {\n            throw new S3Exception(S3ErrorCode.MISSING_CONTENT_LENGTH);\n        }\n        long contentLength;\n        try {\n            contentLength = Long.parseLong(contentLengthString);\n        } catch (NumberFormatException nfe) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT, nfe);\n        }\n        if (contentLength < 0) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n        }\n        if (decodedContentLengthString != null) {\n            is = ByteStreams.limit(is, contentLength);\n        }\n\n        String partNumberString = request.getParameter(\"partNumber\");\n        if (partNumberString == null) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT);\n        }\n        int partNumber;\n        try {\n            partNumber = Integer.parseInt(partNumberString);\n 
       } catch (NumberFormatException nfe) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"Part number must be an integer between 1 and 10000\" +\n                    \", inclusive\", nfe, Map.of(\n                            \"ArgumentName\", \"partNumber\",\n                            \"ArgumentValue\", partNumberString));\n        }\n        if (partNumber < 1 || partNumber > 10_000) {\n            throw new S3Exception(S3ErrorCode.INVALID_ARGUMENT,\n                    \"Part number must be an integer between 1 and 10000\" +\n                    \", inclusive\", (Throwable) null, Map.of(\n                            \"ArgumentName\", \"partNumber\",\n                            \"ArgumentValue\", partNumberString));\n        }\n\n        // GCS only supports 32 parts so partition MPU into 32-part chunks.\n        String blobStoreType = getBlobStoreType(blobStore);\n        if (blobStoreType.equals(\"google-cloud-storage\")) {\n            // fix up 1-based part numbers\n            uploadId = \"%s_%08d\".formatted(\n                    uploadId, ((partNumber - 1) / 32) + 1);\n            partNumber = ((partNumber - 1) % 32) + 1;\n        }\n\n        // TODO: how to reconstruct original mpu?\n        BlobMetadata blobMetadata;\n        if (Quirks.MULTIPART_REQUIRES_STUB.contains(getBlobStoreType(\n                blobStore))) {\n            blobMetadata = blobStore.blobMetadata(containerName, uploadId);\n        } else {\n            blobMetadata = createFakeBlobMetadata(blobStore);\n        }\n        MultipartUpload mpu = MultipartUpload.create(containerName,\n                blobName, uploadId, blobMetadata, new PutOptions());\n\n        if (getBlobStoreType(blobStore).equals(\"azureblob\")) {\n            // Azure has a smaller maximum part size than S3.  
Split a single\n            // S3 part multiple Azure parts.\n            long azureMaximumMultipartPartSize =\n                        blobStore.getMaximumMultipartPartSize();\n            var his = new HashingInputStream(MD5, is);\n            int subPartNumber = 0;\n            for (long offset = 0; offset < contentLength;\n                    offset += azureMaximumMultipartPartSize,\n                    ++subPartNumber) {\n                Payload payload = Payloads.newInputStreamPayload(\n                        ByteStreams.limit(his,\n                                azureMaximumMultipartPartSize));\n                payload.getContentMetadata().setContentLength(\n                        Math.min(azureMaximumMultipartPartSize,\n                                contentLength - offset));\n                blobStore.uploadMultipartPart(mpu,\n                        10_000 * partNumber + subPartNumber, payload);\n            }\n            response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(\n                    BaseEncoding.base16().lowerCase().encode(\n                            his.hash().asBytes())));\n        } else {\n            MultipartPart part;\n            Payload payload = Payloads.newInputStreamPayload(is);\n            payload.getContentMetadata().setContentLength(contentLength);\n            if (contentMD5 != null) {\n                payload.getContentMetadata().setContentMD5(contentMD5);\n            }\n\n            part = blobStore.uploadMultipartPart(mpu, partNumber, payload);\n\n            if (part.partETag() != null) {\n                response.addHeader(HttpHeaders.ETAG,\n                        maybeQuoteETag(part.partETag()));\n            }\n        }\n\n        addCorsResponseHeader(request, response);\n    }\n\n    private static void addResponseHeaderWithOverride(\n            HttpServletRequest request, HttpServletResponse response,\n            String headerName, String overrideHeaderName, String value) {\n        String override = 
request.getParameter(overrideHeaderName);\n\n        // NPE in if value is null\n        override = (override != null) ? override : value;\n\n        if (override != null) {\n            response.addHeader(headerName, override);\n        }\n    }\n\n    private static void addMetadataToResponse(HttpServletRequest request,\n            HttpServletResponse response,\n            BlobMetadata metadata) {\n        ContentMetadata contentMetadata =\n                metadata.getContentMetadata();\n        addResponseHeaderWithOverride(request, response,\n                HttpHeaders.CACHE_CONTROL, \"response-cache-control\",\n                contentMetadata.getCacheControl());\n        addResponseHeaderWithOverride(request, response,\n                HttpHeaders.CONTENT_ENCODING, \"response-content-encoding\",\n                contentMetadata.getContentEncoding());\n        addResponseHeaderWithOverride(request, response,\n                HttpHeaders.CONTENT_LANGUAGE, \"response-content-language\",\n                contentMetadata.getContentLanguage());\n        addResponseHeaderWithOverride(request, response,\n                HttpHeaders.CONTENT_DISPOSITION, \"response-content-disposition\",\n                contentMetadata.getContentDisposition());\n        Long contentLength = contentMetadata.getContentLength();\n        if (contentLength != null) {\n            response.addHeader(HttpHeaders.CONTENT_LENGTH,\n                    contentLength.toString());\n        }\n        String overrideContentType = request.getParameter(\n                \"response-content-type\");\n        response.setContentType(overrideContentType != null ?\n                overrideContentType : contentMetadata.getContentType());\n        String eTag = metadata.getETag();\n        if (eTag != null) {\n            response.addHeader(HttpHeaders.ETAG, maybeQuoteETag(eTag));\n        }\n        String overrideExpires = request.getParameter(\"response-expires\");\n        if (overrideExpires != 
null) {\n            response.addHeader(HttpHeaders.EXPIRES, overrideExpires);\n        } else {\n            Date expires = contentMetadata.getExpires();\n            if (expires != null) {\n                response.addDateHeader(HttpHeaders.EXPIRES, expires.getTime());\n            }\n        }\n        Date lastModified = metadata.getLastModified();\n        if (lastModified != null) {\n            response.addDateHeader(HttpHeaders.LAST_MODIFIED,\n                    lastModified.getTime());\n        }\n        Tier tier = metadata.getTier();\n        if (tier != null) {\n            response.addHeader(AwsHttpHeaders.STORAGE_CLASS,\n                    StorageClass.fromTier(tier).toString());\n        }\n        for (var entry : metadata.getUserMetadata().entrySet()) {\n            response.addHeader(USER_METADATA_PREFIX + entry.getKey(),\n                    entry.getValue());\n        }\n    }\n\n    /** Parse ISO 8601 timestamp into seconds since 1970. */\n    private static long parseIso8601(String date) {\n        var formatter = new SimpleDateFormat(\n                \"yyyyMMdd'T'HHmmss'Z'\");\n        formatter.setTimeZone(TimeZone.getTimeZone(\"UTC\"));\n        try {\n            return formatter.parse(date).getTime() / 1000;\n        } catch (ParseException pe) {\n            throw new IllegalArgumentException(pe);\n        }\n    }\n\n    private void isTimeSkewed(\n            long date, boolean isPresigned) throws S3Exception  {\n        if (date < 0) {\n            throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n        }\n        long now = System.currentTimeMillis() / 1000;\n        if (isPresigned) {\n            if (now + maximumTimeSkew < date) {\n                logger.debug(\"request is not valid yet {} {}\", date, now);\n                throw new S3Exception(S3ErrorCode.ACCESS_DENIED);\n            }\n        } else {\n            if (now + maximumTimeSkew < date || now - maximumTimeSkew > date) {\n                logger.debug(\"time 
skewed {} {}\", date, now);\n                throw new S3Exception(S3ErrorCode.REQUEST_TIME_TOO_SKEWED);\n            }\n        }\n    }\n\n    // cannot call BlobStore.getContext().utils().date().iso8601DateFormat since\n    // it has unwanted millisecond precision\n    private static String generateRequestId() {\n        return String.format(\"%016X\", ThreadLocalRandom.current().nextLong());\n    }\n\n    private static String formatDate(Date date) {\n        var formatter = new SimpleDateFormat(\n                \"yyyy-MM-dd'T'HH:mm:ss'Z'\");\n        formatter.setTimeZone(TimeZone.getTimeZone(\"GMT\"));\n        return formatter.format(date);\n    }\n\n    protected final void sendSimpleErrorResponse(\n            HttpServletRequest request, HttpServletResponse response,\n            S3ErrorCode code, String message,\n            Map<String, String> elements) throws IOException {\n        logger.debug(\"sendSimpleErrorResponse: {} {}\", code, elements);\n\n        if (response.isCommitted()) {\n            // Another handler already opened and closed the writer.\n            return;\n        }\n\n        response.setStatus(code.getHttpStatusCode());\n\n        if (request.getMethod().equals(\"HEAD\")) {\n            // The HEAD method is identical to GET except that the server MUST\n            // NOT return a message-body in the response.\n            return;\n        }\n\n        response.setCharacterEncoding(UTF_8);\n        try (Writer writer = response.getWriter()) {\n            response.setContentType(XML_CONTENT_TYPE);\n            XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(\n                    writer);\n            xml.writeStartDocument();\n            xml.writeStartElement(\"Error\");\n\n            writeSimpleElement(xml, \"Code\", code.getErrorCode());\n            writeSimpleElement(xml, \"Message\", message);\n\n            for (var entry : elements.entrySet()) {\n                writeSimpleElement(xml, entry.getKey(), 
entry.getValue());\n            }\n\n            String requestId = response.getHeader(AwsHttpHeaders.REQUEST_ID);\n            if (requestId == null) {\n                requestId = generateRequestId();\n            }\n            writeSimpleElement(xml, \"RequestId\", requestId);\n\n            xml.writeEndElement();\n            xml.flush();\n        } catch (XMLStreamException xse) {\n            throw new IOException(xse);\n        }\n    }\n\n    private void addCorsResponseHeader(HttpServletRequest request,\n          HttpServletResponse response) {\n        String corsOrigin = request.getHeader(HttpHeaders.ORIGIN);\n        if (!Strings.isNullOrEmpty(corsOrigin) &&\n                corsRules.isOriginAllowed(corsOrigin)) {\n            response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN,\n                    corsRules.getAllowedOrigin(corsOrigin));\n            response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS,\n                    corsRules.getAllowedMethods());\n            response.addHeader(HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS,\n                    corsRules.getExposedHeaders());\n            if (corsRules.isAllowCredentials()) {\n                response.addHeader(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS,\n                        \"true\");\n            }\n        }\n    }\n\n    private static void addContentMetadataFromHttpRequest(\n            BlobBuilder.PayloadBlobBuilder builder,\n            HttpServletRequest request) {\n        var userMetadata = ImmutableMap.<String, String>builder();\n        for (String headerName : Collections.list(request.getHeaderNames())) {\n            if (startsWithIgnoreCase(headerName, USER_METADATA_PREFIX)) {\n                userMetadata.put(\n                        headerName.substring(USER_METADATA_PREFIX.length()),\n                        Strings.nullToEmpty(request.getHeader(headerName)));\n            }\n        }\n        builder.cacheControl(request.getHeader(\n                     
   HttpHeaders.CACHE_CONTROL))\n                .contentDisposition(request.getHeader(\n                        HttpHeaders.CONTENT_DISPOSITION))\n                .contentEncoding(request.getHeader(\n                        HttpHeaders.CONTENT_ENCODING))\n                .contentLanguage(request.getHeader(\n                        HttpHeaders.CONTENT_LANGUAGE))\n                .userMetadata(userMetadata.build());\n        String contentType = request.getContentType();\n        if (contentType != null) {\n            builder.contentType(contentType);\n        }\n        long expires = request.getDateHeader(HttpHeaders.EXPIRES);\n        if (expires != -1) {\n            builder.expires(new Date(expires));\n        }\n    }\n\n    // TODO: bogus values\n    private static void writeInitiatorStanza(XMLStreamWriter xml)\n            throws XMLStreamException {\n        xml.writeStartElement(\"Initiator\");\n\n        writeSimpleElement(xml, \"ID\", FAKE_INITIATOR_ID);\n        writeSimpleElement(xml, \"DisplayName\",\n                FAKE_INITIATOR_DISPLAY_NAME);\n\n        xml.writeEndElement();\n    }\n\n    // TODO: bogus values\n    private static void writeOwnerStanza(XMLStreamWriter xml)\n            throws XMLStreamException {\n        xml.writeStartElement(\"Owner\");\n\n        writeSimpleElement(xml, \"ID\", FAKE_OWNER_ID);\n        writeSimpleElement(xml, \"DisplayName\", FAKE_OWNER_DISPLAY_NAME);\n\n        xml.writeEndElement();\n    }\n\n    private static void writeSimpleElement(XMLStreamWriter xml,\n            String elementName, String characters) throws XMLStreamException {\n        xml.writeStartElement(elementName);\n        xml.writeCharacters(characters);\n        xml.writeEndElement();\n    }\n\n    private static BlobMetadata createFakeBlobMetadata(BlobStore blobStore) {\n        return blobStore.blobBuilder(\"fake-name\")\n                .build()\n                .getMetadata();\n    }\n\n    private static boolean 
equalsIgnoringSurroundingQuotes(String s1,\n            String s2) {\n        if (s1.length() >= 2 && s1.startsWith(\"\\\"\") && s1.endsWith(\"\\\"\")) {\n            s1 = s1.substring(1, s1.length() - 1);\n        }\n        if (s2.length() >= 2 && s2.startsWith(\"\\\"\") && s2.endsWith(\"\\\"\")) {\n            s2 = s2.substring(1, s2.length() - 1);\n        }\n        return s1.equals(s2);\n    }\n\n    private static String maybeQuoteETag(String eTag) {\n        if (!eTag.startsWith(\"\\\"\") && !eTag.endsWith(\"\\\"\")) {\n            eTag = \"\\\"\" + eTag + \"\\\"\";\n        }\n        return eTag;\n    }\n\n    private static boolean startsWithIgnoreCase(String string, String prefix) {\n        return string.toLowerCase().startsWith(prefix.toLowerCase());\n    }\n\n    private static byte[] hmac(String algorithm, byte[] data, byte[] key) {\n        try {\n            Mac mac = Mac.getInstance(algorithm);\n            mac.init(new SecretKeySpec(key, algorithm));\n            return mac.doFinal(data);\n        } catch (InvalidKeyException | NoSuchAlgorithmException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    // Encode blob name if client requests it.  
This allows for characters\n    // which XML 1.0 cannot represent.\n    private static String encodeBlob(String encodingType, String blobName) {\n        if (encodingType != null && encodingType.equals(\"url\")) {\n            return urlEscaper.escape(blobName);\n        } else {\n            return blobName;\n        }\n    }\n\n    private static final class UncloseableInputStream\n            extends FilterInputStream {\n        UncloseableInputStream(InputStream is) {\n            super(is);\n        }\n\n        @Override\n        public void close() throws IOException {\n        }\n    }\n\n    public final BlobStoreLocator getBlobStoreLocator() {\n        return blobStoreLocator;\n    }\n\n    public final void setBlobStoreLocator(BlobStoreLocator locator) {\n        this.blobStoreLocator = locator;\n    }\n\n    private static boolean validateIpAddress(String string) {\n        List<String> parts = Splitter.on('.').splitToList(string);\n        if (parts.size() != 4) {\n            return false;\n        }\n        for (String part : parts) {\n            try {\n                int num = Integer.parseInt(part);\n                if (num < 0 || num > 255) {\n                    return false;\n                }\n            } catch (NumberFormatException nfe) {\n                return false;\n            }\n        }\n        return true;\n    }\n\n    private static boolean constantTimeEquals(String x, String y) {\n        return MessageDigest.isEqual(x.getBytes(StandardCharsets.UTF_8),\n                y.getBytes(StandardCharsets.UTF_8));\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3ProxyHandlerJetty.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.Map;\nimport java.util.concurrent.TimeoutException;\n\nimport com.google.common.net.HttpHeaders;\n\nimport jakarta.servlet.http.HttpServlet;\nimport jakarta.servlet.http.HttpServletRequest;\nimport jakarta.servlet.http.HttpServletResponse;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.http.HttpResponse;\nimport org.jclouds.http.HttpResponseException;\nimport org.jclouds.rest.AuthorizationException;\nimport org.jclouds.util.Throwables2;\nimport org.jspecify.annotations.Nullable;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n/** Jetty-specific handler for S3 requests. 
*/\nfinal class S3ProxyHandlerJetty extends HttpServlet {\n    private static final Logger logger = LoggerFactory.getLogger(\n            S3ProxyHandlerJetty.class);\n\n    private final S3ProxyHandler handler;\n    private final S3ProxyMetrics metrics;\n\n    S3ProxyHandlerJetty(final BlobStore blobStore,\n            AuthenticationType authenticationType, final String identity,\n            final String credential, @Nullable String virtualHost,\n            long maxSinglePartObjectSize, long v4MaxNonChunkedRequestSize,\n            int v4MaxChunkSize,\n            boolean ignoreUnknownHeaders, CrossOriginResourceSharing corsRules,\n            String servicePath, int maximumTimeSkew,\n            @Nullable S3ProxyMetrics metrics) {\n        handler = new S3ProxyHandler(blobStore, authenticationType, identity,\n                credential, virtualHost, maxSinglePartObjectSize,\n                v4MaxNonChunkedRequestSize, v4MaxChunkSize,\n                ignoreUnknownHeaders, corsRules,\n                servicePath, maximumTimeSkew);\n        this.metrics = metrics;\n    }\n\n    private void sendS3Exception(HttpServletRequest request,\n            HttpServletResponse response, S3Exception se)\n            throws IOException {\n        handler.sendSimpleErrorResponse(request, response,\n                se.getError(), se.getMessage(), se.getElements());\n    }\n\n    @Override\n    protected void service(HttpServletRequest request,\n            HttpServletResponse response)\n            throws IOException {\n        long startNanos = System.nanoTime();\n        var ctx = new S3ProxyHandler.RequestContext();\n\n        try (InputStream is = request.getInputStream()) {\n\n            handler.doHandle(request, request, response, is, ctx);\n        } catch (ContainerNotFoundException cnfe) {\n            S3ErrorCode code = S3ErrorCode.NO_SUCH_BUCKET;\n            handler.sendSimpleErrorResponse(request, response, code,\n                    code.getMessage(), Map.of());\n 
           return;\n        } catch (HttpResponseException hre) {\n            HttpResponse hr = hre.getResponse();\n            if (hr == null) {\n                logger.debug(\"HttpResponseException without HttpResponse:\",\n                        hre);\n                response.sendError(\n                        HttpServletResponse.SC_INTERNAL_SERVER_ERROR,\n                        hre.getMessage());\n                return;\n            }\n\n            String eTag = hr.getFirstHeaderOrNull(HttpHeaders.ETAG);\n            if (eTag != null) {\n                response.setHeader(HttpHeaders.ETAG, eTag);\n            }\n\n            int status = hr.getStatusCode();\n            switch (status) {\n            case 412:\n                sendS3Exception(request, response,\n                        new S3Exception(S3ErrorCode.PRECONDITION_FAILED));\n                break;\n            case 416:\n                sendS3Exception(request, response,\n                        new S3Exception(S3ErrorCode.INVALID_RANGE));\n                break;\n            case HttpServletResponse.SC_BAD_REQUEST:\n            case 422:  // Swift returns 422 Unprocessable Entity\n                sendS3Exception(request, response,\n                    new S3Exception(S3ErrorCode.BAD_DIGEST));\n                break;\n            default:\n                logger.debug(\"HttpResponseException:\", hre);\n                response.setStatus(status);\n                break;\n            }\n            return;\n        } catch (IllegalArgumentException iae) {\n            logger.debug(\"IllegalArgumentException:\", iae);\n            response.sendError(HttpServletResponse.SC_BAD_REQUEST,\n                    iae.getMessage());\n            return;\n        } catch (IllegalStateException ise) {\n            // google-cloud-storage uses a different exception\n            if (ise.getMessage().startsWith(\"PreconditionFailed\")) {\n                sendS3Exception(request, response,\n                   
     new S3Exception(S3ErrorCode.PRECONDITION_FAILED));\n                return;\n            }\n            logger.debug(\"IllegalStateException:\", ise);\n            response.sendError(HttpServletResponse.SC_BAD_REQUEST,\n                    ise.getMessage());\n            return;\n        } catch (IOException ioe) {\n            var cause = Throwables2.getFirstThrowableOfType(ioe,\n                    S3Exception.class);\n            if (cause != null) {\n                sendS3Exception(request, response, cause);\n                return;\n            }\n            throw ioe;\n        } catch (KeyNotFoundException knfe) {\n            S3ErrorCode code = S3ErrorCode.NO_SUCH_KEY;\n            handler.sendSimpleErrorResponse(request, response, code,\n                    code.getMessage(), Map.of());\n            return;\n        } catch (S3Exception se) {\n            sendS3Exception(request, response, se);\n            return;\n        } catch (UnsupportedOperationException uoe) {\n            logger.debug(\"UnsupportedOperationException:\", uoe);\n            response.sendError(HttpServletResponse.SC_NOT_IMPLEMENTED,\n                    uoe.getMessage());\n            return;\n        } catch (Throwable throwable) {\n            if (Throwables2.getFirstThrowableOfType(throwable,\n                    AuthorizationException.class) != null) {\n                S3ErrorCode code = S3ErrorCode.ACCESS_DENIED;\n                handler.sendSimpleErrorResponse(request, response, code,\n                        code.getMessage(), Map.of());\n                return;\n            } else if (Throwables2.getFirstThrowableOfType(throwable,\n                    TimeoutException.class) != null) {\n                S3ErrorCode code = S3ErrorCode.REQUEST_TIMEOUT;\n                handler.sendSimpleErrorResponse(request, response, code,\n                        code.getMessage(), Map.of());\n                return;\n            } else {\n                logger.debug(\"Unknown 
exception:\", throwable);\n                throw throwable;\n            }\n        } finally {\n            recordMetrics(request, response, ctx, startNanos);\n        }\n    }\n\n    private void recordMetrics(HttpServletRequest request,\n            HttpServletResponse response, S3ProxyHandler.RequestContext ctx,\n            long startNanos) {\n        if (metrics == null || ctx.getOperation() == null) {\n            return;\n        }\n        long durationNanos = System.nanoTime() - startNanos;\n        metrics.recordRequest(\n                request.getMethod(),\n                request.getScheme(),\n                response.getStatus(),\n                ctx.getOperation(),\n                ctx.getBucket(),\n                durationNanos);\n    }\n\n    public S3ProxyHandler getHandler() {\n        return this.handler;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/S3ProxyMetrics.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.util.List;\n\nimport io.opentelemetry.api.common.AttributeKey;\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.common.AttributesBuilder;\nimport io.opentelemetry.api.metrics.DoubleHistogram;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.semconv.HttpAttributes;\nimport io.opentelemetry.semconv.UrlAttributes;\n\nimport org.jspecify.annotations.Nullable;\n\npublic final class S3ProxyMetrics {\n    /** Default metrics port (0 = ephemeral). 
*/\n    public static final int DEFAULT_METRICS_PORT = 0;\n    public static final String DEFAULT_METRICS_HOST = \"0.0.0.0\";\n\n    private static final AttributeKey<String> S3_OPERATION =\n            AttributeKey.stringKey(\"s3.operation\");\n    private static final AttributeKey<String> S3_BUCKET =\n            AttributeKey.stringKey(\"s3.bucket\");\n    // OTel semantic conventions specify these bucket boundaries for\n    // http.server.request.duration histogram.\n    // See: https://opentelemetry.io/docs/specs/semconv/http/http-metrics/\n    private static final List<Double> DURATION_BUCKETS = List.of(\n            0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5,\n            0.75, 1.0, 2.5, 5.0, 7.5, 10.0);\n\n    private final SdkMeterProvider meterProvider;\n    private final DoubleHistogram requestDuration;\n    private final PrometheusHttpServer prometheusServer;\n\n    public S3ProxyMetrics() {\n        this(DEFAULT_METRICS_HOST, DEFAULT_METRICS_PORT);\n    }\n\n    public S3ProxyMetrics(String host, int port) {\n        prometheusServer = PrometheusHttpServer.builder()\n                .setHost(host)\n                .setPort(port)\n                .build();\n\n        meterProvider = SdkMeterProvider.builder()\n                .registerMetricReader(prometheusServer)\n                .build();\n\n        Meter meter = meterProvider.get(\"org.gaul.s3proxy\");\n\n        requestDuration = meter.histogramBuilder(\"http.server.request.duration\")\n                .setDescription(\"Duration of HTTP server requests\")\n                .setUnit(\"s\")\n                .setExplicitBucketBoundariesAdvice(DURATION_BUCKETS)\n                .build();\n    }\n\n    public void recordRequest(\n            String method,\n            String scheme,\n            int statusCode,\n            @Nullable S3Operation operation,\n            @Nullable String bucket,\n            long durationNanos) {\n        if (operation == null) {\n            return;\n        }\n\n    
    double durationSeconds = durationNanos / 1_000_000_000.0;\n\n        AttributesBuilder builder = Attributes.builder()\n                .put(HttpAttributes.HTTP_REQUEST_METHOD, method)\n                .put(UrlAttributes.URL_SCHEME, scheme)\n                .put(HttpAttributes.HTTP_RESPONSE_STATUS_CODE, statusCode)\n                .put(S3_OPERATION, operation.getValue());\n\n        if (bucket != null && !bucket.isEmpty()) {\n            builder.put(S3_BUCKET, bucket);\n        }\n\n        requestDuration.record(durationSeconds, builder.build());\n    }\n\n    public String scrape() {\n        return prometheusServer.toString();\n    }\n\n    public void close() {\n        if (prometheusServer != null) {\n            prometheusServer.close();\n        }\n        if (meterProvider != null) {\n            meterProvider.close();\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/ShardedBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport java.io.File;\nimport java.io.InputStream;\nimport java.nio.charset.StandardCharsets;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Objects;\nimport java.util.Properties;\nimport java.util.Set;\nimport java.util.concurrent.ExecutionException;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\nimport java.util.stream.Collectors;\n\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.Sets;\nimport com.google.common.hash.HashCode;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport 
org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.MutableStorageMetadata;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.internal.MutableStorageMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.Payload;\n\n/**\n * This class implements the ability to split objects destined for specified\n * buckets across multiple backend buckets. The sharding is only applied to\n * the configured buckets. Each sharded bucket must specify the number of\n * shards in the form:\n *   s3proxy.sharded-blobstore.&lt;bucket name&gt;.shards=&lt;integer&gt;.\n * The number of shards is limited to 1000. An optional prefix can be\n * specified to use for shard names, like so:\n *   s3proxy.sharded-blobstore.&lt;bucket name&gt;.prefix=&lt;string&gt;.\n * The shards are named as follows: &lt;prefix&gt;-&lt;integer&gt;,\n * corresponding to the shards from 0 to the specified number. If a\n * &lt;prefix&gt; is not specified, the name of the bucket is used instead.\n *\n * Requests for all other buckets are passed through unchanged. Shards must\n * be pre-created either out of band or by issuing the CreateBucket API with\n * the sharded bucket name. 
The sharded bucket itself will not be\n * instantiated on the backend.\n */\nfinal class ShardedBlobStore extends ForwardingBlobStore {\n    public static final Pattern PROPERTIES_PREFIX_RE = Pattern.compile(\n            S3ProxyConstants.PROPERTY_SHARDED_BLOBSTORE +\n                    \"\\\\.(?<bucket>.*)\\\\.prefix$\");\n    private static final Pattern PROPERTIES_SHARDS_RE = Pattern.compile(\n            S3ProxyConstants.PROPERTY_SHARDED_BLOBSTORE +\n            \"\\\\.(?<bucket>.*)\\\\.shards$\");\n    private static final Pattern SHARD_RE = Pattern.compile(\n            \"(?<prefix>.*)-(?<shard>[0-9]+)$\");\n    private static final HashFunction SHARD_HASH = Hashing.murmur3_128();\n    private static final int MAX_SHARD_THREADS = 10;\n    private static final String SUPERBLOCK_VERSION = \"1.0\";\n    private static final String SUPERBLOCK_BLOB_NAME =\n            \".s3proxy-sharded-superblock\";\n    private static final int MAX_SHARDS = 1000;\n    private final Map<String, ShardedBucket> buckets;\n    private final Map<String, String> prefixMap;\n\n    private static final class ShardedBucket {\n        private final String prefix;\n        private final int shards;\n\n        private ShardedBucket(String name, int shards) {\n            this.prefix = Objects.requireNonNull(name);\n            this.shards = shards;\n        }\n    }\n\n    private ShardedBlobStore(BlobStore blobStore,\n                             Map<String, Integer> shards,\n                             Map<String, String> prefixes) {\n        super(blobStore);\n        Set<String> missingShards = Sets.difference(\n                prefixes.keySet(), shards.keySet());\n        if (!missingShards.isEmpty()) {\n            String allMissingShards = missingShards.stream().collect(\n                    Collectors.joining(\", \"));\n            throw new IllegalArgumentException(\n                    \"Number of shards unset for sharded buckets: %s\"\n                            
.formatted(allMissingShards));\n        }\n        var bucketsBuilder = new ImmutableMap.Builder<String, ShardedBucket>();\n        for (String bucket : shards.keySet()) {\n            String prefix = prefixes.get(bucket);\n            if (prefix == null) {\n                prefix = bucket;\n            }\n            bucketsBuilder.put(bucket, new ShardedBucket(prefix,\n                    shards.get(bucket)));\n        }\n        this.buckets = bucketsBuilder.build();\n\n        this.prefixMap = buckets.keySet().stream().collect(Collectors.toMap(\n                virtualBucket -> buckets.get(virtualBucket).prefix,\n                virtualBucket -> virtualBucket));\n    }\n\n    public static Map<String, Integer> parseBucketShards(\n            Properties properties) {\n        var shardsMap = new ImmutableMap.Builder<String, Integer>();\n        for (String key : properties.stringPropertyNames()) {\n            Matcher matcher = PROPERTIES_SHARDS_RE.matcher(key);\n            if (!matcher.matches()) {\n                continue;\n            }\n            String bucket = matcher.group(\"bucket\");\n            int shards = Integer.parseInt(properties.getProperty(key));\n            checkArgument(shards > 0 && shards < MAX_SHARDS,\n                    \"number of shards must be between 1 and 1000 for %s\",\n                        bucket);\n            shardsMap.put(bucket, shards);\n        }\n        return shardsMap.build();\n    }\n\n    public static Map<String, String> parsePrefixes(Properties properties) {\n        var prefixesMap = new ImmutableMap.Builder<String, String>();\n        for (String key : properties.stringPropertyNames()) {\n            Matcher matcher = PROPERTIES_PREFIX_RE.matcher(key);\n            if (!matcher.matches()) {\n                continue;\n            }\n            prefixesMap.put(matcher.group(\"bucket\"),\n                    properties.getProperty(key));\n        }\n        return prefixesMap.build();\n    }\n\n    static 
ShardedBlobStore newShardedBlobStore(\n            BlobStore blobStore,\n            Map<String, Integer> shards,\n            Map<String, String> prefixes) {\n        return new ShardedBlobStore(blobStore, shards, prefixes);\n    }\n\n    private Map<String, String> createSuperblockMeta(ShardedBucket bucket) {\n        return Map.of(\n                \"s3proxy-sharded-superblock-version\", SUPERBLOCK_VERSION,\n                \"s3proxy-sharded-superblock-prefix\", bucket.prefix,\n                \"s3proxy-sharded-superblock-shards\",\n                Integer.toString(bucket.shards));\n    }\n\n    private static String getShardContainer(ShardedBucket bucket, int shard) {\n        return \"%s-%d\".formatted(bucket.prefix, shard);\n    }\n\n    private String getShard(String containerName, String blob) {\n        ShardedBucket bucket = buckets.get(containerName);\n        if (bucket == null) {\n            return containerName;\n        }\n        HashCode hash = SHARD_HASH.hashString(blob, StandardCharsets.UTF_8);\n        return ShardedBlobStore.getShardContainer(\n                bucket, Hashing.consistentHash(hash, bucket.shards));\n    }\n\n    private void checkSuperBlock(Blob blob, Map<String, String> expectedMeta,\n                                 String container) {\n        Map<String, String> currentSuperblockMeta =\n                blob.getMetadata().getUserMetadata();\n        for (var entry : expectedMeta.entrySet()) {\n            String current = currentSuperblockMeta.get(entry.getKey());\n            String expected = entry.getValue();\n            if (!expected.equalsIgnoreCase(current)) {\n                throw new RuntimeException(\n                        \"Superblock block for %s does not match: %s, %s\".formatted(\n                        container, expected, current));\n            }\n        }\n    }\n\n    private boolean createShards(ShardedBucket bucket, Location location,\n                                 CreateContainerOptions options) 
{\n        var futuresBuilder = new ImmutableList.Builder<Future<Boolean>>();\n        ExecutorService executor = Executors.newFixedThreadPool(\n                Math.min(bucket.shards, MAX_SHARD_THREADS));\n        BlobStore blobStore = this.delegate();\n        for (int n = 0; n < bucket.shards; ++n) {\n            String shardContainer = ShardedBlobStore.getShardContainer(\n                    bucket, n);\n            futuresBuilder.add(executor.submit(\n                () -> blobStore.createContainerInLocation(\n                        location, shardContainer, options)));\n        }\n        var futures = futuresBuilder.build();\n        executor.shutdown();\n        boolean ret = true;\n        for (Future<Boolean> future : futures) {\n            try {\n                ret &= future.get();\n            } catch (InterruptedException | ExecutionException e) {\n                throw new RuntimeException(\"Failed to create some shards\", e);\n            }\n        }\n\n        return ret;\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n                                             String container) {\n        return createContainerInLocation(\n                location, container, CreateContainerOptions.NONE);\n    }\n\n    @SuppressWarnings(\"EmptyCatch\")\n    @Override\n    public boolean createContainerInLocation(\n            Location location, String container,\n            CreateContainerOptions createContainerOptions) {\n\n        ShardedBucket bucket = this.buckets.get(container);\n        if (bucket == null) {\n            return this.delegate().createContainerInLocation(\n                    location, container, createContainerOptions);\n        }\n\n        Map<String, String> superblockMeta = this.createSuperblockMeta(bucket);\n        Blob superblockBlob = null;\n        try {\n            superblockBlob = this.delegate().getBlob(\n                    ShardedBlobStore.getShardContainer(bucket, 0),\n           
         SUPERBLOCK_BLOB_NAME);\n        } catch (ContainerNotFoundException ignored) {\n        }\n        if (superblockBlob != null) {\n            checkSuperBlock(superblockBlob, superblockMeta, container);\n        }\n\n        boolean ret = createShards(bucket, location, createContainerOptions);\n\n        // Upload the superblock\n        if (superblockBlob == null) {\n            superblockBlob = this.delegate().blobBuilder(SUPERBLOCK_BLOB_NAME)\n                    .payload(\"\")\n                    .userMetadata(superblockMeta)\n                    .build();\n            this.delegate().putBlob(ShardedBlobStore.getShardContainer(\n                    bucket, 0), superblockBlob);\n        }\n\n        return ret;\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        PageSet<? extends StorageMetadata> upstream = this.delegate().list();\n        var results = new ImmutableList.Builder<StorageMetadata>();\n        Set<String> virtualBuckets = new HashSet<>();\n        for (StorageMetadata sm : upstream) {\n            Matcher matcher = SHARD_RE.matcher(sm.getName());\n            if (!matcher.matches()) {\n                results.add(sm);\n                continue;\n            }\n            String prefix = matcher.group(\"prefix\");\n            String virtualBucketName = this.prefixMap.get(prefix);\n            if (virtualBucketName == null) {\n                results.add(sm);\n                continue;\n            }\n            if (!virtualBuckets.contains(prefix)) {\n                virtualBuckets.add(prefix);\n                MutableStorageMetadata virtualBucket =\n                        new MutableStorageMetadataImpl();\n                virtualBucket.setCreationDate(sm.getCreationDate());\n                virtualBucket.setETag(sm.getETag());\n                virtualBucket.setId(sm.getProviderId());\n                virtualBucket.setLastModified(sm.getLastModified());\n                
virtualBucket.setLocation(sm.getLocation());\n                virtualBucket.setName(virtualBucketName);\n                virtualBucket.setSize(sm.getSize());\n                virtualBucket.setTier(sm.getTier());\n                virtualBucket.setType(sm.getType());\n                virtualBucket.setUri(sm.getUri());\n                // copy the user metadata from the first shard as part\n                // of the response\n                virtualBucket.setUserMetadata(sm.getUserMetadata());\n                results.add(virtualBucket);\n            }\n        }\n        return new PageSetImpl<>(results.build(), upstream.getNextMarker());\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list(String container) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().list(container);\n        }\n        // TODO: implement listing a sharded container\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(\n            String container,\n            ListContainerOptions options) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().list(container, options);\n        }\n        // TODO: implement listing a sharded container\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().containerExists(container);\n        }\n        return true;\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().getContainerAccess(container);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void setContainerAccess(String container,\n                                   ContainerAccess containerAccess) {\n        if (!this.buckets.containsKey(container)) {\n            this.delegate().setContainerAccess(container, containerAccess);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void clearContainer(String container) {\n        clearContainer(container, new ListContainerOptions());\n    }\n\n    @Override\n    public void clearContainer(String container, ListContainerOptions options) {\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    private boolean deleteShards(ShardedBucket bucket) {\n        var futuresBuilder = new ImmutableList.Builder<Future<Boolean>>();\n        ExecutorService executor = Executors.newFixedThreadPool(\n                Math.min(bucket.shards, MAX_SHARD_THREADS));\n        for (int n = 0; 
n < bucket.shards; ++n) {\n            String shard = ShardedBlobStore.getShardContainer(bucket, n);\n            futuresBuilder.add(executor.submit(\n                () -> this.delegate().deleteContainerIfEmpty(shard)));\n        }\n        executor.shutdown();\n        var futures = futuresBuilder.build();\n        boolean ret = true;\n        for (Future<Boolean> future : futures) {\n            try {\n                ret &= future.get();\n            } catch (InterruptedException | ExecutionException e) {\n                throw new RuntimeException(\"Failed to delete shards\", e);\n            }\n        }\n\n        return ret;\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        ShardedBucket bucket = this.buckets.get(container);\n        if (bucket == null) {\n            return this.delegate().deleteContainerIfEmpty(container);\n        }\n\n        String zeroShardContainer = ShardedBlobStore.getShardContainer(\n                bucket, 0);\n        PageSet<? 
extends StorageMetadata> listing = this.delegate().list(\n                zeroShardContainer);\n        if (listing.size() > 1) {\n            return false;\n        }\n        StorageMetadata sm = listing.iterator().next();\n        if (!sm.getName().equals(SUPERBLOCK_BLOB_NAME)) {\n            return false;\n        }\n        // Remove the superblock\n        this.delegate().removeBlob(zeroShardContainer, SUPERBLOCK_BLOB_NAME);\n        return this.deleteShards(bucket);\n    }\n\n    @Override\n    public boolean directoryExists(String container, String directory) {\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void createDirectory(String container, String directory) {\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void deleteDirectory(String container, String directory) {\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public boolean blobExists(String container, String name) {\n        return this.delegate().blobExists(this.getShard(container, name), name);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return this.delegate().putBlob(this.getShard(containerName,\n                blob.getMetadata().getName()), blob);\n    }\n\n    @Override\n    public String putBlob(final String containerName, Blob blob,\n                          final PutOptions putOptions) {\n        return this.delegate().putBlob(\n                this.getShard(containerName, blob.getMetadata().getName()),\n                blob, putOptions);\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n                           String toContainer, String toName,\n                           CopyOptions options) {\n        String srcShard = this.getShard(fromContainer, fromName);\n        String dstShard = this.getShard(toContainer, toName);\n      
  return this.delegate().copyBlob(srcShard, fromName,\n                dstShard, toName, options);\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n        return this.delegate().blobMetadata(this.getShard(container, name),\n                name);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName) {\n        return this.delegate().getBlob(this.getShard(containerName, blobName),\n                blobName);\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String blobName,\n                        GetOptions getOptions) {\n        return this.delegate()\n                .getBlob(this.getShard(containerName, blobName), blobName,\n                        getOptions);\n    }\n\n    @Override\n    public void removeBlob(String container, String name) {\n        this.delegate().removeBlob(this.getShard(container, name), name);\n    }\n\n    @Override\n    public void removeBlobs(String container, Iterable<String> iterable) {\n        if (!this.buckets.containsKey(container)) {\n            this.delegate().removeBlobs(container, iterable);\n            return;\n        }\n\n        Map<String, List<String>> shardMap = new HashMap<>();\n        for (String blob : iterable) {\n            List<String> shardBlobs =\n                    shardMap.computeIfAbsent(this.getShard(container, blob),\n                        k -> new ArrayList<>());\n            shardBlobs.add(blob);\n        }\n\n        for (var entry : shardMap.entrySet()) {\n            this.delegate().removeBlobs(entry.getKey(), entry.getValue());\n        }\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String name) {\n        return this.delegate()\n                .getBlobAccess(this.getShard(container, name), name);\n    }\n\n    @Override\n    public void setBlobAccess(String container, String name,\n                              BlobAccess access) {\n        this.delegate()\n                
.setBlobAccess(this.getShard(container, name), name, access);\n    }\n\n    @Override\n    public long countBlobs(String container) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().countBlobs(container);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public long countBlobs(String container, ListContainerOptions options) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().countBlobs(container, options);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n                                                   BlobMetadata blobMetadata,\n                                                   PutOptions options) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate()\n                    .initiateMultipartUpload(container, blobMetadata, options);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        if (!this.buckets.containsKey(mpu.containerName())) {\n            this.delegate().abortMultipartUpload(mpu);\n            return;\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n                                          List<MultipartPart> parts) {\n        if (!this.buckets.containsKey(mpu.containerName())) {\n            return this.delegate().completeMultipartUpload(mpu, parts);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n                                             int partNumber, Payload payload) {\n        if 
(!this.buckets.containsKey(mpu.containerName())) {\n            return this.delegate()\n                    .uploadMultipartPart(mpu, partNumber, payload);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        if (!this.buckets.containsKey(mpu.containerName())) {\n            return this.delegate().listMultipartUpload(mpu);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        if (!this.buckets.containsKey(container)) {\n            return this.delegate().listMultipartUploads(container);\n        }\n        throw new UnsupportedOperationException(\"sharded bucket\");\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination) {\n        this.delegate().downloadBlob(this.getShard(container, name), name,\n                destination);\n    }\n\n    @Override\n    public void downloadBlob(String container, String name, File destination,\n                             ExecutorService executor) {\n        this.delegate()\n                .downloadBlob(this.getShard(container, name), name, destination,\n                        executor);\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        return this.delegate().streamBlob(this.getShard(container, name), name);\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name,\n                                  ExecutorService executor) {\n        return this.delegate()\n                .streamBlob(this.getShard(container, name), name, executor);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/StorageClassBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.domain.internal.BlobMetadataImpl;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\nimport org.jclouds.s3.domain.ObjectMetadata.StorageClass;\n\n/**\n * This class implements a middleware to set the storage tier when creating\n * objects.  The class is configured via:\n *\n *   s3proxy.storage-class-blobstore = VALUE\n *\n * VALUE can be anything from org.jclouds.s3.domain.StorageClass, e.g.,\n * STANDARD, STANDARD_IA, GLACIER_IR, DEEP_ARCHIVE.  Some values do not\n * translate exactly due to jclouds limitations, e.g., REDUCED_REDUNDANCY maps\n * to STANDARD.  
This mapping is best effort especially for non-S3 object\n * stores.\n */\npublic final class StorageClassBlobStore extends ForwardingBlobStore {\n    private final Tier tier;\n\n    private StorageClassBlobStore(BlobStore delegate,\n            String storageClassString) {\n        super(delegate);\n        StorageClass storageClass;\n        try {\n            storageClass = StorageClass.valueOf(\n                    storageClassString.toUpperCase());\n        } catch (IllegalArgumentException iae) {\n            storageClass = StorageClass.STANDARD;\n        }\n        this.tier = storageClass.toTier();\n    }\n\n    static StorageClassBlobStore newStorageClassBlobStore(BlobStore blobStore,\n            String storageClass) {\n        return new StorageClassBlobStore(blobStore, storageClass);\n    }\n\n    public Tier getTier() {\n        return tier;\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        var newBlob = replaceTier(containerName, blob);\n        return delegate().putBlob(containerName, newBlob);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n            PutOptions options) {\n        var newBlob = replaceTier(containerName, blob);\n        return delegate().putBlob(containerName, newBlob, options);\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(\n            String container, BlobMetadata blobMetadata, PutOptions options) {\n        var newBlobMetadata = replaceTier(blobMetadata);\n        return delegate().initiateMultipartUpload(container, newBlobMetadata,\n                options);\n    }\n\n    private Blob replaceTier(String containerName, Blob blob) {\n        var blobMeta = blob.getMetadata();\n        var contentMeta = blob.getMetadata().getContentMetadata();\n        return blobBuilder(containerName)\n                .name(blobMeta.getName())\n                .type(blobMeta.getType())\n                .tier(tier)\n                
.userMetadata(blobMeta.getUserMetadata())\n                .payload(blob.getPayload())\n                .cacheControl(contentMeta.getCacheControl())\n                .contentDisposition(contentMeta.getContentDisposition())\n                .contentEncoding(contentMeta.getContentEncoding())\n                .contentLanguage(contentMeta.getContentLanguage())\n                .contentType(contentMeta.getContentType())\n                .build();\n    }\n\n    private BlobMetadata replaceTier(BlobMetadata meta) {\n        return new BlobMetadataImpl(meta.getProviderId(), meta.getName(),\n                meta.getLocation(), meta.getUri(), meta.getETag(),\n                meta.getCreationDate(), meta.getLastModified(),\n                meta.getUserMetadata(), meta.getPublicUri(),\n                meta.getContainer(), meta.getContentMetadata(), meta.getSize(),\n                tier);\n    }\n\n    // TODO: copyBlob\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/ThrottledInputStream.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.FilterInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\n\nfinal class ThrottledInputStream extends FilterInputStream {\n    private final Long speed;\n\n    ThrottledInputStream(InputStream is, Long speed) {\n        super(is);\n        this.speed = speed;\n    }\n\n    @Override\n    public int read() throws IOException {\n        int b = super.read();\n        if (b != -1) {\n            simulateLatency(1);\n        }\n        return b;\n    }\n\n    @Override\n    public int read(byte[] b, int off, int len) throws IOException {\n        int n = super.read(b, off, len);\n        if (n != -1) {\n            simulateLatency(n);\n        }\n        return n;\n    }\n\n    private void simulateLatency(int size) {\n        if (size == 0 || speed == null) {\n            return;\n        }\n        try {\n            Thread.sleep(size / speed, (int) (size % speed) * 1_000_000);\n        } catch (InterruptedException e) {\n            Thread.currentThread().interrupt();\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/UserMetadataReplacerBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static com.google.common.base.Preconditions.checkArgument;\n\nimport com.google.common.collect.ImmutableMap;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.MutableBlobMetadata;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.ForwardingBlobStore;\n\n/**\n * BlobStore which maps user metadata keys and values using character\n * replacement.  This is useful for some object stores like Azure which do not\n * allow characters like hyphens.  
This munges keys and values during putBlob\n * and unmunges them on getBlob.\n */\nfinal class UserMetadataReplacerBlobStore extends ForwardingBlobStore {\n    private final String fromChars;\n    private final String toChars;\n\n    private UserMetadataReplacerBlobStore(\n            BlobStore blobStore, String fromChars, String toChars) {\n        super(blobStore);\n        checkArgument(fromChars.length() == toChars.length());\n        this.fromChars = fromChars;\n        this.toChars = toChars;\n    }\n\n    public static BlobStore newUserMetadataReplacerBlobStore(\n            BlobStore blobStore, String fromChars, String toChars) {\n        return new UserMetadataReplacerBlobStore(blobStore, fromChars, toChars);\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob) {\n        return putBlob(containerName, blob, new PutOptions());\n    }\n\n    @Override\n    public String putBlob(String containerName, Blob blob,\n            PutOptions putOptions) {\n        var metadata = ImmutableMap.<String, String>builder();\n        for (var entry : blob.getMetadata().getUserMetadata().entrySet()) {\n            metadata.put(replaceChars(entry.getKey(), fromChars, toChars),\n                    replaceChars(entry.getValue(), fromChars, toChars));\n        }\n        // TODO: should this modify the parameter?\n        blob.getMetadata().setUserMetadata(metadata.build());\n        return super.putBlob(containerName, blob, putOptions);\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String name) {\n        var blobMetadata = super.blobMetadata(container, name);\n        if (blobMetadata == null) {\n            return null;\n        }\n\n        var metadata = ImmutableMap.<String, String>builder();\n        // TODO: duplication\n        for (var entry : blobMetadata.getUserMetadata().entrySet()) {\n            metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ toChars, /*toChars=*/ fromChars),\n             
       replaceChars(entry.getValue(), /*fromChars=*/ toChars, /*toChars=*/ fromChars));\n        }\n        ((MutableBlobMetadata) blobMetadata).setUserMetadata(metadata.build());\n        return blobMetadata;\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String name) {\n        return getBlob(containerName, name, new GetOptions());\n    }\n\n    @Override\n    public Blob getBlob(String containerName, String name,\n            GetOptions getOptions) {\n        var blob = super.getBlob(containerName, name, getOptions);\n        if (blob == null) {\n            return null;\n        }\n\n        var metadata = ImmutableMap.<String, String>builder();\n        for (var entry : blob.getMetadata().getUserMetadata().entrySet()) {\n            metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ toChars, /*toChars=*/ fromChars),\n                    replaceChars(entry.getValue(), /*fromChars=*/ toChars, /*toChars=*/ fromChars));\n        }\n        blob.getMetadata().setUserMetadata(metadata.build());\n        return blob;\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions overrides) {\n        var metadata = ImmutableMap.<String, String>builder();\n        for (var entry : blobMetadata.getUserMetadata().entrySet()) {\n            metadata.put(replaceChars(entry.getKey(), /*fromChars=*/ fromChars, /*toChars=*/ toChars),\n                    replaceChars(entry.getValue(), /*fromChars=*/ fromChars, /*toChars=*/ toChars));\n        }\n        ((MutableBlobMetadata) blobMetadata).setUserMetadata(metadata.build());\n        return super.initiateMultipartUpload(container, blobMetadata,\n                overrides);\n    }\n\n    private static String replaceChars(String value, String fromChars,\n            String toChars) {\n        var builder = new StringBuilder(/*capacity=*/ value.length());\n        for (int i = 0; i < value.length(); ++i) {\n           
 char ch = value.charAt(i);\n            int index = fromChars.indexOf(ch);\n            builder.append(index == -1 ? ch : toChars.charAt(index));\n        }\n        return builder.toString();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkApiMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.awssdk;\n\nimport java.net.URI;\nimport java.util.Properties;\nimport java.util.Set;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.reflect.Reflection2;\nimport org.jclouds.rest.internal.BaseHttpApiMetadata;\n\n\n@SuppressWarnings(\"rawtypes\")\npublic final class AwsS3SdkApiMetadata extends BaseHttpApiMetadata {\n    public static final String REGION = \"aws-s3-sdk.region\";\n\n    /**\n     * Property for conditional writes mode.\n     * Values: \"native\" (default) - use If-Match/If-None-Match headers directly\n     *         \"emulated\" - validate via HEAD request before PUT\n     */\n    public static final String CONDITIONAL_WRITES =\n            \"aws-s3-sdk.conditional-writes\";\n\n    /**\n     * Property for enabling chunked encoding (default: true).\n     * When false, sends \"x-amz-content-sha256: UNSIGNED-PAYLOAD\" instead of\n     * streaming signatures. 
Disable for S3-compatible backends that don't\n     * support aws-chunked encoding (e.g., some Ceph RGW versions).\n     */\n    public static final String CHUNKED_ENCODING_ENABLED =\n            \"aws-s3-sdk.chunked-encoding\";\n\n    /**\n     * Property for stripping quotes from ETag values in conditional headers.\n     * Enable for S3-compatible backends with Ceph Reef bug that requires\n     * unquoted ETags in If-Match/If-None-Match headers.\n     * See: https://tracker.ceph.com/issues/68712\n     * TODO: Can be removed after 2027-01-01 - by then every provider should\n     * have migrated to a newer Ceph version (including Hetzner).\n     */\n    public static final String STRIP_ETAG_QUOTES =\n            \"aws-s3-sdk.strip-etag-quotes\";\n\n    public AwsS3SdkApiMetadata() {\n        this(builder());\n    }\n\n    protected AwsS3SdkApiMetadata(Builder builder) {\n        super(builder);\n    }\n\n    private static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromApiMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        Properties properties = BaseHttpApiMetadata.defaultProperties();\n        properties.setProperty(REGION, \"us-east-1\");\n        properties.setProperty(CONDITIONAL_WRITES, \"native\");\n        properties.setProperty(CHUNKED_ENCODING_ENABLED, \"true\");\n        properties.setProperty(STRIP_ETAG_QUOTES, \"false\");\n        return properties;\n    }\n\n    // Fake API client - required by jclouds but not actually used\n    private interface AwsS3SdkClient {\n    }\n\n    public static final class Builder\n            extends BaseHttpApiMetadata.Builder<AwsS3SdkClient, Builder> {\n        protected Builder() {\n            super(AwsS3SdkClient.class);\n            id(\"aws-s3-sdk\")\n                .name(\"AWS S3 SDK Backend\")\n                .identityName(\"Access Key ID\")\n                .credentialName(\"Secret 
Access Key\")\n                .version(\"2006-03-01\")\n                .defaultEndpoint(\"https://s3.amazonaws.com\")\n                .documentation(URI.create(\n                        \"https://docs.aws.amazon.com/AmazonS3/latest/\" +\n                        \"API/Welcome.html\"))\n                .defaultProperties(AwsS3SdkApiMetadata.defaultProperties())\n                .view(Reflection2.typeToken(BlobStoreContext.class))\n                .defaultModules(Set.of(AwsS3SdkBlobStoreContextModule.class));\n        }\n\n        @Override\n        public AwsS3SdkApiMetadata build() {\n            return new AwsS3SdkApiMetadata(this);\n        }\n\n        @Override\n        protected Builder self() {\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.awssdk;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.net.URI;\nimport java.time.Instant;\nimport java.util.Base64;\nimport java.util.Comparator;\nimport java.util.Date;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\n\nimport com.google.common.base.Supplier;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.collect.Streams;\nimport com.google.common.net.HttpHeaders;\n\nimport jakarta.inject.Inject;\nimport jakarta.inject.Named;\nimport jakarta.inject.Singleton;\n\nimport org.gaul.s3proxy.PutOptions2;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.StorageType;\nimport org.jclouds.blobstore.domain.Tier;\nimport 
org.jclouds.blobstore.domain.internal.BlobBuilderImpl;\nimport org.jclouds.blobstore.domain.internal.BlobMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.domain.internal.StorageMetadataImpl;\nimport org.jclouds.blobstore.internal.BaseBlobStore;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.http.HttpCommand;\nimport org.jclouds.http.HttpRequest;\nimport org.jclouds.http.HttpResponse;\nimport org.jclouds.http.HttpResponseException;\nimport org.jclouds.io.ContentMetadataBuilder;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.PayloadSlicer;\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.rest.AuthorizationException;\nimport org.jspecify.annotations.Nullable;\n\nimport software.amazon.awssdk.auth.credentials.AwsBasicCredentials;\nimport software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;\nimport software.amazon.awssdk.awscore.exception.AwsErrorDetails;\nimport software.amazon.awssdk.core.checksums.RequestChecksumCalculation;\nimport software.amazon.awssdk.core.checksums.ResponseChecksumValidation;\nimport software.amazon.awssdk.core.sync.RequestBody;\nimport software.amazon.awssdk.regions.Region;\nimport software.amazon.awssdk.services.s3.S3Client;\nimport software.amazon.awssdk.services.s3.S3ClientBuilder;\nimport software.amazon.awssdk.services.s3.S3Configuration;\nimport software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;\nimport software.amazon.awssdk.services.s3.model.Bucket;\nimport software.amazon.awssdk.services.s3.model.BucketCannedACL;\nimport 
software.amazon.awssdk.services.s3.model.CommonPrefix;\nimport software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;\nimport software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;\nimport software.amazon.awssdk.services.s3.model.CompletedPart;\nimport software.amazon.awssdk.services.s3.model.CopyObjectRequest;\nimport software.amazon.awssdk.services.s3.model.CreateBucketConfiguration;\nimport software.amazon.awssdk.services.s3.model.CreateBucketRequest;\nimport software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;\nimport software.amazon.awssdk.services.s3.model.DeleteBucketRequest;\nimport software.amazon.awssdk.services.s3.model.DeleteObjectRequest;\nimport software.amazon.awssdk.services.s3.model.GetBucketAclRequest;\nimport software.amazon.awssdk.services.s3.model.GetObjectAclRequest;\nimport software.amazon.awssdk.services.s3.model.GetObjectRequest;\nimport software.amazon.awssdk.services.s3.model.Grant;\nimport software.amazon.awssdk.services.s3.model.HeadBucketRequest;\nimport software.amazon.awssdk.services.s3.model.HeadObjectRequest;\nimport software.amazon.awssdk.services.s3.model.HeadObjectResponse;\nimport software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;\nimport software.amazon.awssdk.services.s3.model.ListObjectsV2Request;\nimport software.amazon.awssdk.services.s3.model.ListPartsRequest;\nimport software.amazon.awssdk.services.s3.model.NoSuchBucketException;\nimport software.amazon.awssdk.services.s3.model.NoSuchKeyException;\nimport software.amazon.awssdk.services.s3.model.ObjectCannedACL;\nimport software.amazon.awssdk.services.s3.model.Part;\nimport software.amazon.awssdk.services.s3.model.Permission;\nimport software.amazon.awssdk.services.s3.model.PutBucketAclRequest;\nimport software.amazon.awssdk.services.s3.model.PutObjectAclRequest;\nimport software.amazon.awssdk.services.s3.model.PutObjectRequest;\nimport software.amazon.awssdk.services.s3.model.S3Exception;\nimport 
software.amazon.awssdk.services.s3.model.S3Object;\nimport software.amazon.awssdk.services.s3.model.StorageClass;\nimport software.amazon.awssdk.services.s3.model.Type;\nimport software.amazon.awssdk.services.s3.model.UploadPartRequest;\n\n@Singleton\npublic final class AwsS3SdkBlobStore extends BaseBlobStore {\n    private final S3Client s3Client;\n    private final String endpoint;\n    private final boolean useNativeConditionalWrites;\n    private final boolean stripETagQuotes;\n    private final Region awsRegion;\n\n    @Inject\n    AwsS3SdkBlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            ProviderMetadata provider,\n            @Named(AwsS3SdkApiMetadata.REGION) String region,\n            @Named(AwsS3SdkApiMetadata.CONDITIONAL_WRITES)\n                String conditionalWrites,\n            @Named(AwsS3SdkApiMetadata.CHUNKED_ENCODING_ENABLED)\n                String chunkedEncodingEnabled,\n            @Named(AwsS3SdkApiMetadata.STRIP_ETAG_QUOTES)\n                String stripETagQuotes) {\n        super(context, blobUtils, defaultLocation, locations, slicer);\n        this.endpoint = provider.getEndpoint();\n        this.awsRegion = Region.of(region);\n        this.useNativeConditionalWrites = !\"emulated\".equalsIgnoreCase(\n                conditionalWrites);\n        this.stripETagQuotes = Boolean.parseBoolean(stripETagQuotes);\n        var cred = creds.get();\n\n        S3ClientBuilder builder = S3Client.builder();\n\n        builder.serviceConfiguration(S3Configuration.builder()\n                .chunkedEncodingEnabled(Boolean.valueOf(chunkedEncodingEnabled))\n                .build());\n\n        // Disable checksum calculation to avoid reading the stream twice.\n        // This allows streaming non-resettable 
InputStreams to S3-compatible\n        // backends that don't support aws-chunked encoding.\n        builder.requestChecksumCalculation(RequestChecksumCalculation.WHEN_REQUIRED);\n        builder.responseChecksumValidation(ResponseChecksumValidation.WHEN_REQUIRED);\n\n        if (cred.identity != null && !cred.identity.isEmpty() &&\n                cred.credential != null && !cred.credential.isEmpty()) {\n            builder.credentialsProvider(StaticCredentialsProvider.create(\n                    AwsBasicCredentials.create(cred.identity, cred.credential)));\n        }\n\n        if (endpoint != null && !endpoint.isEmpty()) {\n            URI endpointUri = URI.create(endpoint);\n            builder.endpointOverride(endpointUri);\n\n            // Use path-style for non-AWS endpoints (Hetzner, MinIO, etc.)\n            String host = endpointUri.getHost();\n            if (host != null && !host.endsWith(\".amazonaws.com\")) {\n                builder.forcePathStyle(true);\n            }\n        }\n\n        builder.region(this.awsRegion);\n\n        this.s3Client = builder.build();\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        try {\n            var set = ImmutableSet.<StorageMetadata>builder();\n            for (Bucket bucket : s3Client.listBuckets().buckets()) {\n                set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null,\n                        bucket.name(), /*location=*/ null, /*uri=*/ null,\n                        /*eTag=*/ null,\n                        toDate(bucket.creationDate()),\n                        toDate(bucket.creationDate()),\n                        Map.of(), /*size=*/ null,\n                        Tier.STANDARD));\n            }\n            return new PageSetImpl<StorageMetadata>(set.build(), null);\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, null, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(String container,\n            ListContainerOptions options) {\n        var requestBuilder = ListObjectsV2Request.builder()\n                .bucket(container);\n\n        if (options.getPrefix() != null) {\n            requestBuilder.prefix(options.getPrefix());\n        }\n        if (options.getDelimiter() != null) {\n            requestBuilder.delimiter(options.getDelimiter());\n        }\n        if (options.getMarker() != null) {\n            requestBuilder.startAfter(options.getMarker());\n        }\n        int maxKeys = options.getMaxResults() != null ?\n                options.getMaxResults() : 1000;\n        if (maxKeys == 0) {\n            return new PageSetImpl<StorageMetadata>(ImmutableSet.of(), null);\n        }\n        requestBuilder.maxKeys(maxKeys);\n\n        try {\n            var response = s3Client.listObjectsV2(requestBuilder.build());\n\n            var set = ImmutableSet.<StorageMetadata>builder();\n            String nextMarker = null;\n\n            for (S3Object obj : response.contents()) {\n                set.add(new StorageMetadataImpl(StorageType.BLOB,\n                        /*id=*/ null, obj.key(), /*location=*/ null,\n                        /*uri=*/ null, obj.eTag(),\n                        toDate(obj.lastModified()),\n                        toDate(obj.lastModified()),\n                        Map.of(),\n                        obj.size(),\n                        toTier(obj.storageClass())));\n            }\n\n            for (CommonPrefix prefix : response.commonPrefixes()) {\n                set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH,\n                        /*id=*/ null, prefix.prefix(), /*location=*/ null,\n                        /*uri=*/ null, /*eTag=*/ null,\n                        /*creationDate=*/ null,\n                        /*lastModified=*/ null,\n                        Map.of(),\n                        /*size=*/ 0L,\n                        Tier.STANDARD));\n    
        }\n\n            if (response.isTruncated()) {\n                if (!response.contents().isEmpty()) {\n                    nextMarker = Streams.findLast(response.contents().stream())\n                            .orElseThrow().key();\n                } else if (!response.commonPrefixes().isEmpty()) {\n                    nextMarker = Streams.findLast(\n                            response.commonPrefixes().stream())\n                            .orElseThrow().prefix();\n                }\n            }\n\n            return new PageSetImpl<StorageMetadata>(set.build(), nextMarker);\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        try {\n            s3Client.headBucket(HeadBucketRequest.builder()\n                    .bucket(container)\n                    .build());\n            return true;\n        } catch (NoSuchBucketException e) {\n            return false;\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                return false;\n            }\n            throw e;\n        }\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container) {\n        return createContainerInLocation(location, container,\n                new CreateContainerOptions());\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        if (options == null) {\n            options = new CreateContainerOptions();\n        }\n        try {\n            var requestBuilder = CreateBucketRequest.builder()\n                    .bucket(container);\n            if (!Region.US_EAST_1.equals(awsRegion)) {\n        
        requestBuilder.createBucketConfiguration(\n                        CreateBucketConfiguration.builder()\n                                .locationConstraint(awsRegion.id())\n                                .build());\n            }\n            s3Client.createBucket(requestBuilder.build());\n            if (options.isPublicRead()) {\n                setContainerAccess(container, ContainerAccess.PUBLIC_READ);\n            }\n            return true;\n        } catch (S3Exception e) {\n            if (e.statusCode() == 409) {\n                String errorCode = e.awsErrorDetails() != null ?\n                        e.awsErrorDetails().errorCode() :\n                        null;\n                if (\"BucketAlreadyOwnedByYou\".equals(errorCode)) {\n                    // Idempotent success - bucket exists and caller owns it\n                    return false;\n                }\n                if (\"BucketAlreadyExists\".equals(errorCode)) {\n                    // Bucket exists but is owned by someone else\n                    throw new AuthorizationException(\n                            \"Bucket already exists: \" + container, e);\n                }\n            }\n            translateAndRethrowException(e, container, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        try {\n            clearContainer(container);\n            s3Client.deleteBucket(DeleteBucketRequest.builder()\n                    .bucket(container)\n                    .build());\n        } catch (NoSuchBucketException e) {\n            // Already deleted, ignore\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        try {\n            var response = s3Client.listObjectsV2(ListObjectsV2Request.builder()\n                    .bucket(container)\n   
                 .maxKeys(1)\n                    .build());\n            if (!response.contents().isEmpty()) {\n                return false;\n            }\n            s3Client.deleteBucket(DeleteBucketRequest.builder()\n                    .bucket(container)\n                    .build());\n            return true;\n        } catch (NoSuchBucketException e) {\n            return true;\n        } catch (S3Exception e) {\n            if (e.statusCode() == 409) {\n                // Bucket not empty\n                return false;\n            }\n            throw e;\n        }\n    }\n\n    @Override\n    public boolean blobExists(String container, String key) {\n        try {\n            s3Client.headObject(HeadObjectRequest.builder()\n                    .bucket(container)\n                    .key(key)\n                    .build());\n            return true;\n        } catch (NoSuchKeyException e) {\n            return false;\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                return false;\n            }\n            throw e;\n        }\n    }\n\n    @Override\n    public Blob getBlob(String container, String key, GetOptions options) {\n        var requestBuilder = GetObjectRequest.builder()\n                .bucket(container)\n                .key(key);\n\n        if (!options.getRanges().isEmpty()) {\n            String rangeSpec = options.getRanges().get(0);\n            requestBuilder.range(\"bytes=\" + rangeSpec);\n        }\n\n        if (options.getIfMatch() != null) {\n            requestBuilder.ifMatch(maybeStripETagQuotes(options.getIfMatch()));\n        }\n        if (options.getIfNoneMatch() != null) {\n            requestBuilder.ifNoneMatch(\n                    maybeStripETagQuotes(options.getIfNoneMatch()));\n        }\n        if (options.getIfModifiedSince() != null) {\n            requestBuilder.ifModifiedSince(\n                    options.getIfModifiedSince().toInstant());\n        }\n        if 
(options.getIfUnmodifiedSince() != null) {\n            requestBuilder.ifUnmodifiedSince(\n                    options.getIfUnmodifiedSince().toInstant());\n        }\n\n        try {\n            var responseStream = s3Client.getObject(requestBuilder.build());\n            var response = responseStream.response();\n\n            var blob = new BlobBuilderImpl()\n                    .name(key)\n                    .userMetadata(response.metadata())\n                    .payload(responseStream)\n                    .cacheControl(response.cacheControl())\n                    .contentDisposition(response.contentDisposition())\n                    .contentEncoding(response.contentEncoding())\n                    .contentLanguage(response.contentLanguage())\n                    .contentLength(response.contentLength())\n                    .contentType(response.contentType())\n                    .expires(response.expires() != null ?\n                            Date.from(response.expires()) : null)\n                    .build();\n\n            if (response.contentRange() != null) {\n                blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE,\n                        response.contentRange());\n            }\n\n            var metadata = blob.getMetadata();\n            metadata.setETag(response.eTag());\n            if (response.lastModified() != null) {\n                metadata.setLastModified(Date.from(response.lastModified()));\n            }\n            metadata.setSize(response.contentLength());\n\n            return blob;\n        } catch (NoSuchKeyException e) {\n            throw new KeyNotFoundException(container, key, e.getMessage());\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            if (e.statusCode() == 304) {\n                var request = HttpRequest.builder()\n                        .method(\"GET\")\n                        
.endpoint(endpoint)\n                        .build();\n                var responseBuilder = HttpResponse.builder()\n                        .statusCode(304);\n\n                e.awsErrorDetails().sdkHttpResponse().firstMatchingHeader(\"ETag\")\n                        .ifPresent(etag -> responseBuilder.addHeader(HttpHeaders.ETAG, etag));\n\n                throw new HttpResponseException(\n                        new HttpCommand(request), responseBuilder.build(), e);\n            }\n            translateAndRethrowException(e, container, key);\n            throw e;\n        }\n    }\n\n    @Override\n    public String putBlob(String container, Blob blob) {\n        return putBlob(container, blob, new PutOptions());\n    }\n\n    @Override\n    public String putBlob(String container, Blob blob, PutOptions options) {\n        var contentMetadata = blob.getMetadata().getContentMetadata();\n        var requestBuilder = PutObjectRequest.builder()\n                .bucket(container)\n                .key(blob.getMetadata().getName());\n\n        if (contentMetadata.getCacheControl() != null) {\n            requestBuilder.cacheControl(contentMetadata.getCacheControl());\n        }\n        if (contentMetadata.getContentDisposition() != null) {\n            requestBuilder.contentDisposition(\n                    contentMetadata.getContentDisposition());\n        }\n        if (contentMetadata.getContentEncoding() != null) {\n            requestBuilder.contentEncoding(contentMetadata.getContentEncoding());\n        }\n        if (contentMetadata.getContentLanguage() != null) {\n            requestBuilder.contentLanguage(contentMetadata.getContentLanguage());\n        }\n        if (contentMetadata.getContentMD5() != null) {\n            requestBuilder.contentMD5(Base64.getEncoder().encodeToString(\n                    contentMetadata.getContentMD5()));\n        }\n        if (contentMetadata.getContentType() != null) {\n            
requestBuilder.contentType(contentMetadata.getContentType());\n        }\n        if (contentMetadata.getExpires() != null) {\n            requestBuilder.expires(contentMetadata.getExpires().toInstant());\n        }\n\n        var userMetadata = blob.getMetadata().getUserMetadata();\n        if (userMetadata != null && !userMetadata.isEmpty()) {\n            requestBuilder.metadata(userMetadata);\n        }\n\n        BlobAccess requestedAccess = options != null ? options.getBlobAccess() : null;\n        if (requestedAccess == BlobAccess.PUBLIC_READ) {\n            requestBuilder.acl(ObjectCannedACL.PUBLIC_READ);\n        }\n\n        if (blob.getMetadata().getTier() != null &&\n                blob.getMetadata().getTier() != Tier.STANDARD) {\n            requestBuilder.storageClass(\n                    toStorageClass(blob.getMetadata().getTier()));\n        }\n\n        String ifMatch = null;\n        String ifNoneMatch = null;\n        if (options instanceof PutOptions2) {\n            var putOptions2 = (PutOptions2) options;\n            ifMatch = putOptions2.getIfMatch();\n            ifNoneMatch = putOptions2.getIfNoneMatch();\n        }\n\n        boolean hasConditionalHeaders = ifMatch != null || ifNoneMatch != null;\n        if (hasConditionalHeaders && !useNativeConditionalWrites) {\n            validateConditionalPut(container, blob.getMetadata().getName(),\n                    ifMatch, ifNoneMatch);\n            ifMatch = null;\n            ifNoneMatch = null;\n        }\n\n        if (ifMatch != null) {\n            requestBuilder.ifMatch(maybeStripETagQuotes(ifMatch));\n        }\n        if (ifNoneMatch != null) {\n            requestBuilder.ifNoneMatch(maybeStripETagQuotes(ifNoneMatch));\n        }\n\n        try (InputStream is = blob.getPayload().openStream()) {\n            Long contentLength = contentMetadata.getContentLength();\n            if (contentLength == null) {\n                // Mimic S3 behavior: Reject unknown length instead of 
crashing memory\n                throw new IllegalArgumentException(\"Content-Length is required for S3 putBlob\");\n            } else {\n                var response = s3Client.putObject(requestBuilder.build(),\n                        RequestBody.fromInputStream(is, contentLength));\n                return response.eTag();\n            }\n        } catch (IOException e) {\n            throw new RuntimeException(\"Failed to read blob payload\", e);\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container,\n                    blob.getMetadata().getName());\n            throw e;\n        }\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n            String toContainer, String toName, CopyOptions options) {\n        var requestBuilder = CopyObjectRequest.builder()\n                .sourceBucket(fromContainer)\n                .sourceKey(fromName)\n                .destinationBucket(toContainer)\n                .destinationKey(toName);\n\n        var contentMetadata = options.contentMetadata();\n        if (contentMetadata != null) {\n            if (contentMetadata.getCacheControl() != null) {\n                requestBuilder.cacheControl(contentMetadata.getCacheControl());\n            }\n            if (contentMetadata.getContentDisposition() != null) {\n                requestBuilder.contentDisposition(\n                        contentMetadata.getContentDisposition());\n            }\n            if (contentMetadata.getContentEncoding() != null) {\n                requestBuilder.contentEncoding(\n                        contentMetadata.getContentEncoding());\n            }\n            if (contentMetadata.getContentLanguage() != null) {\n                requestBuilder.contentLanguage(\n                        contentMetadata.getContentLanguage());\n            }\n            if (contentMetadata.getContentType() != null) {\n                
requestBuilder.contentType(contentMetadata.getContentType());\n            }\n            requestBuilder.metadataDirective(\"REPLACE\");\n        }\n\n        var userMetadata = options.userMetadata();\n        if (userMetadata != null) {\n            requestBuilder.metadata(userMetadata);\n            requestBuilder.metadataDirective(\"REPLACE\");\n        }\n\n        try {\n            var response = s3Client.copyObject(requestBuilder.build());\n            return response.copyObjectResult().eTag();\n        } catch (NoSuchKeyException e) {\n            throw new KeyNotFoundException(fromContainer, fromName,\n                    e.getMessage());\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(fromContainer, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, fromContainer, fromName);\n            throw e;\n        }\n    }\n\n    @Override\n    public void removeBlob(String container, String key) {\n        try {\n            s3Client.deleteObject(DeleteObjectRequest.builder()\n                    .bucket(container)\n                    .key(key)\n                    .build());\n        } catch (NoSuchKeyException | NoSuchBucketException e) {\n            // Ignore - delete is idempotent\n        } catch (S3Exception e) {\n            if (e.statusCode() != 404) {\n                throw e;\n            }\n        }\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String key) {\n        try {\n            HeadObjectResponse response = s3Client.headObject(\n                    HeadObjectRequest.builder()\n                            .bucket(container)\n                            .key(key)\n                            .build());\n\n            return new BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null,\n                    /*uri=*/ null, response.eTag(),\n                    toDate(response.lastModified()),\n                    
toDate(response.lastModified()),\n                    response.metadata(), /*publicUri=*/ null, container,\n                    toContentMetadata(response),\n                    response.contentLength(),\n                    toTier(response.storageClass()));\n        } catch (NoSuchKeyException e) {\n            return null;\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                return null;\n            }\n            translateAndRethrowException(e, container, key);\n            throw e;\n        }\n    }\n\n    @Override\n    protected boolean deleteAndVerifyContainerGone(String container) {\n        try {\n            s3Client.deleteBucket(DeleteBucketRequest.builder()\n                    .bucket(container)\n                    .build());\n            return true;\n        } catch (NoSuchBucketException e) {\n            return true;\n        }\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        try {\n            var response = s3Client.getBucketAcl(GetBucketAclRequest.builder()\n                    .bucket(container)\n                    .build());\n            boolean isPublic = hasPublicRead(response.grants());\n            return isPublic ?\n                    ContainerAccess.PUBLIC_READ : ContainerAccess.PRIVATE;\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                throw new ContainerNotFoundException(container, e.getMessage());\n            }\n            return ContainerAccess.PRIVATE;\n        }\n    }\n\n    @Override\n    public void setContainerAccess(String container, ContainerAccess access) {\n        BucketCannedACL acl = access == ContainerAccess.PUBLIC_READ ?\n                
BucketCannedACL.PUBLIC_READ : BucketCannedACL.PRIVATE;\n        try {\n            s3Client.putBucketAcl(PutBucketAclRequest.builder()\n                    .bucket(container)\n                    .acl(acl)\n                    .build());\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String key) {\n        try {\n            var response = s3Client.getObjectAcl(GetObjectAclRequest.builder()\n                    .bucket(container)\n                    .key(key)\n                    .build());\n            return hasPublicRead(response.grants()) ?\n                    BlobAccess.PUBLIC_READ : BlobAccess.PRIVATE;\n        } catch (NoSuchKeyException e) {\n            throw new KeyNotFoundException(container, key, e.getMessage());\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                throw translateAclNotFound(container, key, e);\n            }\n            throw e;\n        }\n    }\n\n    private static boolean hasPublicRead(List<Grant> grants) {\n        for (Grant grant : grants) {\n            if (grant.permission() == Permission.READ || grant.permission() == Permission.FULL_CONTROL) {\n                if (grant.grantee().type() == Type.GROUP &&\n                        \"http://acs.amazonaws.com/groups/global/AllUsers\".equals(grant.grantee().uri())) {\n                    return true;\n                }\n            }\n        }\n        return false;\n    }\n\n    private RuntimeException translateAclNotFound(String container, String key,\n            S3Exception e) {\n        AwsErrorDetails details = 
e.awsErrorDetails();\n        String errorCode = details != null ? details.errorCode() : null;\n        if (\"NoSuchKey\".equals(errorCode) || \"NotFound\".equals(errorCode)) {\n            return new KeyNotFoundException(container, key, e.getMessage());\n        }\n        if (\"NoSuchBucket\".equals(errorCode)) {\n            return new ContainerNotFoundException(container, e.getMessage());\n        }\n        if (key != null) {\n            return new KeyNotFoundException(container, key, e.getMessage());\n        }\n        return new ContainerNotFoundException(container, e.getMessage());\n    }\n\n    private void applyMultipartAclIfNeeded(MultipartUpload mpu) {\n        if (mpu == null) {\n            return;\n        }\n        PutOptions putOptions = mpu.putOptions();\n        if (putOptions != null && putOptions.getBlobAccess() == BlobAccess.PUBLIC_READ) {\n            setBlobAccess(mpu.containerName(), mpu.blobName(), BlobAccess.PUBLIC_READ);\n        }\n    }\n\n    @Override\n    public void setBlobAccess(String container, String key, BlobAccess access) {\n        ObjectCannedACL acl = access == BlobAccess.PUBLIC_READ ?\n                ObjectCannedACL.PUBLIC_READ : ObjectCannedACL.PRIVATE;\n        try {\n            s3Client.putObjectAcl(PutObjectAclRequest.builder()\n                    .bucket(container)\n                    .key(key)\n                    .acl(acl)\n                    .build());\n        } catch (NoSuchKeyException e) {\n            throw new KeyNotFoundException(container, key, e.getMessage());\n        } catch (NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, key);\n            throw e;\n        }\n    }\n\n    @Override\n    public MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        var requestBuilder = 
CreateMultipartUploadRequest.builder()\n                .bucket(container)\n                .key(blobMetadata.getName());\n\n        var contentMetadata = blobMetadata.getContentMetadata();\n        if (contentMetadata != null) {\n            if (contentMetadata.getCacheControl() != null) {\n                requestBuilder.cacheControl(contentMetadata.getCacheControl());\n            }\n            if (contentMetadata.getContentDisposition() != null) {\n                requestBuilder.contentDisposition(\n                        contentMetadata.getContentDisposition());\n            }\n            if (contentMetadata.getContentEncoding() != null) {\n                requestBuilder.contentEncoding(\n                        contentMetadata.getContentEncoding());\n            }\n            if (contentMetadata.getContentLanguage() != null) {\n                requestBuilder.contentLanguage(\n                        contentMetadata.getContentLanguage());\n            }\n            if (contentMetadata.getContentType() != null) {\n                requestBuilder.contentType(contentMetadata.getContentType());\n            }\n        }\n\n        var userMetadata = blobMetadata.getUserMetadata();\n        if (userMetadata != null && !userMetadata.isEmpty()) {\n            requestBuilder.metadata(userMetadata);\n        }\n\n        if (options != null && options.getBlobAccess() == BlobAccess.PUBLIC_READ) {\n            requestBuilder.acl(ObjectCannedACL.PUBLIC_READ);\n        }\n\n        if (blobMetadata.getTier() != null &&\n                blobMetadata.getTier() != Tier.STANDARD) {\n            requestBuilder.storageClass(\n                    toStorageClass(blobMetadata.getTier()));\n        }\n\n        try {\n            var response = s3Client.createMultipartUpload(\n                    requestBuilder.build());\n            return MultipartUpload.create(container, blobMetadata.getName(),\n                    response.uploadId(), blobMetadata, options);\n        } catch 
(NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, blobMetadata.getName());\n            throw e;\n        }\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        try {\n            s3Client.abortMultipartUpload(AbortMultipartUploadRequest.builder()\n                    .bucket(mpu.containerName())\n                    .key(mpu.blobName())\n                    .uploadId(mpu.id())\n                    .build());\n        } catch (NoSuchKeyException e) {\n            throw new KeyNotFoundException(mpu.containerName(), mpu.blobName(),\n                    \"Multipart upload not found: \" + mpu.id());\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                throw new KeyNotFoundException(mpu.containerName(),\n                        mpu.blobName(),\n                        \"Multipart upload not found: \" + mpu.id());\n            }\n            throw e;\n        }\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n            List<MultipartPart> parts) {\n        var sortedParts = sortAndValidateParts(parts);\n        var completedParts = sortedParts.stream()\n                .map(part -> CompletedPart.builder()\n                        .partNumber(part.partNumber())\n                        .eTag(part.partETag())\n                        .build())\n                .toList();\n\n        var requestBuilder = CompleteMultipartUploadRequest.builder()\n                .bucket(mpu.containerName())\n                .key(mpu.blobName())\n                .uploadId(mpu.id())\n                .multipartUpload(CompletedMultipartUpload.builder()\n                        .parts(completedParts)\n                        .build());\n\n        try {\n            var response = s3Client.completeMultipartUpload(\n                  
  requestBuilder.build());\n            applyMultipartAclIfNeeded(mpu);\n            return response.eTag();\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, mpu.containerName(), mpu.blobName());\n            throw e;\n        }\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n        Long contentLength = payload.getContentMetadata().getContentLength();\n        if (contentLength == null) {\n            throw new IllegalArgumentException(\"Content-Length is required\");\n        }\n\n        try (InputStream is = payload.openStream()) {\n            var response = s3Client.uploadPart(UploadPartRequest.builder()\n                    .bucket(mpu.containerName())\n                    .key(mpu.blobName())\n                    .uploadId(mpu.id())\n                    .partNumber(partNumber)\n                    .build(),\n                    RequestBody.fromInputStream(is, contentLength));\n\n            return MultipartPart.create(partNumber, contentLength,\n                    response.eTag(), null);\n        } catch (IOException e) {\n            throw new RuntimeException(\"Failed to upload part\", e);\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, mpu.containerName(), mpu.blobName());\n            throw e;\n        }\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        try {\n            var parts = ImmutableList.<MultipartPart>builder();\n            Integer partNumberMarker = null;\n\n            do {\n                var response = s3Client.listParts(ListPartsRequest.builder()\n                        .bucket(mpu.containerName())\n                        .key(mpu.blobName())\n                        .uploadId(mpu.id())\n                        .partNumberMarker(partNumberMarker)\n                        .build());\n\n                for (Part part : 
response.parts()) {\n                    parts.add(MultipartPart.create(part.partNumber(),\n                            part.size(),\n                            part.eTag(),\n                            toDate(part.lastModified())));\n                }\n\n                partNumberMarker = response.isTruncated() ?\n                        response.nextPartNumberMarker() : null;\n            } while (partNumberMarker != null);\n\n            return parts.build();\n        } catch (S3Exception e) {\n            if (e.statusCode() == 404) {\n                return ImmutableList.of();\n            }\n            translateAndRethrowException(e, mpu.containerName(), mpu.blobName());\n            throw e;\n        }\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        try {\n            var builder = ImmutableList.<MultipartUpload>builder();\n            String keyMarker = null;\n            String uploadIdMarker = null;\n\n            do {\n                var response = s3Client.listMultipartUploads(\n                        ListMultipartUploadsRequest.builder()\n                                .bucket(container)\n                                .keyMarker(keyMarker)\n                                .uploadIdMarker(uploadIdMarker)\n                                .build());\n\n                for (var upload : response.uploads()) {\n                    builder.add(MultipartUpload.create(container,\n                            upload.key(),\n                            upload.uploadId(),\n                            null, null));\n                }\n\n                if (response.isTruncated()) {\n                    keyMarker = response.nextKeyMarker();\n                    uploadIdMarker = response.nextUploadIdMarker();\n                } else {\n                    keyMarker = null;\n                }\n            } while (keyMarker != null);\n\n            return builder.build();\n        } catch 
(NoSuchBucketException e) {\n            throw new ContainerNotFoundException(container, e.getMessage());\n        } catch (S3Exception e) {\n            translateAndRethrowException(e, container, null);\n            throw e;\n        }\n    }\n\n    @Override\n    public long getMinimumMultipartPartSize() {\n        // S3 minimum part size is 5MB (except for last part)\n        return 5L * 1024 * 1024;\n    }\n\n    @Override\n    public long getMaximumMultipartPartSize() {\n        // S3 maximum part size is 5GB\n        return 5L * 1024 * 1024 * 1024;\n    }\n\n    @Override\n    public int getMaximumNumberOfParts() {\n        return 10000;\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        throw new UnsupportedOperationException(\"not yet implemented\");\n    }\n\n    private static List<MultipartPart> sortAndValidateParts(\n            List<MultipartPart> parts) {\n        if (parts == null || parts.isEmpty()) {\n            throw new IllegalArgumentException(\n                    \"At least one multipart part is required\");\n        }\n        var sortedParts = parts.stream()\n                .sorted(Comparator.comparingInt(MultipartPart::partNumber))\n                .toList();\n        int previousPartNumber = 0;\n        for (MultipartPart part : sortedParts) {\n            int partNumber = part.partNumber();\n            if (partNumber <= 0) {\n                throw new IllegalArgumentException(\n                        \"Part numbers must be positive integers\");\n            }\n            if (partNumber < previousPartNumber) {\n                throw new IllegalArgumentException(\n                        \"Parts must be provided in ascending PartNumber order\");\n            }\n            previousPartNumber = partNumber;\n        }\n        return sortedParts;\n    }\n\n    private static Date toDate(@Nullable Instant instant) {\n        if (instant == null) {\n            return null;\n        }\n     
   return Date.from(instant);\n    }\n\n    private static StorageClass toStorageClass(Tier tier) {\n        return switch (tier) {\n        case ARCHIVE -> StorageClass.GLACIER;\n        case COLD -> StorageClass.GLACIER_IR;\n        case COOL, INFREQUENT -> StorageClass.STANDARD_IA;\n        case STANDARD -> StorageClass.STANDARD;\n        };\n    }\n\n    private static Tier toTier(@Nullable StorageClass storageClass) {\n        if (storageClass == null) {\n            return Tier.STANDARD;\n        }\n        return switch (storageClass) {\n        case GLACIER, DEEP_ARCHIVE -> Tier.ARCHIVE;\n        case GLACIER_IR -> Tier.COLD;\n        case STANDARD_IA, ONEZONE_IA -> Tier.INFREQUENT;\n        default -> Tier.STANDARD;\n        };\n    }\n\n    private static Tier toTier(\n            software.amazon.awssdk.services.s3.model.@Nullable\n                ObjectStorageClass storageClass) {\n        if (storageClass == null) {\n            return Tier.STANDARD;\n        }\n        return switch (storageClass) {\n        case GLACIER, DEEP_ARCHIVE -> Tier.ARCHIVE;\n        case GLACIER_IR -> Tier.COLD;\n        case STANDARD_IA, ONEZONE_IA -> Tier.INFREQUENT;\n        default -> Tier.STANDARD;\n        };\n    }\n\n    private static org.jclouds.io.ContentMetadata toContentMetadata(\n            HeadObjectResponse response) {\n        var builder = ContentMetadataBuilder.create();\n        if (response.cacheControl() != null) {\n            builder.cacheControl(response.cacheControl());\n        }\n        if (response.contentDisposition() != null) {\n            builder.contentDisposition(response.contentDisposition());\n        }\n        if (response.contentEncoding() != null) {\n            builder.contentEncoding(response.contentEncoding());\n        }\n        if (response.contentLanguage() != null) {\n            builder.contentLanguage(response.contentLanguage());\n        }\n        if (response.contentLength() != null) {\n            
builder.contentLength(response.contentLength());\n        }\n        if (response.contentType() != null) {\n            builder.contentType(response.contentType());\n        }\n        if (response.expires() != null) {\n            builder.expires(Date.from(response.expires()));\n        }\n        return builder.build();\n    }\n\n    private void translateAndRethrowException(S3Exception e,\n            @Nullable String container, @Nullable String key) {\n        if (container != null && e.statusCode() == 404) {\n            String errorCode = e.awsErrorDetails().errorCode();\n            if (\"NoSuchBucket\".equals(errorCode)) {\n                throw new ContainerNotFoundException(container, e.getMessage());\n            } else if (\"NoSuchKey\".equals(errorCode)) {\n                if (key == null) {\n                    throw new ContainerNotFoundException(container, e.getMessage());\n                }\n                throw new KeyNotFoundException(container, key, e.getMessage());\n            }\n            if (key != null) {\n                throw new KeyNotFoundException(container, key, e.getMessage());\n            } else {\n                throw new ContainerNotFoundException(container, e.getMessage());\n            }\n        }\n        var request = HttpRequest.builder()\n                .method(\"GET\")\n                .endpoint(endpoint)\n                .build();\n        var responseBuilder = HttpResponse.builder()\n                .statusCode(e.statusCode())\n                .message(e.getMessage());\n\n        if (e.statusCode() == 304) {\n            e.awsErrorDetails().sdkHttpResponse().firstMatchingHeader(HttpHeaders.ETAG)\n                    .ifPresent(etag -> responseBuilder.addHeader(HttpHeaders.ETAG, etag));\n        }\n\n        throw new HttpResponseException(\n                new HttpCommand(request), responseBuilder.build(), e);\n    }\n\n    /**\n     * Ensures the ETag is surrounded by quotes if not already.\n     */\n    private 
static String maybeQuoteETag(String eTag) {\n        if (!eTag.startsWith(\"\\\"\") && !eTag.endsWith(\"\\\"\")) {\n            eTag = \"\\\"\" + eTag + \"\\\"\";\n        }\n        return eTag;\n    }\n\n    /**\n     * Strips surrounding quotes from ETag if stripETagQuotes is enabled.\n     * Required for backends with Ceph Reef bug.\n     * See: https://tracker.ceph.com/issues/68712\n     * TODO: Can be removed after 2027-01-01 - by then every provider should\n     * have migrated to a newer Ceph version (including Hetzner).\n     */\n    private String maybeStripETagQuotes(String eTag) {\n        if (!stripETagQuotes || eTag == null) {\n            return eTag;\n        }\n        if (eTag.length() >= 2 && eTag.startsWith(\"\\\"\") && eTag.endsWith(\"\\\"\")) {\n            return eTag.substring(1, eTag.length() - 1);\n        }\n        return eTag;\n    }\n\n    /**\n     * Compares two ETags, ignoring surrounding quotes.\n     */\n    private static boolean equalsIgnoringSurroundingQuotes(\n            String s1, String s2) {\n        if (s1.length() >= 2 && s1.startsWith(\"\\\"\") && s1.endsWith(\"\\\"\")) {\n            s1 = s1.substring(1, s1.length() - 1);\n        }\n        if (s2.length() >= 2 && s2.startsWith(\"\\\"\") && s2.endsWith(\"\\\"\")) {\n            s2 = s2.substring(1, s2.length() - 1);\n        }\n        return s1.equals(s2);\n    }\n\n    private void throwPreconditionFailed() {\n        var request = HttpRequest.builder()\n                .method(\"PUT\")\n                .endpoint(endpoint)\n                .build();\n        var response = HttpResponse.builder()\n                .statusCode(412)\n                .message(\"Precondition Failed\")\n                .build();\n        throw new HttpResponseException(new HttpCommand(request), response);\n    }\n\n    private void throwKeyNotFound(String container, String key) {\n        throw new KeyNotFoundException(container, key,\n                \"Object does not exist for If-Match 
condition\");\n    }\n\n    /**\n     * For S3-compatible backends that don't support If-Match/If-None-Match\n     * headers natively.\n     */\n    private void validateConditionalPut(String container, String blobName,\n            @Nullable String ifMatch, @Nullable String ifNoneMatch) {\n        BlobMetadata metadata = blobMetadata(container, blobName);\n\n        if (ifMatch != null) {\n            validateIfMatch(container, blobName, ifMatch, metadata);\n        }\n\n        if (ifNoneMatch != null) {\n            validateIfNoneMatch(ifNoneMatch, metadata);\n        }\n    }\n\n    private void validateIfMatch(String container, String blobName,\n            String ifMatch, @Nullable BlobMetadata metadata) {\n        if (\"*\".equals(ifMatch)) {\n            if (metadata == null) {\n                throwPreconditionFailed();\n            }\n            return;\n        }\n\n        if (metadata == null) {\n            throwKeyNotFound(container, blobName);\n        }\n\n        String currentETag = metadata.getETag();\n        if (currentETag == null ||\n                !equalsIgnoringSurroundingQuotes(ifMatch,\n                    maybeQuoteETag(currentETag))) {\n            throwPreconditionFailed();\n        }\n    }\n\n    private void validateIfNoneMatch(String ifNoneMatch,\n            @Nullable BlobMetadata metadata) {\n        if (\"*\".equals(ifNoneMatch)) {\n            if (metadata != null) {\n                throwPreconditionFailed();\n            }\n            return;\n        }\n\n        if (metadata == null) {\n            return;\n        }\n\n        String currentETag = metadata.getETag();\n        if (currentETag != null &&\n                equalsIgnoringSurroundingQuotes(ifNoneMatch,\n                    maybeQuoteETag(currentETag))) {\n            throwPreconditionFailed();\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkBlobStoreContextModule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.awssdk;\n\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Scopes;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.attr.ConsistencyModel;\n\npublic final class AwsS3SdkBlobStoreContextModule extends AbstractModule {\n    @Override\n    protected void configure() {\n        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);\n        bind(BlobStore.class).to(AwsS3SdkBlobStore.class).in(Scopes.SINGLETON);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/awssdk/AwsS3SdkProviderMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.awssdk;\n\nimport java.util.Properties;\n\nimport com.google.auto.service.AutoService;\n\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.providers.internal.BaseProviderMetadata;\n\n@AutoService(ProviderMetadata.class)\npublic final class AwsS3SdkProviderMetadata extends BaseProviderMetadata {\n    public AwsS3SdkProviderMetadata() {\n        super(builder());\n    }\n\n    public AwsS3SdkProviderMetadata(Builder builder) {\n        super(builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromProviderMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        var properties = new Properties();\n        return properties;\n    }\n\n    public static final class Builder extends BaseProviderMetadata.Builder {\n        protected Builder() {\n            id(\"aws-s3-sdk\")\n                .name(\"AWS S3 SDK Backend\")\n                .apiMetadata(new AwsS3SdkApiMetadata())\n                .endpoint(\"https://s3.amazonaws.com\")\n                .defaultProperties(\n                        AwsS3SdkProviderMetadata.defaultProperties());\n        }\n\n        @Override\n        public AwsS3SdkProviderMetadata build() {\n            return new 
AwsS3SdkProviderMetadata(this);\n        }\n\n        @Override\n        public Builder fromProviderMetadata(ProviderMetadata in) {\n            super.fromProviderMetadata(in);\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/azureblob/AzureBlobApiMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.azureblob;\n\nimport java.net.URI;\nimport java.util.Properties;\nimport java.util.Set;\n\nimport org.jclouds.azure.storage.config.AuthType;\nimport org.jclouds.azure.storage.config.AzureStorageProperties;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.reference.BlobStoreConstants;\nimport org.jclouds.reflect.Reflection2;\nimport org.jclouds.rest.internal.BaseHttpApiMetadata;\n\n\n@SuppressWarnings(\"rawtypes\")\npublic final class AzureBlobApiMetadata extends BaseHttpApiMetadata {\n    public AzureBlobApiMetadata() {\n        this(builder());\n    }\n\n    protected AzureBlobApiMetadata(Builder builder) {\n        super(builder);\n    }\n\n    private static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromApiMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        Properties properties = BaseHttpApiMetadata.defaultProperties();\n        properties.setProperty(BlobStoreConstants.PROPERTY_USER_METADATA_PREFIX,\n                \"x-ms-meta-\");\n        properties.setProperty(AzureStorageProperties.AUTH_TYPE,\n                AuthType.AZURE_KEY.toString());\n        properties.setProperty(AzureStorageProperties.ACCOUNT, \"\");\n        
properties.setProperty(AzureStorageProperties.TENANT_ID, \"\");\n        return properties;\n    }\n\n    // Fake API client\n    private interface AzureBlobClient {\n    }\n\n    public static final class Builder\n            extends BaseHttpApiMetadata.Builder<AzureBlobClient, Builder> {\n        protected Builder() {\n            super(AzureBlobClient.class);\n            id(\"azureblob-sdk\")\n                .name(\"Microsoft Azure Blob Service API\")\n                .identityName(\"Account Name\")\n                .credentialName(\"Access Key\")\n                // TODO: update\n                .version(\"2017-11-09\")\n                .defaultEndpoint(\n                        \"https://${jclouds.identity}.blob.core.windows.net\")\n                .documentation(URI.create(\n                        \"https://learn.microsoft.com/en-us/rest/api/\" +\n                        \"storageservices/Blob-Service-REST-API\"))\n                .defaultProperties(AzureBlobApiMetadata.defaultProperties())\n                .view(Reflection2.typeToken(BlobStoreContext.class))\n                .defaultModules(Set.of(AzureBlobStoreContextModule.class));\n        }\n\n        @Override\n        public AzureBlobApiMetadata build() {\n            return new AzureBlobApiMetadata(this);\n        }\n\n        @Override\n        protected Builder self() {\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/azureblob/AzureBlobProviderMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.azureblob;\n\nimport java.net.URI;\nimport java.util.Properties;\n\nimport com.google.auto.service.AutoService;\n\nimport org.jclouds.azure.storage.config.AzureStorageProperties;\nimport org.jclouds.oauth.v2.config.CredentialType;\nimport org.jclouds.oauth.v2.config.OAuthProperties;\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.providers.internal.BaseProviderMetadata;\n\n/**\n * Implementation of org.jclouds.types.ProviderMetadata for Microsoft Azure\n * Blob Service.\n */\n@AutoService(ProviderMetadata.class)\npublic final class AzureBlobProviderMetadata extends BaseProviderMetadata {\n    public AzureBlobProviderMetadata() {\n        super(builder());\n    }\n\n    public AzureBlobProviderMetadata(Builder builder) {\n        super(builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromProviderMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        var properties = new Properties();\n        properties.put(\"oauth.endpoint\", \"https://login.microsoft.com/${\" +\n                AzureStorageProperties.TENANT_ID + \"}/oauth2/token\");\n        properties.put(OAuthProperties.RESOURCE, \"https://storage.azure.com\");\n        
properties.put(OAuthProperties.CREDENTIAL_TYPE,\n                CredentialType.CLIENT_CREDENTIALS_SECRET.toString());\n        properties.put(AzureStorageProperties.ACCOUNT, \"${jclouds.identity}\");\n        return properties;\n    }\n    public static final class Builder extends BaseProviderMetadata.Builder {\n        protected Builder() {\n            id(\"azureblob-sdk\")\n                .name(\"Microsoft Azure Blob Service\")\n                .apiMetadata(new AzureBlobApiMetadata())\n                .endpoint(\"https://${\" + AzureStorageProperties.ACCOUNT +\n                        \"}.blob.core.windows.net\")\n                .homepage(URI.create(\n                        \"http://www.microsoft.com/windowsazure/storage/\"))\n                .console(URI.create(\"https://windows.azure.com/default.aspx\"))\n                .linkedServices(\"azureblob\", \"azurequeue\", \"azuretable\")\n                .iso3166Codes(\"US-TX\", \"US-IL\", \"IE-D\", \"SG\", \"NL-NH\", \"HK\")\n                .defaultProperties(\n                        AzureBlobProviderMetadata.defaultProperties());\n        }\n\n        @Override\n        public AzureBlobProviderMetadata build() {\n            return new AzureBlobProviderMetadata(this);\n        }\n\n        @Override\n        public Builder fromProviderMetadata(\n                ProviderMetadata in) {\n            super.fromProviderMetadata(in);\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/azureblob/AzureBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.azureblob;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.net.URLDecoder;\nimport java.nio.ByteBuffer;\nimport java.nio.charset.StandardCharsets;\nimport java.security.MessageDigest;\nimport java.time.OffsetDateTime;\nimport java.time.ZoneOffset;\nimport java.util.Base64;\nimport java.util.Date;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\nimport java.util.UUID;\n\nimport com.azure.core.credential.AzureNamedKeyCredential;\nimport com.azure.core.http.rest.PagedResponse;\nimport com.azure.identity.DefaultAzureCredentialBuilder;\nimport com.azure.storage.blob.BlobServiceClient;\nimport com.azure.storage.blob.BlobServiceClientBuilder;\nimport com.azure.storage.blob.models.AccessTier;\nimport com.azure.storage.blob.models.BlobErrorCode;\nimport com.azure.storage.blob.models.BlobHttpHeaders;\nimport com.azure.storage.blob.models.BlobItem;\nimport com.azure.storage.blob.models.BlobListDetails;\nimport com.azure.storage.blob.models.BlobProperties;\nimport com.azure.storage.blob.models.BlobRange;\nimport com.azure.storage.blob.models.BlobRequestConditions;\nimport com.azure.storage.blob.models.BlobStorageException;\nimport com.azure.storage.blob.models.BlockList;\nimport 
com.azure.storage.blob.models.BlockListType;\nimport com.azure.storage.blob.models.ListBlobsOptions;\nimport com.azure.storage.blob.models.PublicAccessType;\nimport com.azure.storage.blob.options.BlobContainerCreateOptions;\nimport com.azure.storage.blob.options.BlobUploadFromUrlOptions;\nimport com.azure.storage.blob.options.BlockBlobCommitBlockListOptions;\nimport com.azure.storage.blob.options.BlockBlobOutputStreamOptions;\nimport com.azure.storage.blob.options.BlockBlobSimpleUploadOptions;\nimport com.azure.storage.blob.sas.BlobSasPermission;\nimport com.azure.storage.blob.sas.BlobServiceSasSignatureValues;\nimport com.azure.storage.blob.specialized.BlobInputStream;\nimport com.azure.storage.blob.specialized.BlockBlobAsyncClient;\nimport com.azure.storage.common.policy.RequestRetryOptions;\nimport com.azure.storage.common.policy.RetryPolicyType;\nimport com.google.common.base.Supplier;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\nimport com.google.common.hash.HashingInputStream;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.net.HttpHeaders;\n\nimport jakarta.inject.Inject;\nimport jakarta.inject.Singleton;\nimport jakarta.ws.rs.core.Response.Status;\n\nimport org.gaul.s3proxy.PutOptions2;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.StorageType;\nimport 
org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.domain.internal.BlobBuilderImpl;\nimport org.jclouds.blobstore.domain.internal.BlobMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.domain.internal.StorageMetadataImpl;\nimport org.jclouds.blobstore.internal.BaseBlobStore;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.http.HttpCommand;\nimport org.jclouds.http.HttpRequest;\nimport org.jclouds.http.HttpResponse;\nimport org.jclouds.http.HttpResponseException;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.io.ContentMetadataBuilder;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.PayloadSlicer;\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jspecify.annotations.Nullable;\n\nimport reactor.core.publisher.Flux;\n\n@Singleton\npublic final class AzureBlobStore extends BaseBlobStore {\n    private static final String STUB_BLOB_PREFIX = \".s3proxy/stubs/\";\n    private static final String TARGET_BLOB_NAME_TAG = \"s3proxy_target_blob_name\";\n    private static final HashFunction MD5 = Hashing.md5();\n    // Disable retries since client should retry on errors.\n    private static final RequestRetryOptions NO_RETRY_OPTIONS = new RequestRetryOptions(\n            RetryPolicyType.FIXED, /*maxTries=*/ 1,\n            /*tryTimeoutInSeconds=*/ (Integer) null,\n            /*retryDelayInMs=*/ null, /*maxRetryDelayInMs=*/ null,\n            /*secondaryHost=*/ null);\n\n    private final BlobServiceClient blobServiceClient;\n    private final String endpoint;\n    private final 
Supplier<Credentials> creds;\n\n    @Inject\n    AzureBlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            ProviderMetadata provider) {\n        super(context, blobUtils, defaultLocation, locations, slicer);\n        this.endpoint = provider.getEndpoint();\n        this.creds = creds;\n        var cred = creds.get();\n        var blobServiceClientBuilder = new BlobServiceClientBuilder();\n        if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) {\n            blobServiceClientBuilder.credential(\n                new AzureNamedKeyCredential(cred.identity, cred.credential));\n        } else {\n            blobServiceClientBuilder.credential(\n                new DefaultAzureCredentialBuilder().build());\n        }\n        blobServiceClient = blobServiceClientBuilder\n                .endpoint(endpoint)\n                .retryOptions(NO_RETRY_OPTIONS)\n                .buildClient();\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        var set = ImmutableSet.<StorageMetadata>builder();\n        for (var container : blobServiceClient.listBlobContainers()) {\n            set.add(new StorageMetadataImpl(StorageType.CONTAINER, /*id=*/ null,\n                    container.getName(), /*location=*/ null, /*uri=*/ null,\n                    /*eTag=*/ null, /*creationDate=*/ null,\n                    toDate(container.getProperties().getLastModified()),\n                    Map.of(), /*size=*/ null,\n                    Tier.STANDARD));\n        }\n        return new PageSetImpl<StorageMetadata>(set.build(), null);\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(String container,\n            ListContainerOptions options) {\n        var client = blobServiceClient.getBlobContainerClient(container);\n        var azureOptions = new ListBlobsOptions();\n        azureOptions.setPrefix(options.getPrefix());\n        azureOptions.setMaxResultsPerPage(options.getMaxResults());\n        var marker = options.getMarker() != null ?\n                URLDecoder.decode(options.getMarker(), StandardCharsets.UTF_8) :\n                null;\n\n        var set = ImmutableSet.<StorageMetadata>builder();\n        PagedResponse<BlobItem> page;\n        try {\n            page = client.listBlobsByHierarchy(\n                    options.getDelimiter(), azureOptions, /*timeout=*/ null)\n                    .iterableByPage(marker).iterator().next();\n        } catch (BlobStorageException bse) {\n            translateAndRethrowException(bse, container, /*key=*/ null);\n            throw bse;\n        }\n        for (var blob : page.getValue()) {\n            var properties = blob.getProperties();\n            if (blob.isPrefix()) {\n                set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH,\n                        /*id=*/ null, blob.getName(), /*location=*/ null,\n                        /*uri=*/ null, /*eTag=*/ null,\n                        /*creationDate=*/ null,\n                        /*lastModified=*/ null,\n                        Map.of(),\n                        /*size=*/ null,\n                        Tier.STANDARD));\n            } else {\n                set.add(new StorageMetadataImpl(StorageType.BLOB,\n                        /*id=*/ null, blob.getName(), /*location=*/ null,\n                        /*uri=*/ null, properties.getETag(),\n                        toDate(properties.getCreationTime()),\n                        toDate(properties.getLastModified()),\n                        Map.of(),\n                        properties.getContentLength(),\n                        
toTier(properties.getAccessTier())));\n            }\n        }\n\n        return new PageSetImpl<StorageMetadata>(set.build(),\n                page.getContinuationToken());\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        var client = blobServiceClient.getBlobContainerClient(container);\n        return client.exists();\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container) {\n        return createContainerInLocation(location, container,\n                new CreateContainerOptions());\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        var azureOptions = new BlobContainerCreateOptions();\n        if (options.isPublicRead()) {\n            azureOptions.setPublicAccessType(PublicAccessType.CONTAINER);\n        }\n        try {\n            var response = blobServiceClient\n                    .createBlobContainerIfNotExistsWithResponse(\n                            container, azureOptions, /*context=*/ null);\n            return switch (response.getStatusCode()) {\n            case 201 -> true;\n            case 409 -> false;\n            default -> false;\n            };\n        } catch (BlobStorageException bse) {\n            translateAndRethrowException(bse, container, /*key=*/ null);\n            throw bse;\n        }\n    }\n\n    @Override\n    public void deleteContainer(String container) {\n        try {\n            blobServiceClient.deleteBlobContainer(container);\n        } catch (BlobStorageException bse) {\n            if (!bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {\n                throw bse;\n            }\n        }\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        var client = blobServiceClient.getBlobContainerClient(container);\n        try {\n            var page = 
client.listBlobsByHierarchy(\n                    /*delimiter=*/ null, /*options=*/ null, /*timeout=*/ null)\n                    .iterableByPage().iterator().next();\n            if (!page.getValue().isEmpty()) {\n                return false;\n            }\n            blobServiceClient.deleteBlobContainer(container);\n            return true;\n        } catch (BlobStorageException bse) {\n            if (bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {\n                return true;\n            }\n            throw bse;\n        }\n    }\n\n    @Override\n    public boolean blobExists(String container, String key) {\n        var client = blobServiceClient.getBlobContainerClient(container)\n                .getBlobClient(key);\n        return client.exists();\n    }\n\n    @Override\n    public Blob getBlob(String container, String key, GetOptions options) {\n        var client = blobServiceClient.getBlobContainerClient(container)\n                .getBlobClient(key);\n        BlobRange azureRange = null;\n        if (!options.getRanges().isEmpty()) {\n            var ranges = options.getRanges().get(0).split(\"-\", 2);\n\n            if (ranges[0].isEmpty()) {\n                // handle to read from the end\n                long offset = 0;\n                long end = Long.parseLong(ranges[1]);\n                long length = end;\n                azureRange = new BlobRange(offset, length);\n                throw new UnsupportedOperationException(\n                        \"trailing ranges unsupported\");\n            } else if (ranges[1].isEmpty()) {\n                // handle to read from an offset till the end\n                long offset = Long.parseLong(ranges[0]);\n                azureRange = new BlobRange(offset);\n            } else {\n                // handle to read from an offset\n                long offset = Long.parseLong(ranges[0]);\n                long end = Long.parseLong(ranges[1]);\n                long length = end - offset 
+ 1;\n                azureRange = new BlobRange(offset, length);\n            }\n        }\n        var conditions = new BlobRequestConditions()\n                .setIfMatch(options.getIfMatch())\n                .setIfModifiedSince(toOffsetDateTime(\n                        options.getIfModifiedSince()))\n                .setIfNoneMatch(options.getIfNoneMatch())\n                .setIfUnmodifiedSince(toOffsetDateTime(\n                        options.getIfUnmodifiedSince()));\n        BlobInputStream blobStream;\n        try {\n            blobStream = client.openInputStream(azureRange, conditions);\n        } catch (BlobStorageException bse) {\n            translateAndRethrowException(bse, container, key);\n            if (bse.getStatusCode() ==\n                    Status.REQUESTED_RANGE_NOT_SATISFIABLE.getStatusCode()) {\n                throw new HttpResponseException(\n                        \"illegal range: \" + azureRange, null,\n                        HttpResponse.builder()\n                        .statusCode(Status.REQUESTED_RANGE_NOT_SATISFIABLE\n                                .getStatusCode())\n                        .build());\n            }\n            throw bse;\n        }\n        var properties = blobStream.getProperties();\n        var expires = properties.getExpiresOn();\n        long contentLength;\n        if (azureRange == null) {\n            contentLength = properties.getBlobSize();\n        } else {\n            if (azureRange.getCount() == null) {\n                contentLength = properties.getBlobSize() -\n                        azureRange.getOffset();\n            } else {\n                contentLength = azureRange.getCount();\n            }\n        }\n        var blob = new BlobBuilderImpl()\n                .name(key)\n                .userMetadata(properties.getMetadata())\n                .payload(blobStream)\n                .cacheControl(properties.getCacheControl())\n                
.contentDisposition(properties.getContentDisposition())\n                .contentEncoding(properties.getContentEncoding())\n                .contentLanguage(properties.getContentLanguage())\n                .contentLength(contentLength)\n                .contentType(properties.getContentType())\n                .expires(expires != null ? toDate(expires) : null)\n                .build();\n        if (azureRange != null) {\n            blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE,\n                    \"bytes \" + azureRange.getOffset() +\n                    \"-\" + (azureRange.getOffset() + contentLength - 1) +\n                    \"/\" + properties.getBlobSize());\n        }\n        var metadata = blob.getMetadata();\n        metadata.setETag(properties.getETag());\n        metadata.setCreationDate(toDate(properties.getCreationTime()));\n        metadata.setLastModified(toDate(properties.getLastModified()));\n        return blob;\n    }\n\n    @Override\n    public String putBlob(String container, Blob blob) {\n        return putBlob(container, blob, new PutOptions());\n    }\n\n    @Override\n    public String putBlob(String container, Blob blob, PutOptions options) {\n        var client = blobServiceClient.getBlobContainerClient(container)\n                .getBlobClient(blob.getMetadata().getName())\n                .getBlockBlobClient();\n        try (var is = blob.getPayload().openStream()) {\n            var azureOptions = new BlockBlobOutputStreamOptions();\n            azureOptions.setMetadata(blob.getMetadata().getUserMetadata());\n\n            // TODO: Expires?\n            var blobHttpHeaders = new BlobHttpHeaders();\n            var contentMetadata = blob.getMetadata().getContentMetadata();\n            blobHttpHeaders.setCacheControl(contentMetadata.getCacheControl());\n            blobHttpHeaders.setContentDisposition(\n                    contentMetadata.getContentDisposition());\n            blobHttpHeaders.setContentEncoding(\n           
         contentMetadata.getContentEncoding());\n            blobHttpHeaders.setContentLanguage(\n                    contentMetadata.getContentLanguage());\n            var hash = contentMetadata.getContentMD5AsHashCode();\n            blobHttpHeaders.setContentMd5(hash != null ? hash.asBytes() : null);\n            blobHttpHeaders.setContentType(contentMetadata.getContentType());\n            azureOptions.setHeaders(blobHttpHeaders);\n            if (blob.getMetadata().getTier() != Tier.STANDARD) {\n                azureOptions.setTier(toAccessTier(\n                        blob.getMetadata().getTier()));\n            }\n\n            if (options instanceof PutOptions2 putOptions2) {\n                String ifMatch = putOptions2.getIfMatch();\n                String ifNoneMatch = putOptions2.getIfNoneMatch();\n                if (ifMatch != null || ifNoneMatch != null) {\n                    azureOptions.setRequestConditions(new BlobRequestConditions()\n                            .setIfMatch(ifMatch)\n                            .setIfNoneMatch(ifNoneMatch));\n                }\n            }\n\n            try (var os = client.getBlobOutputStream(\n                    azureOptions, /*context=*/ null)) {\n                is.transferTo(os);\n            }\n\n            // TODO: racy\n            return blobServiceClient\n                    .getBlobContainerClient(container)\n                    .getBlobClient(blob.getMetadata().getName())\n                    .getProperties()\n                    .getETag();\n        } catch (IOException ioe) {\n            var cause = ioe.getCause();\n            if (cause instanceof BlobStorageException bse) {\n                translateAndRethrowException(\n                        bse, container, /*key=*/ null);\n            }\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n            String toContainer, String toName, 
CopyOptions options) {\n        var expiryTime = OffsetDateTime.now().plusDays(1);\n        var permission = new BlobSasPermission().setReadPermission(true);\n        var values = new BlobServiceSasSignatureValues(expiryTime, permission)\n                .setStartTime(OffsetDateTime.now());\n\n        var fromClient = blobServiceClient\n                .getBlobContainerClient(fromContainer)\n                .getBlobClient(fromName);\n        var url = fromClient.getBlobUrl();\n        String token;\n        var cred = creds.get();\n        if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) {\n            token = fromClient.generateSas(values);\n        } else {\n            var userDelegationKey = blobServiceClient.getUserDelegationKey(\n                    OffsetDateTime.now().minusMinutes(5), expiryTime);\n            token = fromClient.generateUserDelegationSas(values, userDelegationKey);\n        }\n\n        // TODO: is this the best way to generate a SAS URL?\n        var azureOptions = new BlobUploadFromUrlOptions(url + \"?\" + token);\n        var client = blobServiceClient\n                .getBlobContainerClient(toContainer)\n                .getBlobClient(toName)\n                .getBlockBlobClient();\n\n        var headers = new BlobHttpHeaders();\n        var contentMetadata = options.contentMetadata();\n        if (contentMetadata != null) {\n            var cacheControl = contentMetadata.getCacheControl();\n            if (cacheControl != null) {\n                headers.setCacheControl(cacheControl);\n            }\n\n            var contentDisposition = contentMetadata.getContentDisposition();\n            if (contentDisposition != null) {\n                headers.setContentDisposition(contentDisposition);\n            }\n\n            var contentEncoding = contentMetadata.getContentEncoding();\n            if (contentEncoding != null) {\n                headers.setContentEncoding(contentEncoding);\n            }\n\n            var 
contentLanguage = contentMetadata.getContentLanguage();\n            if (contentLanguage != null) {\n                headers.setContentLanguage(contentLanguage);\n            }\n\n            var contentType = contentMetadata.getContentType();\n            if (contentType != null) {\n                headers.setContentType(contentType);\n            }\n        }\n        azureOptions.setHeaders(headers);\n\n        // TODO: setSourceRequestConditions(BlobRequestConditions)\n        var response = client.uploadFromUrlWithResponse(\n                azureOptions, /*timeout=*/ null, /*context=*/ null);\n\n        // TODO: cannot do this as part of uploadFromUrlWithResponse?\n        var userMetadata = options.userMetadata();\n        if (userMetadata != null) {\n            client.setMetadata(userMetadata);\n        }\n\n        return response.getValue().getETag();\n    }\n\n    @Override\n    public void removeBlob(String container, String key) {\n        var client = blobServiceClient.getBlobContainerClient(container)\n                .getBlobClient(key);\n        try {\n            client.delete();\n        } catch (BlobStorageException bse) {\n            if (!bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) &&\n                    !bse.getErrorCode().equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {\n                throw bse;\n            }\n        }\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String key) {\n        var client = blobServiceClient.getBlobContainerClient(container)\n                .getBlobClient(key);\n        BlobProperties properties;\n        try {\n            properties = client.getProperties();\n        } catch (BlobStorageException bse) {\n            if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {\n                return null;\n            }\n            translateAndRethrowException(bse, container, /*key=*/ null);\n            throw bse;\n        }\n        return new 
BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null,\n                /*uri=*/ null, properties.getETag(),\n                toDate(properties.getCreationTime()),\n                toDate(properties.getLastModified()),\n                properties.getMetadata(), /*publicUri=*/ null, container,\n                toContentMetadata(properties),\n                properties.getBlobSize(), toTier(properties.getAccessTier()));\n    }\n\n    @Override\n    protected boolean deleteAndVerifyContainerGone(String container) {\n        blobServiceClient.deleteBlobContainer(container);\n        return true;\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        var client = blobServiceClient.getBlobContainerClient(container);\n        try {\n            var blobAccessType = client.getAccessPolicy().getBlobAccessType();\n            return blobAccessType != null && blobAccessType.equals(\n                    PublicAccessType.CONTAINER) ?\n                    ContainerAccess.PUBLIC_READ :\n                    ContainerAccess.PRIVATE;\n        } catch (BlobStorageException bse) {\n            translateAndRethrowException(bse, container, /*key=*/ null);\n            throw bse;\n        }\n    }\n\n    @Override\n    public void setContainerAccess(String container, ContainerAccess access) {\n        var client = blobServiceClient.getBlobContainerClient(container);\n        var publicAccess = access == ContainerAccess.PUBLIC_READ ?\n                PublicAccessType.CONTAINER : PublicAccessType.BLOB;\n        client.setAccessPolicy(publicAccess, List.of());\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String key) {\n        return BlobAccess.PRIVATE;\n    }\n\n    @Override\n    public void setBlobAccess(String container, String key, BlobAccess access) {\n        throw new UnsupportedOperationException(\"unsupported in Azure\");\n    }\n\n    @Override\n    public MultipartUpload 
initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        var containerClient = blobServiceClient.getBlobContainerClient(container);\n        try {\n            if (!containerClient.exists()) {\n                throw new ContainerNotFoundException(container, \"\");\n            }\n        } catch (BlobStorageException bse) {\n            translateAndRethrowException(bse, container, /*key=*/ null);\n            throw bse;\n        }\n\n        var userMetadata = blobMetadata.getUserMetadata();\n        if (userMetadata != null && !userMetadata.isEmpty()) {\n            for (var key : userMetadata.keySet()) {\n                if (!isValidMetadataKey(key)) {\n                    throw new IllegalArgumentException(\n                            \"Invalid metadata key: \" + key);\n                }\n            }\n        }\n\n        String uploadKey = STUB_BLOB_PREFIX + UUID.randomUUID().toString();\n        String targetBlobName = blobMetadata.getName();\n        var stubBlobClient = containerClient.getBlobClient(uploadKey).getBlockBlobClient();\n\n        var contentMetadata = blobMetadata.getContentMetadata();\n        BlobHttpHeaders headers = new BlobHttpHeaders();\n        if (contentMetadata != null) {\n            headers.setContentType(contentMetadata.getContentType());\n            headers.setContentDisposition(contentMetadata.getContentDisposition());\n            headers.setContentEncoding(contentMetadata.getContentEncoding());\n            headers.setContentLanguage(contentMetadata.getContentLanguage());\n            headers.setCacheControl(contentMetadata.getCacheControl());\n        }\n\n        var uploadOptions = new BlockBlobSimpleUploadOptions(\n                new ByteArrayInputStream(new byte[0]), 0);\n        uploadOptions.setHeaders(headers);\n        if (userMetadata != null && !userMetadata.isEmpty()) {\n            uploadOptions.setMetadata(userMetadata);\n        }\n        if 
(blobMetadata.getTier() != null && blobMetadata.getTier() != Tier.STANDARD) {\n            uploadOptions.setTier(toAccessTier(blobMetadata.getTier()));\n        }\n\n        stubBlobClient.uploadWithResponse(uploadOptions, null, null);\n\n        var tags = new java.util.HashMap<String, String>();\n        tags.put(TARGET_BLOB_NAME_TAG, targetBlobName);\n        stubBlobClient.setTags(tags);\n\n        return MultipartUpload.create(container, targetBlobName,\n                uploadKey, blobMetadata, options);\n    }\n\n    /**\n     * Validates metadata key according to Azure naming rules.\n     * Keys must be valid C# identifiers (alphanumeric and underscores).\n     */\n    private static boolean isValidMetadataKey(String key) {\n        if (key == null || key.isEmpty()) {\n            return false;\n        }\n        // Must start with letter or underscore\n        if (!Character.isLetter(key.charAt(0)) && key.charAt(0) != '_') {\n            return false;\n        }\n        // Rest must be alphanumeric or underscore\n        for (int i = 1; i < key.length(); i++) {\n            char c = key.charAt(i);\n            if (!Character.isLetterOrDigit(c) && c != '_') {\n                return false;\n            }\n        }\n        return true;\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        // Delete the stub blob to remove the upload from listMultipartUploads\n        // Note: Uncommitted blocks are automatically removed by Azure after 7 days\n        try {\n            blobServiceClient\n                    .getBlobContainerClient(mpu.containerName())\n                    .getBlobClient(mpu.id())\n                    .delete();\n        } catch (BlobStorageException bse) {\n            if (bse.getStatusCode() == 404) {\n                throw new KeyNotFoundException(mpu.containerName(), mpu.id(),\n                        \"Multipart upload not found: \" + mpu.id());\n            }\n            throw bse;\n        
}\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n            List<MultipartPart> parts) {\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n\n        var containerClient = blobServiceClient.getBlobContainerClient(mpu.containerName());\n        var stubBlobClient = containerClient.getBlobClient(uploadKey);\n\n        BlobProperties stubProperties;\n        java.util.Map<String, String> stubTags;\n        try {\n            stubProperties = stubBlobClient.getProperties();\n            stubTags = stubBlobClient.getTags();\n        } catch (BlobStorageException bse) {\n            if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {\n                throw new IllegalArgumentException(\n                        \"Upload not found: uploadId=\" + uploadKey);\n            }\n            throw bse;\n        }\n\n        String targetBlobName = stubTags.get(TARGET_BLOB_NAME_TAG);\n        if (targetBlobName == null) {\n            throw new IllegalArgumentException(\n                    \"Stub blob missing target name tag: uploadId=\" + uploadKey);\n        }\n\n        var userMetadata = stubProperties.getMetadata();\n        var contentMetadata = toContentMetadata(stubProperties);\n        var tier = stubProperties.getAccessTier();\n\n        if (parts == null || parts.isEmpty()) {\n            throw new IllegalArgumentException(\"Parts list cannot be empty\");\n        }\n\n        int previousPartNumber = 0;\n        for (var part : parts) {\n            int partNumber = part.partNumber();\n            if (partNumber <= previousPartNumber) {\n                throw new IllegalArgumentException(\n                        \"Parts must be in strictly ascending order\");\n            }\n            previousPartNumber = partNumber;\n        }\n\n        if (parts.size() > 50_000) {\n            throw new IllegalArgumentException(\n                    \"Too many parts: 
\" + parts.size() + \" (max 50,000)\");\n        }\n\n        var client = containerClient\n                .getBlobClient(targetBlobName)\n                .getBlockBlobClient();\n\n        var blockList = client.listBlocks(BlockListType.UNCOMMITTED);\n        var uncommittedBlocks = blockList.getUncommittedBlocks();\n\n        var blockMap = new java.util.HashMap<String, Long>();\n        for (var block : uncommittedBlocks) {\n            blockMap.put(block.getName(), block.getSizeLong());\n        }\n\n        var blockIds = ImmutableList.<String>builder();\n\n        for (int i = 0; i < parts.size(); i++) {\n            var part = parts.get(i);\n            int partNumber = part.partNumber();\n\n            String blockId = makeBlockId(nonce, partNumber);\n            blockIds.add(blockId);\n\n            if (!blockMap.containsKey(blockId)) {\n                throw new IllegalArgumentException(\n                        \"Part \" + partNumber + \" not found in staged blocks\");\n            }\n        }\n\n        BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders();\n        blobHttpHeaders.setContentType(contentMetadata.getContentType());\n        blobHttpHeaders.setContentDisposition(contentMetadata.getContentDisposition());\n        blobHttpHeaders.setContentEncoding(contentMetadata.getContentEncoding());\n        blobHttpHeaders.setContentLanguage(contentMetadata.getContentLanguage());\n        blobHttpHeaders.setCacheControl(contentMetadata.getCacheControl());\n\n        var options = new BlockBlobCommitBlockListOptions(\n                blockIds.build());\n        options.setHeaders(blobHttpHeaders);\n        if (userMetadata != null && !userMetadata.isEmpty()) {\n            options.setMetadata(userMetadata);\n        }\n        if (tier != null) {\n            options.setTier(tier);\n        }\n\n        // Support conditional writes (If-Match/If-None-Match)\n        if (mpu.putOptions() instanceof PutOptions2) {\n            var putOptions2 = 
(PutOptions2) mpu.putOptions();\n            String ifMatch = putOptions2.getIfMatch();\n            String ifNoneMatch = putOptions2.getIfNoneMatch();\n            if (ifMatch != null || ifNoneMatch != null) {\n                options.setRequestConditions(new BlobRequestConditions()\n                        .setIfMatch(ifMatch)\n                        .setIfNoneMatch(ifNoneMatch));\n            }\n        }\n\n        try {\n            var response = client.commitBlockListWithResponse(\n                    options, /*timeout=*/ null, /*context=*/ null);\n\n            stubBlobClient.delete();\n\n            String finalETag = response.getValue().getETag();\n            return finalETag;\n        } catch (BlobStorageException bse) {\n            var errorCode = bse.getErrorCode();\n            if (errorCode.equals(BlobErrorCode.BLOB_NOT_FOUND) ||\n                    errorCode.equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {\n                throw new IllegalArgumentException(\n                        \"Upload not found: container=\" + mpu.containerName() +\n                        \", key=\" + targetBlobName);\n            } else if (bse.getStatusCode() == 409) {\n                throw new IllegalArgumentException(\n                        \"Conflict during commit: \" + bse.getMessage(), bse);\n            } else if (bse.getStatusCode() == 412) {\n                translateAndRethrowException(bse, mpu.containerName(), targetBlobName);\n            }\n            throw bse;\n        }\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, Payload payload) {\n\n        if (partNumber < 1 || partNumber > 10_000) {\n            throw new IllegalArgumentException(\n                    \"Part number must be between 1 and 10,000, got: \" + partNumber);\n        }\n\n        Long contentLength = payload.getContentMetadata().getContentLength();\n        if (contentLength == null) {\n            throw new 
IllegalArgumentException(\"Content-Length is required\");\n        }\n        if (contentLength < 0) {\n            throw new IllegalArgumentException(\n                    \"Content-Length must be non-negative, got: \" + contentLength);\n        }\n\n        if (contentLength > getMaximumMultipartPartSize()) {\n            throw new IllegalArgumentException(\n                    \"Part size exceeds maximum of \" + getMaximumMultipartPartSize() +\n                    \" bytes: \" + contentLength);\n        }\n\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n        String blockId = makeBlockId(nonce, partNumber);\n        var asyncClient = createNonRetryingBlockBlobAsyncClient(\n                mpu.containerName(), mpu.blobName());\n\n        byte[] md5Hash;\n        try (var is = payload.openStream();\n             var his = new HashingInputStream(MD5, is)) {\n            var providedMd5 = payload.getContentMetadata().getContentMD5AsHashCode();\n\n            final int maxChunkSize = 4 * 1024 * 1024;\n\n            Flux<ByteBuffer> body = Flux.generate(\n                () -> 0L,\n                (position, sink) -> {\n                    try {\n                        if (position >= contentLength) {\n                            sink.complete();\n                            return position;\n                        }\n                        int chunkSize = (int) Math.min(maxChunkSize,\n                                contentLength - position);\n                        ByteBuffer buffer = ByteBuffer.allocate(chunkSize);\n                        byte[] array = buffer.array();\n                        int totalRead = 0;\n                        while (totalRead < chunkSize) {\n                            int read = his.read(array, totalRead,\n                                    chunkSize - totalRead);\n                            if (read == -1) {\n                                if (position + 
totalRead < contentLength) {\n                                    sink.error(new IOException(\n                                        \"Stream ended at %d bytes, expected %d\".formatted(\n                                            position + totalRead, contentLength)));\n                                    return position + totalRead;\n                                }\n                                break;\n                            }\n                            totalRead += read;\n                        }\n                        if (totalRead == 0) {\n                            sink.error(new IOException(\n                                \"Stream ended at %d bytes, expected %d\".formatted(\n                                        position, contentLength)));\n                            return position;\n                        }\n                        buffer.position(totalRead);\n                        buffer.flip();\n                        sink.next(buffer.asReadOnlyBuffer());\n                        long nextPosition = position + totalRead;\n                        if (nextPosition >= contentLength) {\n                            sink.complete();\n                        }\n                        return nextPosition;\n                    } catch (IOException e) {\n                        sink.error(e);\n                        return position;\n                    }\n                },\n                position -> {\n                    // Stream is closed by try-with-resources\n                }\n            );\n\n            asyncClient.stageBlock(blockId, body, contentLength).block();\n\n            md5Hash = his.hash().asBytes();\n\n            if (providedMd5 != null) {\n                if (!MessageDigest.isEqual(md5Hash, providedMd5.asBytes())) {\n                    throw new IllegalArgumentException(\"Content-MD5 mismatch\");\n                }\n            }\n\n        } catch (BlobStorageException bse) {\n            
translateAndRethrowException(bse, mpu.containerName(), mpu.blobName());\n            throw new RuntimeException(\n                    \"Failed to upload part %d for blob '%s' in container '%s': %s\".formatted(\n                    partNumber, mpu.blobName(), mpu.containerName(), bse.getMessage()), bse);\n        } catch (IOException ioe) {\n            throw new RuntimeException(\n                    \"Failed to upload part %d for blob '%s' in container '%s': %s\".formatted(\n                    partNumber, mpu.blobName(), mpu.containerName(), ioe.getMessage()), ioe);\n        }\n\n        String eTag = BaseEncoding.base16()\n                .lowerCase().encode(md5Hash);\n        Date lastModified = null;\n        return MultipartPart.create(partNumber, contentLength, eTag, lastModified);\n    }\n\n    /**\n     * Creates a BlockBlobAsyncClient with retries disabled for streaming uploads.\n     * This allows us to stream directly from non-markable InputStreams without\n     * needing temp files or buffering. 
The S3 client can retry the entire part\n     * upload if needed.\n     */\n    private BlockBlobAsyncClient createNonRetryingBlockBlobAsyncClient(\n            String container, String blobName) {\n        var cred = creds.get();\n\n        var clientBuilder = new BlobServiceClientBuilder()\n                .endpoint(endpoint)\n                .retryOptions(NO_RETRY_OPTIONS);\n\n        if (!cred.identity.isEmpty() && !cred.credential.isEmpty()) {\n            clientBuilder.credential(\n                new AzureNamedKeyCredential(cred.identity, cred.credential));\n        } else {\n            clientBuilder.credential(new DefaultAzureCredentialBuilder().build());\n        }\n\n        return clientBuilder.buildAsyncClient()\n                .getBlobContainerAsyncClient(container)\n                .getBlobAsyncClient(blobName)\n                .getBlockBlobAsyncClient();\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n\n        var containerClient = blobServiceClient.getBlobContainerClient(mpu.containerName());\n        var stubBlobClient = containerClient.getBlobClient(uploadKey);\n\n        String targetBlobName;\n        try {\n            var stubTags = stubBlobClient.getTags();\n            targetBlobName = stubTags.get(TARGET_BLOB_NAME_TAG);\n        } catch (BlobStorageException bse) {\n            if (bse.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {\n                throw new IllegalArgumentException(\n                        \"Upload not found: uploadId=\" + uploadKey);\n            }\n            throw bse;\n        }\n\n        var client = containerClient\n                .getBlobClient(targetBlobName)\n                .getBlockBlobClient();\n\n        BlockList blockList;\n        try {\n            blockList = client.listBlocks(BlockListType.ALL);\n        } catch 
(BlobStorageException bse) {\n            if (bse.getStatusCode() == 404) {\n                return ImmutableList.of();\n            }\n            throw bse;\n        }\n\n        var parts = ImmutableList.<MultipartPart>builder();\n\n        String noncePrefix = nonce + \":\";\n\n        for (var properties : blockList.getUncommittedBlocks()) {\n            String encodedBlockId = properties.getName();\n            String blockId;\n            try {\n                blockId = new String(Base64.getDecoder().decode(encodedBlockId),\n                        StandardCharsets.UTF_8);\n            } catch (IllegalArgumentException e) {\n                continue;\n            }\n\n            if (!blockId.startsWith(noncePrefix)) {\n                continue;\n            }\n\n            int partNumber;\n            try {\n                String partNumberStr = blockId.substring(noncePrefix.length());\n                partNumber = Integer.parseInt(partNumberStr);\n            } catch (NumberFormatException e) {\n                continue;\n            }\n\n            String eTag = \"\";  // listBlocks does not return ETag\n            Date lastModified = null; // listBlocks does not return LastModified\n            parts.add(MultipartPart.create(partNumber, properties.getSizeLong(),\n                    eTag, lastModified));\n        }\n        return parts.build();\n    }\n\n    @Override\n    public List<MultipartUpload> listMultipartUploads(String container) {\n        var containerClient = blobServiceClient.getBlobContainerClient(container);\n\n        var builder = ImmutableList.<MultipartUpload>builder();\n\n        var options = new ListBlobsOptions();\n        options.setPrefix(STUB_BLOB_PREFIX);\n        var details = new BlobListDetails();\n        details.setRetrieveTags(true);\n        options.setDetails(details);\n\n        for (var blobItem : containerClient.listBlobs(options, null, null)) {\n            // e.g., \".s3proxy/stubs/<uuid>\"\n            
String uploadKey = blobItem.getName();\n            var tags = blobItem.getTags();\n\n            if (tags == null || tags.get(TARGET_BLOB_NAME_TAG) == null) {\n                continue;\n            }\n\n            String targetBlobName = tags.get(TARGET_BLOB_NAME_TAG);\n            builder.add(MultipartUpload.create(container, targetBlobName,\n                    uploadKey, null, null));\n        }\n\n        return builder.build();\n    }\n\n    @Override\n    public long getMinimumMultipartPartSize() {\n        return 1;\n    }\n\n    @Override\n    public long getMaximumMultipartPartSize() {\n        return 4000L * 1024 * 1024;\n    }\n\n    @Override\n    public int getMaximumNumberOfParts() {\n        return 50 * 1000;\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        throw new UnsupportedOperationException(\"not yet implemented\");\n    }\n\n    private static OffsetDateTime toOffsetDateTime(@Nullable Date date) {\n        if (date == null) {\n            return null;\n        }\n        return date.toInstant().atOffset(ZoneOffset.UTC);\n    }\n\n    private static Date toDate(OffsetDateTime time) {\n        return new Date(time.toInstant().toEpochMilli());\n    }\n\n    private static AccessTier toAccessTier(Tier tier) {\n        return switch (tier) {\n        case ARCHIVE -> AccessTier.ARCHIVE;\n        case COOL -> AccessTier.COOL;\n        case INFREQUENT -> AccessTier.COOL;\n        case COLD -> AccessTier.COLD;\n        case STANDARD -> AccessTier.HOT;\n        };\n    }\n\n    private static Tier toTier(AccessTier tier) {\n        if (tier == null) {\n            return Tier.STANDARD;\n        } else if (tier.equals(AccessTier.ARCHIVE)) {\n            return Tier.ARCHIVE;\n        } else if (tier.equals(AccessTier.COLD)) {\n            return Tier.COLD;\n        } else if (tier.equals(AccessTier.COOL)) {\n            return Tier.COOL;\n        } else {\n            return Tier.STANDARD;\n        
}\n    }\n\n    private static ContentMetadata toContentMetadata(\n            BlobProperties properties) {\n        var expires = properties.getExpiresOn();\n        return ContentMetadataBuilder.create()\n                .cacheControl(properties.getCacheControl())\n                .contentDisposition(properties.getContentDisposition())\n                .contentEncoding(properties.getContentEncoding())\n                .contentLanguage(properties.getContentLanguage())\n                .contentLength(properties.getBlobSize())\n                .contentType(properties.getContentType())\n                .expires(expires != null ? toDate(expires) : null)\n                .build();\n    }\n\n    /**\n     * Creates a deterministic Base64-encoded block ID using the upload nonce\n     * and padded part number.\n     *\n     * \"Block IDs are strings of equal length within a blob. Block client code usually uses base-64 encoding to normalize strings into equal lengths.\"\n     * Source: https://learn.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs\n     *\n     * Format: nonce + \":\" + 5-digit padded part number (e.g., \"nonce:00001\")\n     *\n     * @param nonce The upload session nonce from the uploadId context\n     * @param partNumber The part number (1-10,000)\n     * @return Base64-encoded block ID\n     */\n    private static String makeBlockId(String nonce, int partNumber) {\n        String rawId = \"%s:%05d\".formatted(nonce, partNumber);\n        return Base64.getEncoder().encodeToString(\n                rawId.getBytes(StandardCharsets.UTF_8));\n    }\n\n    /**\n     * Translate BlobStorageException to a jclouds exception.  
Throws if\n     * translated otherwise returns.\n     */\n    private void translateAndRethrowException(BlobStorageException bse,\n            String container, @Nullable String key) {\n        var code = bse.getErrorCode();\n        if (code.equals(BlobErrorCode.BLOB_NOT_FOUND)) {\n            var exception = new KeyNotFoundException(container, key, \"\");\n            exception.initCause(bse);\n            throw exception;\n        } else if (code.equals(BlobErrorCode.CONTAINER_NOT_FOUND)) {\n            var exception = new ContainerNotFoundException(container, \"\");\n            exception.initCause(bse);\n            throw exception;\n        } else if (code.equals(BlobErrorCode.CONDITION_NOT_MET)) {\n            var request = HttpRequest.builder()\n                    .method(\"GET\")\n                    .endpoint(endpoint)\n                    .build();\n            var response = HttpResponse.builder()\n                    .statusCode(Status.PRECONDITION_FAILED.getStatusCode())\n                    .build();\n            throw new HttpResponseException(\n                    new HttpCommand(request), response, bse);\n        } else if (code.equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {\n            var request = HttpRequest.builder()\n                    .method(\"PUT\")\n                    .endpoint(endpoint)\n                    .build();\n            var response = HttpResponse.builder()\n                    .statusCode(Status.PRECONDITION_FAILED.getStatusCode())\n                    .build();\n            throw new HttpResponseException(\n                    new HttpCommand(request), response, bse);\n        } else if (code.equals(BlobErrorCode.INVALID_OPERATION)) {\n            var request = HttpRequest.builder()\n                    .method(\"GET\")\n                    .endpoint(endpoint)\n                    .build();\n            var response = HttpResponse.builder()\n                    .statusCode(Status.BAD_REQUEST.getStatusCode())\n              
      .build();\n            throw new HttpResponseException(\n                    new HttpCommand(request), response, bse);\n        } else if (bse.getErrorCode().equals(BlobErrorCode.INVALID_RESOURCE_NAME)) {\n            throw new IllegalArgumentException(\n                    \"Invalid container name\", bse);\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/azureblob/AzureBlobStoreContextModule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.azureblob;\n\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Scopes;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.attr.ConsistencyModel;\n\npublic final class AzureBlobStoreContextModule extends AbstractModule {\n    @Override\n    protected void configure() {\n        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);\n        bind(BlobStore.class).to(AzureBlobStore.class).in(Scopes.SINGLETON);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/Constants.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.nio.charset.StandardCharsets;\nimport java.util.regex.Pattern;\n\npublic final class Constants {\n    public static final short VERSION = 1;\n    public static final String AES_CIPHER = \"AES/CFB/NoPadding\";\n    public static final String S3_ENC_SUFFIX = \".s3enc\";\n    public static final String MPU_FOLDER = \".mpu/\";\n    public static final Pattern MPU_ETAG_SUFFIX_PATTERN =\n        Pattern.compile(\"-([0-9]+)$\");\n    public static final String METADATA_ENCRYPTION_PARTS =\n        \"s3proxy_encryption_parts\";\n    public static final String METADATA_IS_ENCRYPTED_MULTIPART =\n        \"s3proxy_encryption_multipart\";\n    public static final String METADATA_MULTIPART_KEY =\n        \"s3proxy_mpu_key\";\n    public static final int AES_BLOCK_SIZE = 16;\n    public static final int PADDING_BLOCK_SIZE = 64;\n    public static final byte[] DELIMITER =\n        \"-S3-ENC-\".getBytes(StandardCharsets.UTF_8);\n    public static final int PADDING_DELIMITER_LENGTH = DELIMITER.length;\n    public static final int PADDING_IV_LENGTH = 16;\n    public static final int PADDING_PART_LENGTH = 4;\n    public static final int PADDING_SIZE_LENGTH = 8;\n    public static final int PADDING_VERSION_LENGTH = 2;\n\n    private Constants() {\n        throw new AssertionError(\"Cannot 
instantiate utility constructor\");\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/Decryption.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Arrays;\nimport java.util.TreeMap;\n\nimport javax.crypto.SecretKey;\nimport javax.crypto.spec.SecretKeySpec;\n\nimport com.google.common.io.ByteStreams;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.options.GetOptions;\n\npublic class Decryption {\n    private final SecretKey encryptionKey;\n    private TreeMap<Integer, PartPadding> partList;\n    private long outputOffset;\n    private long outputLength;\n    private boolean skipFirstBlock;\n    private long unencryptedSize;\n    private long encryptedSize;\n    private long startAt;\n    private int skipParts;\n    private long skipPartBytes;\n    private boolean isEncrypted;\n\n    public Decryption(SecretKeySpec key, BlobStore blobStore,\n        BlobMetadata meta,\n        long offset, long length) throws IOException {\n        encryptionKey = key;\n        outputLength = length;\n        isEncrypted = true;\n\n        // if blob does not exist or size is smaller than the part padding\n        // then the file is considered not encrypted\n        if (meta == null || meta.getSize() <= 64) {\n          
  blobIsNotEncrypted(offset);\n            return;\n        }\n\n        // get the 64 byte of part padding from the end of the blob\n        var options = new GetOptions();\n        options.range(meta.getSize() - Constants.PADDING_BLOCK_SIZE,\n            meta.getSize());\n        Blob blob =\n            blobStore.getBlob(meta.getContainer(), meta.getName(), options);\n\n        // read the padding structure\n        PartPadding lastPartPadding = PartPadding.readPartPaddingFromBlob(blob);\n        if (!Arrays.equals(\n            lastPartPadding.getDelimiter().getBytes(StandardCharsets.UTF_8),\n            Constants.DELIMITER)) {\n            blobIsNotEncrypted(offset);\n            return;\n        }\n\n        partList = new TreeMap<>();\n\n        // detect multipart\n        if (lastPartPadding.getPart() > 1 &&\n            meta.getSize() >\n                (lastPartPadding.getSize() + Constants.PADDING_BLOCK_SIZE)) {\n            unencryptedSize = lastPartPadding.getSize();\n            encryptedSize =\n                lastPartPadding.getSize() + Constants.PADDING_BLOCK_SIZE;\n\n            // note that parts are in reversed order\n            int part = 1;\n\n            // add the last part to the list\n            partList.put(part, lastPartPadding);\n\n            // loop part by part from end to the beginning\n            // to build a list of all blocks\n            while (encryptedSize < meta.getSize()) {\n                // get the next block\n                // rewind by the current encrypted block size\n                // minus the encryption padding\n                options = new GetOptions();\n                long startAt = (meta.getSize() - encryptedSize) -\n                    Constants.PADDING_BLOCK_SIZE;\n                long endAt = meta.getSize() - encryptedSize - 1;\n                options.range(startAt, endAt);\n                blob = blobStore.getBlob(meta.getContainer(), meta.getName(),\n                    options);\n\n                
part++;\n\n                // read the padding structure\n                PartPadding partPadding =\n                    PartPadding.readPartPaddingFromBlob(blob);\n\n                // add the part to the list\n                this.partList.put(part, partPadding);\n\n                // update the encrypted size\n                encryptedSize = encryptedSize +\n                    (partPadding.getSize() + Constants.PADDING_BLOCK_SIZE);\n                unencryptedSize = this.unencryptedSize + partPadding.getSize();\n            }\n\n        } else {\n            // add the single part to the list\n            partList.put(1, lastPartPadding);\n\n            // update the unencrypted size\n            unencryptedSize = meta.getSize() - Constants.PADDING_BLOCK_SIZE;\n\n            // update the encrypted size\n            encryptedSize = meta.getSize();\n        }\n\n        // calculate the offset\n        calculateOffset(offset);\n\n        // if there is a offset and no length set the output length\n        if (offset > 0 && length <= 0) {\n            outputLength = unencryptedSize - offset;\n        }\n    }\n\n    private void blobIsNotEncrypted(long offset) {\n        isEncrypted = false;\n        startAt = offset;\n    }\n\n    // calculate the tail bytes we need to read\n    // because we know the unencryptedSize we can return startAt offset\n    public final long calculateTail() {\n        long offset = unencryptedSize - outputLength;\n        calculateOffset(offset);\n\n        return startAt;\n    }\n\n    public final long getEncryptedSize() {\n        return encryptedSize;\n    }\n\n    public final long getUnencryptedSize() {\n        return unencryptedSize;\n    }\n\n    public final long calculateEndAt(long endAt) {\n        // need to have always one more\n        endAt++;\n\n        // handle multipart\n        if (partList.size() > 1) {\n            long plaintextSize = 0;\n\n            // always skip 1 part at the end\n            int 
partCounter = 1;\n\n            // we need the map in reversed order\n            for (var part : partList.descendingMap().entrySet()) {\n                // check the parts that are between offset and end\n                plaintextSize = plaintextSize + part.getValue().getSize();\n                if (endAt > plaintextSize) {\n                    partCounter++;\n                } else {\n                    break;\n                }\n            }\n\n            // add the paddings of all parts\n            endAt = endAt + ((long) Constants.PADDING_BLOCK_SIZE * partCounter);\n        } else {\n            // we need to read one AES block more in AES CFB mode\n            long rest = endAt % Constants.AES_BLOCK_SIZE;\n            if (rest > 0) {\n                endAt = endAt + Constants.AES_BLOCK_SIZE;\n            }\n        }\n\n        return endAt;\n    }\n\n    // open the streams and pipes\n    public final InputStream openStream(InputStream is) throws IOException {\n        // if the blob is not encrypted return the unencrypted stream\n        if (!isEncrypted) {\n            return is;\n        }\n\n        // pass input stream through decryption\n        InputStream dis = new DecryptionInputStream(is, encryptionKey, partList,\n            skipParts, skipPartBytes);\n\n        // skip some bytes if necessary\n        long offset = outputOffset;\n        if (this.skipFirstBlock) {\n            offset = offset + Constants.AES_BLOCK_SIZE;\n        }\n        dis.skipNBytes(offset);\n\n        // trim the stream to a specific length if needed\n        return outputLength >= 0 ? 
ByteStreams.limit(dis, outputLength) : dis;\n    }\n\n    private void calculateOffset(long offset) {\n        startAt = 0;\n        skipParts = 0;\n\n        // handle multipart\n        if (partList.size() > 1) {\n\n            // init counters\n            long plaintextSize = 0;\n            long encryptedSize = 0;\n            long partOffset;\n            long partStartAt = 0;\n\n            // we need the map in reversed order\n            for (var part : partList.descendingMap().entrySet()) {\n                // compute the plaintext size of the current part\n                plaintextSize = plaintextSize + part.getValue().getSize();\n\n                // check if the offset is located in another part\n                if (offset > plaintextSize) {\n                    // compute the encrypted size of the skipped part\n                    encryptedSize = encryptedSize + part.getValue().getSize() +\n                        Constants.PADDING_BLOCK_SIZE;\n\n                    // compute offset in this part\n                    partOffset = offset - plaintextSize;\n\n                    // skip the first block in CFB mode\n                    skipFirstBlock = partOffset >= 16;\n\n                    // compute the offset of the output\n                    outputOffset = partOffset % Constants.AES_BLOCK_SIZE;\n\n                    // skip this part\n                    skipParts++;\n\n                    // we always need to read one previous AES block in CFB mode\n                    // if we read from offset\n                    if (partOffset > Constants.AES_BLOCK_SIZE) {\n                        long rest = partOffset % Constants.AES_BLOCK_SIZE;\n                        partStartAt =\n                            (partOffset - Constants.AES_BLOCK_SIZE) - rest;\n                    } else {\n                        partStartAt = 0;\n                    }\n                } else {\n                    // start at a specific byte position\n                    // 
while respecting other parts\n                    startAt = encryptedSize + partStartAt;\n\n                    // skip part bytes if we are not starting\n                    // from the beginning of a part\n                    skipPartBytes = partStartAt;\n                    break;\n                }\n            }\n        }\n\n        // handle single part\n        if (skipParts == 0) {\n            // skip the first block in CFB mode\n            skipFirstBlock = offset >= 16;\n\n            // compute the offset of the output\n            outputOffset = offset % Constants.AES_BLOCK_SIZE;\n\n            // we always need to read one previous AES block in CFB mode\n            // if we read from offset\n            if (offset > Constants.AES_BLOCK_SIZE) {\n                long rest = offset % Constants.AES_BLOCK_SIZE;\n                startAt = (offset - Constants.AES_BLOCK_SIZE) - rest;\n            }\n\n            // skip part bytes if we are not starting\n            // from the beginning of a part\n            skipPartBytes = startAt;\n        }\n    }\n\n    public final long getStartAt() {\n        return startAt;\n    }\n\n    public final boolean isEncrypted() {\n        return isEncrypted;\n    }\n\n    public final long getContentLength() {\n        if (outputLength > 0) {\n            return outputLength;\n        } else {\n            return unencryptedSize;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/DecryptionInputStream.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.io.FilterInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.SortedMap;\n\nimport javax.crypto.Cipher;\nimport javax.crypto.SecretKey;\nimport javax.crypto.ShortBufferException;\n\npublic class DecryptionInputStream extends FilterInputStream {\n\n    // the cipher engine to use to process stream data\n    private final Cipher cipher;\n\n    // the secret key\n    private final SecretKey key;\n\n    // the list of parts we expect in the stream\n    private final SortedMap<Integer, PartPadding> parts;\n\n    /* the buffer holding data that have been read in from the\n       underlying stream, but have not been processed by the cipher\n       engine. 
*/\n    private final byte[] ibuffer = new byte[4096];\n\n    // having reached the end of the underlying input stream\n    private boolean done;\n\n    /* the buffer holding data that have been processed by the cipher\n       engine, but have not been read out */\n    private byte[] obuffer;\n    // the offset pointing to the next \"new\" byte\n    private int ostart;\n    // the offset pointing to the last \"new\" byte\n    private int ofinish;\n    // stream status\n    private boolean closed;\n    // the current part\n    private int part;\n    // the remaining bytes of the current part\n    private long partBytesRemain;\n\n    /**\n     * Constructs a CipherInputStream from an InputStream and a\n     * Cipher.\n     * <br>Note: if the specified input stream or cipher is\n     * null, a NullPointerException may be thrown later when\n     * they are used.\n     *\n     * @param is            the to-be-processed input stream\n     * @param key           the decryption key\n     * @param parts         the list of parts\n     * @param skipParts     the amount of parts to skip\n     * @param skipPartBytes the amount of part bytes to skip\n     * @throws IOException if cipher fails\n     */\n    public DecryptionInputStream(InputStream is, SecretKey key,\n            SortedMap<Integer, PartPadding> parts, int skipParts,\n            long skipPartBytes) throws IOException {\n        super(is);\n        in = is;\n        this.parts = parts;\n        this.key = key;\n\n        PartPadding partPadding = parts.get(parts.size() - skipParts);\n\n        try {\n            // init the cipher\n            cipher = Cipher.getInstance(Constants.AES_CIPHER);\n            cipher.init(Cipher.DECRYPT_MODE, key, partPadding.getIv());\n        } catch (Exception e) {\n            throw new IOException(e);\n        }\n\n        // set the part to begin with\n        part = parts.size() - skipParts;\n\n        // adjust part size due to offset\n        partBytesRemain = 
parts.get(part).getSize() - skipPartBytes;\n    }\n\n    /**\n     * Ensure obuffer is big enough for the next update or doFinal\n     * operation, given the input length <code>inLen</code> (in bytes)\n     * The ostart and ofinish indices are reset to 0.\n     *\n     * @param inLen the input length (in bytes)\n     */\n    private void ensureCapacity(int inLen) {\n        int minLen = cipher.getOutputSize(inLen);\n        if (obuffer == null || obuffer.length < minLen) {\n            obuffer = new byte[minLen];\n        }\n        ostart = 0;\n        ofinish = 0;\n    }\n\n    /**\n     * Private convenience function, read in data from the underlying\n     * input stream and process them with cipher. This method is called\n     * when the processed bytes inside obuffer has been exhausted.\n     * <p>\n     * Entry condition: ostart = ofinish\n     * <p>\n     * Exit condition: ostart = 0 AND ostart <= ofinish\n     * <p>\n     * return (ofinish-ostart) (we have this many bytes for you)\n     * return 0 (no data now, but could have more later)\n     * return -1 (absolutely no more data)\n     * <p>\n     * Note: Exceptions are only thrown after the stream is completely read.\n     * For AEAD ciphers a read() of any length will internally cause the\n     * whole stream to be read fully and verify the authentication tag before\n     * returning decrypted data or exceptions.\n     */\n    private int getMoreData() throws IOException {\n        if (done) {\n            return -1;\n        }\n\n        int readLimit = ibuffer.length;\n        if (partBytesRemain < ibuffer.length) {\n            readLimit = (int) partBytesRemain;\n        }\n\n        int readin;\n        if (partBytesRemain == 0) {\n            readin = -1;\n        } else {\n            readin = in.read(ibuffer, 0, readLimit);\n        }\n\n        if (readin == -1) {\n            ensureCapacity(0);\n            try {\n                ofinish = cipher.doFinal(obuffer, 0);\n            } catch 
(Exception e) {\n                throw new IOException(e);\n            }\n\n            int nextPart = part - 1;\n            if (parts.containsKey(nextPart)) {\n                // reset cipher\n                PartPadding partPadding = parts.get(nextPart);\n                try {\n                    cipher.init(Cipher.DECRYPT_MODE, key, partPadding.getIv());\n                } catch (Exception e) {\n                    throw new IOException(e);\n                }\n\n                // update to the next part\n                part = nextPart;\n\n                // update the remaining bytes of the next part\n                partBytesRemain = parts.get(nextPart).getSize();\n\n                // Cannot call ByteStreams.skipFully since in may be shorter\n                in.readNBytes(Constants.PADDING_BLOCK_SIZE);\n\n                return ofinish;\n            } else {\n                done = true;\n                if (ofinish == 0) {\n                    return -1;\n                } else {\n                    return ofinish;\n                }\n            }\n        }\n        ensureCapacity(readin);\n        try {\n            ofinish = cipher.update(ibuffer, 0, readin, obuffer, ostart);\n        } catch (ShortBufferException e) {\n            throw new IOException(e);\n        }\n\n        partBytesRemain = partBytesRemain - readin;\n        return ofinish;\n    }\n\n    /**\n     * Reads the next byte of data from this input stream. The value\n     * byte is returned as an <code>int</code> in the range\n     * <code>0</code> to <code>255</code>. If no byte is available\n     * because the end of the stream has been reached, the value\n     * <code>-1</code> is returned. 
This method blocks until input data\n     * is available, the end of the stream is detected, or an exception\n     * is thrown.\n     *\n     * @return the next byte of data, or <code>-1</code> if the end of the\n     * stream is reached.\n     * @throws IOException if an I/O error occurs.\n     */\n    @Override\n    public final int read() throws IOException {\n        if (ostart >= ofinish) {\n            // we loop for new data as the spec says we are blocking\n            int i = 0;\n            while (i == 0) {\n                i = getMoreData();\n            }\n            if (i == -1) {\n                return -1;\n            }\n        }\n        return (int) obuffer[ostart++] & 0xff;\n    }\n\n    /**\n     * Reads up to <code>b.length</code> bytes of data from this input\n     * stream into an array of bytes.\n     * <p>\n     * The <code>read</code> method of <code>InputStream</code> calls\n     * the <code>read</code> method of three arguments with the arguments\n     * <code>b</code>, <code>0</code>, and <code>b.length</code>.\n     *\n     * @param b the buffer into which the data is read.\n     * @return the total number of bytes read into the buffer, or\n     * <code>-1</code> is there is no more data because the end of\n     * the stream has been reached.\n     * @throws IOException if an I/O error occurs.\n     * @see java.io.InputStream#read(byte[], int, int)\n     */\n    @Override\n    public final int read(byte[] b) throws IOException {\n        return read(b, 0, b.length);\n    }\n\n    /**\n     * Reads up to <code>len</code> bytes of data from this input stream\n     * into an array of bytes. This method blocks until some input is\n     * available. 
If the first argument is <code>null,</code> up to\n     * <code>len</code> bytes are read and discarded.\n     *\n     * @param b   the buffer into which the data is read.\n     * @param off the start offset in the destination array\n     *            <code>buf</code>\n     * @param len the maximum number of bytes read.\n     * @return the total number of bytes read into the buffer, or\n     * <code>-1</code> if there is no more data because the end of\n     * the stream has been reached.\n     * @throws IOException if an I/O error occurs.\n     * @see java.io.InputStream#read()\n     */\n    @Override\n    public final int read(byte[] b, int off, int len) throws IOException {\n        if (ostart >= ofinish) {\n            // we loop for new data as the spec says we are blocking\n            int i = 0;\n            while (i == 0) {\n                i = getMoreData();\n            }\n            if (i == -1) {\n                return -1;\n            }\n        }\n        if (len <= 0) {\n            return 0;\n        }\n        int available = ofinish - ostart;\n        if (len < available) {\n            available = len;\n        }\n        if (b != null) {\n            System.arraycopy(obuffer, ostart, b, off, available);\n        }\n        ostart = ostart + available;\n        return available;\n    }\n\n    /**\n     * Skips <code>n</code> bytes of input from the bytes that can be read\n     * from this input stream without blocking.\n     *\n     * <p>Fewer bytes than requested might be skipped.\n     * The actual number of bytes skipped is equal to <code>n</code> or\n     * the result of a call to\n     * {@link #available() available},\n     * whichever is smaller.\n     * If <code>n</code> is less than zero, no bytes are skipped.\n     *\n     * <p>The actual number of bytes skipped is returned.\n     *\n     * @param n the number of bytes to be skipped.\n     * @return the actual number of bytes skipped.\n     * @throws IOException if an I/O error 
occurs.\n     */\n    @Override\n    public final long skip(long n) throws IOException {\n        int available = ofinish - ostart;\n        if (n > available) {\n            n = available;\n        }\n        if (n < 0) {\n            return 0;\n        }\n        ostart += (int) n;\n        return n;\n    }\n\n    /**\n     * Returns the number of bytes that can be read from this input\n     * stream without blocking. The <code>available</code> method of\n     * <code>InputStream</code> returns <code>0</code>. This method\n     * <B>should</B> be overridden by subclasses.\n     *\n     * @return the number of bytes that can be read from this input stream\n     * without blocking.\n     */\n    @Override\n    public final int available() {\n        return ofinish - ostart;\n    }\n\n    /**\n     * Closes this input stream and releases any system resources\n     * associated with the stream.\n     * <p>\n     * The <code>close</code> method of <code>CipherInputStream</code>\n     * calls the <code>close</code> method of its underlying input\n     * stream.\n     *\n     * @throws IOException if an I/O error occurs.\n     */\n    @Override\n    public final void close() throws IOException {\n        if (closed) {\n            return;\n        }\n        closed = true;\n        in.close();\n\n        // Throw away the unprocessed data and throw no crypto exceptions.\n        // AEAD ciphers are fully read before closing.  
Any authentication\n        // exceptions would occur while reading.\n        if (!done) {\n            ensureCapacity(0);\n            try {\n                cipher.doFinal(obuffer, 0);\n            } catch (Exception e) {\n                // Catch exceptions as the rest of the stream is unused.\n            }\n        }\n        obuffer = null;\n    }\n\n    /**\n     * Tests if this input stream supports the <code>mark</code>\n     * and <code>reset</code> methods, which it does not.\n     *\n     * @return <code>false</code>, since this class does not support the\n     * <code>mark</code> and <code>reset</code> methods.\n     * @see java.io.InputStream#mark(int)\n     * @see java.io.InputStream#reset()\n     */\n    @Override\n    public final boolean markSupported() {\n        return false;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/Encryption.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.security.GeneralSecurityException;\nimport java.security.SecureRandom;\n\nimport javax.crypto.Cipher;\nimport javax.crypto.CipherInputStream;\nimport javax.crypto.spec.IvParameterSpec;\nimport javax.crypto.spec.SecretKeySpec;\n\npublic class Encryption {\n    private final InputStream cis;\n    private final IvParameterSpec iv;\n    private final int part;\n\n    public Encryption(SecretKeySpec key, InputStream isRaw, int partNumber)\n            throws GeneralSecurityException {\n        iv = generateIV();\n\n        Cipher cipher = Cipher.getInstance(Constants.AES_CIPHER);\n        cipher.init(Cipher.ENCRYPT_MODE, key, iv);\n        cis = new CipherInputStream(isRaw, cipher);\n        part = partNumber;\n    }\n\n    public final InputStream openStream() throws IOException {\n        return new EncryptionInputStream(cis, part, iv);\n    }\n\n    private IvParameterSpec generateIV() {\n        byte[] iv = new byte[Constants.AES_BLOCK_SIZE];\n        var randomSecureRandom = new SecureRandom();\n        randomSecureRandom.nextBytes(iv);\n\n        return new IvParameterSpec(iv);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/EncryptionInputStream.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.ByteBuffer;\n\nimport javax.crypto.spec.IvParameterSpec;\n\npublic class EncryptionInputStream extends InputStream {\n\n    private final int part;\n    private final IvParameterSpec iv;\n    private boolean hasPadding;\n    private long size;\n    private InputStream in;\n\n    public EncryptionInputStream(InputStream in, int part,\n        IvParameterSpec iv) {\n        this.part = part;\n        this.iv = iv;\n        this.in = in;\n    }\n\n    // Padding (64 byte)\n    // Delimiter (8 byte)\n    // IV (16 byte)\n    // Part (4 byte)\n    // Size (8 byte)\n    // Version (2 byte)\n    // Reserved (26 byte)\n    final void padding() throws IOException {\n        if (in != null) {\n            in.close();\n        }\n\n        if (!hasPadding) {\n            ByteBuffer bb = ByteBuffer.allocate(Constants.PADDING_BLOCK_SIZE);\n            bb.put(Constants.DELIMITER);\n            bb.put(iv.getIV());\n            bb.putInt(part);\n            bb.putLong(size);\n            bb.putShort(Constants.VERSION);\n\n            in = new ByteArrayInputStream(bb.array());\n            hasPadding = true;\n        } else {\n            in = null;\n        }\n    }\n\n    @Override\n    public 
final int available() throws IOException {\n        if (in == null) {\n            return 0; // no way to signal EOF from available()\n        }\n        return in.available();\n    }\n\n    @Override\n    public final int read() throws IOException {\n        while (in != null) {\n            int c = in.read();\n            if (c != -1) {\n                size++;\n                return c;\n            }\n            padding();\n        }\n        return -1;\n    }\n\n    @Override\n    public final int read(byte[] b, int off, int len) throws IOException {\n        if (in == null) {\n            return -1;\n        } else if (b == null) {\n            throw new NullPointerException();\n        } else if (off < 0 || len < 0 || len > b.length - off) {\n            throw new IndexOutOfBoundsException();\n        } else if (len == 0) {\n            return 0;\n        }\n        do {\n            int n = in.read(b, off, len);\n            if (n > 0) {\n                size = size + n;\n                return n;\n            }\n            padding();\n        } while (in != null);\n        return -1;\n    }\n\n    @Override\n    public final void close() throws IOException {\n        IOException ioe = null;\n        while (in != null) {\n            try {\n                in.close();\n            } catch (IOException e) {\n                if (ioe == null) {\n                    ioe = e;\n                } else {\n                    ioe.addSuppressed(e);\n                }\n            }\n            padding();\n        }\n        if (ioe != null) {\n            throw ioe;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/crypto/PartPadding.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.crypto;\n\nimport java.io.IOException;\nimport java.nio.ByteBuffer;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Arrays;\n\nimport javax.crypto.spec.IvParameterSpec;\n\nimport org.jclouds.blobstore.domain.Blob;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic class PartPadding {\n    private static final Logger logger =\n        LoggerFactory.getLogger(PartPadding.class);\n\n    private String delimiter;\n    private IvParameterSpec iv;\n    private int part;\n    private long size;\n    private short version;\n\n    public static PartPadding readPartPaddingFromBlob(Blob blob)\n            throws IOException {\n        var partPadding = new PartPadding();\n\n        try (var is = blob.getPayload().openStream()) {\n            byte[] paddingBytes = is.readAllBytes();\n            ByteBuffer bb = ByteBuffer.wrap(paddingBytes);\n\n            byte[] delimiterBytes =\n                new byte[Constants.PADDING_DELIMITER_LENGTH];\n            bb.get(delimiterBytes);\n            partPadding.delimiter =\n                new String(delimiterBytes, StandardCharsets.UTF_8);\n\n            byte[] ivBytes = new byte[Constants.PADDING_IV_LENGTH];\n            bb.get(ivBytes);\n            partPadding.iv = new IvParameterSpec(ivBytes);\n\n            partPadding.part = 
bb.getInt();\n            partPadding.size = bb.getLong();\n            partPadding.version = bb.getShort();\n\n            logger.debug(\"delimiter {}\", partPadding.delimiter);\n            logger.debug(\"iv {}\", Arrays.toString(ivBytes));\n            logger.debug(\"part {}\", partPadding.part);\n            logger.debug(\"size {}\", partPadding.size);\n            logger.debug(\"version {}\", partPadding.version);\n\n            return partPadding;\n        }\n    }\n\n    public final String getDelimiter() {\n        return delimiter;\n    }\n\n    public final IvParameterSpec getIv() {\n        return iv;\n    }\n\n    public final int getPart() {\n        return part;\n    }\n\n    public final long getSize() {\n        return size;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudApiMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.gcloudsdk;\n\nimport java.net.URI;\nimport java.util.Properties;\nimport java.util.Set;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.reference.BlobStoreConstants;\nimport org.jclouds.reflect.Reflection2;\nimport org.jclouds.rest.internal.BaseHttpApiMetadata;\n\n\n@SuppressWarnings(\"rawtypes\")\npublic final class GCloudApiMetadata extends BaseHttpApiMetadata {\n    public GCloudApiMetadata() {\n        this(builder());\n    }\n\n    protected GCloudApiMetadata(Builder builder) {\n        super(builder);\n    }\n\n    private static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromApiMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        Properties properties = BaseHttpApiMetadata.defaultProperties();\n        properties.setProperty(BlobStoreConstants.PROPERTY_USER_METADATA_PREFIX,\n                \"x-goog-meta-\");\n        return properties;\n    }\n\n    // Fake API client\n    private interface GCloudClient {\n    }\n\n    public static final class Builder\n            extends BaseHttpApiMetadata.Builder<GCloudClient, Builder> {\n        protected Builder() {\n            super(GCloudClient.class);\n            
id(\"google-cloud-storage-sdk\")\n                .name(\"Google Cloud Storage API\")\n                .identityName(\"Project ID\")\n                .credentialName(\"JSON Key or Path\")\n                .version(\"v1\")\n                .defaultEndpoint(\"https://storage.googleapis.com\")\n                .documentation(URI.create(\n                        \"https://cloud.google.com/storage/docs/json_api\"))\n                .defaultProperties(GCloudApiMetadata.defaultProperties())\n                .view(Reflection2.typeToken(BlobStoreContext.class))\n                .defaultModules(Set.of(\n                        GCloudBlobStoreContextModule.class));\n        }\n\n        @Override\n        public GCloudApiMetadata build() {\n            return new GCloudApiMetadata(this);\n        }\n\n        @Override\n        protected Builder self() {\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudBlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.gcloudsdk;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.channels.Channels;\nimport java.nio.charset.StandardCharsets;\nimport java.security.MessageDigest;\nimport java.util.Date;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\nimport java.util.UUID;\n\nimport com.google.auth.oauth2.GoogleCredentials;\nimport com.google.auth.oauth2.ServiceAccountCredentials;\nimport com.google.cloud.NoCredentials;\nimport com.google.cloud.ReadChannel;\nimport com.google.cloud.storage.Acl;\nimport com.google.cloud.storage.Blob;\nimport com.google.cloud.storage.BlobId;\nimport com.google.cloud.storage.BlobInfo;\nimport com.google.cloud.storage.Bucket;\nimport com.google.cloud.storage.BucketInfo;\nimport com.google.cloud.storage.Storage;\nimport com.google.cloud.storage.Storage.BlobField;\nimport com.google.cloud.storage.Storage.BlobGetOption;\nimport com.google.cloud.storage.Storage.BlobListOption;\nimport com.google.cloud.storage.Storage.BlobWriteOption;\nimport com.google.cloud.storage.Storage.BucketField;\nimport com.google.cloud.storage.Storage.BucketGetOption;\nimport com.google.cloud.storage.Storage.ComposeRequest;\nimport com.google.cloud.storage.Storage.CopyRequest;\nimport 
com.google.cloud.storage.StorageException;\nimport com.google.cloud.storage.StorageOptions;\nimport com.google.common.base.Supplier;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableSet;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\nimport com.google.common.hash.HashingInputStream;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.net.HttpHeaders;\n\nimport jakarta.inject.Inject;\nimport jakarta.inject.Singleton;\n\nimport org.gaul.s3proxy.PutOptions2;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.StorageType;\nimport org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.domain.internal.BlobBuilderImpl;\nimport org.jclouds.blobstore.domain.internal.BlobMetadataImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.domain.internal.StorageMetadataImpl;\nimport org.jclouds.blobstore.internal.BaseBlobStore;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.http.HttpCommand;\nimport org.jclouds.http.HttpRequest;\nimport 
org.jclouds.http.HttpResponse;\nimport org.jclouds.http.HttpResponseException;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.io.ContentMetadataBuilder;\nimport org.jclouds.io.PayloadSlicer;\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jspecify.annotations.Nullable;\n\n@Singleton\npublic final class GCloudBlobStore extends BaseBlobStore {\n    private static final String STUB_BLOB_PREFIX = \".s3proxy/stubs/\";\n    private static final String TARGET_BLOB_NAME_KEY =\n            \"s3proxy_target_blob_name\";\n    private static final HashFunction MD5 = Hashing.md5();\n    // GCS compose supports up to 32 source objects\n    private static final int MAX_COMPOSE_PARTS = 32;\n\n    private final Storage storage;\n\n    @Inject\n    GCloudBlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            ProviderMetadata provider) {\n        super(context, blobUtils, defaultLocation, locations, slicer);\n        var cred = creds.get();\n        var storageBuilder = StorageOptions.newBuilder();\n        if (cred.identity != null && !cred.identity.isEmpty()) {\n            storageBuilder.setProjectId(cred.identity);\n        }\n        if (cred.credential != null && !cred.credential.isEmpty()) {\n            try {\n                var credentials = ServiceAccountCredentials.fromStream(\n                        new ByteArrayInputStream(\n                                cred.credential.getBytes(StandardCharsets.UTF_8)));\n                storageBuilder.setCredentials(credentials);\n            } catch (IOException ioe) {\n                // Fall back to application default credentials\n                try {\n                    storageBuilder.setCredentials(\n                            
GoogleCredentials.getApplicationDefault());\n                } catch (IOException ioe2) {\n                    throw new RuntimeException(\n                            \"Failed to initialize GCS credentials\", ioe2);\n                }\n            }\n        } else {\n            // No credentials provided — use NoCredentials for emulator\n            storageBuilder.setCredentials(NoCredentials.getInstance());\n        }\n        var endpoint = provider.getEndpoint();\n        if (endpoint != null && !endpoint.isEmpty() &&\n                !endpoint.equals(\"https://storage.googleapis.com\")) {\n            storageBuilder.setHost(endpoint);\n        }\n        storage = storageBuilder.build().getService();\n    }\n\n    @Override\n    public PageSet<? extends StorageMetadata> list() {\n        var set = ImmutableSet.<StorageMetadata>builder();\n        for (Bucket bucket : storage.list().iterateAll()) {\n            set.add(new StorageMetadataImpl(StorageType.CONTAINER,\n                    /*id=*/ null, bucket.getName(), /*location=*/ null,\n                    /*uri=*/ null, /*eTag=*/ null,\n                    toDate(bucket.getCreateTimeOffsetDateTime()),\n                    toDate(bucket.getUpdateTimeOffsetDateTime()),\n                    Map.of(), /*size=*/ null, Tier.STANDARD));\n        }\n        return new PageSetImpl<StorageMetadata>(set.build(), null);\n    }\n\n    @Override\n    public PageSet<? 
extends StorageMetadata> list(String container,\n            ListContainerOptions options) {\n        var gcsOptions = new java.util.ArrayList<BlobListOption>();\n        if (options.getPrefix() != null) {\n            gcsOptions.add(BlobListOption.prefix(options.getPrefix()));\n        }\n        if (options.getMaxResults() != null) {\n            gcsOptions.add(BlobListOption.pageSize(\n                    options.getMaxResults()));\n        }\n        String marker = options.getMarker();\n        if (options.getDelimiter() != null) {\n            gcsOptions.add(BlobListOption.delimiter(options.getDelimiter()));\n        }\n\n        com.google.api.gax.paging.Page<Blob> page;\n        try {\n            page = storage.list(container,\n                    gcsOptions.toArray(new BlobListOption[0]));\n        } catch (StorageException se) {\n            translateAndRethrowException(se, container, null);\n            throw se;\n        }\n\n        var set = ImmutableSet.<StorageMetadata>builder();\n        Integer maxResults = options.getMaxResults();\n        int count = 0;\n        boolean hasMore = false;\n        String lastName = null;\n        for (Blob blob : page.iterateAll()) {\n            // Skip blobs at or before the marker (S3 marker is exclusive)\n            if (marker != null && blob.getName().compareTo(marker) <= 0) {\n                continue;\n            }\n            if (maxResults != null && count >= maxResults) {\n                hasMore = true;\n                break;\n            }\n            if (blob.isDirectory()) {\n                set.add(new StorageMetadataImpl(StorageType.RELATIVE_PATH,\n                        /*id=*/ null, blob.getName(), /*location=*/ null,\n                        /*uri=*/ null, /*eTag=*/ null,\n                        /*creationDate=*/ null, /*lastModified=*/ null,\n                        Map.of(), /*size=*/ null, Tier.STANDARD));\n            } else {\n                set.add(new 
StorageMetadataImpl(StorageType.BLOB,\n                        /*id=*/ null, blob.getName(), /*location=*/ null,\n                        /*uri=*/ null, blob.getEtag(),\n                        toDate(blob.getCreateTimeOffsetDateTime()),\n                        toDate(blob.getUpdateTimeOffsetDateTime()),\n                        Map.of(), blob.getSize(),\n                        toTier(blob.getStorageClass())));\n            }\n            lastName = blob.getName();\n            count++;\n        }\n\n        // Synthesize a next marker if we truncated results\n        String nextMarker = hasMore ? lastName : null;\n        return new PageSetImpl<StorageMetadata>(set.build(), nextMarker);\n    }\n\n    @Override\n    public boolean containerExists(String container) {\n        return storage.get(container,\n                BucketGetOption.fields(BucketField.NAME)) != null;\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container) {\n        return createContainerInLocation(location, container,\n                new CreateContainerOptions());\n    }\n\n    @Override\n    public boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        try {\n            var bucketInfo = BucketInfo.newBuilder(container).build();\n            storage.create(bucketInfo);\n            if (options.isPublicRead()) {\n                try {\n                    storage.createAcl(container,\n                            Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER));\n                } catch (StorageException se2) {\n                    // ACL operations not supported (e.g., emulator)\n                }\n            }\n            return true;\n        } catch (StorageException se) {\n            if (se.getCode() == 409) {\n                return false;\n            }\n            throw se;\n        }\n    }\n\n    @Override\n    public void deleteContainer(String 
container) {\n        try {\n            // Delete all blobs first since GCS requires empty bucket\n            var page = storage.list(container);\n            for (Blob blob : page.iterateAll()) {\n                storage.delete(blob.getBlobId());\n            }\n            storage.delete(container);\n        } catch (StorageException se) {\n            if (se.getCode() != 404) {\n                throw se;\n            }\n        }\n    }\n\n    @Override\n    public boolean deleteContainerIfEmpty(String container) {\n        var page = storage.list(container,\n                BlobListOption.pageSize(1));\n        if (page.getValues().iterator().hasNext()) {\n            return false;\n        }\n        try {\n            storage.delete(container);\n            return true;\n        } catch (StorageException se) {\n            if (se.getCode() == 404) {\n                return true;\n            }\n            throw se;\n        }\n    }\n\n    @Override\n    public boolean blobExists(String container, String key) {\n        return storage.get(BlobId.of(container, key),\n                BlobGetOption.fields(BlobField.NAME)) != null;\n    }\n\n    @Override\n    public org.jclouds.blobstore.domain.Blob getBlob(String container,\n            String key, GetOptions options) {\n        var gcsOptions = new java.util.ArrayList<BlobGetOption>();\n\n        Blob gcsBlob;\n        try {\n            gcsBlob = storage.get(BlobId.of(container, key),\n                    gcsOptions.toArray(new BlobGetOption[0]));\n        } catch (StorageException se) {\n            translateAndRethrowException(se, container, key);\n            throw se;\n        }\n        if (gcsBlob == null) {\n            throw new KeyNotFoundException(container, key, \"\");\n        }\n\n        Long rangeOffset = null;\n        Long rangeEnd = null;\n        if (!options.getRanges().isEmpty()) {\n            var ranges = options.getRanges().get(0).split(\"-\", 2);\n            if 
(ranges[0].isEmpty()) {\n                // trailing range: last N bytes\n                long trailing = Long.parseLong(ranges[1]);\n                long blobSz = gcsBlob.getSize();\n                rangeOffset = Math.max(0, blobSz - trailing);\n                rangeEnd = blobSz - 1;\n            } else if (ranges[1].isEmpty()) {\n                rangeOffset = Long.parseLong(ranges[0]);\n            } else {\n                rangeOffset = Long.parseLong(ranges[0]);\n                rangeEnd = Long.parseLong(ranges[1]);\n            }\n        }\n\n        InputStream is;\n        long contentLength;\n        long blobSize = gcsBlob.getSize();\n        try {\n            if (rangeOffset != null) {\n                ReadChannel reader = gcsBlob.reader();\n                reader.seek(rangeOffset);\n                if (rangeEnd != null) {\n                    reader.limit(rangeEnd + 1);\n                    contentLength = rangeEnd - rangeOffset + 1;\n                } else {\n                    contentLength = blobSize - rangeOffset;\n                }\n                is = Channels.newInputStream(reader);\n            } else {\n                ReadChannel reader = gcsBlob.reader();\n                is = Channels.newInputStream(reader);\n                contentLength = blobSize;\n            }\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        var metadata = gcsBlob.getMetadata();\n        var blob = new BlobBuilderImpl()\n                .name(key)\n                .userMetadata(metadata != null ? 
metadata : Map.of())\n                .payload(is)\n                .cacheControl(gcsBlob.getCacheControl())\n                .contentDisposition(gcsBlob.getContentDisposition())\n                .contentEncoding(gcsBlob.getContentEncoding())\n                .contentLanguage(gcsBlob.getContentLanguage())\n                .contentLength(contentLength)\n                .contentType(gcsBlob.getContentType())\n                .build();\n        if (rangeOffset != null) {\n            long end = rangeEnd != null ? rangeEnd :\n                    blobSize - 1;\n            blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE,\n                    \"bytes \" + rangeOffset + \"-\" + end + \"/\" + blobSize);\n        }\n        var blobMeta = blob.getMetadata();\n        blobMeta.setETag(gcsBlob.getEtag());\n        blobMeta.setSize(blobSize);\n        blobMeta.setTier(toTier(gcsBlob.getStorageClass()));\n        blobMeta.setCreationDate(\n                toDate(gcsBlob.getCreateTimeOffsetDateTime()));\n        blobMeta.setLastModified(\n                toDate(gcsBlob.getUpdateTimeOffsetDateTime()));\n        return blob;\n    }\n\n    @Override\n    public String putBlob(String container,\n            org.jclouds.blobstore.domain.Blob blob) {\n        return putBlob(container, blob, new PutOptions());\n    }\n\n    @Override\n    public String putBlob(String container,\n            org.jclouds.blobstore.domain.Blob blob, PutOptions options) {\n        var contentMetadata = blob.getMetadata().getContentMetadata();\n        var blobInfo = BlobInfo.newBuilder(\n                BlobId.of(container, blob.getMetadata().getName()));\n        blobInfo.setContentType(contentMetadata.getContentType());\n        blobInfo.setContentDisposition(\n                contentMetadata.getContentDisposition());\n        blobInfo.setContentEncoding(contentMetadata.getContentEncoding());\n        blobInfo.setContentLanguage(contentMetadata.getContentLanguage());\n        
blobInfo.setCacheControl(contentMetadata.getCacheControl());\n        var hash = contentMetadata.getContentMD5AsHashCode();\n        if (hash != null) {\n            blobInfo.setMd5(hash.toString());\n        }\n        if (blob.getMetadata().getUserMetadata() != null) {\n            blobInfo.setMetadata(blob.getMetadata().getUserMetadata());\n        }\n        if (blob.getMetadata().getTier() != null &&\n                blob.getMetadata().getTier() != Tier.STANDARD) {\n            blobInfo.setStorageClass(\n                    toStorageClass(blob.getMetadata().getTier()));\n        }\n\n        var writeOptions = new java.util.ArrayList<BlobWriteOption>();\n        if (options instanceof PutOptions2 putOptions2) {\n            String ifMatch = putOptions2.getIfMatch();\n            String ifNoneMatch = putOptions2.getIfNoneMatch();\n            if (ifNoneMatch != null && ifNoneMatch.equals(\"*\")) {\n                writeOptions.add(BlobWriteOption.doesNotExist());\n            } else if (ifMatch != null) {\n                writeOptions.add(\n                        BlobWriteOption.generationMatch(\n                                getGeneration(container,\n                                        blob.getMetadata().getName(),\n                                        ifMatch)));\n            }\n        }\n\n        try (var is = blob.getPayload().openStream()) {\n            Blob gcsBlob = storage.createFrom(blobInfo.build(), is,\n                    writeOptions.toArray(new BlobWriteOption[0]));\n            return gcsBlob.getEtag();\n        } catch (StorageException se) {\n            translateAndRethrowException(se, container, null);\n            throw se;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public String copyBlob(String fromContainer, String fromName,\n            String toContainer, String toName, CopyOptions options) {\n        var source = BlobId.of(fromContainer, 
fromName);\n        var targetBuilder = BlobInfo.newBuilder(\n                BlobId.of(toContainer, toName));\n\n        var contentMetadata = options.contentMetadata();\n        if (contentMetadata != null) {\n            if (contentMetadata.getCacheControl() != null) {\n                targetBuilder.setCacheControl(\n                        contentMetadata.getCacheControl());\n            }\n            if (contentMetadata.getContentDisposition() != null) {\n                targetBuilder.setContentDisposition(\n                        contentMetadata.getContentDisposition());\n            }\n            if (contentMetadata.getContentEncoding() != null) {\n                targetBuilder.setContentEncoding(\n                        contentMetadata.getContentEncoding());\n            }\n            if (contentMetadata.getContentLanguage() != null) {\n                targetBuilder.setContentLanguage(\n                        contentMetadata.getContentLanguage());\n            }\n            if (contentMetadata.getContentType() != null) {\n                targetBuilder.setContentType(\n                        contentMetadata.getContentType());\n            }\n        }\n        var userMetadata = options.userMetadata();\n        if (userMetadata != null) {\n            targetBuilder.setMetadata(userMetadata);\n        }\n\n        try {\n            var copyRequest = CopyRequest.newBuilder()\n                    .setSource(source)\n                    .setTarget(targetBuilder.build())\n                    .build();\n            var result = storage.copy(copyRequest);\n            return result.getResult().getEtag();\n        } catch (StorageException se) {\n            translateAndRethrowException(se, fromContainer, fromName);\n            throw se;\n        }\n    }\n\n    @Override\n    public void removeBlob(String container, String key) {\n        try {\n            storage.delete(BlobId.of(container, key));\n        } catch (StorageException se) {\n            if 
(se.getCode() != 404) {\n                throw se;\n            }\n        }\n    }\n\n    @Override\n    public BlobMetadata blobMetadata(String container, String key) {\n        Blob gcsBlob;\n        try {\n            gcsBlob = storage.get(BlobId.of(container, key));\n        } catch (StorageException se) {\n            if (se.getCode() == 404) {\n                return null;\n            }\n            translateAndRethrowException(se, container, null);\n            throw se;\n        }\n        if (gcsBlob == null) {\n            return null;\n        }\n        Long size = gcsBlob.getSize();\n        return new BlobMetadataImpl(/*id=*/ null, key, /*location=*/ null,\n                /*uri=*/ null, gcsBlob.getEtag(),\n                toDate(gcsBlob.getCreateTimeOffsetDateTime()),\n                toDate(gcsBlob.getUpdateTimeOffsetDateTime()),\n                gcsBlob.getMetadata() != null ?\n                        gcsBlob.getMetadata() : Map.of(),\n                /*publicUri=*/ null, container,\n                toContentMetadata(gcsBlob),\n                size != null ? 
size : 0L,\n                toTier(gcsBlob.getStorageClass()));\n    }\n\n    @Override\n    protected boolean deleteAndVerifyContainerGone(String container) {\n        try {\n            storage.delete(container);\n        } catch (StorageException se) {\n            if (se.getCode() == 404) {\n                return true;\n            }\n            throw se;\n        }\n        return true;\n    }\n\n    @Override\n    public ContainerAccess getContainerAccess(String container) {\n        var bucket = storage.get(container);\n        if (bucket == null) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n        try {\n            var acls = bucket.listAcls();\n            for (var acl : acls) {\n                if (acl.getEntity().equals(Acl.User.ofAllUsers())) {\n                    return ContainerAccess.PUBLIC_READ;\n                }\n            }\n        } catch (StorageException se) {\n            // ACL operations not supported (e.g., emulator)\n        }\n        return ContainerAccess.PRIVATE;\n    }\n\n    @Override\n    public void setContainerAccess(String container,\n            ContainerAccess access) {\n        try {\n            if (access == ContainerAccess.PUBLIC_READ) {\n                storage.createAcl(container,\n                        Acl.of(Acl.User.ofAllUsers(), Acl.Role.READER));\n            } else {\n                storage.deleteAcl(container, Acl.User.ofAllUsers());\n            }\n        } catch (StorageException se) {\n            // ACL operations not supported (e.g., emulator)\n        }\n    }\n\n    @Override\n    public BlobAccess getBlobAccess(String container, String key) {\n        return BlobAccess.PRIVATE;\n    }\n\n    @Override\n    public void setBlobAccess(String container, String key,\n            BlobAccess access) {\n        throw new UnsupportedOperationException(\n                \"unsupported in Google Cloud Storage\");\n    }\n\n    @Override\n    public MultipartUpload 
initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        String uploadKey = STUB_BLOB_PREFIX + UUID.randomUUID().toString();\n        String targetBlobName = blobMetadata.getName();\n\n        // Store stub blob with metadata for later use during complete\n        var stubMetadata = new HashMap<String, String>();\n        stubMetadata.put(TARGET_BLOB_NAME_KEY, targetBlobName);\n\n        var contentMetadata = blobMetadata.getContentMetadata();\n        if (contentMetadata != null) {\n            if (contentMetadata.getContentType() != null) {\n                stubMetadata.put(\"s3proxy_content_type\",\n                        contentMetadata.getContentType());\n            }\n            if (contentMetadata.getContentDisposition() != null) {\n                stubMetadata.put(\"s3proxy_content_disposition\",\n                        contentMetadata.getContentDisposition());\n            }\n            if (contentMetadata.getContentEncoding() != null) {\n                stubMetadata.put(\"s3proxy_content_encoding\",\n                        contentMetadata.getContentEncoding());\n            }\n            if (contentMetadata.getContentLanguage() != null) {\n                stubMetadata.put(\"s3proxy_content_language\",\n                        contentMetadata.getContentLanguage());\n            }\n            if (contentMetadata.getCacheControl() != null) {\n                stubMetadata.put(\"s3proxy_cache_control\",\n                        contentMetadata.getCacheControl());\n            }\n        }\n\n        var userMetadata = blobMetadata.getUserMetadata();\n        if (userMetadata != null) {\n            for (var entry : userMetadata.entrySet()) {\n                stubMetadata.put(\"s3proxy_user_\" + entry.getKey(),\n                        entry.getValue());\n            }\n      
  }\n\n        if (blobMetadata.getTier() != null &&\n                blobMetadata.getTier() != Tier.STANDARD) {\n            stubMetadata.put(\"s3proxy_tier\",\n                    blobMetadata.getTier().name());\n        }\n\n        var stubInfo = BlobInfo.newBuilder(\n                BlobId.of(container, uploadKey))\n                .setMetadata(stubMetadata)\n                .build();\n        storage.create(stubInfo, new byte[0]);\n\n        return MultipartUpload.create(container, targetBlobName,\n                uploadKey, blobMetadata, options);\n    }\n\n    @Override\n    public void abortMultipartUpload(MultipartUpload mpu) {\n        String uploadKey = mpu.id();\n\n        if (!uploadKey.startsWith(STUB_BLOB_PREFIX)) {\n            throw new KeyNotFoundException(mpu.containerName(), uploadKey,\n                    \"Multipart upload not found: \" + uploadKey);\n        }\n\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n\n        // Delete part blobs\n        var page = storage.list(mpu.containerName(),\n                BlobListOption.prefix(STUB_BLOB_PREFIX + nonce + \"/\"));\n        for (Blob blob : page.iterateAll()) {\n            storage.delete(blob.getBlobId());\n        }\n\n        // Delete stub\n        if (!storage.delete(BlobId.of(mpu.containerName(), uploadKey))) {\n            throw new KeyNotFoundException(mpu.containerName(), uploadKey,\n                    \"Multipart upload not found: \" + uploadKey);\n        }\n    }\n\n    @Override\n    public String completeMultipartUpload(MultipartUpload mpu,\n            List<MultipartPart> parts) {\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n\n        Blob stubBlob = storage.get(\n                BlobId.of(mpu.containerName(), uploadKey));\n        if (stubBlob == null) {\n            throw new IllegalArgumentException(\n                    \"Upload not found: uploadId=\" + uploadKey);\n        }\n\n   
     var stubMetadata = stubBlob.getMetadata();\n        String targetBlobName = stubMetadata.get(TARGET_BLOB_NAME_KEY);\n        if (targetBlobName == null) {\n            throw new IllegalArgumentException(\n                    \"Stub blob missing target name: uploadId=\" + uploadKey);\n        }\n\n        if (parts == null || parts.isEmpty()) {\n            throw new IllegalArgumentException(\"Parts list cannot be empty\");\n        }\n\n        int previousPartNumber = 0;\n        for (var part : parts) {\n            if (part.partNumber() <= previousPartNumber) {\n                throw new IllegalArgumentException(\n                        \"Parts must be in strictly ascending order\");\n            }\n            previousPartNumber = part.partNumber();\n        }\n\n        // Build target blob info from stub metadata\n        var targetBuilder = BlobInfo.newBuilder(\n                BlobId.of(mpu.containerName(), targetBlobName));\n        if (stubMetadata.containsKey(\"s3proxy_content_type\")) {\n            targetBuilder.setContentType(\n                    stubMetadata.get(\"s3proxy_content_type\"));\n        }\n        if (stubMetadata.containsKey(\"s3proxy_content_disposition\")) {\n            targetBuilder.setContentDisposition(\n                    stubMetadata.get(\"s3proxy_content_disposition\"));\n        }\n        if (stubMetadata.containsKey(\"s3proxy_content_encoding\")) {\n            targetBuilder.setContentEncoding(\n                    stubMetadata.get(\"s3proxy_content_encoding\"));\n        }\n        if (stubMetadata.containsKey(\"s3proxy_content_language\")) {\n            targetBuilder.setContentLanguage(\n                    stubMetadata.get(\"s3proxy_content_language\"));\n        }\n        if (stubMetadata.containsKey(\"s3proxy_cache_control\")) {\n            targetBuilder.setCacheControl(\n                    stubMetadata.get(\"s3proxy_cache_control\"));\n        }\n        if (stubMetadata.containsKey(\"s3proxy_tier\")) {\n    
        targetBuilder.setStorageClass(toStorageClass(\n                    Tier.valueOf(stubMetadata.get(\"s3proxy_tier\"))));\n        }\n\n        // Restore user metadata\n        var userMetadata = new HashMap<String, String>();\n        for (var entry : stubMetadata.entrySet()) {\n            if (entry.getKey().startsWith(\"s3proxy_user_\")) {\n                userMetadata.put(\n                        entry.getKey().substring(\"s3proxy_user_\".length()),\n                        entry.getValue());\n            }\n        }\n        if (!userMetadata.isEmpty()) {\n            targetBuilder.setMetadata(userMetadata);\n        }\n\n        // If single part, just copy it to the target\n        if (parts.size() == 1) {\n            String partBlobName = makePartBlobName(nonce,\n                    parts.get(0).partNumber());\n            var source = BlobId.of(mpu.containerName(), partBlobName);\n            var copyRequest = CopyRequest.newBuilder()\n                    .setSource(source)\n                    .setTarget(targetBuilder.build())\n                    .build();\n            var result = storage.copy(copyRequest);\n            // Clean up\n            storage.delete(source);\n            storage.delete(BlobId.of(mpu.containerName(), uploadKey));\n            return result.getResult().getEtag();\n        }\n\n        // GCS compose supports up to 32 parts.\n        // For more parts, compose recursively.\n        var sourceBlobIds = new java.util.ArrayList<BlobId>();\n        for (var part : parts) {\n            String partBlobName = makePartBlobName(nonce, part.partNumber());\n            sourceBlobIds.add(BlobId.of(mpu.containerName(), partBlobName));\n        }\n\n        String eTag = composeRecursive(mpu.containerName(),\n                targetBuilder.build(), sourceBlobIds, nonce);\n\n        // Clean up part blobs and stub\n        for (var blobId : sourceBlobIds) {\n            storage.delete(blobId);\n        }\n        // Clean up any 
intermediate compose blobs\n        var intermediatePage = storage.list(mpu.containerName(),\n                BlobListOption.prefix(\n                        STUB_BLOB_PREFIX + nonce + \"/compose_\"));\n        for (Blob blob : intermediatePage.iterateAll()) {\n            storage.delete(blob.getBlobId());\n        }\n        storage.delete(BlobId.of(mpu.containerName(), uploadKey));\n\n        return eTag;\n    }\n\n    /**\n     * Recursively compose blobs to handle more than 32 parts.\n     * GCS compose supports max 32 sources, so for N > 32 parts we\n     * compose in groups of 32, then compose those results.\n     */\n    private String composeRecursive(String container, BlobInfo target,\n            List<BlobId> sources, String nonce) {\n        if (sources.size() <= MAX_COMPOSE_PARTS) {\n            var composeBuilder = ComposeRequest.newBuilder();\n            composeBuilder.setTarget(target);\n            for (var source : sources) {\n                composeBuilder.addSource(source.getName());\n            }\n            var result = storage.compose(composeBuilder.build());\n            return result.getEtag();\n        }\n\n        // Compose in groups of MAX_COMPOSE_PARTS\n        var intermediateIds = new java.util.ArrayList<BlobId>();\n        int groupIndex = 0;\n        for (int i = 0; i < sources.size();\n                i += MAX_COMPOSE_PARTS) {\n            int end = Math.min(i + MAX_COMPOSE_PARTS, sources.size());\n            var group = sources.subList(i, end);\n            String intermediateName = STUB_BLOB_PREFIX + nonce +\n                    \"/compose_\" + groupIndex;\n            var intermediateInfo = BlobInfo.newBuilder(\n                    BlobId.of(container, intermediateName)).build();\n\n            var composeBuilder = ComposeRequest.newBuilder();\n            composeBuilder.setTarget(intermediateInfo);\n            for (var source : group) {\n                composeBuilder.addSource(source.getName());\n            }\n           
 storage.compose(composeBuilder.build());\n\n            intermediateIds.add(BlobId.of(container, intermediateName));\n            groupIndex++;\n        }\n\n        // Recursively compose intermediates\n        return composeRecursive(container, target, intermediateIds,\n                nonce);\n    }\n\n    @Override\n    public MultipartPart uploadMultipartPart(MultipartUpload mpu,\n            int partNumber, org.jclouds.io.Payload payload) {\n        if (partNumber < 1 || partNumber > 10_000) {\n            throw new IllegalArgumentException(\n                    \"Part number must be between 1 and 10,000, got: \" +\n                    partNumber);\n        }\n\n        Long contentLength = payload.getContentMetadata()\n                .getContentLength();\n        if (contentLength == null) {\n            throw new IllegalArgumentException(\n                    \"Content-Length is required\");\n        }\n\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n        String partBlobName = makePartBlobName(nonce, partNumber);\n\n        byte[] md5Hash;\n        try (var is = payload.openStream();\n             var his = new HashingInputStream(MD5, is)) {\n            var partInfo = BlobInfo.newBuilder(\n                    BlobId.of(mpu.containerName(), partBlobName)).build();\n            storage.createFrom(partInfo, his);\n\n            md5Hash = his.hash().asBytes();\n\n            var providedMd5 = payload.getContentMetadata()\n                    .getContentMD5AsHashCode();\n            if (providedMd5 != null) {\n                if (!MessageDigest.isEqual(md5Hash,\n                        providedMd5.asBytes())) {\n                    // Clean up the uploaded part\n                    storage.delete(BlobId.of(mpu.containerName(),\n                            partBlobName));\n                    throw new IllegalArgumentException(\n                            \"Content-MD5 mismatch\");\n           
     }\n            }\n        } catch (StorageException se) {\n            translateAndRethrowException(se, mpu.containerName(),\n                    mpu.blobName());\n            throw new RuntimeException((\n                    \"Failed to upload part %d for blob '%s' in \" +\n                    \"container '%s': %s\").formatted(\n                    partNumber, mpu.blobName(), mpu.containerName(),\n                    se.getMessage()), se);\n        } catch (IOException ioe) {\n            throw new RuntimeException((\n                    \"Failed to upload part %d for blob '%s' in \" +\n                    \"container '%s': %s\").formatted(\n                    partNumber, mpu.blobName(), mpu.containerName(),\n                    ioe.getMessage()), ioe);\n        }\n\n        String eTag = BaseEncoding.base16().lowerCase().encode(md5Hash);\n        return MultipartPart.create(partNumber, contentLength, eTag, null);\n    }\n\n    @Override\n    public List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        String uploadKey = mpu.id();\n        String nonce = uploadKey.substring(STUB_BLOB_PREFIX.length());\n        String prefix = STUB_BLOB_PREFIX + nonce + \"/part_\";\n\n        var parts = ImmutableList.<MultipartPart>builder();\n        var page = storage.list(mpu.containerName(),\n                BlobListOption.prefix(prefix));\n        for (Blob blob : page.iterateAll()) {\n            String name = blob.getName();\n            String partNumberStr = name.substring(\n                    name.lastIndexOf('_') + 1);\n            int partNumber;\n            try {\n                partNumber = Integer.parseInt(partNumberStr);\n            } catch (NumberFormatException e) {\n                continue;\n            }\n            parts.add(MultipartPart.create(partNumber, blob.getSize(),\n                    \"\", null));\n        }\n        return parts.build();\n    }\n\n    @Override\n    public List<MultipartUpload> 
listMultipartUploads(String container) {\n        var builder = ImmutableList.<MultipartUpload>builder();\n        var page = storage.list(container,\n                BlobListOption.prefix(STUB_BLOB_PREFIX));\n        for (Blob blob : page.iterateAll()) {\n            String name = blob.getName();\n            // Only look at stub blobs, not part blobs\n            if (name.contains(\"/part_\") || name.contains(\"/compose_\")) {\n                continue;\n            }\n            var metadata = blob.getMetadata();\n            if (metadata == null ||\n                    !metadata.containsKey(TARGET_BLOB_NAME_KEY)) {\n                continue;\n            }\n            String targetBlobName = metadata.get(TARGET_BLOB_NAME_KEY);\n            builder.add(MultipartUpload.create(container, targetBlobName,\n                    name, null, null));\n        }\n        return builder.build();\n    }\n\n    @Override\n    public long getMinimumMultipartPartSize() {\n        // GCS minimum part is 5 MB except for last part\n        return 5L * 1024 * 1024;\n    }\n\n    @Override\n    public long getMaximumMultipartPartSize() {\n        return 5L * 1024 * 1024 * 1024;\n    }\n\n    @Override\n    public int getMaximumNumberOfParts() {\n        // With recursive compose we can handle many more than 32\n        return 10_000;\n    }\n\n    @Override\n    public InputStream streamBlob(String container, String name) {\n        throw new UnsupportedOperationException(\"not yet implemented\");\n    }\n\n    private static String makePartBlobName(String nonce, int partNumber) {\n        return STUB_BLOB_PREFIX + nonce +\n                \"/part_%05d\".formatted(partNumber);\n    }\n\n    /**\n     * Get blob generation for conditional writes.  
GCS uses generations\n     * rather than ETags for conditional operations.\n     */\n    private long getGeneration(String container, String name,\n            String eTag) {\n        Blob blob = storage.get(BlobId.of(container, name));\n        if (blob == null) {\n            throw new KeyNotFoundException(container, name, \"\");\n        }\n        // If the ETag doesn't match, the precondition fails\n        if (!eTag.equals(\"*\") && !eTag.equals(blob.getEtag())) {\n            var request = HttpRequest.builder()\n                    .method(\"PUT\")\n                    .endpoint(\"https://storage.googleapis.com\")\n                    .build();\n            var response = HttpResponse.builder()\n                    .statusCode(412)\n                    .build();\n            throw new HttpResponseException(\n                    new HttpCommand(request), response);\n        }\n        return blob.getGeneration();\n    }\n\n    private static Date toDate(\n            java.time.@Nullable OffsetDateTime offsetDateTime) {\n        if (offsetDateTime == null) {\n            return null;\n        }\n        return new Date(offsetDateTime.toInstant().toEpochMilli());\n    }\n\n    private static com.google.cloud.storage.StorageClass toStorageClass(\n            Tier tier) {\n        if (tier == Tier.ARCHIVE) {\n            return com.google.cloud.storage.StorageClass.ARCHIVE;\n        } else if (tier == Tier.COLD) {\n            return com.google.cloud.storage.StorageClass.COLDLINE;\n        } else if (tier == Tier.COOL || tier == Tier.INFREQUENT) {\n            return com.google.cloud.storage.StorageClass.NEARLINE;\n        } else {\n            return com.google.cloud.storage.StorageClass.STANDARD;\n        }\n    }\n\n    private static Tier toTier(\n            com.google.cloud.storage.@Nullable StorageClass storageClass) {\n        if (storageClass == null) {\n            return Tier.STANDARD;\n        } else if (storageClass.equals(\n                
com.google.cloud.storage.StorageClass.ARCHIVE)) {\n            return Tier.ARCHIVE;\n        } else if (storageClass.equals(\n                com.google.cloud.storage.StorageClass.COLDLINE)) {\n            return Tier.COLD;\n        } else if (storageClass.equals(\n                com.google.cloud.storage.StorageClass.NEARLINE)) {\n            return Tier.COOL;\n        } else {\n            return Tier.STANDARD;\n        }\n    }\n\n    private static ContentMetadata toContentMetadata(Blob blob) {\n        return ContentMetadataBuilder.create()\n                .cacheControl(blob.getCacheControl())\n                .contentDisposition(blob.getContentDisposition())\n                .contentEncoding(blob.getContentEncoding())\n                .contentLanguage(blob.getContentLanguage())\n                .contentLength(blob.getSize())\n                .contentType(blob.getContentType())\n                .build();\n    }\n\n    /**\n     * Translate StorageException to jclouds exceptions.\n     */\n    private static void translateAndRethrowException(StorageException se,\n            String container, @Nullable String key) {\n        switch (se.getCode()) {\n        case 404:\n            if (key != null) {\n                var keyEx = new KeyNotFoundException(container, key, \"\");\n                keyEx.initCause(se);\n                throw keyEx;\n            } else {\n                var containerEx = new ContainerNotFoundException(\n                        container, \"\");\n                containerEx.initCause(se);\n                throw containerEx;\n            }\n        case 412:\n            var request = HttpRequest.builder()\n                    .method(\"GET\")\n                    .endpoint(\"https://storage.googleapis.com\")\n                    .build();\n            var response = HttpResponse.builder()\n                    .statusCode(412)\n                    .build();\n            throw new HttpResponseException(\n                    new 
HttpCommand(request), response, se);\n        default:\n            break;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudBlobStoreContextModule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.gcloudsdk;\n\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Scopes;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.attr.ConsistencyModel;\n\npublic final class GCloudBlobStoreContextModule extends AbstractModule {\n    @Override\n    protected void configure() {\n        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);\n        bind(BlobStore.class).to(GCloudBlobStore.class).in(Scopes.SINGLETON);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/gcloudsdk/GCloudProviderMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.gcloudsdk;\n\nimport java.net.URI;\nimport java.util.Properties;\n\nimport com.google.auto.service.AutoService;\n\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.providers.internal.BaseProviderMetadata;\n\n/**\n * Implementation of org.jclouds.types.ProviderMetadata for Google Cloud\n * Storage using the official Google Cloud Storage SDK.\n */\n@AutoService(ProviderMetadata.class)\npublic final class GCloudProviderMetadata extends BaseProviderMetadata {\n    public GCloudProviderMetadata() {\n        super(builder());\n    }\n\n    public GCloudProviderMetadata(Builder builder) {\n        super(builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromProviderMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        var properties = new Properties();\n        return properties;\n    }\n\n    public static final class Builder extends BaseProviderMetadata.Builder {\n        protected Builder() {\n            id(\"google-cloud-storage-sdk\")\n                .name(\"Google Cloud Storage\")\n                .apiMetadata(new GCloudApiMetadata())\n                .endpoint(\"https://storage.googleapis.com\")\n                
.homepage(URI.create(\n                        \"https://cloud.google.com/storage\"))\n                .console(URI.create(\n                        \"https://console.cloud.google.com/storage\"))\n                .linkedServices(\"google-cloud-storage\")\n                .iso3166Codes(\"US\", \"EU\")\n                .defaultProperties(\n                        GCloudProviderMetadata.defaultProperties());\n        }\n\n        @Override\n        public GCloudProviderMetadata build() {\n            return new GCloudProviderMetadata(this);\n        }\n\n        @Override\n        public Builder fromProviderMetadata(ProviderMetadata in) {\n            super.fromProviderMetadata(in);\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/junit/S3ProxyExtension.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.junit;\n\nimport java.net.URI;\n\nimport org.gaul.s3proxy.AuthenticationType;\nimport org.junit.jupiter.api.extension.AfterEachCallback;\nimport org.junit.jupiter.api.extension.BeforeEachCallback;\nimport org.junit.jupiter.api.extension.ExtensionContext;\n\n/**\n * A JUnit 5 Extension that manages an S3Proxy instance which tests\n * can use as an S3 API endpoint.\n */\npublic final class S3ProxyExtension\n        implements AfterEachCallback, BeforeEachCallback {\n\n    private final S3ProxyJunitCore core;\n\n    public static final class Builder {\n\n        private final S3ProxyJunitCore.Builder builder;\n\n        private Builder() {\n            builder = new S3ProxyJunitCore.Builder();\n        }\n\n        public Builder withCredentials(AuthenticationType authType,\n                                       String accessKey, String secretKey) {\n            builder.withCredentials(authType, accessKey, secretKey);\n            return this;\n        }\n\n        public Builder withCredentials(String accessKey, String secretKey) {\n            builder.withCredentials(accessKey, secretKey);\n            return this;\n        }\n\n        public Builder withSecretStore(String path, String password) {\n            builder.withSecretStore(path, password);\n            return this;\n        }\n\n        
public Builder withPort(int port) {\n            builder.withPort(port);\n            return this;\n        }\n\n        public Builder withBlobStoreProvider(String blobStoreProvider) {\n            builder.withBlobStoreProvider(blobStoreProvider);\n            return this;\n        }\n\n        public Builder ignoreUnknownHeaders() {\n            builder.ignoreUnknownHeaders();\n            return this;\n        }\n\n        public S3ProxyExtension build() {\n            return new S3ProxyExtension(this);\n        }\n    }\n\n    private S3ProxyExtension(Builder builder) {\n        core = new S3ProxyJunitCore(builder.builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public void beforeEach(ExtensionContext extensionContext) throws Exception {\n        core.beforeEach();\n    }\n\n    @Override\n    public void afterEach(ExtensionContext extensionContext) {\n        core.afterEach();\n    }\n\n    public URI getUri() {\n        return core.getUri();\n    }\n\n    public String getAccessKey() {\n        return core.getAccessKey();\n    }\n\n    public String getSecretKey() {\n        return core.getSecretKey();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/junit/S3ProxyJunitCore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.junit;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.net.URI;\nimport java.nio.file.Files;\nimport java.util.Properties;\n\nimport com.google.common.io.MoreFiles;\n\nimport org.eclipse.jetty.util.component.AbstractLifeCycle;\nimport org.gaul.s3proxy.AuthenticationType;\nimport org.gaul.s3proxy.S3Proxy;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\npublic class S3ProxyJunitCore {\n\n    private static final Logger logger = LoggerFactory.getLogger(\n            S3ProxyJunitCore.class);\n\n    private static final String LOCALHOST = \"127.0.0.1\";\n\n    private final String accessKey;\n    private final String secretKey;\n    private final String endpointFormat;\n    private final S3Proxy s3Proxy;\n\n    private final BlobStoreContext blobStoreContext;\n    private URI endpointUri;\n    private final File blobStoreLocation;\n\n    public static final class Builder {\n        private AuthenticationType authType = AuthenticationType.NONE;\n        private String accessKey;\n        private String secretKey;\n        private String secretStorePath;\n        private String secretStorePassword;\n  
      private int port = -1;\n        private boolean ignoreUnknownHeaders;\n        private String blobStoreProvider = \"filesystem\";\n\n        public Builder withCredentials(AuthenticationType authType,\n                                        String accessKey, String secretKey) {\n            this.authType = authType;\n            this.accessKey = accessKey;\n            this.secretKey = secretKey;\n            return this;\n        }\n\n        public Builder withCredentials(String accessKey, String secretKey) {\n            return withCredentials(AuthenticationType.AWS_V2_OR_V4, accessKey,\n                    secretKey);\n        }\n\n        public Builder withSecretStore(String path, String password) {\n            secretStorePath = path;\n            secretStorePassword = password;\n            return this;\n        }\n\n        public Builder withPort(int port) {\n            this.port = port;\n            return this;\n        }\n\n        public Builder withBlobStoreProvider(String blobStoreProvider) {\n            this.blobStoreProvider = blobStoreProvider;\n            return this;\n        }\n\n        public Builder ignoreUnknownHeaders() {\n            ignoreUnknownHeaders = true;\n            return this;\n        }\n\n        public S3ProxyJunitCore build() {\n            return new S3ProxyJunitCore(this);\n        }\n    }\n\n    S3ProxyJunitCore(Builder builder) {\n        accessKey = builder.accessKey;\n        secretKey = builder.secretKey;\n\n        var properties = new Properties();\n        try {\n            blobStoreLocation = Files.createTempDirectory(\"S3Proxy\")\n                    .toFile();\n            properties.setProperty(\"jclouds.filesystem.basedir\",\n                    blobStoreLocation.getCanonicalPath());\n        } catch (IOException e) {\n            throw new RuntimeException(\"Unable to initialize Blob Store\", e);\n        }\n\n        ContextBuilder blobStoreContextBuilder = ContextBuilder.newBuilder(\n          
      builder.blobStoreProvider)\n                .overrides(properties);\n        if (!AuthenticationType.NONE.equals(builder.authType)) {\n            blobStoreContextBuilder = blobStoreContextBuilder.credentials(\n                    accessKey, secretKey);\n        }\n        blobStoreContext = blobStoreContextBuilder.build(\n                BlobStoreContext.class);\n\n        S3Proxy.Builder s3ProxyBuilder = S3Proxy.builder()\n                .blobStore(blobStoreContext.getBlobStore())\n                .awsAuthentication(builder.authType, accessKey, secretKey)\n                .ignoreUnknownHeaders(builder.ignoreUnknownHeaders);\n\n        if (builder.secretStorePath != null ||\n                builder.secretStorePassword != null) {\n            s3ProxyBuilder.keyStore(builder.secretStorePath,\n                    builder.secretStorePassword);\n        }\n\n        int port = Math.max(builder.port, 0);\n        endpointFormat = \"http://%s:%d\";\n        String endpoint = endpointFormat.formatted(LOCALHOST, port);\n        s3ProxyBuilder.endpoint(URI.create(endpoint));\n\n        s3Proxy = s3ProxyBuilder.build();\n    }\n\n    public final void beforeEach() throws Exception {\n        logger.debug(\"S3 proxy is starting\");\n        s3Proxy.start();\n        while (!s3Proxy.getState().equals(AbstractLifeCycle.STARTED)) {\n            Thread.sleep(10);\n        }\n        endpointUri = URI.create(endpointFormat.formatted(LOCALHOST,\n                s3Proxy.getPort()));\n        logger.debug(\"S3 proxy is running\");\n    }\n\n    public final void afterEach() {\n        logger.debug(\"S3 proxy is stopping\");\n        try {\n            s3Proxy.stop();\n            BlobStore blobStore = blobStoreContext.getBlobStore();\n            for (StorageMetadata metadata : blobStore.list()) {\n                blobStore.deleteContainer(metadata.getName());\n            }\n            blobStoreContext.close();\n        } catch (Exception e) {\n            throw new 
RuntimeException(\"Unable to stop S3 proxy\", e);\n        }\n        try {\n            MoreFiles.deleteRecursively(blobStoreLocation.toPath());\n        } catch (IOException ioe) {\n            // ignore\n        }\n        logger.debug(\"S3 proxy has stopped\");\n    }\n\n    public final URI getUri() {\n        return endpointUri;\n    }\n\n    public final String getAccessKey() {\n        return accessKey;\n    }\n\n    public final String getSecretKey() {\n        return secretKey;\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/junit/S3ProxyRule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.junit;\n\nimport java.net.URI;\n\nimport com.google.common.annotations.Beta;\n\nimport org.gaul.s3proxy.AuthenticationType;\nimport org.junit.rules.ExternalResource;\n\n/**\n * A JUnit Rule that manages an S3Proxy instance which tests can use as an S3\n * API endpoint.\n */\n@Beta\npublic final class S3ProxyRule extends ExternalResource {\n\n    private final S3ProxyJunitCore core;\n\n    public static final class Builder {\n\n        private final S3ProxyJunitCore.Builder builder;\n\n        private Builder() {\n            builder = new S3ProxyJunitCore.Builder();\n        }\n\n        public Builder withCredentials(AuthenticationType authType,\n                                         String accessKey, String secretKey) {\n            builder.withCredentials(authType, accessKey, secretKey);\n            return this;\n        }\n\n        public Builder withCredentials(String accessKey, String secretKey) {\n            builder.withCredentials(accessKey, secretKey);\n            return this;\n        }\n\n        public Builder withSecretStore(String path, String password) {\n            builder.withSecretStore(path, password);\n            return this;\n        }\n\n        public Builder withPort(int port) {\n            builder.withPort(port);\n            return this;\n        }\n\n        public 
Builder withBlobStoreProvider(String blobStoreProvider) {\n            builder.withBlobStoreProvider(blobStoreProvider);\n            return this;\n        }\n\n        public Builder ignoreUnknownHeaders() {\n            builder.ignoreUnknownHeaders();\n            return this;\n        }\n\n        public S3ProxyRule build() {\n            return new S3ProxyRule(this);\n        }\n    }\n\n    private S3ProxyRule(Builder builder) {\n        core = new S3ProxyJunitCore(builder.builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    protected void before() throws Throwable {\n        core.beforeEach();\n    }\n\n    @Override\n    protected void after() {\n        core.afterEach();\n    }\n\n    public URI getUri() {\n        return core.getUri();\n    }\n\n    public String getAccessKey() {\n        return core.getAccessKey();\n    }\n\n    public String getSecretKey() {\n        return core.getSecretKey();\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/AbstractNio2BlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.ByteBuffer;\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.DirectoryNotEmptyException;\nimport java.nio.file.FileAlreadyExistsException;\nimport java.nio.file.Files;\nimport java.nio.file.NoSuchFileException;\nimport java.nio.file.Path;\nimport java.nio.file.StandardCopyOption;\nimport java.nio.file.attribute.BasicFileAttributes;\nimport java.nio.file.attribute.PosixFilePermission;\nimport java.nio.file.attribute.UserDefinedFileAttributeView;\nimport java.util.Date;\nimport java.util.HashSet;\nimport java.util.Iterator;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Set;\nimport java.util.UUID;\nimport java.util.stream.Collectors;\n\nimport com.google.common.base.Supplier;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.ImmutableSortedSet;\nimport com.google.common.hash.HashCode;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\nimport com.google.common.hash.HashingInputStream;\nimport com.google.common.io.BaseEncoding;\nimport com.google.common.io.ByteSource;\nimport com.google.common.io.ByteStreams;\nimport com.google.common.net.HttpHeaders;\nimport 
com.google.common.primitives.Longs;\n\nimport jakarta.inject.Singleton;\nimport jakarta.ws.rs.core.Response.Status;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.ContainerNotFoundException;\nimport org.jclouds.blobstore.KeyNotFoundException;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.ContainerAccess;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.StorageType;\nimport org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.domain.internal.BlobBuilderImpl;\nimport org.jclouds.blobstore.domain.internal.PageSetImpl;\nimport org.jclouds.blobstore.domain.internal.StorageMetadataImpl;\nimport org.jclouds.blobstore.internal.BaseBlobStore;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.CreateContainerOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.blobstore.util.BlobStoreUtils;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.http.HttpCommand;\nimport org.jclouds.http.HttpRequest;\nimport org.jclouds.http.HttpResponse;\nimport org.jclouds.http.HttpResponseException;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.PayloadSlicer;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n@Singleton\npublic abstract class AbstractNio2BlobStore extends BaseBlobStore {\n    private static final Logger logger = LoggerFactory.getLogger(\n            
AbstractNio2BlobStore.class);\n    private static final String XATTR_CACHE_CONTROL = \"user.cache-control\";\n    private static final String XATTR_CONTENT_DISPOSITION =\n            \"user.content-disposition\";\n    private static final String XATTR_CONTENT_ENCODING =\n            \"user.content-encoding\";\n    private static final String XATTR_CONTENT_LANGUAGE =\n            \"user.content-language\";\n    private static final String XATTR_CONTENT_MD5 = \"user.content-md5\";\n    private static final String XATTR_CONTENT_TYPE = \"user.content-type\";\n    private static final String XATTR_EXPIRES = \"user.expires\";\n    private static final String XATTR_STORAGE_TIER = \"user.storage-tier\";\n    private static final String XATTR_USER_METADATA_PREFIX =\n            \"user.user-metadata.\";\n    private static final Set<String> NO_ATTRIBUTES = Set.of();\n    private static final String MULTIPART_PREFIX = \".mpus-\";\n    @SuppressWarnings(\"deprecation\")\n    private static final HashFunction md5 = Hashing.md5();\n    private static final byte[] DIRECTORY_MD5 =\n            md5.hashBytes(new byte[0]).asBytes();\n\n    private final Path root;\n\n    protected AbstractNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            Path root) {\n        super(context, blobUtils, defaultLocation, locations, slicer);\n        this.root = root;\n    }\n\n    protected final Path getRoot() {\n        return root;\n    }\n\n    @Override\n    public final PageSet<? 
extends StorageMetadata> list() {\n        var set = ImmutableSortedSet.<StorageMetadata>naturalOrder();\n        try (var stream = Files.newDirectoryStream(root)) {\n            for (var path : stream) {\n                var attr = Files.readAttributes(path,\n                        BasicFileAttributes.class);\n                var lastModifiedTime = new Date(\n                        attr.lastModifiedTime().toMillis());\n                var creationTime = new Date(attr.creationTime().toMillis());\n                set.add(new StorageMetadataImpl(StorageType.CONTAINER,\n                        /*id=*/ null, path.getFileName().toString(),\n                        /*location=*/ null, /*uri=*/ null,\n                        /*eTag=*/ null, creationTime, lastModifiedTime,\n                        Map.of(), /*size=*/ null, Tier.STANDARD));\n            }\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n        return new PageSetImpl<StorageMetadata>(set.build(), null);\n    }\n\n    @Override\n    public final PageSet<? 
extends StorageMetadata> list(String container,\n            ListContainerOptions options) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        var delimiter = options.getDelimiter();\n        if (\"\".equals(delimiter)) {\n            delimiter = null;\n        } else if (delimiter != null && !delimiter.equals(\"/\")) {\n            throw new IllegalArgumentException(\"Delimiters other than / not supported\");\n        }\n\n        var prefix = options.getPrefix();\n        var dirPrefix = root.resolve(container);\n        if (prefix != null) {\n            int idx = prefix.lastIndexOf('/');\n            if (idx != -1) {\n                dirPrefix = dirPrefix.resolve(prefix.substring(0, idx));\n            }\n        } else {\n            prefix = \"\";\n        }\n        var containerPath = root.resolve(container);\n        var pathPrefix = containerPath.resolve(prefix).normalize();\n        checkValidPath(containerPath, pathPrefix);\n        logger.debug(\"Listing blobs at: {}\", pathPrefix);\n        var set = ImmutableSortedSet.<StorageMetadata>naturalOrder();\n        try {\n            listHelper(set, container, dirPrefix, pathPrefix, delimiter);\n            var sorted = set.build();\n            if (options.getMarker() != null) {\n                var found = false;\n                for (var blob : sorted) {\n                    if (blob.getName().compareTo(options.getMarker()) > 0) {\n                        sorted = sorted.tailSet(blob);\n                        found = true;\n                        break;\n                    }\n                }\n                if (!found) {\n                    sorted = ImmutableSortedSet.of();\n                }\n            }\n            String marker = null;\n            if (options.getMaxResults() != null) {\n                // TODO: efficiency?\n                var temp = 
ImmutableSortedSet.copyOf(sorted.stream().limit(options.getMaxResults().intValue()).collect(Collectors.toSet()));\n                if (!temp.isEmpty()) {\n                    var next = sorted.higher(temp.last());\n                    if (next != null) {\n                        marker = temp.last().getName();\n                    }\n                }\n                sorted = temp;\n            }\n            return new PageSetImpl<StorageMetadata>(sorted, marker);\n        } catch (IOException ioe) {\n            logger.error(\"unexpected exception\", ioe);\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    private void listHelper(ImmutableSortedSet.Builder<StorageMetadata> builder,\n            String container, Path parent, Path prefix, String delimiter)\n            throws IOException {\n        logger.debug(\"recursing at: {} with prefix: {}\", parent, prefix);\n        if (!Files.isDirectory(parent)) {  // TODO: TOCTOU\n            return;\n        }\n        try (var stream = Files.newDirectoryStream(parent)) {\n            for (var path : stream) {\n                logger.debug(\"examining: {}\", path);\n                if (!path.toAbsolutePath().toString().startsWith(root.resolve(prefix).toAbsolutePath().toString())) {\n                    // ignore\n                } else if (Files.isDirectory(path)) {\n                    if (!\"/\".equals(delimiter)) {\n                        listHelper(builder, container, path, prefix, delimiter);\n                    }\n\n                    // Add a prefix if the directory blob exists or if the delimiter causes us not to recurse.\n                    if (safeGetXattrs(path).attributes().contains(XATTR_CONTENT_MD5) || \"/\".equals(delimiter)) {\n                        var name = path.toString().substring((root.resolve(container) + \"/\").length());\n                        if (path.getFileSystem().getSeparator().equals(\"\\\\\")) {\n                            name = name.replace('\\\\', '/');\n  
                      }\n                        logger.debug(\"adding prefix: {}\", name);\n                        builder.add(new StorageMetadataImpl(\n                                StorageType.RELATIVE_PATH,\n                                /*id=*/ null, name + \"/\",\n                                /*location=*/ null, /*uri=*/ null,\n                                /*eTag=*/ null, /*creationDate=*/ null,\n                                /*lastModified=*/ null,\n                                Map.of(), /*size=*/ null, Tier.STANDARD));\n                    }\n                } else {\n                    var name = path.toString().substring((root.resolve(container) + \"/\").length());\n                    if (path.getFileSystem().getSeparator().equals(\"\\\\\")) {\n                        name = name.replace('\\\\', '/');\n                    }\n                    logger.debug(\"adding: {}\", name);\n                    var attr = Files.readAttributes(path, BasicFileAttributes.class);\n                    var lastModifiedTime = new Date(attr.lastModifiedTime().toMillis());\n                    var creationTime = new Date(attr.creationTime().toMillis());\n\n                    String eTag = null;\n                    Tier tier = Tier.STANDARD;\n                    var xattrs = safeGetXattrs(path);\n                    if (xattrs.view() != null) {\n                        var view = xattrs.view();\n                        var attributes = xattrs.attributes();\n                        if (attributes.contains(XATTR_CONTENT_MD5)) {\n                            var buf = ByteBuffer.allocate(view.size(XATTR_CONTENT_MD5));\n                            view.read(XATTR_CONTENT_MD5, buf);\n                            var etagBytes = buf.array();\n                            if (etagBytes.length == 16) {\n                                // regular object\n                                var hashCode = HashCode.fromBytes(buf.array());\n                                
eTag = \"\\\"\" + hashCode + \"\\\"\";\n                            } else {\n                                // multi-part object\n                                eTag = new String(etagBytes, StandardCharsets.US_ASCII);\n                            }\n                        }\n\n                        var tierString = readStringAttributeIfPresent(view, attributes, XATTR_STORAGE_TIER);\n                        if (tierString != null) {\n                            tier = Tier.valueOf(tierString);\n                        }\n                    }\n\n                    builder.add(new StorageMetadataImpl(StorageType.BLOB,\n                            /*id=*/ null, name,\n                            /*location=*/ null, /*uri=*/ null,\n                            eTag, creationTime, lastModifiedTime,\n                            Map.of(), attr.size(), tier));\n                }\n            }\n        } catch (NoSuchFileException nsfe) {\n            // ignore\n        }\n    }\n\n    @Override\n    public final boolean containerExists(String container) {\n        return Files.exists(root.resolve(container));\n    }\n\n    @Override\n    public final boolean createContainerInLocation(Location location,\n            String container) {\n        return createContainerInLocation(location, container,\n                new CreateContainerOptions());\n    }\n\n    @Override\n    public final boolean createContainerInLocation(Location location,\n            String container, CreateContainerOptions options) {\n        try {\n            Files.createDirectory(root.resolve(container));\n        } catch (FileAlreadyExistsException faee) {\n            return false;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        setContainerAccess(container, options.isPublicRead() ? 
ContainerAccess.PUBLIC_READ : ContainerAccess.PRIVATE);\n\n        return true;\n    }\n\n    @Override\n    public final void deleteContainer(String container) {\n        try {\n            Files.deleteIfExists(root.resolve(container));\n        } catch (DirectoryNotEmptyException dnee) {\n            // TODO: what to do?\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final boolean blobExists(String container, String key) {\n        return blobMetadata(container, key) != null;\n    }\n\n    @Override\n    public final Blob getBlob(String container, String key, GetOptions options) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        var containerPath = root.resolve(container);\n        var path = containerPath.resolve(key);\n        if (path.toString().equals(\"/\")) {\n            path = containerPath;\n        }\n        checkValidPath(containerPath, path);\n        logger.debug(\"Getting blob at: {}\", path);\n\n        try {\n            var isDirectory = Files.isDirectory(path);\n            var attr = Files.readAttributes(path, BasicFileAttributes.class);\n            var xattrs = safeGetXattrs(path);\n            var view = xattrs.view();\n            var attributes = xattrs.attributes();\n            String cacheControl = null;\n            String contentDisposition = null;\n            String contentEncoding = null;\n            String contentLanguage = null;\n            String contentType = isDirectory ? 
\"application/x-directory\" : null;\n            Date expires = null;\n            HashCode hashCode = null;\n            String eTag = null;\n            var tier = Tier.STANDARD;\n            var userMetadata = ImmutableMap.<String, String>builder();\n            var lastModifiedTime = new Date(attr.lastModifiedTime().toMillis());\n            var creationTime = new Date(attr.creationTime().toMillis());\n\n            if (view != null) {\n                cacheControl = readStringAttributeIfPresent(view, attributes, XATTR_CACHE_CONTROL);\n                contentDisposition = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_DISPOSITION);\n                contentEncoding = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_ENCODING);\n                contentLanguage = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_LANGUAGE);\n                if (!isDirectory) {\n                    contentType = readStringAttributeIfPresent(view, attributes, XATTR_CONTENT_TYPE);\n                }\n            }\n            if (contentType == null && !isDirectory) {\n                contentType = Files.probeContentType(path);\n                if (contentType == null) {\n                    contentType = \"application/octet-stream\";\n                }\n            }\n\n            if (isDirectory) {\n                if (!attributes.contains(XATTR_CONTENT_MD5)) {\n                    // Lacks directory marker -- implicit directory.\n                    return null;\n                }\n            } else if (attributes.contains(XATTR_CONTENT_MD5)) {\n                var buf = ByteBuffer.allocate(view.size(XATTR_CONTENT_MD5));\n                view.read(XATTR_CONTENT_MD5, buf);\n                var etagBytes = buf.array();\n                if (etagBytes.length == 16) {\n                    // regular object\n                    hashCode = HashCode.fromBytes(buf.array());\n                    eTag = \"\\\"\" + hashCode + \"\\\"\";\n                } 
else {\n                    // multi-part object\n                    eTag = new String(etagBytes, StandardCharsets.US_ASCII);\n                }\n            }\n            if (attributes.contains(XATTR_EXPIRES)) {\n                ByteBuffer buf = ByteBuffer.allocate(view.size(XATTR_EXPIRES));\n                view.read(XATTR_EXPIRES, buf);\n                buf.flip();\n                expires = new Date(buf.asLongBuffer().get());\n            }\n            if (view != null) {\n                var tierString = readStringAttributeIfPresent(view, attributes, XATTR_STORAGE_TIER);\n                if (tierString != null) {\n                    tier = Tier.valueOf(tierString);\n                }\n                for (String attribute : attributes) {\n                    if (!attribute.startsWith(XATTR_USER_METADATA_PREFIX)) {\n                        continue;\n                    }\n                    var value = readStringAttributeIfPresent(view, attributes, attribute);\n                    userMetadata.put(attribute.substring(XATTR_USER_METADATA_PREFIX.length()), value);\n                }\n            }\n\n            // Handle range.\n            String contentRange = null;\n            InputStream inputStream;\n            long size;\n            if (isDirectory) {\n                inputStream = ByteSource.empty().openStream();\n                size = 0;\n            } else {\n                inputStream = Files.newInputStream(path);  // TODO: leaky on exception\n                size = attr.size();\n                if (options.getRanges().size() > 0) {\n                    var range = options.getRanges().get(0);\n                    // HTTP uses a closed interval while Java array indexing uses a\n                    // half-open interval.\n                    long offset = 0;\n                    long last = size;\n                    if (range.startsWith(\"-\")) {\n                        offset = last - Long.parseLong(range.substring(1));\n                   
     if (offset < 0) {\n                            offset = 0;\n                        }\n                    } else if (range.endsWith(\"-\")) {\n                        offset = Long.parseLong(range.substring(0, range.length() - 1));\n                    } else if (range.contains(\"-\")) {\n                        String[] firstLast = range.split(\"\\\\-\", 2);\n                        offset = Long.parseLong(firstLast[0]);\n                        last = Long.parseLong(firstLast[1]);\n                    } else {\n                        throw new HttpResponseException(\"illegal range: \" + range, null, HttpResponse.builder().statusCode(416).build());\n                    }\n\n                    if (offset >= size) {\n                        throw new HttpResponseException(\"illegal range: \" + range, null, HttpResponse.builder().statusCode(416).build());\n                    }\n                    if (last + 1 > size) {\n                        last = size - 1;\n                    }\n                    inputStream.skipNBytes(offset);\n                    size = last - offset + 1;\n                    inputStream = ByteStreams.limit(inputStream, size);\n                    contentRange = \"bytes \" + offset + \"-\" + last + \"/\" + attr.size();\n                }\n            }\n\n            if (eTag != null) {\n                eTag = maybeQuoteETag(eTag);\n                if (options.getIfMatch() != null) {\n                    if (!eTag.equals(maybeQuoteETag(options.getIfMatch()))) {\n                        HttpResponse response = HttpResponse.builder().statusCode(Status.PRECONDITION_FAILED.getStatusCode()).addHeader(HttpHeaders.ETAG, eTag).build();\n                        throw new HttpResponseException(new HttpCommand(HttpRequest.builder().method(\"GET\").endpoint(\"http://stub\").build()), response);\n                    }\n                }\n                if (options.getIfNoneMatch() != null) {\n                    if 
(eTag.equals(maybeQuoteETag(options.getIfNoneMatch()))) {\n                        HttpResponse response = HttpResponse.builder().statusCode(Status.NOT_MODIFIED.getStatusCode()).addHeader(HttpHeaders.ETAG, eTag).build();\n                        throw new HttpResponseException(new HttpCommand(HttpRequest.builder().method(\"GET\").endpoint(\"http://stub\").build()), response);\n                    }\n                }\n            }\n            if (options.getIfModifiedSince() != null) {\n                Date modifiedSince = options.getIfModifiedSince();\n                if (lastModifiedTime.before(modifiedSince)) {\n                    @SuppressWarnings(\"rawtypes\")\n                    HttpResponse.Builder response = HttpResponse.builder().statusCode(Status.NOT_MODIFIED.getStatusCode());\n                    if (eTag != null) {\n                        response.addHeader(HttpHeaders.ETAG, eTag);\n                    }\n                    throw new HttpResponseException(\"%1$s is before %2$s\".formatted(lastModifiedTime, modifiedSince), null, response.build());\n                }\n\n            }\n            if (options.getIfUnmodifiedSince() != null) {\n                Date unmodifiedSince = options.getIfUnmodifiedSince();\n                if (lastModifiedTime.after(unmodifiedSince)) {\n                    @SuppressWarnings(\"rawtypes\")\n                    HttpResponse.Builder response = HttpResponse.builder().statusCode(Status.PRECONDITION_FAILED.getStatusCode());\n                    if (eTag != null) {\n                        response.addHeader(HttpHeaders.ETAG, eTag);\n                    }\n                    throw new HttpResponseException(\"%1$s is after %2$s\".formatted(lastModifiedTime, unmodifiedSince), null, response.build());\n                }\n            }\n\n            Blob blob = new BlobBuilderImpl()\n                    .type(isDirectory ? 
StorageType.FOLDER : StorageType.BLOB)\n                    .name(key)\n                    .userMetadata(userMetadata.build())\n                    .payload(inputStream)\n                    .cacheControl(cacheControl)\n                    .contentDisposition(contentDisposition)\n                    .contentEncoding(contentEncoding)\n                    .contentLanguage(contentLanguage)\n                    .contentLength(size)\n                    .contentMD5(hashCode)\n                    .contentType(contentType)\n                    .eTag(eTag)\n                    .expires(expires)\n                    .tier(tier)\n                    .build();\n            blob.getMetadata().setContainer(container);\n            blob.getMetadata().setCreationDate(creationTime);\n            blob.getMetadata().setLastModified(lastModifiedTime);\n            blob.getMetadata().setSize(size);\n            if (contentRange != null) {\n                blob.getAllHeaders().put(HttpHeaders.CONTENT_RANGE, contentRange);\n            }\n            if (hashCode != null) {\n                blob.getMetadata().setETag(BaseEncoding.base16().lowerCase().encode(hashCode.asBytes()));\n            }\n            return blob;\n        } catch (NoSuchFileException nsfe) {\n            return null;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final String putBlob(String container, Blob blob) {\n        return putBlob(container, blob, new PutOptions());\n    }\n\n    @Override\n    public final String putBlob(String container, Blob blob, PutOptions options) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        var containerPath = root.resolve(container);\n        var path = containerPath.resolve(blob.getMetadata().getName()).normalize();\n        if (path.toString().equals(\"/\")) {\n            path = containerPath;\n        }\n   
     checkValidPath(containerPath, path);\n        // TODO: should we use a known suffix to filter these out during list?\n        var tmpPath = root.resolve(container).resolve(blob.getMetadata().getName() + \"-\" + UUID.randomUUID());\n        logger.debug(\"Creating blob at: {}\", path);\n\n        if (blob.getMetadata().getName().endsWith(\"/\")) {\n            try {\n                logger.debug(\"Creating directory blob: {}\", path);\n                Files.createDirectories(path);\n            } catch (FileAlreadyExistsException faee) {\n                logger.debug(\"Parent directories already exist: {}\", path.getParent());\n            } catch (IOException ioe) {\n                throw new RuntimeException(ioe);\n            }\n\n            var view = Files.getFileAttributeView(path, UserDefinedFileAttributeView.class);\n            if (view != null) {\n                try {\n                    writeCommonMetadataAttr(view, blob);\n                    view.write(XATTR_CONTENT_MD5, ByteBuffer.wrap(DIRECTORY_MD5));\n                } catch (IOException ioe) {\n                    logger.debug(\"xattrs not supported on {}\", path);\n                }\n            }\n\n            return BaseEncoding.base16().lowerCase().encode(DIRECTORY_MD5);\n        }\n\n        // Create parent directories.\n        try {\n            Files.createDirectories(path.getParent());\n        } catch (FileAlreadyExistsException faee) {\n            logger.debug(\"Parent directories already exist: {}\", path.getParent());\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n\n        var metadata = blob.getMetadata().getContentMetadata();\n        try (var is = new HashingInputStream(md5, blob.getPayload().openStream());\n             var os = Files.newOutputStream(tmpPath)) {\n            is.transferTo(os);\n            var actualHashCode = is.hash();\n            var expectedHashCode = metadata.getContentMD5AsHashCode();\n            if 
(expectedHashCode != null && !actualHashCode.equals(expectedHashCode)) {\n                Files.delete(tmpPath);\n                throw returnResponseException(400);\n            }\n\n            var view = Files.getFileAttributeView(tmpPath, UserDefinedFileAttributeView.class);\n            if (view != null) {\n                try {\n                    var eTag = actualHashCode.asBytes();\n                    view.write(XATTR_CONTENT_MD5, ByteBuffer.wrap(eTag));\n                    writeStringAttributeIfPresent(view, XATTR_CACHE_CONTROL, metadata.getCacheControl());\n                    writeStringAttributeIfPresent(view, XATTR_CONTENT_DISPOSITION, metadata.getContentDisposition());\n                    writeStringAttributeIfPresent(view, XATTR_CONTENT_ENCODING, metadata.getContentEncoding());\n                    writeStringAttributeIfPresent(view, XATTR_CONTENT_LANGUAGE, metadata.getContentLanguage());\n                    writeStringAttributeIfPresent(view, XATTR_CONTENT_TYPE, metadata.getContentType());\n                    var expires = metadata.getExpires();\n                    if (expires != null) {\n                        ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES).putLong(expires.getTime());\n                        buf.flip();\n                        view.write(XATTR_EXPIRES, buf);\n                    }\n                    writeStringAttributeIfPresent(view, XATTR_STORAGE_TIER, blob.getMetadata().getTier().toString());\n                    for (var entry : blob.getMetadata().getUserMetadata().entrySet()) {\n                        writeStringAttributeIfPresent(view, XATTR_USER_METADATA_PREFIX + entry.getKey(), entry.getValue());\n                    }\n                } catch (IOException e) {\n                    // TODO:\n                    //logger.debug(\"xattrs not supported on %s\", path);\n                }\n            }\n\n            setBlobAccessHelper(tmpPath, options.getBlobAccess());\n\n            Files.move(tmpPath, path, 
StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);\n\n            return \"\\\"\" + actualHashCode + \"\\\"\";\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final String copyBlob(String fromContainer, String fromName,\n            String toContainer, String toName, CopyOptions options) {\n        var blob = getBlob(fromContainer, fromName);\n        if (blob == null) {\n            throw new KeyNotFoundException(fromContainer, fromName, \"while copying\");\n        }\n\n        var eTag = blob.getMetadata().getETag();\n        if (eTag != null) {\n            eTag = maybeQuoteETag(eTag);\n            if (options.ifMatch() != null && !maybeQuoteETag(options.ifMatch()).equals(eTag)) {\n                throw returnResponseException(412);\n            }\n            if (options.ifNoneMatch() != null && maybeQuoteETag(options.ifNoneMatch()).equals(eTag)) {\n                throw returnResponseException(412);\n            }\n        }\n\n        var lastModified = blob.getMetadata().getLastModified();\n        if (lastModified != null) {\n            if (options.ifModifiedSince() != null && lastModified.compareTo(options.ifModifiedSince()) <= 0) {\n                throw returnResponseException(412);\n            }\n            if (options.ifUnmodifiedSince() != null && lastModified.compareTo(options.ifUnmodifiedSince()) >= 0) {\n                throw returnResponseException(412);\n            }\n        }\n\n        try (var is = blob.getPayload().openStream()) {\n            var metadata = blob.getMetadata().getContentMetadata();\n            var builder = blobBuilder(toName).payload(is);\n            Long contentLength = metadata.getContentLength();\n            if (contentLength != null) {\n                builder.contentLength(contentLength);\n            }\n\n            var contentMetadata = options.contentMetadata();\n            if (contentMetadata != null) {\n  
              String cacheControl = contentMetadata.getCacheControl();\n                if (cacheControl != null) {\n                    builder.cacheControl(cacheControl);\n                }\n                String contentDisposition = contentMetadata.getContentDisposition();\n                if (contentDisposition != null) {\n                    builder.contentDisposition(contentDisposition);\n                }\n                String contentEncoding = contentMetadata.getContentEncoding();\n                if (contentEncoding != null) {\n                    builder.contentEncoding(contentEncoding);\n                }\n                String contentLanguage = contentMetadata.getContentLanguage();\n                if (contentLanguage != null) {\n                    builder.contentLanguage(contentLanguage);\n                }\n                String contentType = contentMetadata.getContentType();\n                if (contentType != null) {\n                    builder.contentType(contentType);\n                }\n            } else {\n                builder.cacheControl(metadata.getCacheControl())\n                        .contentDisposition(metadata.getContentDisposition())\n                        .contentEncoding(metadata.getContentEncoding())\n                        .contentLanguage(metadata.getContentLanguage())\n                        .contentType(metadata.getContentType());\n            }\n\n            var userMetadata = options.userMetadata();\n            if (userMetadata != null) {\n                builder.userMetadata(userMetadata);\n            } else {\n                builder.userMetadata(blob.getMetadata().getUserMetadata());\n            }\n            return putBlob(toContainer, builder.build());\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final void removeBlob(String container, String key) {\n        try {\n            var containerPath = 
root.resolve(container);\n            var path = containerPath.resolve(key).normalize();\n            if (path.toString().equals(\"/\")) {\n                path = containerPath;\n            }\n            checkValidPath(containerPath, path);\n            logger.debug(\"Deleting blob at: {}\", path);\n            Files.delete(path);\n            removeEmptyParentDirectories(containerPath, path.getParent());\n        } catch (NoSuchFileException nsfe) {\n            return;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final BlobMetadata blobMetadata(String container, String key) {\n        Blob blob = getBlob(container, key);\n        if (blob == null) {\n            return null;\n        }\n\n        try {\n            blob.getPayload().openStream().close();\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n        return blob != null ? (BlobMetadata) BlobStoreUtils.copy(blob.getMetadata()) : null;\n    }\n\n    @Override\n    protected final boolean deleteAndVerifyContainerGone(String container) {\n        deleteContainer(container);\n        return !containerExists(container);\n    }\n\n    @Override\n    public final ContainerAccess getContainerAccess(String container) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        var path = root.resolve(container);\n        Set<PosixFilePermission> permissions;\n        try {\n            permissions = Files.getPosixFilePermissions(path);\n        } catch (UnsupportedOperationException uoe) {\n            // Windows/SMB/other non-POSIX: default to PRIVATE\n            return ContainerAccess.PRIVATE;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n        return permissions.contains(PosixFilePermission.OTHERS_READ) ?\n                ContainerAccess.PUBLIC_READ : 
ContainerAccess.PRIVATE;\n    }\n\n    @Override\n    public final void setContainerAccess(String container, ContainerAccess access) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n\n        var path = root.resolve(container);\n        Set<PosixFilePermission> permissions;\n        try {\n            permissions = new HashSet<>(Files.getPosixFilePermissions(path));\n            if (access == ContainerAccess.PRIVATE) {\n                permissions.remove(PosixFilePermission.OTHERS_READ);\n            } else if (access == ContainerAccess.PUBLIC_READ) {\n                permissions.add(PosixFilePermission.OTHERS_READ);\n            }\n            Files.setPosixFilePermissions(path, permissions);\n        } catch (UnsupportedOperationException uoe) {\n            // Windows/SMB/other non-POSIX: ignore, cannot set permissions\n            return;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n\n    @Override\n    public final BlobAccess getBlobAccess(String container, String key) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n        if (!blobExists(container, key)) {\n            throw new KeyNotFoundException(container, key, \"\");\n        }\n\n        var containerPath = root.resolve(container);\n        var path = containerPath.resolve(key).normalize();\n        if (path.toString().equals(\"/\")) {\n            path = containerPath;\n        }\n        checkValidPath(containerPath, path);\n\n        Set<PosixFilePermission> permissions;\n        try {\n            permissions = Files.getPosixFilePermissions(path);\n        } catch (UnsupportedOperationException uoe) {\n            // Windows/SMB/other non-POSIX: default to PRIVATE\n            return BlobAccess.PRIVATE;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n     
   }\n        return permissions.contains(PosixFilePermission.OTHERS_READ) ?\n                BlobAccess.PUBLIC_READ : BlobAccess.PRIVATE;\n    }\n\n    @Override\n    public final void setBlobAccess(String container, String key, BlobAccess access) {\n        if (!containerExists(container)) {\n            throw new ContainerNotFoundException(container, \"\");\n        }\n        if (!blobExists(container, key)) {\n            throw new KeyNotFoundException(container, key, \"\");\n        }\n\n        var containerPath = root.resolve(container);\n        var path = containerPath.resolve(key).normalize();\n        if (path.toString().equals(\"/\")) {\n            path = containerPath;\n        }\n        checkValidPath(containerPath, path);\n\n        setBlobAccessHelper(path, access);\n    }\n\n    @Override\n    public final MultipartUpload initiateMultipartUpload(String container,\n            BlobMetadata blobMetadata, PutOptions options) {\n        var uploadId = UUID.randomUUID().toString();\n        // create a stub blob\n        var blob = blobBuilder(MULTIPART_PREFIX + uploadId + \"-\" + blobMetadata.getName() + \"-stub\").payload(ByteSource.empty()).build();\n        putBlob(container, blob);\n        return MultipartUpload.create(container, blobMetadata.getName(), uploadId,\n                blobMetadata, options);\n    }\n\n    @Override\n    public final void abortMultipartUpload(MultipartUpload mpu) {\n        var parts = listMultipartUpload(mpu);\n        for (var part : parts) {\n            removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\" + part.partNumber());\n        }\n        removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-stub\");\n    }\n\n    @Override\n    public final String completeMultipartUpload(MultipartUpload mpu, List<MultipartPart> parts) {\n        var metas = ImmutableList.<BlobMetadata>builder();\n        long contentLength = 0;\n        var 
md5Hasher = md5.newHasher();\n\n        for (var part : parts) {\n            var meta = blobMetadata(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\" + part.partNumber());\n            contentLength += meta.getContentMetadata().getContentLength();\n            metas.add(meta);\n            if (meta.getETag() != null) {\n                var eTag = meta.getETag();\n                if (eTag.startsWith(\"\\\"\") && eTag.endsWith(\"\\\"\") &&\n                       eTag.length() >= 2) {\n                    eTag = eTag.substring(1, eTag.length() - 1);\n                }\n                md5Hasher.putBytes(BaseEncoding.base16().lowerCase().decode(eTag));\n            }\n        }\n        var mpuETag = \"\\\"\" + md5Hasher.hash() + \"-\" + parts.size() + \"\\\"\";\n        var blobBuilder = blobBuilder(mpu.blobName())\n                .userMetadata(mpu.blobMetadata().getUserMetadata())\n                .payload(new MultiBlobInputStream(this, metas.build()))\n                .contentLength(contentLength)\n                .eTag(mpuETag);\n        var cacheControl = mpu.blobMetadata().getContentMetadata().getCacheControl();\n        if (cacheControl != null) {\n            blobBuilder.cacheControl(cacheControl);\n        }\n        var contentDisposition = mpu.blobMetadata().getContentMetadata().getContentDisposition();\n        if (contentDisposition != null) {\n            blobBuilder.contentDisposition(contentDisposition);\n        }\n        var contentEncoding = mpu.blobMetadata().getContentMetadata().getContentEncoding();\n        if (contentEncoding != null) {\n            blobBuilder.contentEncoding(contentEncoding);\n        }\n        var contentLanguage = mpu.blobMetadata().getContentMetadata().getContentLanguage();\n        if (contentLanguage != null) {\n            blobBuilder.contentLanguage(contentLanguage);\n        }\n        // intentionally not copying MD5\n        var contentType = 
mpu.blobMetadata().getContentMetadata().getContentType();\n        if (contentType != null) {\n            blobBuilder.contentType(contentType);\n        }\n        var expires = mpu.blobMetadata().getContentMetadata().getExpires();\n        if (expires != null) {\n            blobBuilder.expires(expires);\n        }\n        var tier = mpu.blobMetadata().getTier();\n        if (tier != null) {\n            blobBuilder.tier(tier);\n        }\n\n        putBlob(mpu.containerName(), blobBuilder.build());\n\n        for (var part : parts) {\n            removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\" + part.partNumber());\n        }\n        removeBlob(mpu.containerName(), MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-stub\");\n\n        setBlobAccess(mpu.containerName(), mpu.blobName(), mpu.putOptions().getBlobAccess());\n\n        return mpuETag;\n    }\n\n    @Override\n    public final MultipartPart uploadMultipartPart(MultipartUpload mpu, int partNumber, Payload payload) {\n        var partName = MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\" + partNumber;\n        var blob = blobBuilder(partName)\n                .payload(payload)\n                .build();\n        var partETag = putBlob(mpu.containerName(), blob);\n        var metadata = blobMetadata(mpu.containerName(), partName);  // TODO: racy, how to get this from payload?\n        var partSize = metadata.getContentMetadata().getContentLength();\n        return MultipartPart.create(partNumber, partSize, partETag, metadata.getLastModified());\n    }\n\n    @Override\n    public final List<MultipartPart> listMultipartUpload(MultipartUpload mpu) {\n        var parts = ImmutableList.<MultipartPart>builder();\n        var options =\n                new ListContainerOptions().prefix(MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\").recursive();\n        while (true) {\n            var pageSet = list(mpu.containerName(), 
options);\n            for (var sm : pageSet) {\n                if (sm.getName().endsWith(\"-stub\")) {\n                    continue;\n                }\n                int partNumber = Integer.parseInt(sm.getName().substring((MULTIPART_PREFIX + mpu.id() + \"-\" + mpu.blobName() + \"-\").length()));\n                long partSize = sm.getSize();\n                parts.add(MultipartPart.create(partNumber, partSize, sm.getETag(), sm.getLastModified()));\n            }\n            if (pageSet.isEmpty() || pageSet.getNextMarker() == null) {\n                break;\n            }\n            options.afterMarker(pageSet.getNextMarker());\n        }\n        return parts.build();\n    }\n\n    @Override\n    public final List<MultipartUpload> listMultipartUploads(String container) {\n        var mpus = ImmutableList.<MultipartUpload>builder();\n        var options = new ListContainerOptions().prefix(MULTIPART_PREFIX).recursive();\n        int uuidLength = UUID.randomUUID().toString().length();\n        while (true) {\n            var pageSet = list(container, options);\n            for (StorageMetadata sm : pageSet) {\n                if (!sm.getName().endsWith(\"-stub\")) {\n                    continue;\n                }\n                var uploadId = sm.getName().substring(MULTIPART_PREFIX.length(), MULTIPART_PREFIX.length() + uuidLength);\n                var blobName = sm.getName().substring(MULTIPART_PREFIX.length() + uuidLength + 1);\n                int index = blobName.lastIndexOf('-');\n                blobName = blobName.substring(0, index);\n\n                mpus.add(MultipartUpload.create(container, blobName, uploadId, null, null));\n            }\n            if (pageSet.isEmpty() || pageSet.getNextMarker() == null) {\n                break;\n            }\n            options.afterMarker(pageSet.getNextMarker());\n        }\n\n        return mpus.build();\n    }\n\n    @Override\n    public final long getMinimumMultipartPartSize() {\n        return 
1;\n    }\n\n    @Override\n    public final long getMaximumMultipartPartSize() {\n        return 100 * 1024 * 1024;\n    }\n\n    @Override\n    public final int getMaximumNumberOfParts() {\n        return 50 * 1000;\n    }\n\n    @Override\n    public final InputStream streamBlob(String container, String name) {\n        throw new UnsupportedOperationException(\"not yet implemented\");\n    }\n\n   /**\n    * Read the String representation of a filesystem attribute, or return null\n    * if not present.\n    */\n    private static String readStringAttributeIfPresent(\n            UserDefinedFileAttributeView view, Set<String> attr, String name)\n            throws IOException {\n        if (!attr.contains(name)) {\n            return null;\n        }\n        ByteBuffer buf = ByteBuffer.allocate(view.size(name));\n        view.read(name, buf);\n        return new String(buf.array(), StandardCharsets.UTF_8);\n    }\n\n    /** Write the String representation of a filesystem attribute. */\n    private static void writeStringAttributeIfPresent(\n            UserDefinedFileAttributeView view, String name, String value)\n            throws IOException {\n        if (value != null) {\n            view.write(name, ByteBuffer.wrap(value.getBytes(StandardCharsets.UTF_8)));\n        }\n    }\n\n    private static final class MultiBlobInputStream extends InputStream {\n        private final BlobStore blobStore;\n        private final Iterator<BlobMetadata> metas;\n        private InputStream current;\n\n        MultiBlobInputStream(BlobStore blobStore, List<BlobMetadata> metas) {\n            this.blobStore = blobStore;\n            this.metas = metas.iterator();\n        }\n\n        @Override\n        public int read() throws IOException {\n            while (true) {\n                if (current == null) {\n                    if (!metas.hasNext()) {\n                        return -1;\n                    }\n                    BlobMetadata meta = metas.next();\n          
          current = blobStore.getBlob(meta.getContainer(), meta.getName()).getPayload().openStream();\n                }\n                int result = current.read();\n                if (result == -1) {\n                    current.close();\n                    current = null;\n                    continue;\n                }\n                return result & 0x000000FF;\n            }\n        }\n\n        @Override\n        public int read(byte[] b, int off, int len) throws IOException {\n            while (true) {\n                if (current == null) {\n                    if (!metas.hasNext()) {\n                        return -1;\n                    }\n                    BlobMetadata meta = metas.next();\n                    current = blobStore.getBlob(meta.getContainer(), meta.getName()).getPayload().openStream();\n                }\n                int result = current.read(b, off, len);\n                if (result == -1) {\n                    current.close();\n                    current = null;\n                    continue;\n                }\n                return result;\n            }\n        }\n\n        @Override\n        public void close() throws IOException {\n            if (current != null) {\n                current.close();\n                current = null;\n            }\n        }\n    }\n\n    private static HttpResponseException returnResponseException(int code) {\n        var response = HttpResponse.builder().statusCode(code).build();\n        return new HttpResponseException(new HttpCommand(HttpRequest.builder()\n                .method(\"GET\")\n                .endpoint(\"http://stub\")\n                .build()), response);\n    }\n\n    private static String maybeQuoteETag(String eTag) {\n        if (!eTag.startsWith(\"\\\"\") && !eTag.endsWith(\"\\\"\")) {\n            eTag = \"\\\"\" + eTag + \"\\\"\";\n        }\n        return eTag;\n    }\n\n    /**\n     * AbstractNio2BlobStore implicitly creates directories when creating 
a key /a/b/c.\n     * When removing /a/b/c, it must clean up /a and /a/b, unless a client explicitly created a subdirectory which has file attributes.\n     */\n    private static void removeEmptyParentDirectories(Path containerPath, Path path) throws IOException {\n        logger.debug(\"removing empty parents: {}\", path);\n        while (true) {\n            var parent = path.getParent();\n            if (parent == null || path.equals(containerPath)) {\n                break;\n            }\n            if (safeGetXattrs(path).attributes().contains(XATTR_CONTENT_MD5)) {\n                break;\n            }\n            try {\n                logger.debug(\"deleting: {}\", path);\n                Files.delete(path);\n            } catch (DirectoryNotEmptyException dnee) {\n                break;\n            }\n            path = path.getParent();\n        }\n    }\n\n    // TODO: call in other places\n    private static void writeCommonMetadataAttr(UserDefinedFileAttributeView view, Blob blob) throws IOException {\n        var metadata = blob.getMetadata().getContentMetadata();\n        writeStringAttributeIfPresent(view, XATTR_CACHE_CONTROL, metadata.getCacheControl());\n        writeStringAttributeIfPresent(view, XATTR_CONTENT_DISPOSITION, metadata.getContentDisposition());\n        writeStringAttributeIfPresent(view, XATTR_CONTENT_ENCODING, metadata.getContentEncoding());\n        writeStringAttributeIfPresent(view, XATTR_CONTENT_LANGUAGE, metadata.getContentLanguage());\n        writeStringAttributeIfPresent(view, XATTR_CONTENT_TYPE, metadata.getContentType());\n        var expires = metadata.getExpires();\n        if (expires != null) {\n            var buf = ByteBuffer.allocate(Longs.BYTES).putLong(expires.getTime());\n            buf.flip();\n            view.write(XATTR_EXPIRES, buf);\n        }\n        writeStringAttributeIfPresent(view, XATTR_STORAGE_TIER, blob.getMetadata().getTier().toString());\n        for (var entry : 
blob.getMetadata().getUserMetadata().entrySet()) {\n            writeStringAttributeIfPresent(view, XATTR_USER_METADATA_PREFIX + entry.getKey(), entry.getValue());\n        }\n    }\n\n    private record XattrState(UserDefinedFileAttributeView view, Set<String> attributes) {\n        static final XattrState EMPTY = new XattrState(null, NO_ATTRIBUTES);\n    }\n\n    /**\n     * Safely read extended attributes for a path. Returns a view and attribute\n     * set, or EMPTY if the filesystem does not support extended attributes\n     * (e.g., Docker Desktop bind mounts via VirtioFS, some NFS/NAS mounts).\n     */\n    private static XattrState safeGetXattrs(Path path) {\n        var view = Files.getFileAttributeView(path, UserDefinedFileAttributeView.class);\n        if (view == null) {\n            return XattrState.EMPTY;\n        }\n        try {\n            return new XattrState(view, Set.copyOf(view.list()));\n        } catch (IOException e) {\n            logger.debug(\"xattrs not supported on {}\", path);\n            return XattrState.EMPTY;\n        }\n    }\n\n    private static void checkValidPath(Path container, Path path) {\n        if (!path.normalize().startsWith(container)) {\n            throw new IllegalArgumentException(\"Invalid key name: path traversal attempt detected: \" + container + \" \" + path);\n        }\n    }\n\n    private static void setBlobAccessHelper(Path path, BlobAccess access) {\n        try {\n            var permissions = new HashSet<>(Files.getPosixFilePermissions(path));\n            if (access == BlobAccess.PRIVATE) {\n                permissions.remove(PosixFilePermission.OTHERS_READ);\n            } else if (access == BlobAccess.PUBLIC_READ) {\n                permissions.add(PosixFilePermission.OTHERS_READ);\n            }\n            Files.setPosixFilePermissions(path, permissions);\n        } catch (UnsupportedOperationException uoe) {\n            // Windows/SMB/other non-POSIX: ignore, cannot set permissions\n        
    return;\n        } catch (IOException ioe) {\n            throw new RuntimeException(ioe);\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobApiMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.net.URI;\nimport java.util.Properties;\nimport java.util.Set;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.reflect.Reflection2;\nimport org.jclouds.rest.internal.BaseHttpApiMetadata;\n\n@SuppressWarnings(\"rawtypes\")\npublic final class FilesystemNio2BlobApiMetadata extends BaseHttpApiMetadata {\n    public FilesystemNio2BlobApiMetadata() {\n        this(builder());\n    }\n\n    protected FilesystemNio2BlobApiMetadata(Builder builder) {\n        super(builder);\n    }\n\n    private static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromApiMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        return BaseHttpApiMetadata.defaultProperties();\n    }\n\n    // Fake API client\n    private interface FilesystemNio2BlobClient {\n    }\n\n    public static final class Builder\n            extends BaseHttpApiMetadata.Builder<FilesystemNio2BlobClient, Builder> {\n        protected Builder() {\n            super(FilesystemNio2BlobClient.class);\n            id(\"filesystem-nio2\")\n                .name(\"Filesystem NIO.2 Blobstore\")\n                .identityName(\"Account Name\")\n                .credentialName(\"Access Key\")\n         
       .defaultEndpoint(\"http://localhost/\")\n                .documentation(URI.create(\n                        \"http://www.jclouds.org/documentation/userguide\" +\n                        \"/blobstore-guide\"))\n                .defaultProperties(FilesystemNio2BlobApiMetadata.defaultProperties())\n                .view(Reflection2.typeToken(BlobStoreContext.class))\n                .defaultModules(Set.of(FilesystemNio2BlobStoreContextModule.class));\n        }\n\n        @Override\n        public FilesystemNio2BlobApiMetadata build() {\n            return new FilesystemNio2BlobApiMetadata(this);\n        }\n\n        @Override\n        protected Builder self() {\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobProviderMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.util.Properties;\n\nimport com.google.auto.service.AutoService;\n\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.providers.internal.BaseProviderMetadata;\n\n/**\n * Implementation of org.jclouds.types.ProviderMetadata for NIO.2 filesystems.\n */\n@AutoService(ProviderMetadata.class)\npublic final class FilesystemNio2BlobProviderMetadata extends BaseProviderMetadata {\n    public FilesystemNio2BlobProviderMetadata() {\n        super(builder());\n    }\n\n    public FilesystemNio2BlobProviderMetadata(Builder builder) {\n        super(builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromProviderMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        Properties properties = new Properties();\n        // TODO: filesystem basedir\n        return properties;\n    }\n    public static final class Builder extends BaseProviderMetadata.Builder {\n        protected Builder() {\n            id(\"filesystem-nio2\")\n                .name(\"NIO.2 filesystem blobstore\")\n                .apiMetadata(new FilesystemNio2BlobApiMetadata())\n                .endpoint(\"https://127.0.0.1\")  // TODO:\n                
.defaultProperties(\n                        FilesystemNio2BlobProviderMetadata.defaultProperties());\n        }\n\n        @Override\n        public FilesystemNio2BlobProviderMetadata build() {\n            return new FilesystemNio2BlobProviderMetadata(this);\n        }\n\n        @Override\n        public Builder fromProviderMetadata(\n                ProviderMetadata in) {\n            super.fromProviderMetadata(in);\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.nio.file.FileSystems;\nimport java.nio.file.Files;\nimport java.nio.file.NoSuchFileException;\nimport java.util.Set;\n\nimport com.google.common.base.Supplier;\n\nimport jakarta.inject.Inject;\nimport jakarta.inject.Named;\nimport jakarta.inject.Singleton;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.filesystem.reference.FilesystemConstants;\nimport org.jclouds.io.PayloadSlicer;\n\n@Singleton\npublic final class FilesystemNio2BlobStore extends AbstractNio2BlobStore {\n    @Inject\n    FilesystemNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? 
extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            @Named(FilesystemConstants.PROPERTY_BASEDIR) String baseDir) {\n        super(context, blobUtils, defaultLocation, locations, slicer, creds,\n                // cannot be closed\n                FileSystems.getDefault().getPath(baseDir));\n        if (!Files.exists(getRoot())) {\n            throw new RuntimeException(new NoSuchFileException(getRoot().toString()));\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/FilesystemNio2BlobStoreContextModule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Scopes;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.attr.ConsistencyModel;\n\npublic final class FilesystemNio2BlobStoreContextModule extends AbstractModule {\n    @Override\n    protected void configure() {\n        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);\n        bind(BlobStore.class).to(FilesystemNio2BlobStore.class).in(Scopes.SINGLETON);\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobApiMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.net.URI;\nimport java.util.Properties;\nimport java.util.Set;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.reflect.Reflection2;\nimport org.jclouds.rest.internal.BaseHttpApiMetadata;\n\n@SuppressWarnings(\"rawtypes\")\npublic final class TransientNio2BlobApiMetadata extends BaseHttpApiMetadata {\n    public TransientNio2BlobApiMetadata() {\n        this(builder());\n    }\n\n    protected TransientNio2BlobApiMetadata(Builder builder) {\n        super(builder);\n    }\n\n    private static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromApiMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        return BaseHttpApiMetadata.defaultProperties();\n    }\n\n    // Fake API client\n    private interface TransientNio2BlobClient {\n    }\n\n    public static final class Builder\n            extends BaseHttpApiMetadata.Builder<TransientNio2BlobClient, Builder> {\n        protected Builder() {\n            super(TransientNio2BlobClient.class);\n            id(\"transient-nio2\")\n                .name(\"Transient NIO.2 Blobstore\")\n                .identityName(\"Account Name\")\n                .credentialName(\"Access Key\")\n                
.defaultEndpoint(\"http://localhost/\")\n                .documentation(URI.create(\n                        \"http://www.jclouds.org/documentation/userguide\" +\n                        \"/blobstore-guide\"))\n                .defaultProperties(TransientNio2BlobApiMetadata.defaultProperties())\n                .view(Reflection2.typeToken(BlobStoreContext.class))\n                .defaultModules(Set.of(TransientNio2BlobStoreContextModule.class));\n        }\n\n        @Override\n        public TransientNio2BlobApiMetadata build() {\n            return new TransientNio2BlobApiMetadata(this);\n        }\n\n        @Override\n        protected Builder self() {\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobProviderMetadata.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.util.Properties;\n\nimport com.google.auto.service.AutoService;\n\nimport org.jclouds.providers.ProviderMetadata;\nimport org.jclouds.providers.internal.BaseProviderMetadata;\n\n/**\n * Implementation of org.jclouds.types.ProviderMetadata for NIO.2 filesystems.\n */\n@AutoService(ProviderMetadata.class)\npublic final class TransientNio2BlobProviderMetadata extends BaseProviderMetadata {\n    public TransientNio2BlobProviderMetadata() {\n        super(builder());\n    }\n\n    public TransientNio2BlobProviderMetadata(Builder builder) {\n        super(builder);\n    }\n\n    public static Builder builder() {\n        return new Builder();\n    }\n\n    @Override\n    public Builder toBuilder() {\n        return builder().fromProviderMetadata(this);\n    }\n\n    public static Properties defaultProperties() {\n        Properties properties = new Properties();\n        // TODO: filesystem basedir\n        return properties;\n    }\n    public static final class Builder extends BaseProviderMetadata.Builder {\n        protected Builder() {\n            id(\"transient-nio2\")\n                .name(\"Filesystem NIO.2 blobstore\")\n                .apiMetadata(new TransientNio2BlobApiMetadata())\n                .endpoint(\"https://127.0.0.1\")  // TODO:\n                
.defaultProperties(\n                        TransientNio2BlobProviderMetadata.defaultProperties());\n        }\n\n        @Override\n        public TransientNio2BlobProviderMetadata build() {\n            return new TransientNio2BlobProviderMetadata(this);\n        }\n\n        @Override\n        public Builder fromProviderMetadata(\n                ProviderMetadata in) {\n            super.fromProviderMetadata(in);\n            return this;\n        }\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobStore.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport java.nio.file.FileSystem;\nimport java.util.Set;\n\nimport com.google.common.base.Supplier;\nimport com.google.common.jimfs.Configuration;\nimport com.google.common.jimfs.Jimfs;\n\nimport jakarta.inject.Inject;\nimport jakarta.inject.Singleton;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.util.BlobUtils;\nimport org.jclouds.collect.Memoized;\nimport org.jclouds.domain.Credentials;\nimport org.jclouds.domain.Location;\nimport org.jclouds.io.PayloadSlicer;\n\n@Singleton\npublic final class TransientNio2BlobStore extends AbstractNio2BlobStore {\n    @Inject\n    TransientNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? 
extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds) {\n        this(context, blobUtils, defaultLocation, locations, slicer, creds,\n                Jimfs.newFileSystem(Configuration.unix().toBuilder()\n                        .setAttributeViews(\"posix\", \"user\")\n                        .setWorkingDirectory(\"/\")\n                        .build()));\n    }\n\n    // Helper to create Path\n    private TransientNio2BlobStore(BlobStoreContext context, BlobUtils blobUtils,\n            Supplier<Location> defaultLocation,\n            @Memoized Supplier<Set<? extends Location>> locations,\n            PayloadSlicer slicer,\n            @org.jclouds.location.Provider Supplier<Credentials> creds,\n            FileSystem fs) {\n        // TODO: close fs?\n        super(context, blobUtils, defaultLocation, locations, slicer, creds,\n                fs.getPath(\"\"));\n    }\n}\n"
  },
  {
    "path": "src/main/java/org/gaul/s3proxy/nio2blob/TransientNio2BlobStoreContextModule.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.nio2blob;\n\nimport com.google.inject.AbstractModule;\nimport com.google.inject.Scopes;\n\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.attr.ConsistencyModel;\n\npublic final class TransientNio2BlobStoreContextModule extends AbstractModule {\n    @Override\n    protected void configure() {\n        bind(ConsistencyModel.class).toInstance(ConsistencyModel.STRICT);\n        bind(BlobStore.class).to(TransientNio2BlobStore.class).in(Scopes.SINGLETON);\n    }\n}\n"
  },
  {
    "path": "src/main/resources/checkstyle.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE module PUBLIC \"-//Puppy Crawl//DTD Check Configuration 1.3//EN\" \"http://www.puppycrawl.com/dtds/configuration_1_3.dtd\">\n\n<module name=\"Checker\">\n  <property name=\"severity\" value=\"warning\"/>\n  <module name=\"FileTabCharacter\"/>\n  <module name=\"JavadocPackage\">\n    <property name=\"severity\" value=\"ignore\"/>\n    <metadata name=\"net.sf.eclipsecs.core.lastEnabledSeverity\" value=\"inherit\"/>\n  </module>\n  <module name=\"NewlineAtEndOfFile\"/>\n  <module name=\"Translation\"/>\n  <module name=\"TreeWalker\">\n    <module name=\"SuppressionCommentFilter\"/>\n    <module name=\"AbbreviationAsWordInName\"/>\n    <module name=\"AbstractClassName\"/>\n    <module name=\"AnnotationLocation\"/>\n    <module name=\"AnnotationUseStyle\"/>\n    <module name=\"ArrayTypeStyle\"/>\n    <module name=\"AtclauseOrder\"/>\n    <module name=\"AvoidNestedBlocks\"/>\n    <module name=\"AvoidStarImport\"/>\n    <module name=\"AvoidStaticImport\">\n      <property name=\"excludes\" value=\"java.util.Objects.requireNonNull,com.google.common.base.Preconditions.checkArgument,com.google.common.base.Preconditions.checkState,org.assertj.core.api.Assertions.assertThat,org.assertj.core.api.Assertions.catchThrowable,org.junit.Assume.assumeTrue\"/>\n    </module>\n    <module name=\"ClassTypeParameterName\"/>\n    <module name=\"CovariantEquals\"/>\n    <module name=\"DeclarationOrder\"/>\n    <module name=\"DefaultComesLast\"/>\n    <module name=\"DesignForExtension\"/>\n    <module name=\"EmptyBlock\">\n      <property name=\"option\" value=\"text\"/>\n    </module>\n    <module name=\"EmptyForInitializerPad\"/>\n    <module name=\"EmptyForIteratorPad\"/>\n    <module name=\"EmptyStatement\"/>\n    <module name=\"EqualsHashCode\"/>\n    <module name=\"ExplicitInitialization\"/>\n    <module name=\"FallThrough\"/>\n    <module name=\"FinalClass\"/>\n    <module name=\"GenericWhitespace\"/>\n<!--\n    // 
TODO: Checkstyle dislikes builders without set prefix\n    https://github.com/checkstyle/checkstyle/issues/619\n    <module name=\"HiddenField\">\n      <property name=\"ignoreConstructorParameter\" value=\"true\"/>\n      <property name=\"ignoreSetter\" value=\"true\"/>\n    </module>\n-->\n    <module name=\"HideUtilityClassConstructor\"/>\n    <module name=\"IllegalImport\"/>\n    <module name=\"IllegalInstantiation\">\n      <property name=\"classes\" value=\"java.lang.Boolean,java.lang.Short,java.lang.Integer,java.lang.Long\"/>\n    </module>\n    <module name=\"ImportOrder\">\n      <property name=\"groups\" value=\"java,javax,com,io,jakarta,org\"/>\n      <property name=\"separated\" value=\"true\"/>\n      <property name=\"option\" value=\"top\"/>\n    </module>\n    <module name=\"Indentation\">\n      <property name=\"caseIndent\" value=\"0\"/>\n      <property name=\"throwsIndent\" value=\"8\"/>\n    </module>\n    <module name=\"InnerAssignment\"/>\n    <module name=\"InterfaceIsType\"/>\n    <module name=\"JavadocStyle\"/>\n    <module name=\"LeftCurly\"/>\n    <module name=\"LocalFinalVariableName\"/>\n    <module name=\"LocalVariableName\"/>\n    <module name=\"MagicNumber\">\n      <property name=\"severity\" value=\"ignore\"/>\n      <metadata name=\"net.sf.eclipsecs.core.lastEnabledSeverity\" value=\"inherit\"/>\n    </module>\n    <module name=\"MemberName\">\n        <property name=\"applyToPublic\" value=\"false\"/>\n    </module>\n    <module name=\"MethodName\"/>\n    <module name=\"MethodParamPad\"/>\n    <module name=\"MethodTypeParameterName\"/>\n    <module name=\"MissingDeprecated\"/>\n    <module name=\"MissingOverride\"/>\n    <module name=\"MissingSwitchDefault\"/>\n    <module name=\"ModifierOrder\"/>\n    <module name=\"MultipleVariableDeclarations\"/>\n    <module name=\"MutableException\"/>\n    <module name=\"NeedBraces\"/>\n    <module name=\"NoClone\"/>\n    <module name=\"NoFinalizer\"/>\n    <module 
name=\"NonEmptyAtclauseDescription\"/>\n    <module name=\"NoWhitespaceAfter\"/>\n    <module name=\"NoWhitespaceBefore\"/>\n    <module name=\"OneStatementPerLine\"/>\n    <module name=\"OperatorWrap\">\n      <property name=\"option\" value=\"eol\"/>\n    </module>\n    <module name=\"OuterTypeFilename\"/>\n    <module name=\"OverloadMethodsDeclarationOrder\"/>\n    <module name=\"PackageDeclaration\"/>\n    <module name=\"PackageName\"/>\n    <module name=\"ParameterName\"/>\n    <module name=\"ParenPad\"/>\n    <module name=\"RedundantImport\"/>\n    <module name=\"RedundantModifier\"/>\n    <module name=\"RightCurly\"/>\n    <module name=\"SeparatorWrap\">\n      <property name=\"tokens\" value=\"DOT\"/>\n      <property name=\"option\" value=\"nl\"/>\n    </module>\n    <module name=\"SimplifyBooleanExpression\"/>\n    <module name=\"SimplifyBooleanReturn\"/>\n    <module name=\"StaticVariableName\"/>\n    <module name=\"StringLiteralEquality\"/>\n    <module name=\"SuppressWarnings\"/>\n    <module name=\"TodoComment\">\n      <property name=\"severity\" value=\"ignore\"/>\n      <metadata name=\"net.sf.eclipsecs.core.lastEnabledSeverity\" value=\"inherit\"/>\n    </module>\n    <module name=\"TypecastParenPad\"/>\n    <module name=\"TypeName\"/>\n    <module name=\"UnusedImports\"/>\n    <module name=\"UpperEll\"/>\n    <module name=\"VisibilityModifier\">\n      <property name=\"protectedAllowed\" value=\"true\"/>\n      <property name=\"publicMemberPattern\"\n          value=\"^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$\"/>\n    </module>\n    <module name=\"WhitespaceAfter\"/>\n    <module name=\"WhitespaceAround\"/>\n  </module>\n  <module name=\"Header\">\n    <property name=\"fileExtensions\" value=\"java\"/>\n    <property name=\"headerFile\" value=\"${checkstyle.header.file}\"/>\n  </module>\n  <module name=\"RegexpSingleline\">\n    <property name=\"format\" value=\"\\s+$\"/>\n    <property name=\"message\" value=\"Line has trailing spaces.\"/>\n  
</module>\n</module>\n"
  },
  {
    "path": "src/main/resources/copyright_header.txt",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n"
  },
  {
    "path": "src/main/resources/run-docker-container.sh",
    "content": "#!/bin/sh\n\nmkdir \"${JCLOUDS_FILESYSTEM_BASEDIR}\"\n\nexec java \\\n    $S3PROXY_JAVA_OPTS \\\n    -DLOG_LEVEL=\"${LOG_LEVEL}\" \\\n    -Ds3proxy.endpoint=\"${S3PROXY_ENDPOINT}\" \\\n    -Ds3proxy.secure-endpoint=\"${S3PROXY_SECURE_ENDPOINT}\" \\\n    -Ds3proxy.virtual-host=\"${S3PROXY_VIRTUALHOST}\" \\\n    -Ds3proxy.keystore-path=\"${S3PROXY_KEYSTORE_PATH}\" \\\n    -Ds3proxy.keystore-password=\"${S3PROXY_KEYSTORE_PASSWORD}\" \\\n    -Ds3proxy.authorization=\"${S3PROXY_AUTHORIZATION}\" \\\n    -Ds3proxy.identity=\"${S3PROXY_IDENTITY}\" \\\n    -Ds3proxy.credential=\"${S3PROXY_CREDENTIAL}\" \\\n    -Ds3proxy.cors-allow-all=\"${S3PROXY_CORS_ALLOW_ALL}\" \\\n    -Ds3proxy.cors-allow-origins=\"${S3PROXY_CORS_ALLOW_ORIGINS}\" \\\n    -Ds3proxy.cors-allow-methods=\"${S3PROXY_CORS_ALLOW_METHODS}\" \\\n    -Ds3proxy.cors-allow-headers=\"${S3PROXY_CORS_ALLOW_HEADERS}\" \\\n    -Ds3proxy.cors-exposed-headers=\"${S3PROXY_CORS_EXPOSED_HEADERS}\" \\\n    -Ds3proxy.cors-allow-credential=\"${S3PROXY_CORS_ALLOW_CREDENTIAL}\" \\\n    -Ds3proxy.ignore-unknown-headers=\"${S3PROXY_IGNORE_UNKNOWN_HEADERS}\" \\\n    -Ds3proxy.encrypted-blobstore=\"${S3PROXY_ENCRYPTED_BLOBSTORE}\" \\\n    -Ds3proxy.encrypted-blobstore-password=\"${S3PROXY_ENCRYPTED_BLOBSTORE_PASSWORD}\" \\\n    -Ds3proxy.encrypted-blobstore-salt=\"${S3PROXY_ENCRYPTED_BLOBSTORE_SALT}\" \\\n    -Ds3proxy.v4-max-non-chunked-request-size=\"${S3PROXY_V4_MAX_NON_CHUNKED_REQ_SIZE:-134217728}\" \\\n    -Ds3proxy.v4-max-chunk-size=\"${S3PROXY_V4_MAX_CHUNK_SIZE:-16777216}\" \\\n    -Ds3proxy.read-only-blobstore=\"${S3PROXY_READ_ONLY_BLOBSTORE:-false}\" \\\n    -Ds3proxy.no-cache-blobstore=\"${S3PROXY_NO_CACHE_BLOBSTORE:-false}\" \\\n    -Ds3proxy.maximum-timeskew=\"${S3PROXY_MAXIMUM_TIMESKEW}\" \\\n    -Ds3proxy.metrics.enabled=\"${S3PROXY_METRICS_ENABLED}\" \\\n    -Ds3proxy.metrics.port=\"${S3PROXY_METRICS_PORT}\" \\\n    -Ds3proxy.metrics.host=\"${S3PROXY_METRICS_HOST}\" \\\n    
-Ds3proxy.service-path=\"${S3PROXY_SERVICE_PATH}\" \\\n    -Djclouds.provider=\"${JCLOUDS_PROVIDER}\" \\\n    -Djclouds.identity=\"${JCLOUDS_IDENTITY}\" \\\n    -Djclouds.credential=\"${JCLOUDS_CREDENTIAL}\" \\\n    -Djclouds.endpoint=\"${JCLOUDS_ENDPOINT}\" \\\n    -Djclouds.region=\"${JCLOUDS_REGION}\" \\\n    -Djclouds.regions=\"${JCLOUDS_REGIONS}\" \\\n    -Djclouds.keystone.version=\"${JCLOUDS_KEYSTONE_VERSION}\" \\\n    -Djclouds.keystone.scope=\"${JCLOUDS_KEYSTONE_SCOPE}\" \\\n    -Djclouds.keystone.project-domain-name=\"${JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME}\" \\\n    -Djclouds.filesystem.basedir=\"${JCLOUDS_FILESYSTEM_BASEDIR}\" \\\n    -Djclouds.azureblob.tenantId=\"${JCLOUDS_AZUREBLOB_TENANTID}\" \\\n    -Djclouds.azureblob.auth=\"${JCLOUDS_AZUREBLOB_AUTH}\" \\\n    -Djclouds.azureblob.account=\"${JCLOUDS_AZUREBLOB_ACCOUNT}\" \\\n    -jar /opt/s3proxy/s3proxy \\\n    --properties /dev/null\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/AliasBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Properties;\n\nimport com.google.common.collect.ImmutableBiMap;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.hash.HashCode;\nimport com.google.common.hash.Hashing;\nimport com.google.common.io.ByteSource;\n\nimport org.assertj.core.api.Assertions;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class AliasBlobStoreTest {\n    private String containerName;\n    private String aliasContainerName;\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private 
BlobStore aliasBlobStore;\n    private List<String> createdContainers;\n\n    @Before\n    public void setUp() {\n        containerName = TestUtils.createRandomContainerName();\n        aliasContainerName = String.format(\"alias-%s\", containerName);\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        var aliasesBuilder = new ImmutableBiMap.Builder<String, String>();\n        aliasesBuilder.put(aliasContainerName, containerName);\n        aliasBlobStore = AliasBlobStore.newAliasBlobStore(\n                blobStore, aliasesBuilder.build());\n        createdContainers = new ArrayList<>();\n    }\n\n    @After\n    public void tearDown() {\n        if (this.context != null) {\n            for (String container : this.createdContainers) {\n                blobStore.deleteContainer(container);\n            }\n            context.close();\n        }\n    }\n\n    private void createContainer(String container) {\n        assertThat(aliasBlobStore.createContainerInLocation(\n                null, container)).isTrue();\n        if (container.equals(aliasContainerName)) {\n            createdContainers.add(containerName);\n        } else {\n            createdContainers.add(container);\n        }\n    }\n\n    @Test\n    public void testListNoAliasContainers() {\n        String regularContainer = TestUtils.createRandomContainerName();\n        createContainer(regularContainer);\n        PageSet<? 
extends StorageMetadata> listing = aliasBlobStore.list();\n        assertThat(listing.size()).isEqualTo(1);\n        assertThat(listing.iterator().next().getName()).isEqualTo(\n                regularContainer);\n    }\n\n    @Test\n    public void testListAliasContainer() {\n        createContainer(aliasContainerName);\n        PageSet<? extends StorageMetadata> listing = aliasBlobStore.list();\n        assertThat(listing.size()).isEqualTo(1);\n        assertThat(listing.iterator().next().getName()).isEqualTo(\n                aliasContainerName);\n        listing = blobStore.list();\n        assertThat(listing.size()).isEqualTo(1);\n        assertThat(listing.iterator().next().getName()).isEqualTo(\n                containerName);\n    }\n\n    @Test\n    public void testAliasBlob() throws IOException {\n        createContainer(aliasContainerName);\n        String blobName = TestUtils.createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        @SuppressWarnings(\"deprecation\")\n        String contentMD5 = Hashing.md5().hashBytes(content.read()).toString();\n        Blob blob = aliasBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        String eTag = aliasBlobStore.putBlob(aliasContainerName, blob);\n        assertThat(eTag).isEqualTo(contentMD5);\n        BlobMetadata blobMetadata = aliasBlobStore.blobMetadata(\n                aliasContainerName, blobName);\n        assertThat(blobMetadata.getETag()).isEqualTo(contentMD5);\n        blob = aliasBlobStore.getBlob(aliasContainerName, blobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAliasMultipartUpload() throws IOException {\n        createContainer(aliasContainerName);\n        String blobName = TestUtils.createRandomBlobName();\n        
ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        @SuppressWarnings(\"deprecation\")\n        HashCode contentHash = Hashing.md5().hashBytes(content.read());\n        Blob blob = aliasBlobStore.blobBuilder(blobName).build();\n        MultipartUpload mpu = aliasBlobStore.initiateMultipartUpload(\n                aliasContainerName, blob.getMetadata(), PutOptions.NONE);\n        assertThat(mpu.containerName()).isEqualTo(aliasContainerName);\n        MultipartPart part = aliasBlobStore.uploadMultipartPart(\n                mpu, 1, Payloads.newPayload(content));\n        assertThat(part.partETag()).isEqualTo(contentHash.toString());\n        var parts = new ImmutableList.Builder<MultipartPart>();\n        parts.add(part);\n        String mpuETag = aliasBlobStore.completeMultipartUpload(mpu,\n                parts.build());\n        @SuppressWarnings(\"deprecation\")\n        HashCode contentHash2 = Hashing.md5().hashBytes(contentHash.asBytes());\n        assertThat(mpuETag).isEqualTo(\n                String.format(\"\\\"%s-1\\\"\", contentHash2));\n        blob = aliasBlobStore.getBlob(aliasContainerName, blobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testParseDuplicateAliases() {\n        var properties = new Properties();\n        properties.setProperty(String.format(\"%s.alias\",\n                S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE), \"bucket\");\n        properties.setProperty(String.format(\"%s.other-alias\",\n                S3ProxyConstants.PROPERTY_ALIAS_BLOBSTORE), \"bucket\");\n\n        try {\n            AliasBlobStore.parseAliases(properties);\n            Assertions.failBecauseExceptionWasNotThrown(\n                    IllegalArgumentException.class);\n        } catch (IllegalArgumentException exc) {\n            
assertThat(exc.getMessage()).isEqualTo(\n                    \"Backend bucket bucket is aliased twice\");\n        }\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/AwsS3SdkBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.Properties;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.junit.Test;\n\npublic final class AwsS3SdkBlobStoreTest {\n\n    @Test\n    public void testProviderRegistration() {\n        // Verify that the provider is discoverable via jclouds\n        var providers = ContextBuilder.newBuilder(\"aws-s3-sdk\");\n        assertThat(providers).isNotNull();\n    }\n\n    @Test\n    public void testProviderMetadata() {\n        var properties = new Properties();\n        properties.setProperty(\"jclouds.identity\", \"test-identity\");\n        properties.setProperty(\"jclouds.credential\", \"test-credential\");\n        properties.setProperty(\"jclouds.endpoint\", \"http://localhost:9000\");\n\n        // This validates that the provider can be instantiated\n        // without actually connecting to a backend\n        try (BlobStoreContext context = ContextBuilder.newBuilder(\"aws-s3-sdk\")\n                .overrides(properties)\n                .buildView(BlobStoreContext.class)) {\n            assertThat(context).isNotNull();\n            assertThat(context.getBlobStore()).isNotNull();\n        }\n    }\n\n    @Test\n    public void testCustomRegionConfiguration() {\n        
var properties = new Properties();\n        properties.setProperty(\"jclouds.identity\", \"test-identity\");\n        properties.setProperty(\"jclouds.credential\", \"test-credential\");\n        properties.setProperty(\"jclouds.endpoint\", \"http://localhost:9000\");\n        properties.setProperty(\"aws-s3-sdk.region\", \"eu-west-1\");\n\n        // Verify that custom region configuration is accepted\n        try (BlobStoreContext context = ContextBuilder.newBuilder(\"aws-s3-sdk\")\n                .overrides(properties)\n                .buildView(BlobStoreContext.class)) {\n            assertThat(context).isNotNull();\n            assertThat(context.getBlobStore()).isNotNull();\n        }\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/AwsSdk2Test.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport software.amazon.awssdk.auth.credentials.AwsBasicCredentials;\nimport software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;\nimport software.amazon.awssdk.awscore.retry.AwsRetryStrategy;\nimport software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;\nimport software.amazon.awssdk.core.sync.RequestBody;\nimport software.amazon.awssdk.http.SdkHttpConfigurationOption;\nimport software.amazon.awssdk.http.apache.ApacheHttpClient;\nimport software.amazon.awssdk.regions.Region;\nimport software.amazon.awssdk.services.s3.S3Client;\nimport software.amazon.awssdk.services.s3.model.ChecksumAlgorithm;\nimport software.amazon.awssdk.services.s3.model.PutObjectRequest;\nimport software.amazon.awssdk.utils.AttributeMap;\n\npublic final class AwsSdk2Test {\n    private BlobStoreContext context;\n    private S3Client s3Client;\n    private String containerName;\n\n    @Before\n    public void setUp() throws Exception {\n        var info = TestUtils.startS3Proxy(System.getProperty(\"s3proxy.test.conf\", \"s3proxy.conf\"));\n        context = info.getBlobStore().getContext();\n\n        var attributeMap = AttributeMap.builder()\n                
.put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, true)\n                .build();\n        s3Client = S3Client.builder()\n                .credentialsProvider(\n                        StaticCredentialsProvider.create(\n                                AwsBasicCredentials.create(info.getS3Identity(), info.getS3Credential())))\n                .region(Region.US_EAST_1)\n                .endpointOverride(info.getSecureEndpoint())\n                .httpClient(ApacheHttpClient.builder()\n                        .buildWithDefaults(attributeMap))\n                .overrideConfiguration(ClientOverrideConfiguration.builder()\n                        .retryStrategy(AwsRetryStrategy.doNotRetry())\n                        .build())\n                .build();\n\n        containerName = AwsSdkTest.createRandomContainerName();\n        info.getBlobStore().createContainerInLocation(null, containerName);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (s3Client != null) {\n            s3Client.close();\n        }\n        if (context != null) {\n            context.getBlobStore().deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testPutObject() throws Exception {\n        var key = \"testPutObject\";\n        var byteSource = TestUtils.randomByteSource().slice(0, 1024);\n\n        var putRequest = PutObjectRequest.builder()\n                .bucket(containerName)\n                .key(key)\n                // TODO: parameterize test with JUnit 5\n                //.checksumAlgorithm(ChecksumAlgorithm.CRC32)\n                .checksumAlgorithm(ChecksumAlgorithm.CRC32_C)\n                //.checksumAlgorithm(ChecksumAlgorithm.SHA1)\n                //.checksumAlgorithm(ChecksumAlgorithm.SHA256)\n                .build();\n\n        s3Client.putObject(putRequest, RequestBody.fromBytes(byteSource.read()));\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/AwsSdkAnonymousTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.InputStream;\nimport java.net.HttpURLConnection;\nimport java.net.URI;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Random;\n\nimport com.amazonaws.SDKGlobalConfiguration;\nimport com.amazonaws.auth.AWSCredentials;\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.AnonymousAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.internal.SkipMd5CheckStrategy;\nimport com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;\nimport com.amazonaws.services.s3.model.ObjectMetadata;\nimport com.amazonaws.services.s3.model.S3Object;\nimport com.google.common.io.ByteSource;\n\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class AwsSdkAnonymousTest {\n    static {\n        System.setProperty(\n                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,\n                \"true\");\n        AwsSdkTest.disableSslVerification();\n    }\n\n    private static final ByteSource BYTE_SOURCE = 
ByteSource.wrap(new byte[1]);\n\n    private URI s3Endpoint;\n    private URI httpEndpoint;\n    private EndpointConfiguration s3EndpointConfig;\n    private S3Proxy s3Proxy;\n    private BlobStoreContext context;\n    private String blobStoreType;\n    private String containerName;\n    private AWSCredentials awsCreds;\n    private AmazonS3 client;\n    private String servicePath;\n\n    @Before\n    public void setUp() throws Exception {\n        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(\n                \"s3proxy-anonymous.conf\");\n        awsCreds = new AnonymousAWSCredentials();\n        context = info.getBlobStore().getContext();\n        s3Proxy = info.getS3Proxy();\n        httpEndpoint = info.getEndpoint();\n        s3Endpoint = info.getSecureEndpoint();\n        servicePath = info.getServicePath();\n        s3EndpointConfig = new EndpointConfiguration(\n                s3Endpoint.toString() + servicePath, \"us-east-1\");\n        client = AmazonS3ClientBuilder.standard()\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        containerName = createRandomContainerName();\n        info.getBlobStore().createContainerInLocation(null, containerName);\n\n        blobStoreType = context.unwrap().getProviderMetadata().getId();\n        if (Quirks.OPAQUE_ETAG.contains(blobStoreType)) {\n            System.setProperty(\n                    SkipMd5CheckStrategy\n                            .DISABLE_GET_OBJECT_MD5_VALIDATION_PROPERTY,\n                    \"true\");\n            System.setProperty(\n                    SkipMd5CheckStrategy\n                            .DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,\n                    \"true\");\n        }\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (s3Proxy != null) {\n            s3Proxy.stop();\n        }\n        if (context != null) {\n      
      context.getBlobStore().deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testListBuckets() throws Exception {\n        client.listBuckets(new ListBucketsPaginatedRequest());\n    }\n\n    @Test\n    public void testAwsV4SignatureChunkedAnonymous() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n            .withChunkedEncodingDisabled(false)\n            .withEndpointConfiguration(s3EndpointConfig)\n            .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\", BYTE_SOURCE.openStream(),\n                metadata);\n\n        S3Object object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n            InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testHealthzEndpoint() throws Exception {\n        URI baseUri = httpEndpoint != null ? httpEndpoint : s3Endpoint;\n        String path = (servicePath == null ? 
\"\" : servicePath) + \"/healthz\";\n        URI healthzUri = new URI(baseUri.getScheme(), baseUri.getUserInfo(),\n                baseUri.getHost(), baseUri.getPort(), path,\n                baseUri.getQuery(), baseUri.getFragment());\n\n        HttpURLConnection connection =\n                (HttpURLConnection) healthzUri.toURL().openConnection();\n        connection.setRequestMethod(\"GET\");\n\n        assertThat(connection.getResponseCode()).isEqualTo(200);\n\n        String body;\n        try (InputStream stream = connection.getInputStream()) {\n            body = new String(stream.readAllBytes(), StandardCharsets.UTF_8);\n        } finally {\n            connection.disconnect();\n        }\n\n        assertThat(body).contains(\"\\\"status\\\":\\\"OK\\\"\");\n        assertThat(body).contains(\"\\\"gitHash\\\":\\\"\");\n        assertThat(body).contains(\"\\\"launchTime\\\":\\\"\");\n        assertThat(body).contains(\"\\\"currentTime\\\":\\\"\");\n        assertThat(body).startsWith(\"{\").endsWith(\"}\");\n    }\n\n    private static String createRandomContainerName() {\n        return \"s3proxy-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/AwsSdkTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assume.assumeTrue;\n\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.net.URI;\nimport java.net.URL;\nimport java.net.URLConnection;\nimport java.security.KeyManagementException;\nimport java.security.NoSuchAlgorithmException;\nimport java.security.cert.X509Certificate;\nimport java.util.Date;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\nimport java.util.concurrent.TimeUnit;\n\nimport javax.net.ssl.HostnameVerifier;\nimport javax.net.ssl.HttpsURLConnection;\nimport javax.net.ssl.SSLContext;\nimport javax.net.ssl.SSLSession;\nimport javax.net.ssl.TrustManager;\nimport javax.net.ssl.X509TrustManager;\n\nimport com.amazonaws.ClientConfiguration;\nimport com.amazonaws.HttpMethod;\nimport com.amazonaws.SDKGlobalConfiguration;\nimport com.amazonaws.auth.AWSCredentials;\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.BasicAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.internal.SkipMd5CheckStrategy;\nimport 
com.amazonaws.services.s3.model.AbortMultipartUploadRequest;\nimport com.amazonaws.services.s3.model.AccessControlList;\nimport com.amazonaws.services.s3.model.AmazonS3Exception;\nimport com.amazonaws.services.s3.model.Bucket;\nimport com.amazonaws.services.s3.model.BucketLoggingConfiguration;\nimport com.amazonaws.services.s3.model.CannedAccessControlList;\nimport com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;\nimport com.amazonaws.services.s3.model.CopyObjectRequest;\nimport com.amazonaws.services.s3.model.CopyPartRequest;\nimport com.amazonaws.services.s3.model.CopyPartResult;\nimport com.amazonaws.services.s3.model.DeleteObjectsRequest;\nimport com.amazonaws.services.s3.model.DeleteObjectsResult;\nimport com.amazonaws.services.s3.model.GeneratePresignedUrlRequest;\nimport com.amazonaws.services.s3.model.GetObjectRequest;\nimport com.amazonaws.services.s3.model.GroupGrantee;\nimport com.amazonaws.services.s3.model.HeadBucketRequest;\nimport com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;\nimport com.amazonaws.services.s3.model.InitiateMultipartUploadResult;\nimport com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;\nimport com.amazonaws.services.s3.model.ListMultipartUploadsRequest;\nimport com.amazonaws.services.s3.model.ListObjectsRequest;\nimport com.amazonaws.services.s3.model.ListObjectsV2Request;\nimport com.amazonaws.services.s3.model.ListObjectsV2Result;\nimport com.amazonaws.services.s3.model.ListPartsRequest;\nimport com.amazonaws.services.s3.model.MultipartUploadListing;\nimport com.amazonaws.services.s3.model.ObjectListing;\nimport com.amazonaws.services.s3.model.ObjectMetadata;\nimport com.amazonaws.services.s3.model.ObjectTagging;\nimport com.amazonaws.services.s3.model.PartETag;\nimport com.amazonaws.services.s3.model.PartListing;\nimport com.amazonaws.services.s3.model.Permission;\nimport com.amazonaws.services.s3.model.PutObjectRequest;\nimport com.amazonaws.services.s3.model.PutObjectResult;\nimport 
com.amazonaws.services.s3.model.ResponseHeaderOverrides;\nimport com.amazonaws.services.s3.model.S3Object;\nimport com.amazonaws.services.s3.model.S3ObjectSummary;\nimport com.amazonaws.services.s3.model.SetBucketLoggingConfigurationRequest;\nimport com.amazonaws.services.s3.model.UploadPartRequest;\nimport com.amazonaws.services.s3.model.UploadPartResult;\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.io.ByteSource;\n\nimport org.assertj.core.api.Fail;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.rest.HttpClient;\nimport org.jspecify.annotations.Nullable;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Ignore;\nimport org.junit.Test;\n\npublic final class AwsSdkTest {\n    static {\n        System.setProperty(\n                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,\n                \"true\");\n        disableSslVerification();\n    }\n\n    private static final ByteSource BYTE_SOURCE = ByteSource.wrap(new byte[1]);\n    private static final ClientConfiguration V2_SIGNER_CONFIG =\n            new ClientConfiguration()\n                    .withMaxErrorRetry(0)\n                    .withSignerOverride(\"S3SignerType\");\n    private static final long MINIMUM_MULTIPART_SIZE = 5 * 1024 * 1024;\n    private static final int MINIO_PORT = 9000;\n    private static final int LOCALSTACK_PORT = 4566;\n\n    private URI s3Endpoint;\n    private EndpointConfiguration s3EndpointConfig;\n    private S3Proxy s3Proxy;\n    private BlobStoreContext context;\n    private URI blobStoreEndpoint;\n    private String blobStoreType;\n    private String containerName;\n    private AWSCredentials awsCreds;\n    private AmazonS3 client;\n    private String servicePath;\n\n    @Before\n    public void setUp() throws Exception {\n        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(\n                
System.getProperty(\"s3proxy.test.conf\", \"s3proxy.conf\"));\n        awsCreds = new BasicAWSCredentials(info.getS3Identity(),\n                info.getS3Credential());\n        context = info.getBlobStore().getContext();\n        s3Proxy = info.getS3Proxy();\n        s3Endpoint = info.getSecureEndpoint();\n        servicePath = info.getServicePath();\n        s3EndpointConfig = new EndpointConfiguration(\n                s3Endpoint.toString() + servicePath, \"us-east-1\");\n        client = AmazonS3ClientBuilder.standard()\n                .withClientConfiguration(\n                        new ClientConfiguration().withMaxErrorRetry(0))\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        containerName = createRandomContainerName();\n        info.getBlobStore().createContainerInLocation(null, containerName);\n\n        blobStoreEndpoint = URI.create(\n                context.unwrap().getProviderMetadata().getEndpoint());\n        blobStoreType = context.unwrap().getProviderMetadata().getId();\n        if (Quirks.OPAQUE_ETAG.contains(blobStoreType)) {\n            System.setProperty(\n                    SkipMd5CheckStrategy\n                            .DISABLE_GET_OBJECT_MD5_VALIDATION_PROPERTY,\n                    \"true\");\n            System.setProperty(\n                    SkipMd5CheckStrategy\n                            .DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,\n                    \"true\");\n        }\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (s3Proxy != null) {\n            s3Proxy.stop();\n        }\n        if (context != null) {\n            context.getBlobStore().deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testAwsV2Signature() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                
.withClientConfiguration(V2_SIGNER_CONFIG)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\", BYTE_SOURCE.openStream(),\n                metadata);\n\n        S3Object object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV2SignatureWithOverrideParameters() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withClientConfiguration(V2_SIGNER_CONFIG)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig).build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\", BYTE_SOURCE.openStream(),\n                metadata);\n\n        String blobName = \"foo\";\n\n        var headerOverride = new ResponseHeaderOverrides();\n\n        String expectedContentDisposition = \"attachment; \" + blobName;\n        headerOverride.setContentDisposition(expectedContentDisposition);\n\n        String expectedContentType = \"text/plain\";\n        headerOverride.setContentType(expectedContentType);\n\n        var request = new GetObjectRequest(containerName,\n                blobName);\n        request.setResponseHeaders(headerOverride);\n\n        S3Object object = client.getObject(request);\n        
assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        assertThat(object.getObjectMetadata().getContentDisposition())\n                .isEqualTo(expectedContentDisposition);\n        assertThat(object.getObjectMetadata().getContentType()).isEqualTo(\n                expectedContentType);\n        try (InputStream actual = object.getObjectContent();\n             InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4Signature() throws Exception {\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\",\n                BYTE_SOURCE.openStream(), metadata);\n\n        S3Object object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4SignatureChunkedSigned() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withChunkedEncodingDisabled(false)\n                .withPayloadSigningEnabled(true)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\",\n                BYTE_SOURCE.openStream(), metadata);\n\n        var object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                
BYTE_SOURCE.size());\n        try (var actual = object.getObjectContent();\n                var expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4SignatureNonChunked() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withChunkedEncodingDisabled(true)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\",\n                BYTE_SOURCE.openStream(), metadata);\n\n        S3Object object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4SignaturePayloadUnsigned() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withChunkedEncodingDisabled(true)\n                .withPayloadSigningEnabled(false)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"foo\",\n                BYTE_SOURCE.openStream(), metadata);\n\n        S3Object object = client.getObject(containerName, \"foo\");\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual 
= object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4SignatureBadIdentity() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withCredentials(new AWSStaticCredentialsProvider(\n                        new BasicAWSCredentials(\n                                \"bad-access-key\", awsCreds.getAWSSecretKey())))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n\n        try {\n            client.putObject(containerName, \"foo\",\n                    BYTE_SOURCE.openStream(), metadata);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"InvalidAccessKeyId\");\n        }\n    }\n\n    // This randomly fails with SocketException: Broken pipe\n    @Ignore\n    @Test\n    public void testAwsV4SignatureBadCredential() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withCredentials(new AWSStaticCredentialsProvider(\n                        new BasicAWSCredentials(\n                                awsCreds.getAWSAccessKeyId(),\n                                \"bad-secret-key\")))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n\n        try {\n            client.putObject(containerName, \"foo\",\n                    BYTE_SOURCE.openStream(), metadata);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            
assertThat(e.getErrorCode()).isEqualTo(\"SignatureDoesNotMatch\");\n        }\n    }\n\n    @Test\n    public void testAwsV2UrlSigning() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withClientConfiguration(V2_SIGNER_CONFIG)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n\n        String blobName = \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        var expiration = new Date(System.currentTimeMillis() +\n                TimeUnit.HOURS.toMillis(1));\n        URL url = client.generatePresignedUrl(containerName, blobName,\n                expiration, HttpMethod.GET);\n        try (InputStream actual = url.openStream();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV2UrlSigningWithOverrideParameters() throws Exception {\n        client = AmazonS3ClientBuilder.standard()\n                .withClientConfiguration(V2_SIGNER_CONFIG)\n                .withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig).build();\n\n        String blobName = \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        GeneratePresignedUrlRequest generatePresignedUrlRequest =\n                new GeneratePresignedUrlRequest(containerName, blobName);\n        generatePresignedUrlRequest.setMethod(HttpMethod.GET);\n\n        var headerOverride = new ResponseHeaderOverrides();\n\n        
headerOverride.setContentDisposition(\"attachment; \" + blobName);\n        headerOverride.setContentType(\"text/plain\");\n        generatePresignedUrlRequest.setResponseHeaders(headerOverride);\n\n        var expiration = new Date(System.currentTimeMillis() +\n                TimeUnit.HOURS.toMillis(1));\n        generatePresignedUrlRequest.setExpiration(expiration);\n\n        URL url = client.generatePresignedUrl(generatePresignedUrlRequest);\n        URLConnection connection =  url.openConnection();\n        try (InputStream actual = connection.getInputStream();\n             InputStream expected = BYTE_SOURCE.openStream()) {\n\n            String value = connection.getHeaderField(\"Content-Disposition\");\n            assertThat(value).isEqualTo(headerOverride.getContentDisposition());\n\n            value = connection.getHeaderField(\"Content-Type\");\n            assertThat(value).isEqualTo(headerOverride.getContentType());\n\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testAwsV4UrlSigning() throws Exception {\n        String blobName = \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        var expiration = new Date(System.currentTimeMillis() +\n                TimeUnit.HOURS.toMillis(1));\n        URL url = client.generatePresignedUrl(containerName, blobName,\n                expiration, HttpMethod.GET);\n        try (InputStream actual = url.openStream();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testMultipartCopy() throws Exception {\n        assumeTrue(!blobStoreType.equals(\"azureblob-sdk\"));\n        // B2 requires two parts to issue an MPU\n        
assumeTrue(!blobStoreType.equals(\"b2\"));\n\n        String sourceBlobName = \"testMultipartCopy-source\";\n        String targetBlobName = \"testMultipartCopy-target\";\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, sourceBlobName,\n                BYTE_SOURCE.openStream(), metadata);\n\n        InitiateMultipartUploadRequest initiateRequest =\n                new InitiateMultipartUploadRequest(containerName,\n                        targetBlobName);\n        InitiateMultipartUploadResult initResult =\n                client.initiateMultipartUpload(initiateRequest);\n        String uploadId = initResult.getUploadId();\n\n        var copyRequest = new CopyPartRequest()\n                .withDestinationBucketName(containerName)\n                .withDestinationKey(targetBlobName)\n                .withSourceBucketName(containerName)\n                .withSourceKey(sourceBlobName)\n                .withUploadId(uploadId)\n                .withFirstByte(0L)\n                .withLastByte(BYTE_SOURCE.size() - 1)\n                .withPartNumber(1);\n        CopyPartResult copyPartResult = client.copyPart(copyRequest);\n\n        CompleteMultipartUploadRequest completeRequest =\n                new CompleteMultipartUploadRequest(\n                        containerName, targetBlobName, uploadId,\n                        List.of(copyPartResult.getPartETag()));\n        client.completeMultipartUpload(completeRequest);\n\n        S3Object object = client.getObject(containerName, targetBlobName);\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testBigMultipartUpload() throws 
Exception {\n        String key = \"multipart-upload\";\n        long partSize = MINIMUM_MULTIPART_SIZE;\n        long size = partSize + 1;\n        ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);\n\n        InitiateMultipartUploadRequest initRequest =\n                new InitiateMultipartUploadRequest(containerName, key);\n        InitiateMultipartUploadResult initResponse =\n                client.initiateMultipartUpload(initRequest);\n        String uploadId = initResponse.getUploadId();\n\n        ByteSource byteSource1 = byteSource.slice(0, partSize);\n        var uploadRequest1 = new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(key)\n                .withUploadId(uploadId)\n                .withPartNumber(1)\n                .withInputStream(byteSource1.openStream())\n                .withPartSize(byteSource1.size());\n        uploadRequest1.getRequestClientOptions().setReadLimit(\n                (int) byteSource1.size());\n        UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);\n\n        ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);\n        var uploadRequest2 = new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(key)\n                .withUploadId(uploadId)\n                .withPartNumber(2)\n                .withInputStream(byteSource2.openStream())\n                .withPartSize(byteSource2.size());\n        uploadRequest2.getRequestClientOptions().setReadLimit(\n                (int) byteSource2.size());\n        UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);\n\n        CompleteMultipartUploadRequest completeRequest =\n                new CompleteMultipartUploadRequest(\n                        containerName, key, uploadId,\n                        List.of(\n                                uploadPartResult1.getPartETag(),\n                                
uploadPartResult2.getPartETag()));\n        client.completeMultipartUpload(completeRequest);\n\n        S3Object object = client.getObject(containerName, key);\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                size);\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = byteSource.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testMultipartUploadReplace() throws Exception {\n        String key = \"multipart-upload\";\n        long partSize = MINIMUM_MULTIPART_SIZE;\n        long size = partSize + 1;\n        ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);\n\n        // Create\n        InitiateMultipartUploadRequest initRequest1 =\n                new InitiateMultipartUploadRequest(containerName, key);\n        InitiateMultipartUploadResult initResponse1 =\n                client.initiateMultipartUpload(initRequest1);\n        String uploadId1 = initResponse1.getUploadId();\n\n        ByteSource byteSource1 = byteSource.slice(0, partSize);\n        var uploadRequest1 = new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(key)\n                .withUploadId(uploadId1)\n                .withPartNumber(1)\n                .withInputStream(byteSource1.openStream())\n                .withPartSize(byteSource1.size());\n        uploadRequest1.getRequestClientOptions().setReadLimit(\n                (int) byteSource1.size());\n        UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);\n\n        CompleteMultipartUploadRequest completeRequest1 =\n                new CompleteMultipartUploadRequest(\n                        containerName, key, uploadId1,\n                        List.of(uploadPartResult1.getPartETag()));\n        client.completeMultipartUpload(completeRequest1);\n\n        // Replace\n        
InitiateMultipartUploadRequest initRequest2 =\n                new InitiateMultipartUploadRequest(containerName, key);\n        InitiateMultipartUploadResult initResponse2 =\n                client.initiateMultipartUpload(initRequest2);\n        String uploadId2 = initResponse2.getUploadId();\n\n        ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);\n        var uploadRequest2 = new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(key)\n                .withUploadId(uploadId2)\n                .withPartNumber(1)\n                .withInputStream(byteSource2.openStream())\n                .withPartSize(byteSource2.size());\n        uploadRequest2.getRequestClientOptions().setReadLimit(\n                (int) byteSource2.size());\n        UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);\n\n        CompleteMultipartUploadRequest completeRequest2 =\n                new CompleteMultipartUploadRequest(\n                        containerName, key, uploadId2,\n                        List.of(uploadPartResult2.getPartETag()));\n        client.completeMultipartUpload(completeRequest2);\n\n        S3Object object = client.getObject(containerName, key);\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                byteSource2.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = byteSource2.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    // TODO: testMultipartUploadConditionalCopy\n\n    @Test\n    public void testUpdateBlobXmlAcls() throws Exception {\n        // TODO:\n        assumeTrue(!blobStoreType.equals(\"transient-nio2\"));\n        assumeTrue(!Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType));\n        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);\n\n        String blobName = \"testUpdateBlobXmlAcls-blob\";\n        var 
metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n        AccessControlList acl = client.getObjectAcl(containerName, blobName);\n\n        acl.grantPermission(GroupGrantee.AllUsers, Permission.Read);\n        client.setObjectAcl(containerName, blobName, acl);\n        assertThat(client.getObjectAcl(containerName, blobName)).isEqualTo(acl);\n\n        acl.revokeAllPermissions(GroupGrantee.AllUsers);\n        client.setObjectAcl(containerName, blobName, acl);\n        assertThat(client.getObjectAcl(containerName, blobName)).isEqualTo(acl);\n\n        acl.grantPermission(GroupGrantee.AllUsers, Permission.Write);\n        try {\n            client.setObjectAcl(containerName, blobName, acl);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"NotImplemented\");\n        }\n    }\n\n    @Test\n    public void testUnicodeObject() throws Exception {\n        String blobName = \"ŪņЇЌœđЗ/☺ unicode € rocks ™\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        metadata = client.getObjectMetadata(containerName, blobName);\n        assertThat(metadata).isNotNull();\n\n        ObjectListing listing = client.listObjects(containerName);\n        List<S3ObjectSummary> summaries = listing.getObjectSummaries();\n        assertThat(summaries).hasSize(1);\n        S3ObjectSummary summary = summaries.iterator().next();\n        assertThat(summary.getKey()).isEqualTo(blobName);\n    }\n\n    @Test\n    public void testSpecialCharacters() throws Exception {\n        // TODO: fixed in jclouds 2.6.1\n        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);\n   
     assumeTrue(blobStoreEndpoint.getPort() != LOCALSTACK_PORT);\n\n        String prefix = \"special !\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~\";\n        if (blobStoreType.equals(\"azureblob\") ||\n                blobStoreType.equals(\"azureblob-sdk\") ||\n                blobStoreType.equals(\"b2\")) {\n            prefix = prefix.replace(\"\\\\\", \"\");\n        }\n        if (blobStoreType.equals(\"azureblob\") ||\n                blobStoreType.equals(\"azureblob-sdk\")) {\n            // Avoid blob names that end with a dot (.), a forward slash (/), or\n            // a sequence or combination of the two.\n            prefix = prefix.replace(\"./\", \"/\") + \".\";\n        }\n        String blobName = prefix + \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        ObjectListing listing = client.listObjects(new ListObjectsRequest()\n                .withBucketName(containerName)\n                .withPrefix(prefix));\n        List<S3ObjectSummary> summaries = listing.getObjectSummaries();\n        assertThat(summaries).hasSize(1);\n        S3ObjectSummary summary = summaries.iterator().next();\n        assertThat(summary.getKey()).isEqualTo(blobName);\n    }\n\n    @Test\n    public void testAtomicMpuAbort() throws Exception {\n        String key = \"testAtomicMpuAbort\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, key, BYTE_SOURCE.openStream(),\n                metadata);\n\n        InitiateMultipartUploadRequest initRequest =\n                new InitiateMultipartUploadRequest(containerName, key);\n        InitiateMultipartUploadResult initResponse =\n                client.initiateMultipartUpload(initRequest);\n        String uploadId = initResponse.getUploadId();\n\n        
client.abortMultipartUpload(new AbortMultipartUploadRequest(\n                    containerName, key, uploadId));\n\n        S3Object object = client.getObject(containerName, key);\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testOverrideResponseHeader() throws Exception {\n        String blobName = \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        String cacheControl = \"no-cache\";\n        String contentDisposition = \"attachment; filename=foo.html\";\n        String contentEncoding = \"gzip\";\n        String contentLanguage = \"en\";\n        String contentType = \"text/html;charset=utf-8\";\n        String expires = \"Wed, 13 Jul 2016 21:23:51 GMT\";\n        long expiresTime = 1468445031000L;\n\n        var getObjectRequest = new GetObjectRequest(containerName,\n                blobName);\n        getObjectRequest.setResponseHeaders(\n                new ResponseHeaderOverrides()\n                    .withCacheControl(cacheControl)\n                    .withContentDisposition(contentDisposition)\n                    .withContentEncoding(contentEncoding)\n                    .withContentLanguage(contentLanguage)\n                    .withContentType(contentType)\n                    .withExpires(expires));\n        S3Object object = client.getObject(getObjectRequest);\n        try (InputStream is = object.getObjectContent()) {\n            assertThat(is).isNotNull();\n            is.transferTo(OutputStream.nullOutputStream());\n        }\n\n        ObjectMetadata responseMetadata = 
object.getObjectMetadata();\n        assertThat(responseMetadata.getCacheControl()).isEqualTo(\n                cacheControl);\n        assertThat(responseMetadata.getContentDisposition()).isEqualTo(\n                contentDisposition);\n        assertThat(responseMetadata.getContentEncoding()).isEqualTo(\n                contentEncoding);\n        assertThat(responseMetadata.getContentLanguage()).isEqualTo(\n                contentLanguage);\n        assertThat(responseMetadata.getContentType()).isEqualTo(\n                contentType);\n        assertThat(responseMetadata.getHttpExpiresDate().getTime())\n            .isEqualTo(expiresTime);\n    }\n\n    @Test\n    public void testDeleteMultipleObjectsEmpty() throws Exception {\n        var request = new DeleteObjectsRequest(containerName)\n                .withKeys();\n\n        try {\n            client.deleteObjects(request);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"MalformedXML\");\n        }\n    }\n\n    @Test\n    public void testDeleteMultipleObjects() throws Exception {\n        String blobName = \"foo\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n\n        var request = new DeleteObjectsRequest(containerName)\n                .withKeys(blobName);\n\n        // without quiet\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        DeleteObjectsResult result = client.deleteObjects(request);\n        assertThat(result.getDeletedObjects()).hasSize(1);\n        assertThat(result.getDeletedObjects().iterator().next().getKey())\n                .isEqualTo(blobName);\n\n        // with quiet\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        result = client.deleteObjects(request.withQuiet(true));\n    
    assertThat(result.getDeletedObjects()).isEmpty();\n    }\n\n    @Test\n    public void testPartNumberMarker() throws Exception {\n        String blobName = \"test-part-number-marker\";\n        InitiateMultipartUploadResult result = client.initiateMultipartUpload(\n                new InitiateMultipartUploadRequest(containerName, blobName));\n        var request = new ListPartsRequest(containerName,\n                blobName, result.getUploadId());\n\n        client.listParts(request.withPartNumberMarker(0));\n\n        try {\n            client.listParts(request.withPartNumberMarker(1));\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"NotImplemented\");\n        } finally {\n            client.abortMultipartUpload(new AbortMultipartUploadRequest(containerName, blobName, result.getUploadId()));\n        }\n    }\n\n    @Test\n    public void testHttpClient() throws Exception {\n        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);\n        // aws-s3-sdk doesn't support jclouds HTTP client\n        assumeTrue(!blobStoreType.equals(\"aws-s3-sdk\"));\n        assumeTrue(!blobStoreType.equals(\"google-cloud-storage-sdk\"));\n\n        String blobName = \"blob-name\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        if (Quirks.NO_BLOB_ACCESS_CONTROL.contains(blobStoreType)) {\n            client.setBucketAcl(containerName,\n                    CannedAccessControlList.PublicRead);\n        } else {\n            client.setObjectAcl(containerName, blobName,\n                    CannedAccessControlList.PublicRead);\n        }\n\n        HttpClient httpClient = context.utils().http();\n        var uri = new URI(s3Endpoint.getScheme(), s3Endpoint.getUserInfo(),\n              
  s3Endpoint.getHost(), s3Proxy.getSecurePort(),\n                servicePath + \"/\" + containerName + \"/\" + blobName,\n                /*query=*/ null, /*fragment=*/ null);\n        try (InputStream actual = httpClient.get(uri);\n             InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testListBuckets() throws Exception {\n        var builder = ImmutableList.<String>builder();\n        for (Bucket bucket : client.listBuckets(new ListBucketsPaginatedRequest()).getBuckets()) {\n            builder.add(bucket.getName());\n        }\n        assertThat(builder.build()).contains(containerName);\n    }\n\n    @Test\n    public void testContainerExists() throws Exception {\n        client.headBucket(new HeadBucketRequest(containerName));\n        try {\n            client.headBucket(new HeadBucketRequest(\n                    createRandomContainerName()));\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"404 Not Found\");\n        }\n    }\n\n    @Test\n    public void testContainerCreateDelete() throws Exception {\n        assumeTrue(blobStoreEndpoint.getPort() != LOCALSTACK_PORT);\n        // LocalStack in us-east-1 returns 200 OK for duplicate bucket creation (legacy S3 behavior)\n        // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html\n        assumeTrue(!blobStoreType.equals(\"aws-s3-sdk\"));\n        String containerName2 = createRandomContainerName();\n        client.createBucket(containerName2);\n        try {\n            client.createBucket(containerName2);\n            client.deleteBucket(containerName2);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            
assertThat(e.getErrorCode()).isEqualTo(\"BucketAlreadyOwnedByYou\");\n        }\n    }\n\n    @Test\n    public void testContainerDelete() throws Exception {\n        client.headBucket(new HeadBucketRequest(containerName));\n        client.deleteBucket(containerName);\n        try {\n            client.headBucket(new HeadBucketRequest(containerName));\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"404 Not Found\");\n        }\n    }\n\n    private void putBlobAndCheckIt(String blobName) throws Exception {\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        S3Object object = client.getObject(containerName, blobName);\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testBlobPutGet() throws Exception {\n        putBlobAndCheckIt(\"blob\");\n        putBlobAndCheckIt(\"blob%\");\n        putBlobAndCheckIt(\"blob%%\");\n    }\n\n    @Test\n    public void testBlobEscape() throws Exception {\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).isEmpty();\n\n        putBlobAndCheckIt(\"blob%\");\n\n        listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).hasSize(1);\n        assertThat(listing.getObjectSummaries().iterator().next().getKey())\n                .isEqualTo(\"blob%\");\n    }\n\n    @Test\n    public void testBlobList() throws Exception {\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).isEmpty();\n\n        
var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n\n        var builder = ImmutableList.<String>builder();\n        client.putObject(containerName, \"blob1\", BYTE_SOURCE.openStream(),\n                metadata);\n        listing = client.listObjects(containerName);\n        for (S3ObjectSummary summary : listing.getObjectSummaries()) {\n            builder.add(summary.getKey());\n        }\n        assertThat(builder.build()).containsOnly(\"blob1\");\n\n        builder = ImmutableList.builder();\n        client.putObject(containerName, \"blob2\", BYTE_SOURCE.openStream(),\n                metadata);\n        listing = client.listObjects(containerName);\n        for (S3ObjectSummary summary : listing.getObjectSummaries()) {\n            builder.add(summary.getKey());\n        }\n        assertThat(builder.build()).containsOnly(\"blob1\", \"blob2\");\n    }\n\n    @Test\n    public void testBlobListRecursive() throws Exception {\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).isEmpty();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"prefix/blob1\",\n                BYTE_SOURCE.openStream(), metadata);\n        client.putObject(containerName, \"prefix/blob2\",\n                BYTE_SOURCE.openStream(), metadata);\n\n        var builder = ImmutableList.<String>builder();\n        listing = client.listObjects(new ListObjectsRequest()\n                .withBucketName(containerName)\n                .withDelimiter(\"/\"));\n        assertThat(listing.getObjectSummaries()).isEmpty();\n        for (String prefix : listing.getCommonPrefixes()) {\n            builder.add(prefix);\n        }\n        assertThat(builder.build()).containsOnly(\"prefix/\");\n\n        builder = ImmutableList.builder();\n        listing = client.listObjects(containerName);\n        for 
(S3ObjectSummary summary : listing.getObjectSummaries()) {\n            builder.add(summary.getKey());\n        }\n        assertThat(builder.build()).containsOnly(\"prefix/blob1\",\n                \"prefix/blob2\");\n        assertThat(listing.getCommonPrefixes()).isEmpty();\n    }\n\n    @Test\n    public void testBlobListRecursiveImplicitMarker() throws Exception {\n        assumeTrue(!Quirks.OPAQUE_MARKERS.contains(blobStoreType));\n        assumeTrue(!blobStoreType.equals(\"transient-nio2\"));  // TODO:\n\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).isEmpty();\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, \"blob1\", BYTE_SOURCE.openStream(),\n                metadata);\n        client.putObject(containerName, \"blob2\", BYTE_SOURCE.openStream(),\n                metadata);\n\n        listing = client.listObjects(new ListObjectsRequest()\n                .withBucketName(containerName)\n                .withMaxKeys(1));\n        assertThat(listing.getObjectSummaries()).hasSize(1);\n        assertThat(listing.getObjectSummaries().iterator().next().getKey())\n                .isEqualTo(\"blob1\");\n\n        listing = client.listObjects(new ListObjectsRequest()\n                .withBucketName(containerName)\n                .withMaxKeys(1)\n                .withMarker(\"blob1\"));\n        assertThat(listing.getObjectSummaries()).hasSize(1);\n        assertThat(listing.getObjectSummaries().iterator().next().getKey())\n                .isEqualTo(\"blob2\");\n    }\n\n    @Test\n    public void testBlobListV2() throws Exception {\n        assumeTrue(!Quirks.OPAQUE_MARKERS.contains(blobStoreType));\n\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        for (int i = 1; i < 5; ++i) {\n            client.putObject(containerName, 
String.valueOf(i),\n                    BYTE_SOURCE.openStream(), metadata);\n        }\n\n        ListObjectsV2Result result = client.listObjectsV2(\n                new ListObjectsV2Request()\n                .withBucketName(containerName)\n                .withMaxKeys(1)\n                .withStartAfter(\"1\"));\n        assertThat(result.getContinuationToken()).isEmpty();\n        assertThat(result.getStartAfter()).isEqualTo(\"1\");\n        if (blobStoreEndpoint.getPort() != MINIO_PORT) {\n            // Minio returns \"2[minio_cache:v2,return:]\"\n            assertThat(result.getNextContinuationToken()).isEqualTo(\"2\");\n        }\n        assertThat(result.isTruncated()).isTrue();\n        assertThat(result.getObjectSummaries()).hasSize(1);\n        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo(\"2\");\n\n        result = client.listObjectsV2(\n                new ListObjectsV2Request()\n                .withBucketName(containerName)\n                .withMaxKeys(1)\n                .withContinuationToken(result.getNextContinuationToken()));\n        if (blobStoreEndpoint.getPort() != MINIO_PORT) {\n            // Minio returns \"2[minio_cache:v2,return:]\"\n            assertThat(result.getContinuationToken()).isEqualTo(\"2\");\n            assertThat(result.getNextContinuationToken()).isEqualTo(\"3\");\n        }\n        assertThat(result.getStartAfter()).isEmpty();\n        assertThat(result.isTruncated()).isTrue();\n        assertThat(result.getObjectSummaries()).hasSize(1);\n        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo(\"3\");\n\n        result = client.listObjectsV2(\n                new ListObjectsV2Request()\n                .withBucketName(containerName)\n                .withMaxKeys(1)\n                .withContinuationToken(result.getNextContinuationToken()));\n        if (blobStoreEndpoint.getPort() != MINIO_PORT) {\n            // Minio returns \"3[minio_cache:v2,return:]\"\n            
assertThat(result.getContinuationToken()).isEqualTo(\"3\");\n            assertThat(result.getNextContinuationToken()).isNull();\n        }\n        assertThat(result.getStartAfter()).isEmpty();\n        if (blobStoreEndpoint.getPort() != MINIO_PORT) {\n            // TODO: why does this fail?\n            assertThat(result.isTruncated()).isFalse();\n        }\n        assertThat(result.getObjectSummaries()).hasSize(1);\n        assertThat(result.getObjectSummaries().get(0).getKey()).isEqualTo(\"4\");\n    }\n\n    @Test\n    public void testBlobMetadata() throws Exception {\n        String blobName = \"blob\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        ObjectMetadata newMetadata = client.getObjectMetadata(containerName,\n                blobName);\n        assertThat(newMetadata.getContentLength())\n                .isEqualTo(BYTE_SOURCE.size());\n    }\n\n    @Test\n    public void testBlobRemove() throws Exception {\n        String blobName = \"blob\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n        assertThat(client.getObjectMetadata(containerName, blobName))\n                .isNotNull();\n\n        client.deleteObject(containerName, blobName);\n        try {\n            client.getObjectMetadata(containerName, blobName);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"404 Not Found\");\n        }\n\n        client.deleteObject(containerName, blobName);\n    }\n\n    @Test\n    public void testSinglepartUploadJettyCachedHeader() throws Exception {\n        String blobName = 
\"singlepart-upload-jetty-cached\";\n        String contentType = \"text/plain\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        metadata.setContentType(contentType);\n\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n            metadata);\n\n        S3Object object = client.getObject(containerName, blobName);\n        try (InputStream actual = object.getObjectContent();\n             InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n        ObjectMetadata newContentMetadata = object.getObjectMetadata();\n        assertThat(newContentMetadata.getContentType()).isEqualTo(\n            contentType);\n    }\n\n    @Test\n    public void testSinglepartUpload() throws Exception {\n        String blobName = \"singlepart-upload\";\n        String cacheControl = \"max-age=3600\";\n        String contentDisposition = \"attachment; filename=new.jpg\";\n        String contentEncoding = \"gzip\";\n        String contentLanguage = \"fr\";\n        String contentType = \"audio/mp4\";\n        var userMetadata = Map.of(\n                \"key1\", \"value1\",\n                \"key2\", \"value2\");\n        var metadata = new ObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            metadata.setCacheControl(cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            metadata.setContentDisposition(contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            metadata.setContentEncoding(contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            metadata.setContentLanguage(contentLanguage);\n        }\n        metadata.setContentLength(BYTE_SOURCE.size());\n        metadata.setContentType(contentType);\n        // TODO: 
expires\n        metadata.setUserMetadata(userMetadata);\n\n        client.putObject(containerName, blobName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        S3Object object = client.getObject(containerName, blobName);\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n        ObjectMetadata newContentMetadata = object.getObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getCacheControl()).isEqualTo(\n                    cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentDisposition()).isEqualTo(\n                    contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentEncoding()).isEqualTo(\n                    contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentLanguage()).isEqualTo(\n                    contentLanguage);\n        }\n        assertThat(newContentMetadata.getContentType()).isEqualTo(\n                contentType);\n        // TODO: expires\n        assertThat(newContentMetadata.getUserMetadata()).isEqualTo(\n                userMetadata);\n    }\n\n    // TODO: fails for GCS (jclouds not implemented)\n    @Test\n    public void testMultipartUpload() throws Exception {\n\n        String blobName = \"multipart-upload\";\n        String cacheControl = \"max-age=3600\";\n        String contentDisposition = \"attachment; filename=new.jpg\";\n        String contentEncoding = \"gzip\";\n        String contentLanguage = \"fr\";\n        String contentType = \"audio/mp4\";\n        var userMetadata = Map.of(\n         
       \"key1\", \"value1\",\n                \"key2\", \"value2\");\n        var metadata = new ObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            metadata.setCacheControl(cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            metadata.setContentDisposition(contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            metadata.setContentEncoding(contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            metadata.setContentLanguage(contentLanguage);\n        }\n        metadata.setContentType(contentType);\n        // TODO: expires\n        metadata.setUserMetadata(userMetadata);\n        InitiateMultipartUploadResult result = client.initiateMultipartUpload(\n                new InitiateMultipartUploadRequest(containerName, blobName,\n                        metadata));\n\n        ByteSource byteSource = TestUtils.randomByteSource().slice(\n                0, MINIMUM_MULTIPART_SIZE + 1);\n        ByteSource byteSource1 = byteSource.slice(0, MINIMUM_MULTIPART_SIZE);\n        ByteSource byteSource2 = byteSource.slice(MINIMUM_MULTIPART_SIZE, 1);\n        UploadPartResult part1 = client.uploadPart(new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(blobName)\n                .withUploadId(result.getUploadId())\n                .withPartNumber(1)\n                .withPartSize(byteSource1.size())\n                .withInputStream(byteSource1.openStream()));\n        UploadPartResult part2 = client.uploadPart(new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(blobName)\n                .withUploadId(result.getUploadId())\n                .withPartNumber(2)\n                .withPartSize(byteSource2.size())\n                .withInputStream(byteSource2.openStream()));\n\n    
    client.completeMultipartUpload(new CompleteMultipartUploadRequest(\n                containerName, blobName, result.getUploadId(),\n                List.of(part1.getPartETag(), part2.getPartETag())));\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).hasSize(1);\n\n        S3Object object = client.getObject(containerName, blobName);\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = byteSource.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n        ObjectMetadata newContentMetadata = object.getObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getCacheControl()).isEqualTo(\n                    cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentDisposition()).isEqualTo(\n                    contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentEncoding()).isEqualTo(\n                    contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            assertThat(newContentMetadata.getContentLanguage()).isEqualTo(\n                    contentLanguage);\n        }\n        assertThat(newContentMetadata.getContentType()).isEqualTo(\n                contentType);\n        // TODO: expires\n        assertThat(newContentMetadata.getUserMetadata()).isEqualTo(\n                userMetadata);\n    }\n\n    // this test runs for several minutes\n    @Ignore\n    @Test\n    public void testMaximumMultipartUpload() throws Exception {\n        // skip with remote blobstores to avoid excessive run-times\n        assumeTrue(blobStoreType.equals(\"filesystem\") ||\n                
blobStoreType.equals(\"transient\"));\n\n        String blobName = \"multipart-upload\";\n        int numParts = 32;\n        long partSize = MINIMUM_MULTIPART_SIZE;\n        ByteSource byteSource = TestUtils.randomByteSource().slice(\n                0, partSize * numParts);\n\n        InitiateMultipartUploadResult result = client.initiateMultipartUpload(\n                new InitiateMultipartUploadRequest(containerName, blobName));\n        var parts = ImmutableList.<PartETag>builder();\n\n        for (int i = 0; i < numParts; ++i) {\n            ByteSource partByteSource = byteSource.slice(\n                    i * partSize, partSize);\n            UploadPartResult partResult = client.uploadPart(\n                    new UploadPartRequest()\n                    .withBucketName(containerName)\n                    .withKey(blobName)\n                    .withUploadId(result.getUploadId())\n                    .withPartNumber(i + 1)\n                    .withPartSize(partByteSource.size())\n                    .withInputStream(partByteSource.openStream()));\n            parts.add(partResult.getPartETag());\n        }\n\n        client.completeMultipartUpload(new CompleteMultipartUploadRequest(\n                containerName, blobName, result.getUploadId(), parts.build()));\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).hasSize(1);\n\n        S3Object object = client.getObject(containerName, blobName);\n        ObjectMetadata contentMetadata = object.getObjectMetadata();\n        assertThat(contentMetadata.getContentLength()).isEqualTo(\n                partSize * numParts);\n\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = byteSource.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testMultipartUploadAbort() throws Exception {\n        
assumeTrue(!blobStoreType.equals(\"google-cloud-storage\"));\n        // TODO: fixed in jclouds 2.6.1\n        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);\n\n        String blobName = \"multipart-upload-abort\";\n        ByteSource byteSource = TestUtils.randomByteSource().slice(\n                0, MINIMUM_MULTIPART_SIZE);\n\n        InitiateMultipartUploadResult result = client.initiateMultipartUpload(\n                new InitiateMultipartUploadRequest(containerName, blobName));\n\n        // TODO: google-cloud-storage and openstack-swift cannot list multipart\n        // uploads\n        MultipartUploadListing multipartListing = client.listMultipartUploads(\n                new ListMultipartUploadsRequest(containerName));\n        assertThat(multipartListing.getMultipartUploads()).hasSize(1);\n\n        PartListing partListing = client.listParts(new ListPartsRequest(\n                containerName, blobName, result.getUploadId()));\n        assertThat(partListing.getParts()).isEmpty();\n\n        client.uploadPart(new UploadPartRequest()\n                .withBucketName(containerName)\n                .withKey(blobName)\n                .withUploadId(result.getUploadId())\n                .withPartNumber(1)\n                .withPartSize(byteSource.size())\n                .withInputStream(byteSource.openStream()));\n\n        multipartListing = client.listMultipartUploads(\n                new ListMultipartUploadsRequest(containerName));\n        assertThat(multipartListing.getMultipartUploads()).hasSize(1);\n\n        partListing = client.listParts(new ListPartsRequest(\n                containerName, blobName, result.getUploadId()));\n        assertThat(partListing.getParts()).hasSize(1);\n\n        client.abortMultipartUpload(new AbortMultipartUploadRequest(\n                containerName, blobName, result.getUploadId()));\n\n        multipartListing = client.listMultipartUploads(\n                new 
ListMultipartUploadsRequest(containerName));\n        assertThat(multipartListing.getMultipartUploads()).isEmpty();\n\n        ObjectListing listing = client.listObjects(containerName);\n        assertThat(listing.getObjectSummaries()).isEmpty();\n    }\n\n    // TODO: Fails since B2 returns the Cache-Control header on reads but does\n    // not accept it on writes.\n    @Test\n    public void testCopyObjectPreserveMetadata() throws Exception {\n        if (blobStoreType.equals(\"azureblob\") ||\n                blobStoreType.equals(\"azureblob-sdk\")) {\n            // Azurite does not support copying blobs\n            assumeTrue(!blobStoreEndpoint.getHost().equals(\"127.0.0.1\"));\n        }\n\n        String fromName = \"from-name\";\n        String toName = \"to-name\";\n        String cacheControl = \"max-age=3600\";\n        String contentDisposition = \"attachment; filename=old.jpg\";\n        String contentEncoding = \"gzip\";\n        String contentLanguage = \"en\";\n        String contentType = \"audio/ogg\";\n        var userMetadata = Map.of(\n                \"key1\", \"value1\",\n                \"key2\", \"value2\");\n        var metadata = new ObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            metadata.setCacheControl(cacheControl);\n        }\n        metadata.setContentLength(BYTE_SOURCE.size());\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            metadata.setContentDisposition(contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            metadata.setContentEncoding(contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            metadata.setContentLanguage(contentLanguage);\n        }\n        metadata.setContentType(contentType);\n        // TODO: expires\n        metadata.setUserMetadata(userMetadata);\n        client.putObject(containerName, fromName, 
BYTE_SOURCE.openStream(),\n                metadata);\n\n        client.copyObject(containerName, fromName, containerName, toName);\n\n        S3Object object = client.getObject(containerName, toName);\n\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n\n        ObjectMetadata contentMetadata = object.getObjectMetadata();\n        assertThat(contentMetadata.getContentLength()).isEqualTo(\n                BYTE_SOURCE.size());\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            assertThat(contentMetadata.getCacheControl()).isEqualTo(\n                    cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            assertThat(contentMetadata.getContentDisposition()).isEqualTo(\n                    contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            assertThat(contentMetadata.getContentEncoding()).isEqualTo(\n                    contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            assertThat(contentMetadata.getContentLanguage()).isEqualTo(\n                    contentLanguage);\n        }\n        assertThat(contentMetadata.getContentType()).isEqualTo(\n                contentType);\n        // TODO: expires\n        assertThat(contentMetadata.getUserMetadata()).isEqualTo(\n                userMetadata);\n    }\n\n    @Test\n    public void testCopyObjectReplaceMetadata() throws Exception {\n        if (blobStoreType.equals(\"azureblob\") ||\n                blobStoreType.equals(\"azureblob-sdk\")) {\n            // Azurite does not support copying blobs\n            assumeTrue(!blobStoreEndpoint.getHost().equals(\"127.0.0.1\"));\n        }\n\n        String fromName = \"from-name\";\n        String toName = 
\"to-name\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            metadata.setCacheControl(\"max-age=3600\");\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            metadata.setContentDisposition(\"attachment; filename=old.jpg\");\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            metadata.setContentEncoding(\"compress\");\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            metadata.setContentLanguage(\"en\");\n        }\n        metadata.setContentType(\"audio/ogg\");\n        // TODO: expires\n        metadata.setUserMetadata(Map.of(\n                        \"key1\", \"value1\",\n                        \"key2\", \"value2\"));\n        client.putObject(containerName, fromName, BYTE_SOURCE.openStream(),\n                metadata);\n\n        String cacheControl = \"max-age=1800\";\n        String contentDisposition = \"attachment; filename=new.jpg\";\n        String contentEncoding = \"gzip\";\n        String contentLanguage = \"fr\";\n        String contentType = \"audio/mp4\";\n        var contentMetadata = new ObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            contentMetadata.setCacheControl(cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            contentMetadata.setContentDisposition(contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            contentMetadata.setContentEncoding(contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            contentMetadata.setContentLanguage(contentLanguage);\n        }\n        contentMetadata.setContentType(contentType);\n        // TODO: expires\n        var userMetadata 
= Map.of(\n                \"key3\", \"value3\",\n                \"key4\", \"value4\");\n        contentMetadata.setUserMetadata(userMetadata);\n        client.copyObject(new CopyObjectRequest(\n                    containerName, fromName, containerName, toName)\n                            .withNewObjectMetadata(contentMetadata));\n\n        S3Object object = client.getObject(containerName, toName);\n\n        try (InputStream actual = object.getObjectContent();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n\n        ObjectMetadata toContentMetadata = object.getObjectMetadata();\n        if (!Quirks.NO_CACHE_CONTROL_SUPPORT.contains(blobStoreType)) {\n            assertThat(toContentMetadata.getCacheControl()).isEqualTo(\n                    cacheControl);\n        }\n        if (!Quirks.NO_CONTENT_DISPOSITION.contains(blobStoreType)) {\n            assertThat(toContentMetadata.getContentDisposition()).isEqualTo(\n                    contentDisposition);\n        }\n        if (!Quirks.NO_CONTENT_ENCODING.contains(blobStoreType)) {\n            assertThat(toContentMetadata.getContentEncoding()).isEqualTo(\n                    contentEncoding);\n        }\n        if (!Quirks.NO_CONTENT_LANGUAGE.contains(blobStoreType)) {\n            assertThat(toContentMetadata.getContentLanguage()).isEqualTo(\n                    contentLanguage);\n        }\n        assertThat(toContentMetadata.getContentType()).isEqualTo(\n                contentType);\n        // TODO: expires\n        assertThat(toContentMetadata.getUserMetadata()).isEqualTo(\n                userMetadata);\n    }\n\n    @Test\n    public void testConditionalGet() throws Exception {\n        assumeTrue(!blobStoreType.equals(\"b2\"));\n        // TODO:\n        assumeTrue(!blobStoreType.equals(\"google-cloud-storage-sdk\"));\n        assumeTrue(!blobStoreType.equals(\"transient-nio2\"));\n\n        String blobName = 
\"blob-name\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        PutObjectResult result = client.putObject(containerName, blobName,\n                BYTE_SOURCE.openStream(), metadata);\n\n        S3Object object = client.getObject(\n                new GetObjectRequest(containerName, blobName)\n                        .withMatchingETagConstraint(result.getETag()));\n        try (InputStream is = object.getObjectContent()) {\n            assertThat(is).isNotNull();\n            is.transferTo(OutputStream.nullOutputStream());\n        }\n\n        object = client.getObject(\n                new GetObjectRequest(containerName, blobName)\n                        .withNonmatchingETagConstraint(result.getETag()));\n        assertThat(object).isNull();\n    }\n\n    @Test\n    public void testStorageClass() throws Exception {\n        // Minio only supports STANDARD and REDUCED_REDUNDANCY\n        assumeTrue(blobStoreEndpoint.getPort() != MINIO_PORT);\n        // TODO:\n        assumeTrue(!blobStoreType.equals(\"google-cloud-storage-sdk\"));\n        String blobName = \"test-storage-class\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        var request = new PutObjectRequest(\n                containerName, blobName, BYTE_SOURCE.openStream(), metadata)\n                .withStorageClass(\"STANDARD_IA\");\n        client.putObject(request);\n        metadata = client.getObjectMetadata(containerName, blobName);\n        assertThat(metadata.getStorageClass()).isEqualTo(\"STANDARD_IA\");\n    }\n\n    @Test\n    public void testGetObjectRange() throws Exception {\n        var blobName = \"test-range\";\n        var metadata = new ObjectMetadata();\n        var byteSource = TestUtils.randomByteSource().slice(0, 1024);\n        metadata.setContentLength(byteSource.size());\n        var request = new PutObjectRequest(\n                containerName, 
blobName, byteSource.openStream(), metadata);\n        client.putObject(request);\n\n        var object = client.getObject(\n                new GetObjectRequest(containerName, blobName)\n                        .withRange(42, 101));\n        assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(\n                101 - 42 + 1);\n        try (var actual = object.getObjectContent();\n             var expected = byteSource.slice(42, 101 - 42 + 1).openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testUnknownHeader() throws Exception {\n        String blobName = \"test-unknown-header\";\n        var metadata = new ObjectMetadata();\n        metadata.setContentLength(BYTE_SOURCE.size());\n        var request = new PutObjectRequest(\n                containerName, blobName, BYTE_SOURCE.openStream(), metadata)\n                .withTagging(new ObjectTagging(List.of()));\n        try {\n            client.putObject(request);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"NotImplemented\");\n        }\n    }\n\n    @Test\n    public void testGetBucketPolicy() throws Exception {\n        try {\n            client.getBucketPolicy(containerName);\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"NoSuchPolicy\");\n        }\n    }\n\n    @Test\n    public void testUnknownParameter() throws Exception {\n        try {\n            client.setBucketLoggingConfiguration(\n                    new SetBucketLoggingConfigurationRequest(\n                            containerName, new BucketLoggingConfiguration()));\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            
assertThat(e.getErrorCode()).isEqualTo(\"NotImplemented\");\n        }\n    }\n\n    @Test\n    public void testBlobStoreLocator() throws Exception {\n        assumeTrue(blobStoreType.equals(\"filesystem\") ||\n                blobStoreType.equals(\"transient\"));\n        final BlobStore blobStore1 = context.getBlobStore();\n        final BlobStore blobStore2 = ContextBuilder\n                .newBuilder(blobStoreType)\n                .credentials(\"other-identity\", \"credential\")\n                .build(BlobStoreContext.class)\n                .getBlobStore();\n        s3Proxy.setBlobStoreLocator(new BlobStoreLocator() {\n            @Override\n            public Map.@Nullable Entry<String, BlobStore> locateBlobStore(\n                    String identity, String container, String blob) {\n                if (identity.equals(awsCreds.getAWSAccessKeyId())) {\n                    return Map.entry(awsCreds.getAWSSecretKey(), blobStore1);\n                } else if (identity.equals(\"other-identity\")) {\n                    return Map.entry(\"credential\", blobStore2);\n                } else {\n                    return null;\n                }\n            }\n        });\n\n        // check first access key\n        var buckets = client.listBuckets(new ListBucketsPaginatedRequest()).getBuckets();\n        assertThat(buckets).hasSize(1);\n        assertThat(buckets.get(0).getName()).isEqualTo(containerName);\n\n        // check second access key\n        client = AmazonS3ClientBuilder.standard()\n                .withClientConfiguration(\n                        new ClientConfiguration().withMaxErrorRetry(0))\n                .withCredentials(new AWSStaticCredentialsProvider(\n                        new BasicAWSCredentials(\"other-identity\",\n                                \"credential\")))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n        buckets = client.listBuckets(new 
ListBucketsPaginatedRequest()).getBuckets();\n        assertThat(buckets).isEmpty();\n\n        // check invalid access key\n        client = AmazonS3ClientBuilder.standard()\n                .withCredentials(new AWSStaticCredentialsProvider(\n                        new BasicAWSCredentials(\"bad-identity\", \"credential\")))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n        try {\n            client.listBuckets(new ListBucketsPaginatedRequest());\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            assertThat(e.getErrorCode()).isEqualTo(\"InvalidAccessKeyId\");\n        }\n    }\n\n    @Test\n    public void testCopyRelativePath() throws Exception {\n        assumeTrue(!blobStoreType.equals(\"azureblob-sdk\"));\n        try {\n            client.copyObject(new CopyObjectRequest(\n                    containerName, \"../evil.txt\", containerName, \"good.txt\"));\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            // expected\n        }\n    }\n\n    @Test\n    public void testDeleteRelativePath() throws Exception {\n        try {\n            client.deleteObject(containerName, \"../evil.txt\");\n            if (blobStoreType.equals(\"filesystem\") || blobStoreType.equals(\"filesystem-nio2\") || blobStoreType.equals(\"transient-nio2\")) {\n                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n            }\n        } catch (AmazonS3Exception e) {\n            // expected\n        }\n    }\n\n    @Test\n    public void testGetRelativePath() throws Exception {\n        try {\n            client.getObject(containerName, \"../evil.txt\");\n            Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n        } catch (AmazonS3Exception e) {\n            // expected\n        }\n    }\n\n    @Test\n    public void 
testPutRelativePath() throws Exception {\n        try {\n            var metadata = new ObjectMetadata();\n            metadata.setContentLength(BYTE_SOURCE.size());\n            client.putObject(containerName, \"../evil.txt\",\n                    BYTE_SOURCE.openStream(), metadata);\n            if (blobStoreType.equals(\"filesystem\") || blobStoreType.equals(\"filesystem-nio2\") || blobStoreType.equals(\"transient-nio2\")) {\n                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n            }\n        } catch (AmazonS3Exception e) {\n            // expected\n        }\n    }\n\n    @Test\n    public void testListRelativePath() throws Exception {\n        assumeTrue(!blobStoreType.equals(\"filesystem\"));\n        try {\n            client.listObjects(new ListObjectsRequest()\n                    .withBucketName(containerName)\n                    .withPrefix(\"../evil/\"));\n            if (blobStoreType.equals(\"filesystem\") || blobStoreType.equals(\"filesystem-nio2\") || blobStoreType.equals(\"transient-nio2\")) {\n                Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);\n            }\n        } catch (AmazonS3Exception e) {\n            // expected\n        }\n    }\n\n    private static final class NullX509TrustManager\n            implements X509TrustManager {\n        @Override\n        @Nullable\n        public X509Certificate[] getAcceptedIssuers() {\n            return null;\n        }\n\n        @Override\n        public void checkClientTrusted(X509Certificate[] certs,\n                String authType) {\n        }\n\n        @Override\n        public void checkServerTrusted(X509Certificate[] certs,\n                String authType) {\n        }\n    }\n\n    static void disableSslVerification() {\n        try {\n            // Create a trust manager that does not validate certificate chains\n            var trustAllCerts = new TrustManager[] {\n                new NullX509TrustManager() };\n\n        
    // Install the all-trusting trust manager\n            SSLContext sc = SSLContext.getInstance(\"SSL\");\n            sc.init(null, trustAllCerts, new java.security.SecureRandom());\n            HttpsURLConnection.setDefaultSSLSocketFactory(\n                    sc.getSocketFactory());\n\n            // Create all-trusting host name verifier\n            var allHostsValid = new HostnameVerifier() {\n                @Override\n                public boolean verify(String hostname, SSLSession session) {\n                    return true;\n                }\n            };\n\n            // Install the all-trusting host verifier\n            HttpsURLConnection.setDefaultHostnameVerifier(allHostsValid);\n        } catch (KeyManagementException | NoSuchAlgorithmException e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n    static String createRandomContainerName() {\n        return \"s3proxy-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingAllowAllResponseTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.net.URI;\nimport java.nio.charset.StandardCharsets;\nimport java.security.KeyManagementException;\nimport java.security.KeyStoreException;\nimport java.security.NoSuchAlgorithmException;\nimport java.security.cert.CertificateException;\nimport java.security.cert.X509Certificate;\nimport java.util.Date;\nimport java.util.Random;\nimport java.util.concurrent.TimeUnit;\n\nimport com.amazonaws.HttpMethod;\nimport com.amazonaws.SDKGlobalConfiguration;\nimport com.amazonaws.auth.AWSCredentials;\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.BasicAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.model.CannedAccessControlList;\nimport com.google.common.io.ByteSource;\nimport com.google.common.net.HttpHeaders;\n\nimport org.apache.http.HttpResponse;\nimport org.apache.http.HttpStatus;\nimport org.apache.http.client.methods.HttpGet;\nimport org.apache.http.client.methods.HttpOptions;\nimport org.apache.http.config.Registry;\nimport org.apache.http.config.RegistryBuilder;\nimport 
org.apache.http.conn.socket.ConnectionSocketFactory;\nimport org.apache.http.conn.socket.PlainConnectionSocketFactory;\nimport org.apache.http.conn.ssl.NoopHostnameVerifier;\nimport org.apache.http.conn.ssl.SSLConnectionSocketFactory;\nimport org.apache.http.conn.ssl.TrustStrategy;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.apache.http.impl.conn.PoolingHttpClientConnectionManager;\nimport org.apache.http.ssl.SSLContextBuilder;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class CrossOriginResourceSharingAllowAllResponseTest {\n    static {\n        System.setProperty(\n                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,\n                \"true\");\n        AwsSdkTest.disableSslVerification();\n    }\n\n    private URI s3Endpoint;\n    private EndpointConfiguration s3EndpointConfig;\n    private S3Proxy s3Proxy;\n    private BlobStoreContext context;\n    private String containerName;\n    private AWSCredentials awsCreds;\n    private AmazonS3 s3Client;\n    private String servicePath;\n    private CloseableHttpClient httpClient;\n    private URI presignedGET;\n\n    @Before\n    public void setUp() throws Exception {\n        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(\n                \"s3proxy-cors-allow-all.conf\");\n        awsCreds = new BasicAWSCredentials(info.getS3Identity(),\n                info.getS3Credential());\n        context = info.getBlobStore().getContext();\n        s3Proxy = info.getS3Proxy();\n        s3Endpoint = info.getSecureEndpoint();\n        servicePath = info.getServicePath();\n        s3EndpointConfig = new EndpointConfiguration(\n                s3Endpoint.toString() + servicePath, \"us-east-1\");\n        s3Client = AmazonS3ClientBuilder.standard()\n                .withCredentials(new 
AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n        httpClient = getHttpClient();\n\n        containerName = createRandomContainerName();\n        info.getBlobStore().createContainerInLocation(null, containerName);\n\n        s3Client.setBucketAcl(containerName,\n                CannedAccessControlList.PublicRead);\n\n        String blobName = \"test\";\n        ByteSource payload = ByteSource.wrap(\"blob-content\".getBytes(\n                StandardCharsets.UTF_8));\n        Blob blob = info.getBlobStore().blobBuilder(blobName)\n                .payload(payload).contentLength(payload.size()).build();\n        info.getBlobStore().putBlob(containerName, blob);\n\n        var expiration = new Date(System.currentTimeMillis() +\n                TimeUnit.HOURS.toMillis(1));\n        presignedGET = s3Client.generatePresignedUrl(containerName, blobName,\n                expiration, HttpMethod.GET).toURI();\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (s3Proxy != null) {\n            s3Proxy.stop();\n        }\n        if (context != null) {\n            context.getBlobStore().deleteContainer(containerName);\n            context.close();\n        }\n        if (httpClient != null) {\n            httpClient.close();\n        }\n    }\n\n    @Test\n    public void testCorsPreflight() throws Exception {\n        // Allowed origin, method and header combination\n        var request = new HttpOptions(presignedGET);\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS,\n                \"Accept, Content-Type\");\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        
assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"*\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                .isEqualTo(\"GET, HEAD, PUT, POST, DELETE\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue())\n                .isEqualTo(\"Accept, Content-Type\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"*\");\n    }\n\n    @Test\n    public void testCorsActual() throws Exception {\n        var request = new HttpGet(presignedGET);\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"*\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                
.isEqualTo(\"GET, HEAD, PUT, POST, DELETE\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"*\");\n    }\n\n    @Test\n    public void testNonCors() throws Exception {\n        var request = new HttpGet(presignedGET);\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isFalse();\n    }\n\n    private static String createRandomContainerName() {\n        return \"s3proxy-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static CloseableHttpClient getHttpClient() throws\n            KeyManagementException, NoSuchAlgorithmException,\n            KeyStoreException {\n        // Relax SSL Certificate check\n        var sslContext = new SSLContextBuilder().loadTrustMaterial(\n                null, new TrustStrategy() {\n                    @Override\n                    public boolean isTrusted(X509Certificate[] arg0,\n                            String arg1) throws CertificateException {\n                        return true;\n                    }\n                }).build();\n\n        Registry<ConnectionSocketFactory> registry = RegistryBuilder\n                .<ConnectionSocketFactory>create()\n                .register(\"http\", PlainConnectionSocketFactory.INSTANCE)\n                .register(\"https\", new SSLConnectionSocketFactory(sslContext,\n                NoopHostnameVerifier.INSTANCE)).build();\n\n        PoolingHttpClientConnectionManager connectionManager = new\n                PoolingHttpClientConnectionManager(registry);\n\n        return HttpClients.custom().setConnectionManager(connectionManager)\n    
            .build();\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingResponseTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.net.URI;\nimport java.nio.charset.StandardCharsets;\nimport java.security.KeyManagementException;\nimport java.security.KeyStoreException;\nimport java.security.NoSuchAlgorithmException;\nimport java.security.cert.CertificateException;\nimport java.security.cert.X509Certificate;\nimport java.util.Date;\nimport java.util.Random;\nimport java.util.concurrent.TimeUnit;\n\nimport com.amazonaws.HttpMethod;\nimport com.amazonaws.SDKGlobalConfiguration;\nimport com.amazonaws.auth.AWSCredentials;\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.BasicAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.model.CannedAccessControlList;\nimport com.google.common.io.ByteSource;\nimport com.google.common.net.HttpHeaders;\n\nimport org.apache.http.HttpResponse;\nimport org.apache.http.HttpStatus;\nimport org.apache.http.client.methods.HttpGet;\nimport org.apache.http.client.methods.HttpOptions;\nimport org.apache.http.config.Registry;\nimport org.apache.http.config.RegistryBuilder;\nimport 
org.apache.http.conn.socket.ConnectionSocketFactory;\nimport org.apache.http.conn.socket.PlainConnectionSocketFactory;\nimport org.apache.http.conn.ssl.NoopHostnameVerifier;\nimport org.apache.http.conn.ssl.SSLConnectionSocketFactory;\nimport org.apache.http.conn.ssl.TrustStrategy;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.apache.http.impl.conn.PoolingHttpClientConnectionManager;\nimport org.apache.http.ssl.SSLContextBuilder;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class CrossOriginResourceSharingResponseTest {\n    static {\n        System.setProperty(\n                SDKGlobalConfiguration.DISABLE_CERT_CHECKING_SYSTEM_PROPERTY,\n                \"true\");\n        AwsSdkTest.disableSslVerification();\n    }\n\n    private URI s3Endpoint;\n    private EndpointConfiguration s3EndpointConfig;\n    private S3Proxy s3Proxy;\n    private BlobStoreContext context;\n    private String containerName;\n    private AWSCredentials awsCreds;\n    private AmazonS3 s3Client;\n    private String servicePath;\n    private CloseableHttpClient httpClient;\n    private URI presignedGET;\n    private URI publicGET;\n\n    @Before\n    public void setUp() throws Exception {\n        TestUtils.S3ProxyLaunchInfo info = TestUtils.startS3Proxy(\n                \"s3proxy-cors.conf\");\n        awsCreds = new BasicAWSCredentials(info.getS3Identity(),\n                info.getS3Credential());\n        context = info.getBlobStore().getContext();\n        s3Proxy = info.getS3Proxy();\n        s3Endpoint = info.getSecureEndpoint();\n        servicePath = info.getServicePath();\n        s3EndpointConfig = new EndpointConfiguration(\n                s3Endpoint.toString() + servicePath, \"us-east-1\");\n        s3Client = AmazonS3ClientBuilder.standard()\n                
.withCredentials(new AWSStaticCredentialsProvider(awsCreds))\n                .withEndpointConfiguration(s3EndpointConfig)\n                .build();\n        httpClient = getHttpClient();\n\n        containerName = createRandomContainerName();\n        info.getBlobStore().createContainerInLocation(null, containerName);\n\n        s3Client.setBucketAcl(containerName,\n                CannedAccessControlList.PublicRead);\n\n        String blobName = \"test\";\n        ByteSource payload = ByteSource.wrap(\"blob-content\".getBytes(\n                StandardCharsets.UTF_8));\n        Blob blob = info.getBlobStore().blobBuilder(blobName)\n                .payload(payload).contentLength(payload.size()).build();\n        info.getBlobStore().putBlob(containerName, blob);\n\n        var expiration = new Date(System.currentTimeMillis() +\n                TimeUnit.HOURS.toMillis(1));\n        presignedGET = s3Client.generatePresignedUrl(containerName, blobName,\n                expiration, HttpMethod.GET).toURI();\n\n        publicGET = s3Client.getUrl(containerName, blobName).toURI();\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (s3Proxy != null) {\n            s3Proxy.stop();\n        }\n        if (context != null) {\n            context.getBlobStore().deleteContainer(containerName);\n            context.close();\n        }\n        if (httpClient != null) {\n            httpClient.close();\n        }\n    }\n\n    @Test\n    public void testCorsPreflight() throws Exception {\n        // Allowed origin and method\n        var request = new HttpOptions(presignedGET);\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n         
       HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"https://example.com\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                .isEqualTo(\"GET, PUT\");\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"ETag\");\n\n        // Allowed origin, method and header\n        request.reset();\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS, \"Accept\");\n        response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"https://example.com\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                .isEqualTo(\"GET, PUT\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue())\n                .isEqualTo(\"Accept\");\n        
assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"ETag\");\n\n        // Allowed origin, method and header combination\n        request.reset();\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS,\n                \"Accept, Content-Type\");\n        response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"https://example.com\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                .isEqualTo(\"GET, PUT\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue())\n                .isEqualTo(\"Accept, Content-Type\");\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"ETag\");\n    }\n\n    @Test\n    public void testCorsPreflightPublicRead() throws Exception {\n        // No CORS headers\n        var request = new HttpOptions(publicGET);\n        HttpResponse response = httpClient.execute(request);\n\n        assertThat(response.getStatusLine().getStatusCode())\n                
.isEqualTo(HttpStatus.SC_BAD_REQUEST);\n\n        // Not allowed method\n        request.reset();\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"PATCH\");\n        response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_BAD_REQUEST);\n\n        // Allowed origin and method\n        request.reset();\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, \"GET\");\n        request.setHeader(HttpHeaders.ACCESS_CONTROL_REQUEST_HEADERS,\n                \"Accept, Content-Type\");\n        response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"https://example.com\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                .isEqualTo(\"GET, PUT\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_HEADERS).getValue())\n                .isEqualTo(\"Accept, Content-Type\");\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"ETag\");\n        assertThat(response.getFirstHeader(\n                
HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS))\n                .isNull();\n    }\n\n    @Test\n    public void testCorsActual() throws Exception {\n        var request = new HttpGet(presignedGET);\n        request.setHeader(HttpHeaders.ORIGIN, \"https://example.com\");\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN).getValue())\n                .isEqualTo(\"https://example.com\");\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS)).isTrue();\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS).getValue())\n                    .isEqualTo(\"GET, PUT\");\n        assertThat(response.getFirstHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS).getValue())\n                .isEqualTo(\"ETag\");\n    }\n\n    @Test\n    public void testNonCors() throws Exception {\n        var request = new HttpGet(presignedGET);\n        HttpResponse response = httpClient.execute(request);\n        assertThat(response.getStatusLine().getStatusCode())\n                .isEqualTo(HttpStatus.SC_OK);\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)).isFalse();\n        assertThat(response.containsHeader(\n                HttpHeaders.ACCESS_CONTROL_EXPOSE_HEADERS)).isFalse();\n    }\n\n    private static String createRandomContainerName() {\n        return \"s3proxy-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static CloseableHttpClient getHttpClient() throws\n            KeyManagementException, NoSuchAlgorithmException,\n            KeyStoreException {\n        // 
Relax SSL Certificate check\n        var sslContext = new SSLContextBuilder().loadTrustMaterial(\n                null, new TrustStrategy() {\n                    @Override\n                    public boolean isTrusted(X509Certificate[] arg0,\n                            String arg1) throws CertificateException {\n                        return true;\n                    }\n                }).build();\n\n        Registry<ConnectionSocketFactory> registry = RegistryBuilder\n                .<ConnectionSocketFactory>create()\n                .register(\"http\", PlainConnectionSocketFactory.INSTANCE)\n                .register(\"https\", new SSLConnectionSocketFactory(sslContext,\n                NoopHostnameVerifier.INSTANCE)).build();\n\n        PoolingHttpClientConnectionManager connectionManager = new\n                PoolingHttpClientConnectionManager(registry);\n\n        return HttpClients.custom().setConnectionManager(connectionManager)\n                .build();\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/CrossOriginResourceSharingRuleTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\n\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class CrossOriginResourceSharingRuleTest {\n    private CrossOriginResourceSharing corsAll;\n    private CrossOriginResourceSharing corsCfg;\n    private CrossOriginResourceSharing corsOff;\n\n    @Before\n    public void setUp() throws Exception {\n        // CORS Allow All\n        corsAll = new CrossOriginResourceSharing();\n        // CORS Configured\n        corsCfg = new CrossOriginResourceSharing(\n                List.of(\"https://example\\\\.com\",\n                        \"https://.+\\\\.example\\\\.com\",\n                        \"https://example\\\\.cloud\"),\n                List.of(\"GET\", \"PUT\"),\n                List.of(\"Accept\", \"Content-Type\"),\n                List.of(),\n                \"true\");\n        // CORS disabled\n        corsOff = new CrossOriginResourceSharing(null, null, null, null, null);\n    }\n\n    @Test\n    public void testCorsOffOrigin() throws Exception {\n        String probe = \"\";\n        assertThat(corsOff.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isFalse();\n        probe = \"https://example.com\";\n        assertThat(corsOff.isOriginAllowed(probe))\n      
          .as(\"check '%s' as origin\", probe).isFalse();\n    }\n\n    @Test\n    public void testCorsOffMethod() throws Exception {\n        String probe = \"\";\n        assertThat(corsOff.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n        probe = \"GET\";\n        assertThat(corsOff.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n    }\n\n    @Test\n    public void testCorsOffHeader() throws Exception {\n        String probe = \"\";\n        assertThat(corsOff.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept\";\n        assertThat(corsOff.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept, Content-Type\";\n        assertThat(corsOff.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n    }\n\n    @Test\n    public void testCorsAllOrigin() throws Exception {\n        String probe = \"\";\n        assertThat(corsAll.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isFalse();\n        probe = \"https://example.com\";\n        assertThat(corsAll.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isTrue();\n        probe = \"https://sub.example.com\";\n        assertThat(corsAll.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isTrue();\n    }\n\n    @Test\n    public void testCorsAllMethod() throws Exception {\n        String probe = \"\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n        probe = \"PATCH\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n        probe = \"GET\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as 
method\", probe).isTrue();\n        probe = \"PUT\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n        probe = \"POST\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n        probe = \"HEAD\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n        probe = \"DELETE\";\n        assertThat(corsAll.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n    }\n\n    @Test\n    public void testCorsAllHeader() throws Exception {\n        String probe = \"\";\n        assertThat(corsAll.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept\";\n        assertThat(corsAll.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isTrue();\n        probe = \"Accept, Content-Type\";\n        assertThat(corsAll.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isTrue();\n    }\n\n    @Test\n    public void testCorsCfgOrigin() throws Exception {\n        String probe = \"\";\n        assertThat(corsCfg.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isFalse();\n        probe = \"https://example.org\";\n        assertThat(corsCfg.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isFalse();\n        probe = \"https://example.com\";\n        assertThat(corsCfg.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isTrue();\n        probe = \"https://sub.example.com\";\n        assertThat(corsCfg.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", probe).isTrue();\n        probe = \"https://example.cloud\";\n        assertThat(corsCfg.isOriginAllowed(probe))\n                .as(\"check '%s' as origin\", 
probe).isTrue();\n    }\n\n    @Test\n    public void testCorsCfgMethod() throws Exception {\n        String probe = \"\";\n        assertThat(corsCfg.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n        probe = \"PATCH\";\n        assertThat(corsCfg.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isFalse();\n        probe = \"GET\";\n        assertThat(corsCfg.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n        probe = \"PUT\";\n        assertThat(corsCfg.isMethodAllowed(probe))\n                .as(\"check '%s' as method\", probe).isTrue();\n    }\n\n    @Test\n    public void testCorsCfgHeader() throws Exception {\n        String probe = \"\";\n        assertThat(corsCfg.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept-Language\";\n        assertThat(corsCfg.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept, Accept-Encoding\";\n        assertThat(corsCfg.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isFalse();\n        probe = \"Accept\";\n        assertThat(corsCfg.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isTrue();\n        probe = \"Accept, Content-Type\";\n        assertThat(corsCfg.isEveryHeaderAllowed(probe))\n                .as(\"check '%s' as header\", probe).isTrue();\n    }\n\n    @Test\n    public void testAllowCredentials() {\n        assertThat(corsOff.isAllowCredentials()).isFalse();\n        assertThat(corsCfg.isAllowCredentials()).isTrue();\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/EncryptedBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.BufferedReader;\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.InputStreamReader;\nimport java.nio.charset.StandardCharsets;\nimport java.util.ArrayList;\nimport java.util.HashMap;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Random;\nimport java.util.stream.Collectors;\n\nimport org.gaul.s3proxy.crypto.Constants;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobAccess;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.domain.StorageType;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.jclouds.blobstore.options.ListContainerOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.http.HttpResponseException;\nimport 
org.jclouds.io.Payload;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Assert;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\n@SuppressWarnings(\"UnstableApiUsage\")\npublic final class EncryptedBlobStoreTest {\n    private static final Logger logger =\n        LoggerFactory.getLogger(EncryptedBlobStoreTest.class);\n\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private String containerName;\n    private BlobStore encryptedBlobStore;\n\n    private static Blob makeBlob(BlobStore blobStore, String blobName,\n        InputStream is, long contentLength) {\n\n        return blobStore.blobBuilder(blobName)\n            .payload(is)\n            .contentLength(contentLength)\n            .build();\n    }\n\n    private static Blob makeBlob(BlobStore blobStore, String blobName,\n        byte[] payload, long contentLength) {\n\n        return blobStore.blobBuilder(blobName)\n            .payload(payload)\n            .contentLength(contentLength)\n            .build();\n    }\n\n    private static Blob makeBlobWithContentType(BlobStore blobStore,\n        String blobName,\n        long contentLength,\n        InputStream is,\n        String contentType) {\n\n        return blobStore.blobBuilder(blobName)\n            .payload(is)\n            .contentLength(contentLength)\n            .contentType(contentType)\n            .build();\n    }\n\n    @Before\n    public void setUp() throws Exception {\n        String password = \"Password1234567!\";\n        String salt = \"12345678\";\n\n        containerName = TestUtils.createRandomContainerName();\n\n        //noinspection UnstableApiUsage\n        context = ContextBuilder\n            .newBuilder(\"transient\")\n            .credentials(\"identity\", \"credential\")\n            .modules(List.of(new SLF4JLoggingModule()))\n            
.build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n\n        var properties = new Properties();\n        properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE, \"true\");\n        properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_PASSWORD,\n            password);\n        properties.put(S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE_SALT,\n            salt);\n\n        encryptedBlobStore =\n            EncryptedBlobStore.newEncryptedBlobStore(blobStore, properties);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testBlobNotExists() {\n\n        String blobName = TestUtils.createRandomBlobName();\n        Blob blob = encryptedBlobStore.getBlob(containerName, blobName);\n        assertThat(blob).isNull();\n\n        blob = encryptedBlobStore.getBlob(containerName, blobName,\n            new GetOptions());\n        assertThat(blob).isNull();\n    }\n\n    @Test\n    public void testBlobNotEncrypted() throws Exception {\n\n        var tests = new String[] {\n            \"1\", // only 1 char\n            \"123456789A12345\", // lower then the AES block\n            \"123456789A1234567\", // one byte bigger then the AES block\n            \"123456789A123456123456789B123456123456789C\" +\n                \"1234123456789A123456123456789B123456123456789C1234\"\n        };\n\n        Map<String, Long> contentLengths = new HashMap<>();\n        for (String content : tests) {\n            String blobName = TestUtils.createRandomBlobName();\n\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n            contentLengths.put(blobName, (long) content.length());\n            Blob blob = makeBlob(blobStore, blobName, is, 
content.length());\n            blobStore.putBlob(containerName, blob);\n            blob = encryptedBlobStore.getBlob(containerName, blobName);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(content).isEqualTo(plaintext);\n            }\n\n            var options = new GetOptions();\n            blob = encryptedBlobStore.getBlob(containerName, blobName, options);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {} with empty options \", plaintext);\n                assertThat(content).isEqualTo(plaintext);\n            }\n        }\n\n        PageSet<? 
extends StorageMetadata> blobs =\n            encryptedBlobStore.list(containerName, new ListContainerOptions());\n        for (StorageMetadata blob : blobs) {\n            assertThat(blob.getSize()).isEqualTo(\n                contentLengths.get(blob.getName()));\n        }\n\n        blobs = encryptedBlobStore.list();\n        StorageMetadata metadata = blobs.iterator().next();\n        assertThat(StorageType.CONTAINER).isEqualTo(metadata.getType());\n    }\n\n    @Test\n    public void testListEncrypted() {\n        var contents = new String[] {\n            \"1\", // only 1 char\n            \"123456789A12345\", // lower then the AES block\n            \"123456789A1234567\", // one byte bigger then the AES block\n            \"123456789A123456123456789B123456123456789C1234\"\n        };\n\n        Map<String, Long> contentLengths = new HashMap<>();\n        for (String content : contents) {\n            String blobName = TestUtils.createRandomBlobName();\n\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n            contentLengths.put(blobName, (long) content.length());\n            Blob blob =\n                makeBlob(encryptedBlobStore, blobName, is, content.length());\n            encryptedBlobStore.putBlob(containerName, blob);\n        }\n\n        PageSet<? 
extends StorageMetadata> blobs =\n            encryptedBlobStore.list(containerName);\n        for (StorageMetadata blob : blobs) {\n            assertThat(blob.getSize()).isEqualTo(\n                contentLengths.get(blob.getName()));\n        }\n\n        blobs =\n            encryptedBlobStore.list(containerName, new ListContainerOptions());\n        for (StorageMetadata blob : blobs) {\n            assertThat(blob.getSize()).isEqualTo(\n                contentLengths.get(blob.getName()));\n            encryptedBlobStore.removeBlob(containerName, blob.getName());\n        }\n\n        blobs =\n            encryptedBlobStore.list(containerName, new ListContainerOptions());\n        assertThat(blobs.size()).isEqualTo(0);\n    }\n\n    @Test\n    public void testListEncryptedMultipart() {\n\n        String blobName = TestUtils.createRandomBlobName();\n\n        var contentParts = new String[] {\n            \"123456789A123456123456789B123456123456789C1234\",\n            \"123456789D123456123456789E123456123456789F123456\",\n            \"123456789G123456123456789H123456123456789I123\"\n        };\n\n        String content = contentParts[0] + contentParts[1] + contentParts[2];\n        BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,\n            content.getBytes(StandardCharsets.UTF_8),\n            content.length()).getMetadata();\n\n        MultipartUpload mpu =\n            encryptedBlobStore.initiateMultipartUpload(containerName,\n                blobMetadata, new PutOptions());\n\n        Payload payload1 = Payloads.newByteArrayPayload(\n            contentParts[0].getBytes(StandardCharsets.UTF_8));\n        Payload payload2 = Payloads.newByteArrayPayload(\n            contentParts[1].getBytes(StandardCharsets.UTF_8));\n        Payload payload3 = Payloads.newByteArrayPayload(\n            contentParts[2].getBytes(StandardCharsets.UTF_8));\n\n        encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);\n        
encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);\n        encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);\n\n        List<MultipartPart> parts = encryptedBlobStore.listMultipartUpload(mpu);\n\n        int index = 0;\n        for (MultipartPart part : parts) {\n            assertThat((long) contentParts[index].length()).isEqualTo(\n                part.partSize());\n            index++;\n        }\n\n        encryptedBlobStore.completeMultipartUpload(mpu, parts);\n\n        PageSet<? extends StorageMetadata> blobs =\n            encryptedBlobStore.list(containerName);\n        StorageMetadata metadata = blobs.iterator().next();\n        assertThat((long) content.length()).isEqualTo(metadata.getSize());\n\n        var options = new ListContainerOptions();\n        blobs = encryptedBlobStore.list(containerName, options.withDetails());\n        metadata = blobs.iterator().next();\n        assertThat((long) content.length()).isEqualTo(metadata.getSize());\n\n        blobs = encryptedBlobStore.list();\n        metadata = blobs.iterator().next();\n        assertThat(StorageType.CONTAINER).isEqualTo(metadata.getType());\n\n        List<String> singleList = new ArrayList<>();\n        singleList.add(blobName);\n        encryptedBlobStore.removeBlobs(containerName, singleList);\n        blobs = encryptedBlobStore.list(containerName);\n        assertThat(blobs.size()).isEqualTo(0);\n    }\n\n    @Test\n    public void testBlobNotEncryptedRanges() throws Exception {\n\n        for (int run = 0; run < 100; run++) {\n            var tests = new String[] {\n                \"123456789A12345\", // lower then the AES block\n                \"123456789A1234567\", // one byte bigger then the AES block\n                \"123456789A123456123456789B123456123456789C\" +\n                    \"1234123456789A123456123456789B123456123456789C1234\"\n            };\n\n            for (String content : tests) {\n                String blobName = 
TestUtils.createRandomBlobName();\n                var rand = new Random();\n\n                InputStream is = new ByteArrayInputStream(\n                    content.getBytes(StandardCharsets.UTF_8));\n                Blob blob = makeBlob(blobStore, blobName, is, content.length());\n                blobStore.putBlob(containerName, blob);\n\n                var options = new GetOptions();\n                int offset = rand.nextInt(content.length() - 1);\n                logger.debug(\"content {} with offset {}\", content, offset);\n\n                options.startAt(offset);\n                blob = encryptedBlobStore.getBlob(containerName, blobName,\n                    options);\n\n                try (InputStream blobIs = blob.getPayload().openStream()) {\n                    var reader = new BufferedReader(\n                        new InputStreamReader(blobIs));\n                    String plaintext = reader.lines().collect(\n                        Collectors.joining());\n                    logger.debug(\"plaintext {} with offset {}\", plaintext,\n                        offset);\n                    assertThat(plaintext).isEqualTo(content.substring(offset));\n                }\n\n                options = new GetOptions();\n                int tail = rand.nextInt(content.length());\n                if (tail == 0) {\n                    tail++;\n                }\n                logger.debug(\"content {} with tail {}\", content, tail);\n\n                options.tail(tail);\n                blob = encryptedBlobStore.getBlob(containerName, blobName,\n                    options);\n\n                try (InputStream blobIs = blob.getPayload().openStream()) {\n                    var reader = new BufferedReader(\n                        new InputStreamReader(blobIs));\n                    String plaintext = reader.lines().collect(\n                        Collectors.joining());\n                    logger.debug(\"plaintext {} with tail {}\", plaintext, tail);\n  
                  assertThat(plaintext).isEqualTo(\n                        content.substring(content.length() - tail));\n                }\n\n                options = new GetOptions();\n                offset = 1;\n                int end = content.length() - 2;\n                logger.debug(\"content {} with range {}-{}\", content, offset,\n                    end);\n\n                options.range(offset, end);\n                blob = encryptedBlobStore.getBlob(containerName, blobName,\n                    options);\n\n                try (InputStream blobIs = blob.getPayload().openStream()) {\n                    var reader = new BufferedReader(\n                        new InputStreamReader(blobIs));\n                    String plaintext = reader.lines().collect(\n                        Collectors.joining());\n                    logger.debug(\"plaintext {} with range {}-{}\", plaintext,\n                        offset, end);\n                    assertThat(plaintext).isEqualTo(\n                        content.substring(offset, end + 1));\n                }\n            }\n        }\n    }\n\n    @Test\n    public void testEncryptContent() throws Exception {\n        var tests = new String[] {\n            \"1\", // only 1 char\n            \"123456789A12345\", // lower then the AES block\n            \"123456789A1234567\", // one byte bigger then the AES block\n            \"123456789A123456123456789B123456123456789C1234\"\n        };\n\n        for (String content : tests) {\n            String blobName = TestUtils.createRandomBlobName();\n            String contentType = \"plain/text\";\n\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n            Blob blob = makeBlobWithContentType(encryptedBlobStore, blobName,\n                content.length(), is, contentType);\n            encryptedBlobStore.putBlob(containerName, blob);\n\n            blob = 
encryptedBlobStore.getBlob(containerName, blobName);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(plaintext).isEqualTo(content);\n            }\n\n            blob = blobStore.getBlob(containerName,\n                blobName + Constants.S3_ENC_SUFFIX);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String encrypted = reader.lines().collect(Collectors.joining());\n                logger.debug(\"encrypted {}\", encrypted);\n                assertThat(content).isNotEqualTo(encrypted);\n            }\n\n            assertThat(encryptedBlobStore.blobExists(containerName,\n                blobName)).isTrue();\n\n            BlobAccess access =\n                encryptedBlobStore.getBlobAccess(containerName, blobName);\n            assertThat(access).isEqualTo(BlobAccess.PRIVATE);\n\n            encryptedBlobStore.setBlobAccess(containerName, blobName,\n                BlobAccess.PUBLIC_READ);\n            access = encryptedBlobStore.getBlobAccess(containerName, blobName);\n            assertThat(access).isEqualTo(BlobAccess.PUBLIC_READ);\n        }\n    }\n\n    @Test\n    public void testEncryptContentWithOptions() throws Exception {\n        var tests = new String[] {\n            \"1\", // only 1 char\n            \"123456789A12345\", // lower then the AES block\n            \"123456789A1234567\", // one byte bigger then the AES block\n            \"123456789A123456123456789B123456123456789C1234\"\n        };\n\n        for (String content : tests) {\n            String blobName = TestUtils.createRandomBlobName();\n            String contentType = \"plain/text; 
charset=utf-8\";\n\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n            Blob blob = makeBlobWithContentType(encryptedBlobStore, blobName,\n                content.length(), is, contentType);\n            var options = new PutOptions();\n            encryptedBlobStore.putBlob(containerName, blob, options);\n\n            blob = encryptedBlobStore.getBlob(containerName, blobName);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(content).isEqualTo(plaintext);\n            }\n\n            blob = blobStore.getBlob(containerName,\n                blobName + Constants.S3_ENC_SUFFIX);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String encrypted = reader.lines().collect(Collectors.joining());\n                logger.debug(\"encrypted {}\", encrypted);\n                assertThat(content).isNotEqualTo(encrypted);\n            }\n\n            BlobMetadata metadata =\n                encryptedBlobStore.blobMetadata(containerName,\n                    blobName + Constants.S3_ENC_SUFFIX);\n            assertThat(contentType).isEqualTo(\n                metadata.getContentMetadata().getContentType());\n\n            encryptedBlobStore.copyBlob(containerName, blobName,\n                containerName, blobName + \"-copy\", CopyOptions.NONE);\n\n            blob = blobStore.getBlob(containerName,\n                blobName + Constants.S3_ENC_SUFFIX);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n    
            String encrypted = reader.lines().collect(Collectors.joining());\n                logger.debug(\"encrypted {}\", encrypted);\n                assertThat(content).isNotEqualTo(encrypted);\n            }\n\n            blob =\n                encryptedBlobStore.getBlob(containerName, blobName + \"-copy\");\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(content).isEqualTo(plaintext);\n            }\n        }\n    }\n\n    @Test\n    public void testEncryptMultipartContent() throws Exception {\n        String blobName = TestUtils.createRandomBlobName();\n\n        String content1 = \"123456789A123456123456789B123456123456789C1234\";\n        String content2 = \"123456789D123456123456789E123456123456789F123456\";\n        String content3 = \"123456789G123456123456789H123456123456789I123\";\n\n        String content = content1 + content2 + content3;\n        BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,\n            content.getBytes(StandardCharsets.UTF_8),\n            content.length()).getMetadata();\n        MultipartUpload mpu =\n            encryptedBlobStore.initiateMultipartUpload(containerName,\n                blobMetadata, new PutOptions());\n\n        Payload payload1 = Payloads.newByteArrayPayload(\n            content1.getBytes(StandardCharsets.UTF_8));\n        Payload payload2 = Payloads.newByteArrayPayload(\n            content2.getBytes(StandardCharsets.UTF_8));\n        Payload payload3 = Payloads.newByteArrayPayload(\n            content3.getBytes(StandardCharsets.UTF_8));\n\n        encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);\n        encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);\n        
encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);\n\n        List<MultipartUpload> mpus =\n            encryptedBlobStore.listMultipartUploads(containerName);\n        assertThat(mpus.size()).isEqualTo(1);\n\n        List<MultipartPart> parts = encryptedBlobStore.listMultipartUpload(mpu);\n        assertThat(mpus.get(0).id()).isEqualTo(mpu.id());\n\n        encryptedBlobStore.completeMultipartUpload(mpu, parts);\n        Blob blob = encryptedBlobStore.getBlob(containerName, blobName);\n\n        try (InputStream blobIs = blob.getPayload().openStream()) {\n            var reader = new BufferedReader(new InputStreamReader(blobIs));\n            String plaintext = reader.lines().collect(Collectors.joining());\n            logger.debug(\"plaintext {}\", plaintext);\n            assertThat(plaintext).isEqualTo(content);\n        }\n\n        blob = blobStore.getBlob(containerName,\n            blobName + Constants.S3_ENC_SUFFIX);\n\n        try (InputStream blobIs = blob.getPayload().openStream()) {\n            var reader = new BufferedReader(new InputStreamReader(blobIs));\n            String encrypted = reader.lines().collect(Collectors.joining());\n            logger.debug(\"encrypted {}\", encrypted);\n            assertThat(content).isNotEqualTo(encrypted);\n        }\n    }\n\n    @Test\n    public void testReadPartial() throws Exception {\n\n        for (int offset = 0; offset < 60; offset++) {\n            logger.debug(\"Test with offset {}\", offset);\n\n            String blobName = TestUtils.createRandomBlobName();\n            String content =\n                \"123456789A123456123456789B123456123456789\" +\n                    \"C123456789D123456789E12345\";\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n\n            Blob blob =\n                makeBlob(encryptedBlobStore, blobName, is, content.length());\n            encryptedBlobStore.putBlob(containerName, blob);\n\n         
   var options = new GetOptions();\n            options.startAt(offset);\n            blob = encryptedBlobStore.getBlob(containerName, blobName, options);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(plaintext).isEqualTo(content.substring(offset));\n            }\n\n            long expectedEndRange = (offset != 0) ? content.length() : 0;\n            assertThat(blob.getAllHeaders().get(\"Content-Range\"))\n                .contains(\"bytes \" + offset + \"-\" + expectedEndRange + \"/\" + content.length());\n        }\n    }\n\n    @Test\n    public void testReadTail() throws Exception {\n\n        for (int length = 1; length < 60; length++) {\n            logger.debug(\"Test with length {}\", length);\n\n            String blobName = TestUtils.createRandomBlobName();\n\n            String content =\n                \"123456789A123456123456789B123456123456789C\" +\n                    \"123456789D123456789E12345\";\n            InputStream is = new ByteArrayInputStream(\n                content.getBytes(StandardCharsets.UTF_8));\n\n            Blob blob =\n                makeBlob(encryptedBlobStore, blobName, is, content.length());\n            encryptedBlobStore.putBlob(containerName, blob);\n\n            var options = new GetOptions();\n            options.tail(length);\n            blob = encryptedBlobStore.getBlob(containerName, blobName, options);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                
assertThat(plaintext).isEqualTo(\n                    content.substring(content.length() - length));\n            }\n\n            assertThat(blob.getAllHeaders().get(\"Content-Range\"))\n                .contains(\"bytes \" + 0 + \"-\" + length + \"/\" + content.length());\n        }\n    }\n\n    @Test\n    public void testReadPartialWithRandomEnd() throws Exception {\n\n        for (int run = 0; run < 100; run++) {\n            for (int offset = 0; offset < 50; offset++) {\n                var rand = new Random();\n                int end = offset + rand.nextInt(20) + 2;\n                int size = end - offset + 1;\n\n                logger.debug(\"Test with offset {} and end {} size {}\",\n                    offset, end, size);\n\n                String blobName = TestUtils.createRandomBlobName();\n\n                String content =\n                    \"123456789A123456-123456789B123456-123456789C123456-\" +\n                        \"123456789D123456-123456789E123456\";\n                InputStream is = new ByteArrayInputStream(\n                    content.getBytes(StandardCharsets.UTF_8));\n\n                Blob blob = makeBlob(encryptedBlobStore, blobName, is,\n                    content.length());\n                encryptedBlobStore.putBlob(containerName, blob);\n\n                var options = new GetOptions();\n                options.range(offset, end);\n                blob = encryptedBlobStore.getBlob(containerName, blobName,\n                    options);\n\n                try (InputStream blobIs = blob.getPayload().openStream()) {\n                    var reader = new BufferedReader(\n                        new InputStreamReader(blobIs));\n                    String plaintext = reader.lines().collect(\n                        Collectors.joining());\n                    logger.debug(\"plaintext {}\", plaintext);\n                    assertThat(plaintext).hasSize(size);\n                    assertThat(plaintext).isEqualTo(\n                    
    content.substring(offset, end + 1));\n                }\n\n                assertThat(blob.getAllHeaders().get(\"Content-Range\"))\n                    .contains(\"bytes \" + offset + \"-\" + end + \"/\" + content.length());\n            }\n        }\n    }\n\n    @Test\n    public void testMultipartReadPartial() throws Exception {\n\n        for (int offset = 0; offset < 130; offset++) {\n            logger.debug(\"Test with offset {}\", offset);\n\n            String blobName = TestUtils.createRandomBlobName();\n\n            String content1 = \"PART1-789A123456123456789B123456123456789C1234\";\n            String content2 =\n                \"PART2-789D123456123456789E123456123456789F123456\";\n            String content3 = \"PART3-789G123456123456789H123456123456789I123\";\n            String content = content1 + content2 + content3;\n\n            BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,\n                content.getBytes(StandardCharsets.UTF_8),\n                content.length()).getMetadata();\n            MultipartUpload mpu =\n                encryptedBlobStore.initiateMultipartUpload(containerName,\n                    blobMetadata, new PutOptions());\n\n            Payload payload1 = Payloads.newByteArrayPayload(\n                content1.getBytes(StandardCharsets.UTF_8));\n            Payload payload2 = Payloads.newByteArrayPayload(\n                content2.getBytes(StandardCharsets.UTF_8));\n            Payload payload3 = Payloads.newByteArrayPayload(\n                content3.getBytes(StandardCharsets.UTF_8));\n\n            encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);\n            encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);\n            encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);\n\n            List<MultipartPart> parts =\n                encryptedBlobStore.listMultipartUpload(mpu);\n            encryptedBlobStore.completeMultipartUpload(mpu, parts);\n\n            var options = 
new GetOptions();\n            options.startAt(offset);\n            Blob blob =\n                encryptedBlobStore.getBlob(containerName, blobName, options);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(plaintext).isEqualTo(content.substring(offset));\n            }\n        }\n    }\n\n    @Test\n    public void testMultipartReadTail() throws Exception {\n\n        for (int length = 1; length < 130; length++) {\n            logger.debug(\"Test with length {}\", length);\n\n            String blobName = TestUtils.createRandomBlobName();\n\n            String content1 = \"PART1-789A123456123456789B123456123456789C1234\";\n            String content2 =\n                \"PART2-789D123456123456789E123456123456789F123456\";\n            String content3 = \"PART3-789G123456123456789H123456123456789I123\";\n            String content = content1 + content2 + content3;\n            BlobMetadata blobMetadata = makeBlob(encryptedBlobStore, blobName,\n                content.getBytes(StandardCharsets.UTF_8),\n                content.length()).getMetadata();\n            MultipartUpload mpu =\n                encryptedBlobStore.initiateMultipartUpload(containerName,\n                    blobMetadata, new PutOptions());\n\n            Payload payload1 = Payloads.newByteArrayPayload(\n                content1.getBytes(StandardCharsets.UTF_8));\n            Payload payload2 = Payloads.newByteArrayPayload(\n                content2.getBytes(StandardCharsets.UTF_8));\n            Payload payload3 = Payloads.newByteArrayPayload(\n                content3.getBytes(StandardCharsets.UTF_8));\n\n            encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);\n            
encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);\n            encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);\n\n            List<MultipartPart> parts =\n                encryptedBlobStore.listMultipartUpload(mpu);\n            encryptedBlobStore.completeMultipartUpload(mpu, parts);\n\n            var options = new GetOptions();\n            options.tail(length);\n            Blob blob =\n                encryptedBlobStore.getBlob(containerName, blobName, options);\n\n            try (InputStream blobIs = blob.getPayload().openStream()) {\n                var reader = new BufferedReader(new InputStreamReader(blobIs));\n                String plaintext = reader.lines().collect(Collectors.joining());\n                logger.debug(\"plaintext {}\", plaintext);\n                assertThat(plaintext).isEqualTo(\n                    content.substring(content.length() - length));\n            }\n        }\n    }\n\n    @Test\n    public void testMultipartReadPartialWithRandomEnd() throws Exception {\n\n        for (int run = 0; run < 100; run++) {\n            // total len = 139\n            for (int offset = 0; offset < 70; offset++) {\n                var rand = new Random();\n                int end = offset + rand.nextInt(60) + 2;\n                int size = end - offset + 1;\n                logger.debug(\"Test with offset {} and end {} size {}\",\n                    offset, end, size);\n\n                String blobName = TestUtils.createRandomBlobName();\n\n                String content1 =\n                    \"PART1-789A123456123456789B123456123456789C1234\";\n                String content2 =\n                    \"PART2-789D123456123456789E123456123456789F123456\";\n                String content3 =\n                    \"PART3-789G123456123456789H123456123456789I123\";\n\n                String content = content1 + content2 + content3;\n                BlobMetadata blobMetadata =\n                    makeBlob(encryptedBlobStore, 
blobName,\n                        content.getBytes(StandardCharsets.UTF_8),\n                        content.length()).getMetadata();\n                MultipartUpload mpu =\n                    encryptedBlobStore.initiateMultipartUpload(containerName,\n                        blobMetadata, new PutOptions());\n\n                Payload payload1 = Payloads.newByteArrayPayload(\n                    content1.getBytes(StandardCharsets.UTF_8));\n                Payload payload2 = Payloads.newByteArrayPayload(\n                    content2.getBytes(StandardCharsets.UTF_8));\n                Payload payload3 = Payloads.newByteArrayPayload(\n                    content3.getBytes(StandardCharsets.UTF_8));\n\n                encryptedBlobStore.uploadMultipartPart(mpu, 1, payload1);\n                encryptedBlobStore.uploadMultipartPart(mpu, 2, payload2);\n                encryptedBlobStore.uploadMultipartPart(mpu, 3, payload3);\n\n                List<MultipartPart> parts =\n                    encryptedBlobStore.listMultipartUpload(mpu);\n                encryptedBlobStore.completeMultipartUpload(mpu, parts);\n\n                var options = new GetOptions();\n                options.range(offset, end);\n                Blob blob = encryptedBlobStore.getBlob(containerName, blobName,\n                    options);\n\n                try (InputStream blobIs = blob.getPayload().openStream()) {\n                    var reader = new BufferedReader(\n                        new InputStreamReader(blobIs));\n                    String plaintext = reader.lines().collect(\n                        Collectors.joining());\n                    logger.debug(\"plaintext {}\", plaintext);\n                    assertThat(plaintext).isEqualTo(\n                        content.substring(offset, end + 1));\n                }\n            }\n        }\n    }\n\n    @Test\n    public void testReadConditional() {\n        String blobName = TestUtils.createRandomBlobName();\n        String content 
= \"Hello world.\";\n        InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));\n\n        Blob blob = makeBlob(encryptedBlobStore, blobName, is, content.length());\n        encryptedBlobStore.putBlob(containerName, blob);\n\n        GetOptions options = new GetOptions();\n        blob = encryptedBlobStore.getBlob(containerName, blobName, options);\n        String etag = blob.getMetadata().getETag();\n\n        GetOptions conditionalOptions = GetOptions.Builder.ifETagDoesntMatch(etag);\n        var e = Assert.assertThrows(HttpResponseException.class,\n            () -> encryptedBlobStore.getBlob(containerName, blobName, conditionalOptions));\n        assertThat(e.getResponse().getStatusCode()).isEqualTo(304);\n    }\n\n    @Test\n    public void testReadDoubleZeroRange() throws IOException {\n        String blobName = TestUtils.createRandomBlobName();\n        String content = \"Hello world.\";\n        InputStream is = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8));\n\n        Blob blob = makeBlob(encryptedBlobStore, blobName, is, content.length());\n        encryptedBlobStore.putBlob(containerName, blob);\n\n        GetOptions rangeOptions = new GetOptions();\n        rangeOptions.getRanges().add(\"0-0\");\n\n        var result = encryptedBlobStore.getBlob(containerName, blobName, rangeOptions);\n        assertThat(result.getPayload().openStream().readAllBytes().length).isEqualTo(1);\n\n        assertThat(result.getAllHeaders().get(\"Content-Range\"))\n            .contains(\"bytes 0-0/\" + content.length());\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/EventualBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.ScheduledExecutorService;\nimport java.util.concurrent.TimeUnit;\n\nimport com.google.common.io.ByteSource;\nimport com.google.common.net.MediaType;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class EventualBlobStoreTest {\n    private static final int DELAY = 5;\n    private static final TimeUnit DELAY_UNIT = TimeUnit.SECONDS;\n    private static final ByteSource BYTE_SOURCE =\n            TestUtils.randomByteSource().slice(0, 1024);\n    private BlobStoreContext nearContext;\n    private BlobStoreContext farContext;\n    
private BlobStore nearBlobStore;\n    private BlobStore farBlobStore;\n    private String containerName;\n    private ScheduledExecutorService executorService;\n    private BlobStore eventualBlobStore;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = createRandomContainerName();\n\n        nearContext = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        nearBlobStore = nearContext.getBlobStore();\n        nearBlobStore.createContainerInLocation(null, containerName);\n\n        farContext = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        farBlobStore = farContext.getBlobStore();\n        farBlobStore.createContainerInLocation(null, containerName);\n\n        executorService = Executors.newScheduledThreadPool(1);\n\n        eventualBlobStore = EventualBlobStore.newEventualBlobStore(\n                nearBlobStore, farBlobStore, executorService, DELAY,\n                DELAY_UNIT, 1.0);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (nearContext != null) {\n            nearBlobStore.deleteContainer(containerName);\n            nearContext.close();\n        }\n        if (farContext != null) {\n            farBlobStore.deleteContainer(containerName);\n            farContext.close();\n        }\n        if (executorService != null) {\n            executorService.shutdown();\n        }\n    }\n\n    @Test\n    public void testReadAfterCreate() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, blobName);\n        eventualBlobStore.putBlob(containerName, blob);\n       
 assertThat(eventualBlobStore.getBlob(containerName, blobName))\n                .isNull();\n        delay();\n        validateBlob(eventualBlobStore.getBlob(containerName, blobName));\n    }\n\n    @Test\n    public void testReadAfterDelete() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, blobName);\n        eventualBlobStore.putBlob(containerName, blob);\n        assertThat(eventualBlobStore.getBlob(containerName, blobName))\n                .isNull();\n        delay();\n        eventualBlobStore.removeBlob(containerName, blobName);\n        validateBlob(eventualBlobStore.getBlob(containerName, blobName));\n        delay();\n        assertThat(eventualBlobStore.getBlob(containerName, blobName))\n                .isNull();\n    }\n\n    @Test\n    public void testOverwriteAfterDelete() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, blobName);\n        eventualBlobStore.putBlob(containerName, blob);\n        delay();\n        eventualBlobStore.removeBlob(containerName, blobName);\n        blob = makeBlob(eventualBlobStore, blobName);\n        eventualBlobStore.putBlob(containerName, blob);\n        delay();\n        validateBlob(eventualBlobStore.getBlob(containerName, blobName));\n    }\n\n    @Test\n    public void testReadAfterCopy() throws Exception {\n        String fromName = createRandomBlobName();\n        String toName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, fromName);\n        eventualBlobStore.putBlob(containerName, blob);\n        delay();\n        eventualBlobStore.copyBlob(containerName, fromName, containerName,\n                toName, CopyOptions.NONE);\n        assertThat(eventualBlobStore.getBlob(containerName, toName))\n                .isNull();\n        delay();\n        validateBlob(eventualBlobStore.getBlob(containerName, toName));\n    }\n\n    @Test\n    public 
void testReadAfterMultipartUpload() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, blobName);\n        MultipartUpload mpu = eventualBlobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), new PutOptions());\n        MultipartPart part = eventualBlobStore.uploadMultipartPart(mpu,\n                /*partNumber=*/ 1, blob.getPayload());\n        eventualBlobStore.completeMultipartUpload(mpu, List.of(part));\n        assertThat(eventualBlobStore.getBlob(containerName, blobName))\n                .isNull();\n        delay();\n        validateBlob(eventualBlobStore.getBlob(containerName, blobName));\n    }\n\n    @Test\n    public void testListAfterCreate() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(eventualBlobStore, blobName);\n        eventualBlobStore.putBlob(containerName, blob);\n        assertThat(eventualBlobStore.list(containerName)).isEmpty();\n        delay();\n        assertThat(eventualBlobStore.list(containerName)).isNotEmpty();\n    }\n\n    private static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static String createRandomBlobName() {\n        return \"blob-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static Blob makeBlob(BlobStore blobStore, String blobName)\n            throws IOException {\n        return blobStore.blobBuilder(blobName)\n                .payload(BYTE_SOURCE)\n                .contentDisposition(\"attachment; filename=foo.mp4\")\n                .contentEncoding(\"compress\")\n                .contentLength(BYTE_SOURCE.size())\n                .contentType(MediaType.MP4_AUDIO)\n                .contentMD5(BYTE_SOURCE.hash(TestUtils.MD5))\n                .userMetadata(Map.of(\"key\", \"value\"))\n                .build();\n    }\n\n    private static void 
validateBlob(Blob blob) throws IOException {\n        assertThat(blob).isNotNull();\n\n        ContentMetadata contentMetadata =\n                blob.getMetadata().getContentMetadata();\n        assertThat(contentMetadata.getContentDisposition())\n                .isEqualTo(\"attachment; filename=foo.mp4\");\n        assertThat(contentMetadata.getContentEncoding())\n                .isEqualTo(\"compress\");\n        assertThat(contentMetadata.getContentLength())\n                .isEqualTo(BYTE_SOURCE.size());\n        assertThat(contentMetadata.getContentType())\n                .isEqualTo(MediaType.MP4_AUDIO.toString());\n\n        assertThat(blob.getMetadata().getUserMetadata())\n                .isEqualTo(Map.of(\"key\", \"value\"));\n\n        try (InputStream actual = blob.getPayload().openStream();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    private static void delay() throws InterruptedException {\n        DELAY_UNIT.sleep(1 + DELAY);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/GlobBlobStoreLocatorTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.nio.file.FileSystems;\nimport java.nio.file.PathMatcher;\nimport java.util.List;\nimport java.util.Map;\n\nimport com.google.common.collect.ImmutableMap;\nimport com.google.common.collect.ImmutableSortedMap;\nimport com.google.common.collect.Maps;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class GlobBlobStoreLocatorTest {\n    private BlobStore blobStoreOne;\n    private BlobStore blobStoreTwo;\n\n    @Before\n    public void setUp() {\n        blobStoreOne = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class).getBlobStore();\n        blobStoreTwo = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class).getBlobStore();\n\n    }\n\n    @Test\n    public void 
testLocateIdentity() {\n        var credsMap = ImmutableSortedMap.of(\n                \"id1\", Map.entry(\"one\", blobStoreOne),\n                \"id2\", Map.entry(\"two\", blobStoreTwo));\n        var locator = new GlobBlobStoreLocator(\n                credsMap, Map.of());\n        assertThat(locator.locateBlobStore(\"id2\", null, null).getKey())\n                .isEqualTo(\"two\");\n        assertThat(locator.locateBlobStore(null, null, null).getKey())\n                .isEqualTo(\"one\");\n        assertThat(locator.locateBlobStore(\"foo\", null, null)).isNull();\n    }\n\n    @Test\n    public void testLocateContainer() {\n        // Must support null keys\n        var credsMap = ImmutableMap.of(\n                \"id1\", Map.entry(\"one\", blobStoreOne),\n                \"id2\", Map.entry(\"two\", blobStoreTwo));\n        var globMap = Map.of(\n                FileSystems.getDefault().getPathMatcher(\"glob:container1\"),\n                Map.entry(\"id1\", blobStoreOne),\n                FileSystems.getDefault().getPathMatcher(\"glob:container2\"),\n                Map.entry(\"id2\", blobStoreTwo));\n        var locator = new GlobBlobStoreLocator(credsMap,\n                globMap);\n\n        assertThat(locator.locateBlobStore(null, \"container1\", null)\n                .getValue()).isSameAs(blobStoreOne);\n        assertThat(locator.locateBlobStore(null, \"container2\", null)\n                .getValue()).isSameAs(blobStoreTwo);\n        assertThat(locator.locateBlobStore(\"id1\", \"foo\", null)\n                .getValue()).isSameAs(blobStoreOne);\n        assertThat(locator.locateBlobStore(\"id2\", \"foo\", null)\n                .getValue()).isSameAs(blobStoreTwo);\n        assertThat(locator.locateBlobStore(\"foo\", \"container1\", null))\n                .isNull();\n        assertThat(locator.locateBlobStore(\"foo\", \"container2\", null))\n                .isNull();\n    }\n\n    @Test\n    public void testLocateGlob() {\n        var credsMap =\n 
               ImmutableSortedMap.<String, Map.Entry<String, BlobStore>>of(\n                    \"id0\", Maps.immutableEntry(\"zero\", null),\n                    \"id1\", Map.entry(\"one\", blobStoreOne),\n                    \"id2\", Map.entry(\"two\", blobStoreTwo));\n        var globMap =\n                Map.<PathMatcher, Map.Entry<String, BlobStore>>of(\n                        FileSystems.getDefault().getPathMatcher(\n                                \"glob:{one,two}\"),\n                        Map.entry(\"id1\", blobStoreOne),\n                        FileSystems.getDefault().getPathMatcher(\"glob:cont?X*\"),\n                        Map.entry(\"id2\", blobStoreTwo));\n        var locator = new GlobBlobStoreLocator(credsMap,\n                globMap);\n\n        assertThat(locator.locateBlobStore(null, \"one\", null)\n                .getValue()).isSameAs(blobStoreOne);\n        assertThat(locator.locateBlobStore(\"id1\", \"two\", null)\n                .getValue()).isSameAs(blobStoreOne);\n        assertThat(locator.locateBlobStore(\"id2\", \"cont5X.extra\", null)\n                .getValue()).isSameAs(blobStoreTwo);\n    }\n\n    @Test\n    public void testGlobLocatorAnonymous() {\n        // Must support null keys\n        var globMap =\n                ImmutableMap.<PathMatcher, Map.Entry<String, BlobStore>>of(\n                        FileSystems.getDefault().getPathMatcher(\"glob:one\"),\n                        Maps.immutableEntry(null, blobStoreOne),\n                        FileSystems.getDefault().getPathMatcher(\"glob:two\"),\n                        Maps.immutableEntry(null, blobStoreTwo));\n        var locator = new GlobBlobStoreLocator(\n                ImmutableMap.of(), globMap);\n\n        assertThat(locator.locateBlobStore(null, null, null)\n                .getValue()).isSameAs(blobStoreOne);\n        assertThat(locator.locateBlobStore(null, \"one\", null)\n                .getValue()).isSameAs(blobStoreOne);\n        
assertThat(locator.locateBlobStore(null, \"two\", null)\n                .getValue()).isSameAs(blobStoreTwo);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/LatencyBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Random;\nimport java.util.concurrent.Callable;\nimport java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\nimport com.google.common.io.ByteSource;\n\nimport org.assertj.core.api.Assertions;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class LatencyBlobStoreTest {\n    private BlobStoreContext context;\n    private BlobStore delegate;\n    private String containerName;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = createRandomContainerName();\n\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new 
SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        delegate = context.getBlobStore();\n        delegate.createContainerInLocation(null, containerName);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            delegate.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testLoadProperties() throws Exception {\n        String propertiesString = \"s3proxy.latency-blobstore.*.latency=1000\\n\" +\n                \"s3proxy.latency-blobstore.put.speed=10\";\n        InputStream stream = new ByteArrayInputStream(propertiesString.getBytes());\n        Properties properties = new Properties();\n        properties.load(stream);\n\n        Map<String, Long> latencies = LatencyBlobStore.parseLatencies(properties);\n        Map<String, Long> speeds = LatencyBlobStore.parseSpeeds(properties);\n\n        assertThat(latencies.containsKey(\"*\")).isTrue();\n        assertThat(latencies.get(\"*\")).isEqualTo(1000L);\n        assertThat(speeds.containsKey(\"put\")).isTrue();\n        assertThat(speeds.get(\"put\")).isEqualTo(10);\n        assertThat(speeds.containsKey(\"*\")).isFalse();\n    }\n\n    @Test\n    public void testAllLatency() {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", 1000L)), Map.ofEntries());\n\n        long timeTaken = time(() -> latencyBlobStore.containerExists(containerName));\n        assertThat(timeTaken).isGreaterThanOrEqualTo(1000L);\n    }\n\n    @Test\n    public void testSpecificLatency() {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", 0L),\n                        Map.entry(\"container-exists\", 1000L)), Map.ofEntries());\n\n        long timeTaken = time(() -> latencyBlobStore.containerExists(containerName));\n        
assertThat(timeTaken).isGreaterThanOrEqualTo(1000L);\n    }\n\n    @Test\n    public void testAllSpeed() throws Exception {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(), Map.ofEntries(Map.entry(\"*\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n\n        long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob));\n        assertThat(timeTaken).isGreaterThanOrEqualTo(1000L);\n    }\n\n    @Test\n    public void testSpecificSpeed() throws Exception {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(), Map.ofEntries(Map.entry(\"*\", 1000L),\n                        Map.entry(\"put\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n\n        long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob));\n        assertThat(timeTaken).isGreaterThanOrEqualTo(1000L);\n    }\n\n    @Test\n    public void testInvalidLatency() {\n        Assertions.assertThatIllegalArgumentException().isThrownBy(() -> LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", -1000L)), Map.ofEntries()));\n    }\n\n    @Test\n    public void testInvalidSpeed() {\n        Assertions.assertThatIllegalArgumentException().isThrownBy(() -> 
LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(), Map.ofEntries(Map.entry(\"*\", 0L))));\n        Assertions.assertThatIllegalArgumentException().isThrownBy(() -> LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(), Map.ofEntries(Map.entry(\"*\", -1000L))));\n    }\n\n    @Test\n    public void testLatencyAndSpeed() throws Exception {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", 1000L)), Map.ofEntries(Map.entry(\"put\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n\n        long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob));\n        assertThat(timeTaken).isGreaterThanOrEqualTo(2000L);\n    }\n\n    @Test\n    public void testLatencyAndSpeedWithEmptyContent() throws Exception {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"put\", 1000L)), Map.ofEntries(Map.entry(\"put\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 0);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n\n        long timeTaken = time(() -> latencyBlobStore.putBlob(containerName, blob));\n        assertThat(timeTaken).isGreaterThanOrEqualTo(1000L);\n    }\n\n    @Test\n    public void testMultipleOperations() throws Exception {\n        BlobStore latencyBlobStore = 
LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", 1000L)), Map.ofEntries(Map.entry(\"get\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n\n        long timeTaken = time(() -> {\n            latencyBlobStore.putBlob(containerName, blob);\n            consume(latencyBlobStore.getBlob(containerName, blobName));\n        });\n        assertThat(timeTaken).isGreaterThanOrEqualTo(3000L);\n    }\n\n    @Test\n    public void testSimultaneousOperations() throws Exception {\n        BlobStore latencyBlobStore = LatencyBlobStore.newLatencyBlobStore(delegate,\n                Map.ofEntries(Map.entry(\"*\", 1000L)), Map.ofEntries(Map.entry(\"get\", 1L)));\n\n        String blobName = createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Payload payload = Payloads.newByteSourcePayload(content);\n        payload.getContentMetadata().setContentLength(content.size());\n        Blob blob = latencyBlobStore.blobBuilder(blobName).payload(payload).build();\n        latencyBlobStore.putBlob(containerName, blob);\n\n        ExecutorService executorService = null;\n        try {\n            executorService = Executors.newFixedThreadPool(5);\n\n            List<Callable<Object>> tasks = new ArrayList<>();\n            for (int i = 0; i < 5; i++) {\n                tasks.add(Executors.callable(() -> consume(latencyBlobStore.getBlob(containerName, blobName))));\n            }\n\n            final ExecutorService service = executorService;\n            long timeTaken = time(() -> {\n                try {\n                    service.invokeAll(tasks);\n                } catch 
(Exception e) {\n                    // Ignore\n                }\n            });\n            assertThat(timeTaken).isGreaterThanOrEqualTo(2000L);\n        } finally {\n            if (executorService != null) {\n                executorService.shutdown();\n            }\n        }\n    }\n\n    private static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static String createRandomBlobName() {\n        return \"blob-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static long time(Runnable runnable) {\n        long startTime = System.currentTimeMillis();\n        runnable.run();\n        return System.currentTimeMillis() - startTime;\n    }\n\n    private static void consume(Blob blob) {\n        try (InputStream stream = blob.getPayload().openStream()) {\n            stream.readAllBytes();\n        } catch (IOException ioe) {\n            // Ignore\n        }\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/NoCacheBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.time.Instant;\nimport java.util.Date;\n\nimport org.jclouds.blobstore.options.GetOptions;\nimport org.junit.jupiter.api.Test;\n\npublic final class NoCacheBlobStoreTest {\n    @Test\n    public void testResetCacheHeadersKeepRange() {\n        var options = GetOptions.Builder.range(1, 5);\n        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);\n        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());\n    }\n\n    @Test\n    public void testResetCacheHeadersKeepTail() {\n        var options = GetOptions.Builder.range(1, 5).tail(3).startAt(10);\n        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);\n        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());\n    }\n\n    @Test\n    public void testResetCacheHeadersRangeDropCache() {\n        var options = GetOptions.Builder\n                .range(1, 5)\n                .tail(3)\n                .startAt(10)\n                .ifETagDoesntMatch(\"abc\")\n                .ifModifiedSince(Date.from(Instant.EPOCH));\n        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);\n        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());\n        
assertThat(optionsResult.getIfNoneMatch()).isEqualTo(null);\n        assertThat(optionsResult.getIfModifiedSince()).isEqualTo((Date) null);\n    }\n\n    @Test\n    public void testResetCacheHeadersNoRange() {\n        var options = GetOptions.Builder\n                .ifETagMatches(\"abc\")\n                .ifUnmodifiedSince(Date.from(Instant.EPOCH));\n        var optionsResult = NoCacheBlobStore.resetCacheHeaders(options);\n        assertThat(optionsResult.getRanges()).isEqualTo(options.getRanges());\n        assertThat(optionsResult.getIfMatch()).isEqualTo(null);\n        assertThat(optionsResult.getIfUnmodifiedSince()).isEqualTo((Date) null);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/NullBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.google.common.io.ByteSource;\nimport com.google.common.net.MediaType;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.io.ContentMetadata;\nimport org.jclouds.io.Payload;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class NullBlobStoreTest {\n    private static final ByteSource BYTE_SOURCE =\n            TestUtils.randomByteSource().slice(0, 1024);\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private String containerName;\n    private BlobStore 
nullBlobStore;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = createRandomContainerName();\n\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n\n        nullBlobStore = NullBlobStore.newNullBlobStore(blobStore);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testCreateBlobGetBlob() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(nullBlobStore, blobName);\n        nullBlobStore.putBlob(containerName, blob);\n\n        blob = nullBlobStore.getBlob(containerName, blobName);\n        validateBlobMetadata(blob.getMetadata());\n\n        // content differs, only compare length\n        try (InputStream actual = blob.getPayload().openStream();\n                InputStream expected = BYTE_SOURCE.openStream()) {\n            long actualLength = actual.transferTo(\n                    OutputStream.nullOutputStream());\n            long expectedLength = expected.transferTo(\n                    OutputStream.nullOutputStream());\n            assertThat(actualLength).isEqualTo(expectedLength);\n        }\n\n        PageSet<? 
extends StorageMetadata> pageSet = nullBlobStore.list(\n                containerName);\n        assertThat(pageSet).hasSize(1);\n        StorageMetadata sm = pageSet.iterator().next();\n        assertThat(sm.getName()).isEqualTo(blobName);\n        assertThat(sm.getSize()).isEqualTo(0);\n    }\n\n    @Test\n    public void testCreateBlobBlobMetadata() throws Exception {\n        String blobName = createRandomBlobName();\n        Blob blob = makeBlob(nullBlobStore, blobName);\n        nullBlobStore.putBlob(containerName, blob);\n        BlobMetadata metadata = nullBlobStore.blobMetadata(containerName,\n                blobName);\n        validateBlobMetadata(metadata);\n    }\n\n    @Test\n    public void testCreateMultipartBlobGetBlob() throws Exception {\n        String blobName = \"multipart-upload\";\n        BlobMetadata blobMetadata = makeBlob(nullBlobStore, blobName)\n                .getMetadata();\n        MultipartUpload mpu = nullBlobStore.initiateMultipartUpload(\n                containerName, blobMetadata, new PutOptions());\n\n        ByteSource byteSource = TestUtils.randomByteSource().slice(\n                0, nullBlobStore.getMinimumMultipartPartSize() + 1);\n        ByteSource byteSource1 = byteSource.slice(\n                0, nullBlobStore.getMinimumMultipartPartSize());\n        ByteSource byteSource2 = byteSource.slice(\n                nullBlobStore.getMinimumMultipartPartSize(), 1);\n        Payload payload1 = Payloads.newByteSourcePayload(byteSource1);\n        Payload payload2 = Payloads.newByteSourcePayload(byteSource2);\n        payload1.getContentMetadata().setContentLength(byteSource1.size());\n        payload2.getContentMetadata().setContentLength(byteSource2.size());\n        MultipartPart part1 = nullBlobStore.uploadMultipartPart(mpu, 1,\n                payload1);\n        MultipartPart part2 = nullBlobStore.uploadMultipartPart(mpu, 2,\n                payload2);\n\n        List<MultipartPart> parts = 
nullBlobStore.listMultipartUpload(mpu);\n        assertThat(parts.get(0).partNumber()).isEqualTo(1);\n        assertThat(parts.get(0).partSize()).isEqualTo(byteSource1.size());\n        assertThat(parts.get(0).partETag()).isEqualTo(part1.partETag());\n        assertThat(parts.get(1).partNumber()).isEqualTo(2);\n        assertThat(parts.get(1).partSize()).isEqualTo(byteSource2.size());\n        assertThat(parts.get(1).partETag()).isEqualTo(part2.partETag());\n\n        assertThat(nullBlobStore.listMultipartUpload(mpu)).hasSize(2);\n\n        nullBlobStore.completeMultipartUpload(mpu, parts);\n\n        Blob newBlob = nullBlobStore.getBlob(containerName, blobName);\n        validateBlobMetadata(newBlob.getMetadata());\n\n        // content differs, only compare length\n        try (InputStream actual = newBlob.getPayload().openStream();\n                InputStream expected = byteSource.openStream()) {\n            long actualLength = actual.transferTo(\n                    OutputStream.nullOutputStream());\n            long expectedLength = expected.transferTo(\n                    OutputStream.nullOutputStream());\n            assertThat(actualLength).isEqualTo(expectedLength);\n        }\n\n        nullBlobStore.removeBlob(containerName, blobName);\n        assertThat(nullBlobStore.list(containerName)).isEmpty();\n    }\n\n    private static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static String createRandomBlobName() {\n        return \"blob-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    private static Blob makeBlob(BlobStore blobStore, String blobName)\n            throws IOException {\n        return blobStore.blobBuilder(blobName)\n                .payload(BYTE_SOURCE)\n                .contentDisposition(\"attachment; filename=foo.mp4\")\n                .contentEncoding(\"compress\")\n                .contentLength(BYTE_SOURCE.size())\n                
.contentType(MediaType.MP4_AUDIO)\n                .contentMD5(BYTE_SOURCE.hash(TestUtils.MD5))\n                .userMetadata(Map.of(\"key\", \"value\"))\n                .build();\n    }\n\n    private static void validateBlobMetadata(BlobMetadata metadata)\n            throws IOException {\n        assertThat(metadata).isNotNull();\n\n        ContentMetadata contentMetadata = metadata.getContentMetadata();\n        assertThat(contentMetadata.getContentDisposition())\n                .isEqualTo(\"attachment; filename=foo.mp4\");\n        assertThat(contentMetadata.getContentEncoding())\n                .isEqualTo(\"compress\");\n        assertThat(contentMetadata.getContentType())\n                .isEqualTo(MediaType.MP4_AUDIO.toString());\n\n        assertThat(metadata.getUserMetadata())\n                .isEqualTo(Map.of(\"key\", \"value\"));\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/PrefixBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\n\nimport com.google.common.collect.ImmutableList;\nimport com.google.common.io.ByteSource;\n\nimport org.assertj.core.api.Assertions;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.MultipartPart;\nimport org.jclouds.blobstore.domain.MultipartUpload;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class PrefixBlobStoreTest {\n    private String containerName;\n    private String prefix;\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private BlobStore prefixBlobStore;\n\n    @Before\n    public void setUp() {\n        containerName = TestUtils.createRandomContainerName();\n        prefix = \"forward-prefix/\";\n        context = 
ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n        prefixBlobStore = PrefixBlobStore.newPrefixBlobStore(\n                blobStore, Map.of(containerName, prefix));\n    }\n\n    @After\n    public void tearDown() {\n        if (context != null) {\n            blobStore.clearContainer(containerName);\n            blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testPutAndGetBlob() throws IOException {\n        ByteSource content = TestUtils.randomByteSource().slice(0, 256);\n        Blob blob = prefixBlobStore.blobBuilder(\"object.txt\")\n                .payload(content)\n                .build();\n        prefixBlobStore.putBlob(containerName, blob);\n\n        assertThat(blobStore.blobExists(containerName,\n                prefix + \"object.txt\")).isTrue();\n\n        Blob stored = prefixBlobStore.getBlob(containerName, \"object.txt\");\n        assertThat(stored).isNotNull();\n        assertThat(stored.getMetadata().getName()).isEqualTo(\"object.txt\");\n        try (InputStream expected = content.openStream();\n             InputStream actual = stored.getPayload().openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testListTrimsPrefix() throws IOException {\n        ByteSource content = TestUtils.randomByteSource().slice(0, 64);\n        prefixBlobStore.putBlob(containerName, prefixBlobStore.blobBuilder(\n                \"file-one.txt\").payload(content).build());\n        blobStore.putBlob(containerName, blobStore.blobBuilder(\n                prefix + \"file-two.txt\").payload(content).build());\n        
blobStore.putBlob(containerName, blobStore.blobBuilder(\n                \"outside.txt\").payload(content).build());\n\n        PageSet<? extends StorageMetadata> listing =\n                prefixBlobStore.list(containerName);\n        List<String> names = ImmutableList.copyOf(listing).stream()\n                .map(StorageMetadata::getName)\n                .collect(ImmutableList.toImmutableList());\n        assertThat(names).containsExactlyInAnyOrder(\n                \"file-one.txt\", \"file-two.txt\");\n        assertThat(listing.getNextMarker()).isNull();\n    }\n\n    @Test\n    public void testClearContainerKeepsOtherObjects() {\n        ByteSource content = TestUtils.randomByteSource().slice(0, 32);\n        prefixBlobStore.putBlob(containerName, prefixBlobStore.blobBuilder(\n                \"inside.txt\").payload(content).build());\n        blobStore.putBlob(containerName, blobStore.blobBuilder(\n                \"outside.txt\").payload(content).build());\n\n        prefixBlobStore.clearContainer(containerName);\n\n        assertThat(blobStore.blobExists(containerName,\n                prefix + \"inside.txt\")).isFalse();\n        assertThat(blobStore.blobExists(containerName,\n                \"outside.txt\")).isTrue();\n    }\n\n    @Test\n    public void testMultipartUploadUsesPrefix() throws IOException {\n        ByteSource content = TestUtils.randomByteSource().slice(0, 512);\n        Blob blob = prefixBlobStore.blobBuilder(\"archive.bin\").build();\n        MultipartUpload mpu = prefixBlobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), PutOptions.NONE);\n        assertThat(mpu.containerName()).isEqualTo(containerName);\n        assertThat(mpu.blobName()).isEqualTo(\"archive.bin\");\n\n        MultipartPart part = prefixBlobStore.uploadMultipartPart(\n                mpu, 1, Payloads.newPayload(content));\n        prefixBlobStore.completeMultipartUpload(mpu, List.of(part));\n\n        
assertThat(blobStore.blobExists(containerName,\n                prefix + \"archive.bin\")).isTrue();\n    }\n\n    @Test\n    public void testListMultipartUploadsTrimsPrefix() {\n        Blob blob = prefixBlobStore.blobBuilder(\"pending.bin\").build();\n        MultipartUpload mpu = prefixBlobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), PutOptions.NONE);\n\n        try {\n            List<MultipartUpload> uploads =\n                    prefixBlobStore.listMultipartUploads(containerName);\n            assertThat(uploads).hasSize(1);\n            assertThat(uploads.get(0).blobName()).isEqualTo(\"pending.bin\");\n        } finally {\n            prefixBlobStore.abortMultipartUpload(mpu);\n        }\n    }\n\n    @Test\n    public void testParseRejectsEmptyPrefix() {\n        var properties = new Properties();\n        properties.setProperty(String.format(\"%s.bucket\",\n                S3ProxyConstants.PROPERTY_PREFIX_BLOBSTORE), \"\");\n\n        try {\n            PrefixBlobStore.parsePrefixes(properties);\n            Assertions.failBecauseExceptionWasNotThrown(\n                    IllegalArgumentException.class);\n        } catch (IllegalArgumentException exc) {\n            assertThat(exc.getMessage()).isEqualTo(\n                    \"Prefix for bucket bucket must not be empty\");\n        }\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/ReadOnlyBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\nimport java.util.Random;\n\nimport org.assertj.core.api.Fail;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class ReadOnlyBlobStoreTest {\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private String containerName;\n    private BlobStore readOnlyBlobStore;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = createRandomContainerName();\n\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n        readOnlyBlobStore = ReadOnlyBlobStore.newReadOnlyBlobStore(blobStore);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n         
   blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testContainerExists() throws Exception {\n        assertThat(readOnlyBlobStore.containerExists(containerName)).isTrue();\n        assertThat(readOnlyBlobStore.containerExists(\n                containerName + \"-fake\")).isFalse();\n    }\n\n    @Test\n    public void testPutBlob() throws Exception {\n        try {\n            readOnlyBlobStore.putBlob(containerName, null);\n            Fail.failBecauseExceptionWasNotThrown(\n                    UnsupportedOperationException.class);\n        } catch (UnsupportedOperationException ne) {\n            // expected\n        }\n    }\n\n    @Test\n    public void testPutBlobOptions() throws Exception {\n        try {\n            readOnlyBlobStore.putBlob(containerName, null, new PutOptions());\n            Fail.failBecauseExceptionWasNotThrown(\n                    UnsupportedOperationException.class);\n        } catch (UnsupportedOperationException ne) {\n            // expected\n        }\n    }\n\n    private static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/RegexBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.util.AbstractMap.SimpleEntry;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Random;\nimport java.util.regex.Pattern;\n\nimport com.google.common.hash.Hashing;\nimport com.google.common.io.ByteSource;\n\nimport org.assertj.core.api.Assertions;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.BlobMetadata;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class RegexBlobStoreTest {\n    private BlobStoreContext context;\n    private BlobStore delegate;\n    private String containerName;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = createRandomContainerName();\n\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        delegate = 
context.getBlobStore();\n        delegate.createContainerInLocation(null, containerName);\n\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            delegate.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testRemoveSomeCharsFromName() throws IOException {\n        var regexes = List.<Map.Entry<Pattern, String>>of(\n                new SimpleEntry<Pattern, String>(\n                        Pattern.compile(\"[^a-zA-Z0-9/_.]\"), \"_\"));\n        BlobStore regexBlobStore = RegexBlobStore.newRegexBlobStore(delegate,\n                regexes);\n\n        String initialBlobName = \"test/remove:badchars-folder/blob.txt\";\n        String targetBlobName = \"test/remove_badchars_folder/blob.txt\";\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        @SuppressWarnings(\"deprecation\")\n        String contentHash = Hashing.md5().hashBytes(content.read()).toString();\n        Blob blob = regexBlobStore.blobBuilder(initialBlobName).payload(\n                content).build();\n\n        String eTag = regexBlobStore.putBlob(containerName, blob);\n        assertThat(eTag).isEqualTo(contentHash);\n\n        BlobMetadata blobMetadata = regexBlobStore.blobMetadata(\n                containerName, targetBlobName);\n\n        assertThat(blobMetadata.getETag()).isEqualTo(contentHash);\n        blob = regexBlobStore.getBlob(containerName, targetBlobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n\n        blob = regexBlobStore.getBlob(containerName, initialBlobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n  
  public void testParseMatchWithoutReplace() {\n        var properties = new Properties();\n        properties.put(\n                String.format(\"%s.%s.sample1\",\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE,\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH),\n                \"test\");\n        properties.put(\n                String.format(\"%s.%s.sample2\",\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE,\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_MATCH),\n                \"test\");\n        properties.put(\n                String.format(\"%s.%s.sample1\",\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE,\n                        S3ProxyConstants.PROPERTY_REGEX_BLOBSTORE_REPLACE),\n                \"test\");\n\n        try {\n            RegexBlobStore.parseRegexs(properties);\n            Assertions.failBecauseExceptionWasNotThrown(\n                    IllegalArgumentException.class);\n        } catch (IllegalArgumentException exc) {\n            assertThat(exc.getMessage()).isEqualTo(\n                    \"Regex sample2 has no replace property associated\");\n        }\n    }\n\n    private static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/ShardedBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.io.InputStream;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Map;\n\nimport com.google.common.io.ByteSource;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Blob;\nimport org.jclouds.blobstore.domain.PageSet;\nimport org.jclouds.blobstore.domain.StorageMetadata;\nimport org.jclouds.blobstore.options.CopyOptions;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\npublic final class ShardedBlobStoreTest {\n    private int shards;\n    private String prefix;\n    private String containerName;\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private BlobStore shardedBlobStore;\n    private List<String> createdContainers;\n    private Map<String, String> prefixesMap;\n\n    @Before\n    public void setUp() {\n        containerName = TestUtils.createRandomContainerName();\n        shards = 10;\n        prefix = TestUtils.createRandomContainerName();\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", 
\"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        var shardsMap = Map.of(containerName, shards);\n        prefixesMap = Map.of(containerName, prefix);\n        shardedBlobStore = ShardedBlobStore.newShardedBlobStore(\n                blobStore, shardsMap, prefixesMap);\n        createdContainers = new ArrayList<>();\n    }\n\n    @After\n    public void tearDown() {\n        if (this.context != null) {\n            for (String container : this.createdContainers) {\n                blobStore.deleteContainer(container);\n            }\n            context.close();\n        }\n    }\n\n    private void createContainer(String container) {\n        String prefix = this.prefixesMap.get(container);\n        if (prefix != null) {\n            for (int n = 0; n < this.shards; ++n) {\n                this.createdContainers.add(\n                        String.format(\"%s-%d\", this.prefix, n));\n            }\n        } else {\n            this.createdContainers.add(container);\n        }\n        assertThat(shardedBlobStore.createContainerInLocation(\n                null, container)).isTrue();\n    }\n\n    public int countShards() {\n        PageSet<? 
extends StorageMetadata> listing = blobStore.list();\n        int blobStoreShards = 0;\n        for (StorageMetadata entry: listing) {\n            if (entry.getName().startsWith(prefix)) {\n                blobStoreShards++;\n            }\n        }\n        return blobStoreShards;\n    }\n\n    @Test\n    public void testCreateContainer() {\n        this.createContainer(containerName);\n        assertThat(blobStore.containerExists(containerName)).isFalse();\n        assertThat(this.countShards()).isEqualTo(this.shards);\n    }\n\n    @Test\n    public void testDeleteContainer() {\n        this.createContainer(containerName);\n        assertThat(this.countShards()).isEqualTo(this.shards);\n        assertThat(shardedBlobStore.deleteContainerIfEmpty(containerName))\n                .isTrue();\n        assertThat(this.countShards()).isZero();\n    }\n\n    @Test\n    public void testPutBlob() throws Exception {\n        String blobName = \"foo\";\n        String blobName2 = \"bar\";\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        ByteSource content2 = TestUtils.randomByteSource().slice(1024, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        Blob blob2 = shardedBlobStore.blobBuilder(blobName2).payload(content2)\n                .build();\n\n        createContainer(containerName);\n        shardedBlobStore.putBlob(containerName, blob);\n        shardedBlobStore.putBlob(containerName, blob2);\n\n        blob = shardedBlobStore.getBlob(containerName, blobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n        blob2 = shardedBlobStore.getBlob(containerName, blobName2);\n        try (InputStream actual = blob2.getPayload().openStream();\n             InputStream expected = content2.openStream()) {\n            
assertThat(actual).hasSameContentAs(expected);\n        }\n\n        String blobContainer = null;\n        String blob2Container = null;\n        for (int i = 0; i < shards; i++) {\n            String shard = String.format(\"%s-%d\", prefix, i);\n            for (StorageMetadata entry : blobStore.list(shard)) {\n                if (entry.getName().equals(blobName)) {\n                    blobContainer = shard;\n                }\n                if (entry.getName().equals(blobName2)) {\n                    blob2Container = shard;\n                }\n            }\n        }\n        assertThat(blobContainer).isNotNull();\n        assertThat(blob2Container).isNotNull();\n        assertThat(blobContainer).isNotEqualTo(blob2Container);\n    }\n\n    @Test\n    public void testDeleteBlob() {\n        String blobName = TestUtils.createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        this.createContainer(containerName);\n        shardedBlobStore.putBlob(containerName, blob);\n        assertThat(shardedBlobStore.blobExists(containerName, blobName))\n                .isTrue();\n        shardedBlobStore.removeBlob(containerName, blobName);\n        assertThat(shardedBlobStore.blobExists(containerName, blobName))\n                .isFalse();\n    }\n\n    @Test\n    public void testPutBlobUnsharded() throws Exception {\n        String unshardedContainer = TestUtils.createRandomContainerName();\n        String blobName = TestUtils.createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        this.createContainer(unshardedContainer);\n        shardedBlobStore.putBlob(unshardedContainer, blob);\n        blob = blobStore.getBlob(unshardedContainer, blobName);\n        try 
(InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testCopyBlob() throws Exception {\n        String blobName = TestUtils.createRandomBlobName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        this.createContainer(containerName);\n        shardedBlobStore.putBlob(containerName, blob);\n        String copyBlobName = TestUtils.createRandomBlobName();\n        shardedBlobStore.copyBlob(\n                containerName, blobName, containerName, copyBlobName,\n                CopyOptions.NONE);\n        blob = shardedBlobStore.getBlob(containerName, copyBlobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testCopyBlobUnshardedToSharded() throws Exception {\n        String blobName = TestUtils.createRandomBlobName();\n        String unshardedContainer = TestUtils.createRandomContainerName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        this.createContainer(containerName);\n        this.createContainer(unshardedContainer);\n        shardedBlobStore.putBlob(unshardedContainer, blob);\n        shardedBlobStore.copyBlob(\n                unshardedContainer, blobName, containerName, blobName,\n                CopyOptions.NONE);\n        blob = shardedBlobStore.getBlob(containerName, blobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            
assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n\n    @Test\n    public void testCopyBlobShardedToUnsharded() throws Exception {\n        String blobName = TestUtils.createRandomBlobName();\n        String unshardedContainer = TestUtils.createRandomContainerName();\n        ByteSource content = TestUtils.randomByteSource().slice(0, 1024);\n        Blob blob = shardedBlobStore.blobBuilder(blobName).payload(content)\n                .build();\n        this.createContainer(containerName);\n        this.createContainer(unshardedContainer);\n        shardedBlobStore.putBlob(containerName, blob);\n        shardedBlobStore.copyBlob(\n                containerName, blobName, unshardedContainer, blobName,\n                CopyOptions.NONE);\n        blob = shardedBlobStore.getBlob(unshardedContainer, blobName);\n        try (InputStream actual = blob.getPayload().openStream();\n             InputStream expected = content.openStream()) {\n            assertThat(actual).hasSameContentAs(expected);\n        }\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/TestUtils.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.net.URI;\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.FileSystems;\nimport java.nio.file.Files;\nimport java.util.List;\nimport java.util.Properties;\nimport java.util.Random;\n\nimport com.google.common.base.Strings;\nimport com.google.common.hash.HashFunction;\nimport com.google.common.hash.Hashing;\nimport com.google.common.io.ByteSource;\nimport com.google.common.io.MoreFiles;\nimport com.google.common.io.Resources;\n\nimport org.eclipse.jetty.util.component.AbstractLifeCycle;\nimport org.jclouds.Constants;\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.JcloudsVersion;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\n\nfinal class TestUtils {\n    @SuppressWarnings(\"deprecation\")\n    static final HashFunction MD5 = Hashing.md5();\n\n    private TestUtils() {\n        throw new AssertionError(\"intentionally unimplemented\");\n    }\n\n    static ByteSource randomByteSource() {\n        return randomByteSource(0);\n    }\n\n    static ByteSource randomByteSource(long seed) {\n        return new RandomByteSource(seed);\n    }\n\n    private static final class RandomByteSource extends ByteSource 
{\n        private final long seed;\n\n        RandomByteSource(long seed) {\n            this.seed = seed;\n        }\n\n        @Override\n        public InputStream openStream() {\n            return new RandomInputStream(seed);\n        }\n    }\n\n    private static final class RandomInputStream extends InputStream {\n        private final Random random;\n        private boolean closed;\n\n        RandomInputStream(long seed) {\n            this.random = new Random(seed);\n        }\n\n        @Override\n        public synchronized int read() throws IOException {\n            if (closed) {\n                throw new IOException(\"Stream already closed\");\n            }\n            // return value between 0 and 255\n            return random.nextInt() & 0xff;\n        }\n\n        @Override\n        public synchronized int read(byte[] b) throws IOException {\n            return read(b, 0, b.length);\n        }\n\n        @Override\n        public synchronized int read(byte[] b, int off, int len)\n                throws IOException {\n            for (int i = 0; i < len; ++i) {\n                b[off + i] = (byte) read();\n            }\n            return len;\n        }\n\n        @Override\n        public void close() throws IOException {\n            super.close();\n            closed = true;\n        }\n    }\n\n    static final class S3ProxyLaunchInfo {\n        private S3Proxy s3Proxy;\n        private final Properties properties = new Properties();\n        private String s3Identity;\n        private String s3Credential;\n        private BlobStore blobStore;\n        private URI endpoint;\n        private URI secureEndpoint;\n        private String servicePath;\n\n        S3Proxy getS3Proxy() {\n            return s3Proxy;\n        }\n\n        Properties getProperties() {\n            return properties;\n        }\n\n        String getS3Identity() {\n            return s3Identity;\n        }\n\n        String getS3Credential() {\n            return 
s3Credential;\n        }\n\n        String getServicePath() {\n            return servicePath;\n        }\n\n        BlobStore getBlobStore() {\n            return blobStore;\n        }\n\n        URI getEndpoint() {\n            return endpoint;\n        }\n\n        URI getSecureEndpoint() {\n            return secureEndpoint;\n        }\n    }\n\n    static S3ProxyLaunchInfo startS3Proxy(String configFile) throws Exception {\n        var info = new S3ProxyLaunchInfo();\n\n        try (InputStream is = Resources.asByteSource(Resources.getResource(\n                configFile)).openStream()) {\n            info.getProperties().load(is);\n        }\n\n        String provider = info.getProperties().getProperty(\n                Constants.PROPERTY_PROVIDER);\n        String identity = info.getProperties().getProperty(\n                Constants.PROPERTY_IDENTITY);\n        String credential = info.getProperties().getProperty(\n                Constants.PROPERTY_CREDENTIAL);\n        if (provider.equals(\"google-cloud-storage\") ||\n                provider.equals(\"google-cloud-storage-sdk\")) {\n            if (credential != null && !credential.isEmpty()) {\n                var path = FileSystems.getDefault().getPath(credential);\n                if (Files.exists(path)) {\n                    credential = MoreFiles.asCharSource(path,\n                            StandardCharsets.UTF_8).read();\n                }\n            }\n            identity = Strings.nullToEmpty(identity);\n            credential = Strings.nullToEmpty(credential);\n            info.getProperties().remove(Constants.PROPERTY_CREDENTIAL);\n        }\n        String endpoint = info.getProperties().getProperty(\n                Constants.PROPERTY_ENDPOINT);\n\n        ContextBuilder builder = ContextBuilder\n                .newBuilder(provider)\n                .credentials(identity, credential)\n                .modules(List.of(new SLF4JLoggingModule()))\n                
.overrides(info.getProperties());\n        if (!Strings.isNullOrEmpty(endpoint)) {\n            builder.endpoint(endpoint);\n        }\n        BlobStoreContext context = builder.build(BlobStoreContext.class);\n        info.blobStore = context.getBlobStore();\n\n        String encrypted = info.getProperties().getProperty(\n                S3ProxyConstants.PROPERTY_ENCRYPTED_BLOBSTORE);\n        if (encrypted != null && encrypted.equals(\"true\")) {\n            info.blobStore =\n                EncryptedBlobStore.newEncryptedBlobStore(info.blobStore,\n                    info.getProperties());\n        }\n\n        S3Proxy.Builder s3ProxyBuilder = S3Proxy.Builder.fromProperties(\n                info.getProperties());\n        s3ProxyBuilder.blobStore(info.blobStore);\n        info.endpoint = s3ProxyBuilder.getEndpoint();\n        info.secureEndpoint = s3ProxyBuilder.getSecureEndpoint();\n        info.s3Identity = s3ProxyBuilder.getIdentity();\n        info.s3Credential = s3ProxyBuilder.getCredential();\n        info.servicePath = s3ProxyBuilder.getServicePath();\n        info.getProperties().setProperty(Constants.PROPERTY_USER_AGENT,\n                String.format(\"s3proxy/%s jclouds/%s java/%s\",\n                        TestUtils.class.getPackage().getImplementationVersion(),\n                        JcloudsVersion.get(),\n                        System.getProperty(\"java.version\")));\n\n        // resolve relative path for tests\n        String keyStorePath = info.getProperties().getProperty(\n                S3ProxyConstants.PROPERTY_KEYSTORE_PATH);\n        String keyStorePassword = info.getProperties().getProperty(\n                S3ProxyConstants.PROPERTY_KEYSTORE_PASSWORD);\n        if (keyStorePath != null || keyStorePassword != null) {\n            s3ProxyBuilder.keyStore(\n                    Resources.getResource(keyStorePath).toString(),\n                    keyStorePassword);\n        }\n        info.s3Proxy = s3ProxyBuilder.build();\n        
info.s3Proxy.start();\n        while (!info.s3Proxy.getState().equals(AbstractLifeCycle.STARTED)) {\n            Thread.sleep(1);\n        }\n\n        // reset endpoint to handle zero port\n        info.endpoint = new URI(info.endpoint.getScheme(),\n                info.endpoint.getUserInfo(), info.endpoint.getHost(),\n                info.s3Proxy.getPort(), info.endpoint.getPath(),\n                info.endpoint.getQuery(), info.endpoint.getFragment());\n        if (info.secureEndpoint != null) {\n            info.secureEndpoint = new URI(info.secureEndpoint.getScheme(),\n                    info.secureEndpoint.getUserInfo(),\n                    info.secureEndpoint.getHost(),\n                    info.s3Proxy.getSecurePort(),\n                    info.secureEndpoint.getPath(),\n                    info.secureEndpoint.getQuery(),\n                    info.secureEndpoint.getFragment());\n        }\n\n        return info;\n    }\n\n    static String createRandomContainerName() {\n        return \"container-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n\n    static String createRandomBlobName() {\n        return \"blob-\" + new Random().nextInt(Integer.MAX_VALUE);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/TierBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.domain.Tier;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.io.Payloads;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.jclouds.s3.domain.ObjectMetadata.StorageClass;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\n@SuppressWarnings(\"UnstableApiUsage\")\npublic final class TierBlobStoreTest {\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private String containerName;\n    private BlobStore tierBlobStore;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = TestUtils.createRandomContainerName();\n\n        //noinspection UnstableApiUsage\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n\n        tierBlobStore = 
StorageClassBlobStore.newStorageClassBlobStore(\n                blobStore, StorageClass.DEEP_ARCHIVE.toString());\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testPutNewBlob() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = tierBlobStore.blobBuilder(blobName).payload(content).build();\n        tierBlobStore.putBlob(containerName, blob);\n\n        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);\n        assertThat(blobMetadata.getTier()).isEqualTo(Tier.ARCHIVE);\n    }\n\n    @Test\n    public void testGetExistingBlob() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = blobStore.blobBuilder(blobName).payload(content).build();\n        blobStore.putBlob(containerName, blob);\n\n        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);\n        assertThat(blobMetadata.getTier()).isEqualTo(Tier.STANDARD);\n    }\n\n    @Test\n    public void testPutNewMpu() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = tierBlobStore.blobBuilder(blobName).payload(content).build();\n\n        var mpu = tierBlobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), new PutOptions());\n\n        var payload = Payloads.newByteSourcePayload(content);\n        tierBlobStore.uploadMultipartPart(mpu, 1, payload);\n\n        var parts = tierBlobStore.listMultipartUpload(mpu);\n        tierBlobStore.completeMultipartUpload(mpu, parts);\n\n        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);\n        
assertThat(blobMetadata.getTier()).isEqualTo(Tier.ARCHIVE);\n    }\n\n    @Test\n    public void testGetExistingMpu() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = blobStore.blobBuilder(blobName).payload(content).build();\n\n        var mpu = blobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), new PutOptions());\n\n        var payload = Payloads.newByteSourcePayload(content);\n        blobStore.uploadMultipartPart(mpu, 1, payload);\n\n        var parts = blobStore.listMultipartUpload(mpu);\n        blobStore.completeMultipartUpload(mpu, parts);\n\n        var blobMetadata = tierBlobStore.blobMetadata(containerName, blobName);\n        assertThat(blobMetadata.getTier()).isEqualTo(Tier.STANDARD);\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/UserMetadataReplacerBlobStoreTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\nimport java.util.Map;\n\nimport org.jclouds.ContextBuilder;\nimport org.jclouds.blobstore.BlobStore;\nimport org.jclouds.blobstore.BlobStoreContext;\nimport org.jclouds.blobstore.options.PutOptions;\nimport org.jclouds.logging.slf4j.config.SLF4JLoggingModule;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\n@SuppressWarnings(\"UnstableApiUsage\")\npublic final class UserMetadataReplacerBlobStoreTest {\n    private BlobStoreContext context;\n    private BlobStore blobStore;\n    private String containerName;\n    // TODO: better name?\n    private BlobStore userMetadataReplacerBlobStore;\n\n    @Before\n    public void setUp() throws Exception {\n        containerName = TestUtils.createRandomContainerName();\n\n        //noinspection UnstableApiUsage\n        context = ContextBuilder\n                .newBuilder(\"transient\")\n                .credentials(\"identity\", \"credential\")\n                .modules(List.of(new SLF4JLoggingModule()))\n                .build(BlobStoreContext.class);\n        blobStore = context.getBlobStore();\n        blobStore.createContainerInLocation(null, containerName);\n\n        userMetadataReplacerBlobStore = UserMetadataReplacerBlobStore\n           
     .newUserMetadataReplacerBlobStore(blobStore, \"-\", \"_\");\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        if (context != null) {\n            blobStore.deleteContainer(containerName);\n            context.close();\n        }\n    }\n\n    @Test\n    public void testPutNewBlob() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = userMetadataReplacerBlobStore.blobBuilder(blobName)\n                .payload(content)\n                .userMetadata(Map.of(\"my-key\", \"my-value-\"))\n                .build();\n        userMetadataReplacerBlobStore.putBlob(containerName, blob);\n\n        // check underlying blobStore\n        var mutableBlobMetadata = blobStore.getBlob(containerName, blobName)\n                .getMetadata();\n        var userMetadata = mutableBlobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        var entry = userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my_key\");\n        assertThat(entry.getValue()).isEqualTo(\"my_value_\");\n\n        // check getBlob\n        mutableBlobMetadata = userMetadataReplacerBlobStore.getBlob(\n                containerName, blobName).getMetadata();\n        userMetadata = mutableBlobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        entry = userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my-key\");\n        assertThat(entry.getValue()).isEqualTo(\"my-value-\");\n\n        // check blobMetadata\n        var blobMetadata = userMetadataReplacerBlobStore.blobMetadata(\n                containerName, blobName);\n        userMetadata = blobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        entry = userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my-key\");\n        
assertThat(entry.getValue()).isEqualTo(\"my-value-\");\n    }\n\n    @Test\n    public void testPutNewMultipartBlob() {\n        var blobName = TestUtils.createRandomBlobName();\n        var content = TestUtils.randomByteSource().slice(0, 1024);\n        var blob = userMetadataReplacerBlobStore.blobBuilder(blobName)\n                .payload(content)\n                .userMetadata(Map.of(\"my-key\", \"my-value-\"))\n                .build();\n        var mpu = userMetadataReplacerBlobStore.initiateMultipartUpload(\n                containerName, blob.getMetadata(), new PutOptions());\n        var part = userMetadataReplacerBlobStore.uploadMultipartPart(\n                mpu, 1, blob.getPayload());\n        userMetadataReplacerBlobStore.completeMultipartUpload(\n                mpu, List.of(part));\n\n        // check underlying blobStore\n        var mutableBlobMetadata = blobStore.getBlob(containerName, blobName)\n                .getMetadata();\n        var userMetadata = mutableBlobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        var entry = userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my_key\");\n        assertThat(entry.getValue()).isEqualTo(\"my_value_\");\n\n        // check getBlob\n        mutableBlobMetadata = userMetadataReplacerBlobStore.getBlob(\n                containerName, blobName).getMetadata();\n        userMetadata = mutableBlobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        entry = userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my-key\");\n        assertThat(entry.getValue()).isEqualTo(\"my-value-\");\n\n        // check blobMetadata\n        var blobMetadata = userMetadataReplacerBlobStore.blobMetadata(\n                containerName, blobName);\n        userMetadata = blobMetadata.getUserMetadata();\n        assertThat(userMetadata).hasSize(1);\n        entry = 
userMetadata.entrySet().iterator().next();\n        assertThat(entry.getKey()).isEqualTo(\"my-key\");\n        assertThat(entry.getValue()).isEqualTo(\"my-value-\");\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/junit/S3ProxyExtensionTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.junit;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\n\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.BasicAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder;\nimport com.amazonaws.regions.Regions;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.model.Bucket;\nimport com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;\nimport com.amazonaws.services.s3.model.S3ObjectSummary;\n\nimport org.junit.jupiter.api.BeforeEach;\nimport org.junit.jupiter.api.Test;\nimport org.junit.jupiter.api.extension.RegisterExtension;\n\n/**\n * This is an example of how one would use the S3Proxy JUnit extension in a unit\n * test as opposed to a proper test of the S3ProxyExtension class.\n */\npublic class S3ProxyExtensionTest {\n\n    @RegisterExtension\n    static final S3ProxyExtension EXTENSION = S3ProxyExtension\n            .builder()\n            .withCredentials(\"access\", \"secret\")\n            .build();\n\n    private static final String MY_TEST_BUCKET = \"my-test-bucket\";\n\n    private AmazonS3 s3Client;\n\n    @BeforeEach\n    public final void setUp() throws Exception {\n        s3Client = AmazonS3ClientBuilder\n      
  .standard()\n        .withCredentials(\n            new AWSStaticCredentialsProvider(\n                new BasicAWSCredentials(\n                    EXTENSION.getAccessKey(), EXTENSION.getSecretKey())))\n        .withEndpointConfiguration(\n            new AwsClientBuilder.EndpointConfiguration(\n                EXTENSION.getUri().toString(), Regions.US_EAST_1.getName()))\n        .build();\n\n        s3Client.createBucket(MY_TEST_BUCKET);\n    }\n\n    @Test\n    public final void listBucket() {\n        List<Bucket> buckets = s3Client.listBuckets(new ListBucketsPaginatedRequest()).getBuckets();\n        assertThat(buckets).hasSize(1);\n        assertThat(buckets.get(0).getName())\n                .isEqualTo(MY_TEST_BUCKET);\n    }\n\n    @Test\n    public final void uploadFile() throws Exception {\n        String testInput = \"content\";\n        s3Client.putObject(MY_TEST_BUCKET, \"file.txt\", testInput);\n\n        List<S3ObjectSummary> summaries = s3Client\n                .listObjects(MY_TEST_BUCKET)\n                .getObjectSummaries();\n        assertThat(summaries).hasSize(1);\n        assertThat(summaries.get(0).getKey()).isEqualTo(\"file.txt\");\n        assertThat(summaries.get(0).getSize()).isEqualTo(testInput.length());\n    }\n\n    @Test\n    public final void doesBucketExistV2() {\n        assertThat(s3Client.doesBucketExistV2(MY_TEST_BUCKET)).isTrue();\n\n        // Issue #299\n        assertThat(s3Client.doesBucketExistV2(\"nonexistingbucket\")).isFalse();\n    }\n\n    @Test\n    public final void createExtensionWithoutCredentials() {\n        S3ProxyExtension extension = S3ProxyExtension\n                .builder()\n                .build();\n        assertThat(extension.getAccessKey()).isNull();\n        assertThat(extension.getSecretKey()).isNull();\n        assertThat(extension.getUri()).isNull();\n    }\n}\n"
  },
  {
    "path": "src/test/java/org/gaul/s3proxy/junit/S3ProxyRuleTest.java",
    "content": "/*\n * Copyright 2014-2026 Andrew Gaul <andrew@gaul.org>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage org.gaul.s3proxy.junit;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\nimport java.util.List;\n\nimport com.amazonaws.auth.AWSStaticCredentialsProvider;\nimport com.amazonaws.auth.BasicAWSCredentials;\nimport com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;\nimport com.amazonaws.regions.Regions;\nimport com.amazonaws.services.s3.AmazonS3;\nimport com.amazonaws.services.s3.AmazonS3ClientBuilder;\nimport com.amazonaws.services.s3.model.Bucket;\nimport com.amazonaws.services.s3.model.ListBucketsPaginatedRequest;\nimport com.amazonaws.services.s3.model.S3ObjectSummary;\n\nimport org.junit.Before;\nimport org.junit.Rule;\nimport org.junit.Test;\nimport org.junit.rules.TemporaryFolder;\n\n/**\n * This is an example of how one would use the S3Proxy JUnit rule in a unit\n * test as opposed to a proper test of the S3ProxyRule class.\n */\npublic class S3ProxyRuleTest {\n\n    private static final String MY_TEST_BUCKET = \"my-test-bucket\";\n\n    @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder();\n    @Rule public S3ProxyRule s3Proxy = S3ProxyRule\n        .builder()\n        .withCredentials(\"access\", \"secret\")\n        .build();\n\n    private AmazonS3 s3Client;\n\n    @Before\n    public final void setUp() throws Exception {\n        s3Client = 
AmazonS3ClientBuilder\n            .standard()\n            .withCredentials(\n                new AWSStaticCredentialsProvider(\n                    new BasicAWSCredentials(\n                        s3Proxy.getAccessKey(), s3Proxy.getSecretKey())))\n            .withEndpointConfiguration(\n                new EndpointConfiguration(\n                    s3Proxy.getUri().toString(), Regions.US_EAST_1.getName()))\n            .build();\n\n        s3Client.createBucket(MY_TEST_BUCKET);\n    }\n\n    @Test\n    public final void listBucket() {\n        List<Bucket> buckets = s3Client.listBuckets(new ListBucketsPaginatedRequest()).getBuckets();\n        assertThat(buckets).hasSize(1);\n        assertThat(buckets.get(0).getName())\n            .isEqualTo(MY_TEST_BUCKET);\n    }\n\n    @Test\n    public final void uploadFile() throws Exception {\n        String testInput = \"content\";\n        s3Client.putObject(MY_TEST_BUCKET, \"file.txt\", testInput);\n\n        List<S3ObjectSummary> summaries = s3Client\n                                            .listObjects(MY_TEST_BUCKET)\n                                            .getObjectSummaries();\n        assertThat(summaries).hasSize(1);\n        assertThat(summaries.get(0).getKey()).isEqualTo(\"file.txt\");\n        assertThat(summaries.get(0).getSize()).isEqualTo(testInput.length());\n    }\n\n    @Test\n    public final void doesBucketExistV2() {\n        assertThat(s3Client.doesBucketExistV2(MY_TEST_BUCKET)).isTrue();\n\n        // Issue #299\n        assertThat(s3Client.doesBucketExistV2(\"nonexistingbucket\")).isFalse();\n    }\n\n    @Test\n    public final void createExtensionWithoutCredentials() {\n        S3ProxyRule extension = S3ProxyRule\n                .builder()\n                .build();\n        assertThat(extension.getAccessKey()).isNull();\n        assertThat(extension.getSecretKey()).isNull();\n        assertThat(extension.getUri()).isNull();\n    }\n}\n"
  },
  {
    "path": "src/test/resources/logback.xml",
    "content": "<configuration>\n  <appender name=\"STDOUT\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>[s3proxy] %.-1p %d{MM-dd HH:mm:ss.SSS} %t %c{30}:%L %X{clientId}|%X{sessionId}:%X{messageId}:%X{fileId}] %m%n</pattern>\n    </encoder>\n    <filter class=\"ch.qos.logback.classic.filter.ThresholdFilter\">\n      <level>${LOG_LEVEL:-info}</level>\n    </filter>\n  </appender>\n\n  <logger name=\"org.eclipse.jetty\" level=\"${JETTY_LOG_LEVEL:-info}\" />\n\n  <root level=\"${LOG_LEVEL:-info}\">\n    <appender-ref ref=\"STDOUT\" />\n  </root>\n</configuration>\n"
  },
  {
    "path": "src/test/resources/run-s3-tests.sh",
    "content": "#!/bin/bash\n\nset -o errexit\nset -o nounset\n\n# Optional first argument selects a config; remaining args pass through to pytest via tox.\n# Example single test: ./src/test/resources/run-s3-tests.sh s3proxy-localstack.conf \\\n#     s3tests_boto3/functional/test_s3.py::test_bucket_list_delimiter_prefix\nS3PROXY_CONF=\"s3proxy.conf\"\nif (($# > 0)) && [[ \"$1\" == *.conf ]]; then\n    S3PROXY_CONF=\"$1\"\n    shift\nfi\n\nif (($# > 0)) && [[ \"$1\" == -- ]]; then\n    shift\nfi\n\nS3PROXY_BIN=\"${PWD}/target/s3proxy\"\nS3PROXY_PORT=\"${S3PROXY_PORT:-8081}\"\nexport S3TEST_CONF=\"${PWD}/src/test/resources/s3-tests.conf\"\nTOX_TEST_ARGS=(\"$@\")\n\n# launch S3Proxy using HTTP and a fixed port\nsed \"s,^\\(s3proxy.endpoint\\)=.*,\\1=http://127.0.0.1:${S3PROXY_PORT},\" \\\n        < \"src/test/resources/$S3PROXY_CONF\" | grep -v secure-endpoint > target/s3proxy.conf\njava -DLOG_LEVEL=${LOG_LEVEL:-info} -jar $S3PROXY_BIN --properties target/s3proxy.conf &\nS3PROXY_PID=$!\n\nfunction finish {\n    kill $S3PROXY_PID\n}\ntrap finish EXIT\n\n# wait for S3Proxy to start\nfor i in $(seq 30);\ndo\n    if exec 3<>\"/dev/tcp/localhost/${S3PROXY_PORT}\";\n    then\n        exec 3<&-  # Close for read\n        exec 3>&-  # Close for write\n        break\n    fi\n    sleep 1\ndone\n\ntags='not fails_on_s3proxy'\\\n' and not appendobject'\\\n' and not bucket_policy'\\\n' and not checksum'\\\n' and not copy'\\\n' and not cors'\\\n' and not encryption'\\\n' and not fails_strict_rfc2616'\\\n' and not iam_tenant'\\\n' and not lifecycle'\\\n' and not object_lock'\\\n' and not policy'\\\n' and not policy_status'\\\n' and not s3select'\\\n' and not s3website'\\\n' and not sse_s3'\\\n' and not tagging'\\\n' and not test_of_sts'\\\n' and not user_policy'\\\n' and not versioning'\\\n' and not webidentity_test'\n\nif [ \"${S3PROXY_CONF}\" = \"s3proxy-azurite.conf\" ]; then\n    tags=\"${tags} and not fails_on_s3proxy_azureblob\"\nelif [ \"${S3PROXY_CONF}\" = 
\"s3proxy-fake-gcs-server.conf\" ]; then\n    tags=\"${tags} and not fails_on_s3proxy_gcs\"\nelif [ \"${S3PROXY_CONF}\" = \"s3proxy-minio.conf\" ]; then\n    tags=\"${tags} and not fails_on_s3proxy_minio\"\nelif [[ \"${S3PROXY_CONF}\" == s3proxy-localstack*.conf ]]; then\n    tags=\"${tags} and not fails_on_s3proxy_localstack and not fails_on_s3proxy_minio and not fails_on_aws\"\nelif [ \"${S3PROXY_CONF}\" = \"s3proxy-transient-nio2.conf\" ]; then\n    tags=\"${tags} and not fails_on_s3proxy_nio2\"\nfi\n\n# execute s3-tests\npushd s3-tests\nif [ ${#TOX_TEST_ARGS[@]} -eq 0 ]; then\n    tox -- -m \"${tags}\"\nelse\n    tox -- -m \"${tags}\" \"${TOX_TEST_ARGS[@]}\"\nfi\n"
  },
  {
    "path": "src/test/resources/s3-tests.conf",
    "content": "[DEFAULT]\n## this section is just used as default for all the \"s3 *\"\n## sections, you can place these variables also directly there\n\n## replace with e.g. \"localhost\" to run against local software\nhost = 127.0.0.1\n\n## uncomment the port to use something other than 80\nport = 8081\n\n## say \"no\" to disable TLS\nis_secure = no\n\n[fixtures]\n## all the buckets created will start with this prefix;\n## {random} will be filled with random characters to pad\n## the prefix to 30 characters long, and avoid collisions\nbucket prefix = s3proxy-{random}-\n\n[s3 main]\n## the tests assume two accounts are defined, \"main\" and \"alt\".\n\n## user_id is a 64-character hexstring\nuser_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\n\nemail = tester@ceph.com\n\n## display name typically looks more like a unix login, \"jdoe\" etc\ndisplay_name = CustomersName@amazon.com\n\n## replace these with your access keys\naccess_key = local-identity\nsecret_key = local-credential\n\n[s3 alt]\n## another user account, used for ACL-related tests\nuser_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234\ndisplay_name = john.doe\n## the \"alt\" user needs to have email set, too\nemail = john.doe@example.com\naccess_key = local-identity\nsecret_key = local-credential\n\n[s3 tenant]\nuser_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ndisplay_name = testx$tenanteduser\nemail = tenanteduser@example.com\naccess_key = local-identity\nsecret_key = local-credential\ntenant = testx\n\n[iam]\n#used for iam operations in sts-tests\n#email from vstart.sh\nemail = s3@example.com\n\n#user_id from vstart.sh\nuser_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\n\n#access_key from vstart.sh\naccess_key = ABCDEFGHIJKLMNOPQRST\n\n#secret_key vstart.sh\nsecret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn\n\n#display_name from vstart.sh\ndisplay_name = youruseridhere\n\n# iam account root user for 
iam_account tests\n[iam root]\naccess_key = AAAAAAAAAAAAAAAAAAaa\nsecret_key = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\nuser_id = RGW11111111111111111\nemail = account1@ceph.com\n\n# iam account root user in a different account than [iam root]\n[iam alt root]\naccess_key = BBBBBBBBBBBBBBBBBBbb\nsecret_key = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\nuser_id = RGW22222222222222222\nemail = account2@ceph.com\n\n[webidentity]\n# TODO: obvious garbage\n#used for assume role with web identity test in sts-tests\n#all parameters will be obtained from ceph/qa/tasks/keycloak.py\ntoken=<access_token>\n\naud=<obtained after introspecting token>\n\nsub=<obtained after introspecting token>\n\nazp=<obtained after introspecting token>\n\nuser_token=<access token for a user, with attribute Department=[Engineering, Marketing>]\n\nthumbprint=<obtained from x509 certificate>\n\nKC_REALM=<name of the realm>\n"
  },
  {
    "path": "src/test/resources/s3proxy-anonymous.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=none\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=transient\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  },
  {
    "path": "src/test/resources/s3proxy-azurite.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=azureblob-sdk\njclouds.endpoint=http://127.0.0.1:10000/devstoreaccount1\njclouds.identity=devstoreaccount1\njclouds.credential=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==\n"
  },
  {
    "path": "src/test/resources/s3proxy-cors-allow-all.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\ns3proxy.cors-allow-all=true\n\njclouds.provider=transient\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  },
  {
    "path": "src/test/resources/s3proxy-cors.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\ns3proxy.cors-allow-origins=https://example\\.com https://.+\\.example\\.com https://example\\.cloud\ns3proxy.cors-allow-methods=GET PUT\ns3proxy.cors-allow-headers=Accept Content-Type\ns3proxy.cors-exposed-headers=ETag\n\njclouds.provider=transient\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  },
  {
    "path": "src/test/resources/s3proxy-encryption.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=transient\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n\ns3proxy.encrypted-blobstore=true\ns3proxy.encrypted-blobstore-password=1234567890123456\ns3proxy.encrypted-blobstore-salt=12345678\n"
  },
  {
    "path": "src/test/resources/s3proxy-fake-gcs-server.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=google-cloud-storage-sdk\njclouds.endpoint=http://localhost:4443\njclouds.identity=identity\njclouds.credential=\n"
  },
  {
    "path": "src/test/resources/s3proxy-filesystem-nio2.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=filesystem-nio2\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  },
  {
    "path": "src/test/resources/s3proxy-localstack-aws-s3-sdk.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=aws-s3-sdk\njclouds.identity=remote-identity\njclouds.credential=remote-credential\njclouds.endpoint=http://127.0.0.1:4566\n# Region may be needed by the backend to locate the bucket (default: us-east-1)\naws-s3-sdk.region=us-east-1\n# Conditional writes mode: \"native\" (default) or \"emulated\"\naws-s3-sdk.conditional-writes=native\n"
  },
  {
    "path": "src/test/resources/s3proxy-localstack-s3.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=s3\njclouds.identity=remote-identity\njclouds.credential=remote-credential\njclouds.endpoint=http://127.0.0.1:4566\n"
  },
  {
    "path": "src/test/resources/s3proxy-transient-nio2.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=transient-nio2\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  },
  {
    "path": "src/test/resources/s3proxy.conf",
    "content": "s3proxy.endpoint=http://127.0.0.1:0\ns3proxy.secure-endpoint=https://127.0.0.1:0\n#s3proxy.service-path=s3proxy\n# authorization must be aws-v2, aws-v4, aws-v2-or-v4, or none\ns3proxy.authorization=aws-v2-or-v4\ns3proxy.identity=local-identity\ns3proxy.credential=local-credential\ns3proxy.keystore-path=keystore.jks\ns3proxy.keystore-password=password\n\njclouds.provider=transient\njclouds.identity=remote-identity\njclouds.credential=remote-credential\n# endpoint is optional for some providers\n#jclouds.endpoint=http://127.0.0.1:8081\njclouds.filesystem.basedir=/tmp/blobstore\n"
  }
]