[
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"pip\"\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "name: Publish Python Package\n\non:\n  release:\n    types: [created]\n\npermissions:\n  contents: read\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        python-version: [\"3.10\", \"3.11\", \"3.12\", \"3.13\", \"3.14\"]\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up Python ${{ matrix.python-version }}\n      uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n        cache: pip\n        cache-dependency-path: pyproject.toml\n    - name: Install dependencies\n      run: |\n        pip install . --group dev\n    - name: Run tests\n      run: |\n        pytest\n  deploy:\n    runs-on: ubuntu-latest\n    needs: [test]\n    environment: release\n    permissions:\n      id-token: write\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up Python\n      uses: actions/setup-python@v6\n      with:\n        python-version: \"3.14\"\n        cache: pip\n        cache-dependency-path: pyproject.toml\n    - name: Install dependencies\n      run: |\n        pip install setuptools wheel build\n    - name: Build\n      run: |\n        python -m build\n    - name: Publish\n      uses: pypa/gh-action-pypi-publish@release/v1\n\n"
  },
  {
    "path": ".github/workflows/test.yml",
    "content": "name: Test\n\non: [push, pull_request]\n\npermissions:\n  contents: read\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        python-version: [\"3.10\", \"3.11\", \"3.12\", \"3.13\", \"3.14\"]\n    steps:\n    - uses: actions/checkout@v6\n    - name: Set up Python ${{ matrix.python-version }}\n      uses: actions/setup-python@v6\n      with:\n        python-version: ${{ matrix.python-version }}\n        cache: pip\n        cache-dependency-path: pyproject.toml\n    - name: Install dependencies\n      run: |\n        pip install . --group dev\n    - name: Run tests\n      run: |\n        pytest\n    - name: Check if cog needs to run\n      run: |\n        cog --check README.md\n        cog --check docs/*.md"
  },
  {
    "path": ".gitignore",
    "content": ".venv\n__pycache__/\n*.py[cod]\n*$py.class\nvenv\n.eggs\n.pytest_cache\n*.egg-info\n.DS_Store\n"
  },
  {
    "path": ".readthedocs.yaml",
    "content": "version: 2\n\nbuild:\n  os: ubuntu-22.04\n  tools:\n    python: \"3.11\"\n\nsphinx:\n  configuration: docs/conf.py\n\nformats:\n   - pdf\n   - epub\n\npython:\n   install:\n   - requirements: docs/requirements.txt\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# s3-credentials\n\n[![PyPI](https://img.shields.io/pypi/v/s3-credentials.svg)](https://pypi.org/project/s3-credentials/)\n[![Changelog](https://img.shields.io/github/v/release/simonw/s3-credentials?include_prereleases&label=changelog)](https://github.com/simonw/s3-credentials/releases)\n[![Tests](https://github.com/simonw/s3-credentials/workflows/Test/badge.svg)](https://github.com/simonw/s3-credentials/actions?query=workflow%3ATest)\n[![Documentation Status](https://readthedocs.org/projects/s3-credentials/badge/?version=latest)](https://s3-credentials.readthedocs.org/)\n[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/simonw/s3-credentials/blob/master/LICENSE)\n\nA tool for creating credentials for accessing S3 buckets\n\nFor project background, see [s3-credentials: a tool for creating credentials for S3 buckets](https://simonwillison.net/2021/Nov/3/s3-credentials/) on my blog.\n\n## Installation\n\n    pip install s3-credentials\n\n## Basic usage\n\nTo create a new S3 bucket and output credentials that can be used with only that bucket:\n```\n% s3-credentials create my-new-s3-bucket --create-bucket\nCreated bucket:  my-new-s3-bucket\nCreated user: s3.read-write.my-new-s3-bucket with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess\nAttached policy s3.read-write.my-new-s3-bucket to user s3.read-write.my-new-s3-bucket\nCreated access key for user: s3.read-write.my-new-s3-bucket\n{\n    \"UserName\": \"s3.read-write.my-new-s3-bucket\",\n    \"AccessKeyId\": \"AKIAWXFXAIOZOYLZAEW5\",\n    \"Status\": \"Active\",\n    \"SecretAccessKey\": \"...\",\n    \"CreateDate\": \"2021-11-03 01:38:24+00:00\"\n}\n```\nThe tool can do a lot more than this. 
See the [documentation](https://s3-credentials.readthedocs.io/) for details.\n\n## Documentation\n\n- [Full documentation](https://s3-credentials.readthedocs.io/)\n- [Command help reference](https://s3-credentials.readthedocs.io/en/stable/help.html)\n- [Release notes](https://github.com/simonw/s3-credentials/releases)\n"
  },
  {
    "path": "docs/.gitignore",
    "content": "_build\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nSPHINXPROJ    = sqlite-utils\nSOURCEDIR     = .\nBUILDDIR      = _build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new\n# \"make mode\" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\nlivehtml:\n\tsphinx-autobuild -b html \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(0)\n"
  },
  {
    "path": "docs/conf.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom subprocess import PIPE, Popen\n\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"s3-credentials\"\ncopyright = \"2022, Simon Willison\"\nauthor = \"Simon Willison\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\npipe = Popen(\"git describe --tags --always\", stdout=PIPE, shell=True)\ngit_version = pipe.stdout.read().decode(\"utf8\")\n\nif git_version:\n    version = git_version.rsplit(\"-\", 1)[0]\n    release = git_version\nelse:\n    version = 
\"\"\n    release = \"\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {}\nhtml_title = \"s3-credentials\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"s3-credentials-doc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    #\n    # 'papersize': 'letterpaper',\n    # The font size ('10pt', '11pt' or '12pt').\n    #\n    # 'pointsize': '10pt',\n    # Additional stuff for the LaTeX preamble.\n    #\n    # 'preamble': '',\n    # Latex figure (float) alignment\n    #\n    # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (\n        master_doc,\n        \"s3-credentials.tex\",\n        \"s3-credentials documentation\",\n        \"Simon Willison\",\n        \"manual\",\n    )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (\n        master_doc,\n        \"s3-credentials\",\n        \"s3-credentials documentation\",\n        [author],\n        1,\n    )\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (\n        master_doc,\n        \"s3-credentials\",\n        \"s3-credentials documentation\",\n        author,\n        \"s3-credentials\",\n        \"A tool for creating credentials for accessing S3 buckets\",\n        \"Miscellaneous\",\n    )\n]\n"
  },
  {
    "path": "docs/configuration.md",
    "content": "# Configuration\n\nThis tool uses [boto3](https://boto3.amazonaws.com/) under the hood which supports [a number of different ways](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) of providing your AWS credentials.\n\nIf you have an existing `~/.aws/config` or `~/.aws/credentials` file the tool will use that.\n\nOne way to create those files is using the `aws configure` command, available if you first run `pip install awscli`.\n\nAlternatively, you can set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables before calling this tool.\n\nYou can also use the `--access-key=`, `--secret-key=`, `--session-token` and `--auth` options documented below.\n\n## Common command options\n\nAll of the `s3-credentials` commands also accept the following options for authenticating against AWS:\n\n- `--access-key`: AWS access key ID\n- `--secret-key`: AWS secret access key\n- `--session-token`: AWS session token\n- `--endpoint-url`: Custom endpoint URL\n- `--auth`: file (or `-` for standard input) containing credentials to use\n\nThe file passed to `--auth` can be either a JSON file or an INI file. JSON files should contain the following:\n\n```json\n{\n    \"AccessKeyId\": \"AKIAWXFXAIOZA5IR5PY4\",\n    \"SecretAccessKey\": \"g63...\"\n}\n```\nThe JSON file can also optionally include a session token in a `\"SessionToken\"` key.\n\nThe INI format variant of this file should look like this:\n\n```ini\n[default]\naws_access_key_id=AKIAWXFXAIOZNCR2ST7S\naws_secret_access_key=g63...\n```\nAny section headers will do - the tool will use the information from the first section it finds in the file which has a `aws_access_key_id` key.\n\nThese auth file formats are the same as those that can be created using the `create` command.\n"
  },
  {
    "path": "docs/contributing.md",
    "content": "# Contributing\n\nTo contribute to this tool, first checkout [the code](https://github.com/simonw/s3-credentials). You can run the tests locally using `pytest` and `uv`:\n\n    cd s3-credentials\n    uv run pytest\n\nAny changes to the generated policies require an update to the docs using [Cog](https://github.com/nedbat/cog):\n\n    uv run poe cog\n\nTo preview the documentation locally, you can use:\n\n    uv run poe livehtml\n\n## Integration tests\n\nThe main tests all use stubbed interfaces to AWS, so will not make any outbound API calls.\n\nThere is also a suite of integration tests in `tests/test_integration.py` which DO make API calls to AWS, using credentials from your environment variables or `~/.aws/credentials` file.\n\nThese tests are skipped by default. If you have AWS configured with an account that has permission to run the actions required by `s3-credentials` (create users, roles, buckets etc) you can run these tests using:\n\n    uv run pytest --integration\n\nThe tests will create a number of different users and buckets and should then delete them once they finish running.\n"
  },
  {
    "path": "docs/create.md",
    "content": "# Creating S3 credentials\n\nThe `s3-credentials create` command is the core feature of this tool. Pass it one or more S3 bucket names, specify a policy (read-write, read-only or write-only) and it will return AWS credentials that can be used to access those buckets.\n\nThese credentials can be **temporary** or **permanent**.\n\n- Temporary credentials can last for between 15 minutes and 12 hours. They are created using [STS.AssumeRole()](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html).\n- Permanent credentials never expire. They are created by first creating a dedicated AWS user, then assigning a policy to that user and creating and returning an access key for it.\n\nMake sure to record the `SecretAccessKey` because it will only be displayed once and cannot be recreated later on.\n\nIn this example I create permanent credentials for reading and writing files in my `static.niche-museums.com` S3 bucket:\n\n```\n% s3-credentials create static.niche-museums.com\n\nCreated user: s3.read-write.static.niche-museums.com with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess\nAttached policy s3.read-write.static.niche-museums.com to user s3.read-write.static.niche-museums.com\nCreated access key for user: s3.read-write.static.niche-museums.com\n{\n    \"UserName\": \"s3.read-write.static.niche-museums.com\",\n    \"AccessKeyId\": \"AKIAWXFXAIOZOYLZAEW5\",\n    \"Status\": \"Active\",\n    \"SecretAccessKey\": \"...\",\n    \"CreateDate\": \"2021-11-03 01:38:24+00:00\"\n}\n```\nIf you add `--format ini` the credentials will be output in INI format, suitable for pasting into a `~/.aws/credentials` file:\n```\n% s3-credentials create static.niche-museums.com --format ini > ini.txt\nCreated user: s3.read-write.static.niche-museums.com with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess\nAttached policy s3.read-write.static.niche-museums.com to user s3.read-write.static.niche-museums.com\nCreated access 
key for user: s3.read-write.static.niche-museums.com\n% cat ini.txt\n[default]\naws_access_key_id=AKIAWXFXAIOZKGXI4PVO\naws_secret_access_key=...\n```\n\nTo create temporary credentials, add `--duration 15m` (or `1h` or `1200s`). The specified duration must be between 15 minutes and 12 hours.\n\n```\n% s3-credentials create static.niche-museums.com --duration 15m\nAssume role against arn:aws:iam::462092780466:role/s3-credentials.AmazonS3FullAccess for 900s\n{\n    \"AccessKeyId\": \"ASIAWXFXAIOZPAHAYHUG\",\n    \"SecretAccessKey\": \"Nrnoc...\",\n    \"SessionToken\": \"FwoGZXIvYXd...mr9Fjs=\",\n    \"Expiration\": \"2021-11-11 03:24:07+00:00\"\n}\n```\nWhen using temporary credentials the session token must be passed in addition to the access key and secret key.\n\nThe `create` command has a number of options:\n\n- `--format TEXT`: The output format to use. Defaults to `json`, but can also be `ini`.\n- `--duration 15m`: For temporary credentials, how long should they last? This can be specified in seconds, minutes or hours using a suffix of `s`, `m` or `h` - but must be between 15 minutes and 12 hours.\n- `--username TEXT`: The username to use for the user that is created by the command (or the username of an existing user if you do not want to create a new one). If omitted a default such as `s3.read-write.static.niche-museums.com` will be used.\n- `-c, --create-bucket`: Create the buckets if they do not exist. Without this any missing buckets will be treated as an error.\n- `--prefix my-prefix/`: Credentials should only allow access to keys in the S3 bucket that start with this prefix.\n- `--public`: When creating a bucket, set it so that any file uploaded to that bucket can be downloaded by anyone who knows its filename. 
This attaches the {ref}`public_bucket_policy` and sets the `PublicAccessBlockConfiguration` to `false` for [every option](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PublicAccessBlockConfiguration.html).\n- `--website`: Sets the bucket to public and configures it to act as a website, with `index.html` treated as an index page and `error.html` used to display custom errors. The URL for the website will be `http://<bucket-name>.s3-website.<region>.amazonaws.com/` - the region defaults to `us-east-1` unless you specify a `--bucket-region`.\n- `--read-only`: The user should only be allowed to read files from the bucket.\n- `--write-only`: The user should only be allowed to write files to the bucket, but not read them. This can be useful for logging and backups.\n- `--policy filepath-or-string`: A custom policy document (as a file path, literal JSON string or `-` for standard input) - see below.\n- `--statement json-statement`: Custom JSON statement block to be added to the generated policy.\n- `--bucket-region`: If creating buckets, the region in which they should be created.\n- `--silent`: Don't output details of what is happening, just output the JSON for the created access credentials at the end.\n- `--dry-run`: Output details of AWS changes that would have been made without applying them.\n- `--user-permissions-boundary`: Custom [permissions boundary](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) to use for users created by this tool. The default is to restrict those users to only interacting with S3, taking the `--read-only` option into account. Use `none` to create users without any permissions boundary at all.\n\n## Changes that will be made to your AWS account\n\nHow the tool works varies depending on if you are creating temporary or permanent credentials.\n\nFor permanent credentials, the steps are as follows:\n\n1. Confirm that each of the specified buckets exists. 
If they do not and `--create-bucket` was passed create them - otherwise exit with an error.\n2. If a username was not specified, derive a username using the `s3.$permission.$buckets` format.\n3. If a user with that username does not exist, create one with an S3 permissions boundary of [AmazonS3ReadOnlyAccess](https://github.com/glassechidna/trackiam/blob/master/policies/AmazonS3ReadOnlyAccess.json) for `--read-only` or [AmazonS3FullAccess](https://github.com/glassechidna/trackiam/blob/master/policies/AmazonS3FullAccess.json) otherwise - unless `--user-permissions-boundary=none` was passed, or a custom permissions boundary string.\n4. For each specified bucket, add an inline IAM policy to the user that gives them permission to either read-only, write-only or read-write against that bucket.\n5. Create a new access key for that user and output the key and its secret to the console.\n\nFor temporary credentials:\n\n1. Confirm or create buckets, in the same way as for permanent credentials.\n2. Check if an AWS role called `s3-credentials.AmazonS3FullAccess` exists. If it does not exist create it, configured to allow the user's AWS account to assume it and with the `arn:aws:iam::aws:policy/AmazonS3FullAccess` policy attached.\n3. 
Use `STS.AssumeRole()` to return temporary credentials that are restricted to just the specified buckets and specified read-only/read-write/write-only policy.\n\nYou can run the `create` command with the `--dry-run` option to see a summary of changes that would be applied, including details of generated policy documents, without actually applying those changes.\n\n## Using a custom policy\n\nThe policy documents applied by this tool [are listed here](policy-documents.md).\n\nIf you want to use a custom policy document you can do so using the `--policy` option.\n\nFirst, create your policy document as a JSON file that looks something like this:\n\n```json\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\"s3:GetObject*\", \"s3:ListBucket\"],\n      \"Resource\": [\n        \"arn:aws:s3:::$!BUCKET_NAME!$\",\n        \"arn:aws:s3:::$!BUCKET_NAME!$/*\"\n      ]\n    }\n  ]\n}\n```\nNote the `$!BUCKET_NAME!$` strings - these will be replaced with the name of the relevant S3 bucket before the policy is applied.\n\nSave that as `custom-policy.json` and apply it using the following command:\n\n    % s3-credentials create my-s3-bucket \\\n        --policy custom-policy.json\n\nYou can also pass `-` to read from standard input, or you can pass the literal JSON string directly to the `--policy` option:\n```\n% s3-credentials create my-s3-bucket --policy '{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\"s3:GetObject*\", \"s3:ListBucket\"],\n      \"Resource\": [\n        \"arn:aws:s3:::$!BUCKET_NAME!$\",\n        \"arn:aws:s3:::$!BUCKET_NAME!$/*\"\n      ]\n    }\n  ]\n}'\n```\nYou can also specify one or more extra statement blocks that should be added to the generated policy, using `--statement JSON`. 
This example enables the AWS `textract:` APIs for the generated credentials, useful for using with the [s3-ocr](https://datasette.io/tools/s3-ocr) tool:\n```\n% s3-credentials create my-s3-bucket --statement '{\n  \"Effect\": \"Allow\",\n  \"Action\": \"textract:*\",\n  \"Resource\": \"*\"\n}'\n```\n"
  },
  {
    "path": "docs/help.md",
    "content": "# Command help\n\nThis page shows the `--help` output for all of the `s3-credentials` commands.\n\n<!-- [[[cog\nimport cog\nfrom s3_credentials import cli\nfrom click.testing import CliRunner\nrunner = CliRunner()\n# Get a list of all the commands\nresult = runner.invoke(cli.cli, [\"--help\"])\nlines = result.output.split(\"Commands:\")[1].strip().split(\"\\n\")\ncommands = [l.strip().split()[0] for l in lines if l]\nfor command in [\"\"] + commands:\n    result = runner.invoke(cli.cli, ([command] if command else []) + [\"--help\"])\n    help = result.output.replace(\"Usage: cli\", \"Usage: s3-credentials\")\n    cog.out(\n        \"## s3-credentials {} --help\\n\\n```\\n{}\\n```\\n\".format(command, help.strip())\n    )\n\n]]] -->\n## s3-credentials  --help\n\n```\nUsage: s3-credentials [OPTIONS] COMMAND [ARGS]...\n\n  A tool for creating credentials for accessing S3 buckets\n\n  Documentation: https://s3-credentials.readthedocs.io/\n\nOptions:\n  --version  Show the version and exit.\n  --help     Show this message and exit.\n\nCommands:\n  create                   Create and return new AWS credentials for...\n  debug-bucket             Run a bunch of diagnostics to help debug a bucket\n  delete-objects           Delete one or more object from an S3 bucket\n  delete-user              Delete specified users, their access keys and...\n  get-bucket-policy        Get bucket policy for a bucket\n  get-cors-policy          Get CORS policy for a bucket\n  get-object               Download an object from an S3 bucket\n  get-objects              Download multiple objects from an S3 bucket\n  get-public-access-block  Get the public access settings for an S3 bucket\n  list-bucket              List contents of bucket\n  list-buckets             List buckets\n  list-roles               List roles\n  list-user-policies       List inline policies for specified users\n  list-users               List all users for this account\n  localserver              Start a 
localhost server that serves S3...\n  policy                   Output generated JSON policy for one or more...\n  put-object               Upload an object to an S3 bucket\n  put-objects              Upload multiple objects to an S3 bucket\n  set-bucket-policy        Set bucket policy for a bucket\n  set-cors-policy          Set CORS policy for a bucket\n  set-public-access-block  Configure public access settings for an S3 bucket.\n  whoami                   Identify currently authenticated user\n```\n## s3-credentials create --help\n\n```\nUsage: s3-credentials create [OPTIONS] BUCKETS...\n\n  Create and return new AWS credentials for specified S3 buckets - optionally\n  also creating the bucket if it does not yet exist.\n\n  To create a new bucket and output read-write credentials:\n\n      s3-credentials create my-new-bucket -c\n\n  To create read-only credentials for an existing bucket:\n\n      s3-credentials create my-existing-bucket --read-only\n\n  To create write-only credentials that are only valid for 15 minutes:\n\n      s3-credentials create my-existing-bucket --write-only -d 15m\n\nOptions:\n  -f, --format [ini|json]         Output format for credentials\n  -d, --duration DURATION         How long should these credentials work for?\n                                  Default is forever, use 3600 for 3600 seconds,\n                                  15m for 15 minutes, 1h for 1 hour\n  --username TEXT                 Username to create or existing user to use\n  -c, --create-bucket             Create buckets if they do not already exist\n  --prefix TEXT                   Restrict to keys starting with this prefix\n  --public                        Make the created bucket public: anyone will be\n                                  able to download files if they know their name\n  --website                       Configure bucket to act as a website, using\n                                  index.html and error.html\n  --read-only                     Only 
allow reading from the bucket\n  --write-only                    Only allow writing to the bucket\n  --policy POLICY                 Path to a policy.json file, or literal JSON\n                                  string - $!BUCKET_NAME!$ will be replaced with\n                                  the name of the bucket\n  --statement STATEMENT           JSON statement to add to the policy\n  --bucket-region TEXT            Region in which to create buckets\n  --silent                        Don't show performed steps\n  --dry-run                       Show steps without executing them\n  --user-permissions-boundary TEXT\n                                  Custom permissions boundary to use for created\n                                  users, or 'none' to create without. Defaults\n                                  to limiting to S3 based on --read-only and\n                                  --write-only options.\n  --access-key TEXT               AWS access key ID\n  --secret-key TEXT               AWS secret access key\n  --session-token TEXT            AWS session token\n  --endpoint-url TEXT             Custom endpoint URL\n  -a, --auth FILENAME             Path to JSON/INI file containing credentials\n  --help                          Show this message and exit.\n```\n## s3-credentials debug-bucket --help\n\n```\nUsage: s3-credentials debug-bucket [OPTIONS] BUCKET\n\n  Run a bunch of diagnostics to help debug a bucket\n\n     s3-credentials debug-bucket my-bucket\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials delete-objects --help\n\n```\nUsage: s3-credentials delete-objects [OPTIONS] BUCKET [KEYS]...\n\n  Delete one or more object from an S3 bucket\n\n  Pass one or more keys to 
delete them:\n\n      s3-credentials delete-objects my-bucket one.txt two.txt\n\n  To delete all files matching a prefix, pass --prefix:\n\n      s3-credentials delete-objects my-bucket --prefix my-folder/\n\nOptions:\n  --prefix TEXT         Delete everything with this prefix\n  -s, --silent          Don't show informational output\n  -d, --dry-run         Show keys that would be deleted without deleting them\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials delete-user --help\n\n```\nUsage: s3-credentials delete-user [OPTIONS] USERNAMES...\n\n  Delete specified users, their access keys and their inline policies\n\n      s3-credentials delete-user username1 username2\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials get-bucket-policy --help\n\n```\nUsage: s3-credentials get-bucket-policy [OPTIONS] BUCKET\n\n  Get bucket policy for a bucket\n\n     s3-credentials get-bucket-policy my-bucket\n\n  Returns the bucket policy for this bucket, if set, as JSON\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials get-cors-policy --help\n\n```\nUsage: s3-credentials get-cors-policy [OPTIONS] BUCKET\n\n  Get CORS policy for a bucket\n\n     
s3-credentials get-cors-policy my-bucket\n\n  Returns the CORS policy for this bucket, if set, as JSON\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials get-object --help\n\n```\nUsage: s3-credentials get-object [OPTIONS] BUCKET KEY\n\n  Download an object from an S3 bucket\n\n  To see the contents of the bucket on standard output:\n\n      s3-credentials get-object my-bucket hello.txt\n\n  To save to a file:\n\n      s3-credentials get-object my-bucket hello.txt -o hello.txt\n\nOptions:\n  -o, --output FILE     Write to this file instead of stdout\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials get-objects --help\n\n```\nUsage: s3-credentials get-objects [OPTIONS] BUCKET [KEYS]...\n\n  Download multiple objects from an S3 bucket\n\n  To download everything, run:\n\n      s3-credentials get-objects my-bucket\n\n  Files will be saved to a directory called my-bucket. Use -o dirname to save to\n  a different directory.\n\n  To download specific keys, list them:\n\n      s3-credentials get-objects my-bucket one.txt path/two.txt\n\n  To download files matching a glob-style pattern, use:\n\n      s3-credentials get-objects my-bucket --pattern '*/*.js'\n\nOptions:\n  -o, --output DIRECTORY  Write to this directory instead of one matching the\n                          bucket name\n  -p, --pattern TEXT      Glob patterns for files to download, e.g. 
'*/*.js'\n  -s, --silent            Don't show progress bar\n  --access-key TEXT       AWS access key ID\n  --secret-key TEXT       AWS secret access key\n  --session-token TEXT    AWS session token\n  --endpoint-url TEXT     Custom endpoint URL\n  -a, --auth FILENAME     Path to JSON/INI file containing credentials\n  --help                  Show this message and exit.\n```\n## s3-credentials get-public-access-block --help\n\n```\nUsage: s3-credentials get-public-access-block [OPTIONS] BUCKET\n\n  Get the public access settings for an S3 bucket\n\n  Example usage:\n\n      s3-credentials get-public-access-block my-bucket\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials list-bucket --help\n\n```\nUsage: s3-credentials list-bucket [OPTIONS] BUCKET\n\n  List contents of bucket\n\n  To list the contents of a bucket as JSON:\n\n      s3-credentials list-bucket my-bucket\n\n  Add --csv or --csv for CSV or TSV format:\n\n      s3-credentials list-bucket my-bucket --csv\n\n  Add --urls to get an extra URL field for each key:\n\n      s3-credentials list-bucket my-bucket --urls\n\nOptions:\n  --prefix TEXT         List keys starting with this prefix\n  --urls                Show URLs for each key\n  --nl                  Output newline-delimited JSON\n  --csv                 Output CSV\n  --tsv                 Output TSV\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials list-buckets --help\n\n```\nUsage: s3-credentials 
list-buckets [OPTIONS] [BUCKETS]...\n\n  List buckets\n\n  To list all buckets and their creation time as JSON:\n\n      s3-credentials list-buckets\n\n  Add --csv or --csv for CSV or TSV format:\n\n      s3-credentials list-buckets --csv\n\n  For extra details per bucket (much slower) add --details\n\n      s3-credentials list-buckets --details\n\nOptions:\n  --details             Include extra bucket details (slower)\n  --nl                  Output newline-delimited JSON\n  --csv                 Output CSV\n  --tsv                 Output TSV\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials list-roles --help\n\n```\nUsage: s3-credentials list-roles [OPTIONS] [ROLE_NAMES]...\n\n  List roles\n\n  To list all roles for this AWS account:\n\n      s3-credentials list-roles\n\n  Add --csv or --csv for CSV or TSV format:\n\n      s3-credentials list-roles --csv\n\n  For extra details per role (much slower) add --details\n\n      s3-credentials list-roles --details\n\nOptions:\n  --details             Include attached policies (slower)\n  --nl                  Output newline-delimited JSON\n  --csv                 Output CSV\n  --tsv                 Output TSV\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials list-user-policies --help\n\n```\nUsage: s3-credentials list-user-policies [OPTIONS] [USERNAMES]...\n\n  List inline policies for specified users\n\n      s3-credentials list-user-policies username\n\n  Returns policies for 
all users if no usernames are provided.\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials list-users --help\n\n```\nUsage: s3-credentials list-users [OPTIONS]\n\n  List all users for this account\n\n      s3-credentials list-users\n\n  Add --csv or --csv for CSV or TSV format:\n\n      s3-credentials list-users --csv\n\nOptions:\n  --nl                  Output newline-delimited JSON\n  --csv                 Output CSV\n  --tsv                 Output TSV\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials localserver --help\n\n```\nUsage: s3-credentials localserver [OPTIONS] BUCKET\n\n  Start a localhost server that serves S3 credentials.\n\n  The server responds to GET requests on / with JSON containing temporary AWS\n  credentials that allow access to the specified bucket.\n\n  Credentials are cached and refreshed automatically based on the --duration\n  setting.\n\n  To start a server that serves read-only credentials for a bucket, with\n  credentials valid for 1 hour:\n\n      s3-credentials localserver my-bucket --read-only --duration 1h\n\n  To run on a different port:\n\n      s3-credentials localserver my-bucket --duration 1h --port 9000\n\nOptions:\n  -p, --port INTEGER       Port to run the server on (default: 8094)\n  --host TEXT              Host to bind the server to (default: localhost)\n  --read-only              Only allow reading from the bucket\n  --write-only             Only allow writing to the 
bucket\n  --prefix TEXT            Restrict to keys starting with this prefix\n  --statement STATEMENT    JSON statement to add to the policy\n  -d, --duration DURATION  How long should credentials be valid for, e.g. 15m,\n                           1h, 12h  [required]\n  --access-key TEXT        AWS access key ID\n  --secret-key TEXT        AWS secret access key\n  --session-token TEXT     AWS session token\n  --endpoint-url TEXT      Custom endpoint URL\n  -a, --auth FILENAME      Path to JSON/INI file containing credentials\n  --help                   Show this message and exit.\n```\n## s3-credentials policy --help\n\n```\nUsage: s3-credentials policy [OPTIONS] BUCKETS...\n\n  Output generated JSON policy for one or more buckets\n\n  Takes the same options as s3-credentials create\n\n  To output a read-only JSON policy for a bucket:\n\n      s3-credentials policy my-bucket --read-only\n\nOptions:\n  --read-only            Only allow reading from the bucket\n  --write-only           Only allow writing to the bucket\n  --prefix TEXT          Restrict to keys starting with this prefix e.g. 
foo/\n  --statement STATEMENT  JSON statement to add to the policy\n  --public-bucket        Bucket policy for allowing public access\n  --help                 Show this message and exit.\n```\n## s3-credentials put-object --help\n\n```\nUsage: s3-credentials put-object [OPTIONS] BUCKET KEY PATH\n\n  Upload an object to an S3 bucket\n\n  To upload a file to /my-key.txt in the my-bucket bucket:\n\n      s3-credentials put-object my-bucket my-key.txt /path/to/file.txt\n\n  Use - to upload content from standard input:\n\n      echo \"Hello\" | s3-credentials put-object my-bucket hello.txt -\n\nOptions:\n  --content-type TEXT   Content-Type to use (default is auto-detected based on\n                        file extension)\n  -s, --silent          Don't show progress bar\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials put-objects --help\n\n```\nUsage: s3-credentials put-objects [OPTIONS] BUCKET OBJECTS...\n\n  Upload multiple objects to an S3 bucket\n\n  Pass one or more files to upload them:\n\n      s3-credentials put-objects my-bucket one.txt two.txt\n\n  These will be saved to the root of the bucket. To save to a different location\n  use the --prefix option:\n\n      s3-credentials put-objects my-bucket one.txt two.txt --prefix my-folder\n\n  This will upload them my-folder/one.txt and my-folder/two.txt.\n\n  If you pass a directory it will be uploaded recursively:\n\n      s3-credentials put-objects my-bucket my-folder\n\n  This will create keys in my-folder/... 
in the S3 bucket.\n\n  To upload all files in a folder to the root of the bucket instead use this:\n\n      s3-credentials put-objects my-bucket my-folder/*\n\nOptions:\n  --prefix TEXT         Prefix to add to the files within the bucket\n  -s, --silent          Don't show progress bar\n  --dry-run             Show steps without executing them\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n## s3-credentials set-bucket-policy --help\n\n```\nUsage: s3-credentials set-bucket-policy [OPTIONS] BUCKET\n\n  Set bucket policy for a bucket\n\n      s3-credentials set-bucket-policy my-bucket --policy-file policy.json\n\n  Or to set a policy that allows GET requests from all:\n\n      s3-credentials set-bucket-policy my-bucket --allow-all-get\n\nOptions:\n  --policy-file FILENAME\n  --allow-all-get         Allow GET requests from all\n  --access-key TEXT       AWS access key ID\n  --secret-key TEXT       AWS secret access key\n  --session-token TEXT    AWS session token\n  --endpoint-url TEXT     Custom endpoint URL\n  -a, --auth FILENAME     Path to JSON/INI file containing credentials\n  --help                  Show this message and exit.\n```\n## s3-credentials set-cors-policy --help\n\n```\nUsage: s3-credentials set-cors-policy [OPTIONS] BUCKET\n\n  Set CORS policy for a bucket\n\n  To allow GET requests from any origin:\n\n      s3-credentials set-cors-policy my-bucket\n\n  To allow GET and PUT from a specific origin and expose ETag headers:\n\n      s3-credentials set-cors-policy my-bucket \\\n        --allowed-method GET \\\n        --allowed-method PUT \\\n        --allowed-origin https://www.example.com/ \\\n        --expose-header ETag\n\nOptions:\n  -m, --allowed-method TEXT  Allowed method e.g. 
GET\n  -h, --allowed-header TEXT  Allowed header e.g. Authorization\n  -o, --allowed-origin TEXT  Allowed origin e.g. https://www.example.com/\n  -e, --expose-header TEXT   Header to expose e.g. ETag\n  --max-age-seconds INTEGER  How long to cache preflight requests\n  --access-key TEXT          AWS access key ID\n  --secret-key TEXT          AWS secret access key\n  --session-token TEXT       AWS session token\n  --endpoint-url TEXT        Custom endpoint URL\n  -a, --auth FILENAME        Path to JSON/INI file containing credentials\n  --help                     Show this message and exit.\n```\n## s3-credentials set-public-access-block --help\n\n```\nUsage: s3-credentials set-public-access-block [OPTIONS] BUCKET\n\n  Configure public access settings for an S3 bucket.\n\n  Example:\n\n      s3-credentials set-public-access-block my-bucket --block-public-acls false\n\n  To allow full public access to the bucket, use the --allow-public-access flag:\n\n      s3-credentials set-public-access-block my-bucket --allow-public-access\n\nOptions:\n  --block-public-acls BOOLEAN     Block public ACLs for the bucket (true/false).\n  --ignore-public-acls BOOLEAN    Ignore public ACLs for the bucket\n                                  (true/false).\n  --block-public-policy BOOLEAN   Block public bucket policies (true/false).\n  --restrict-public-buckets BOOLEAN\n                                  Restrict public buckets (true/false).\n  --allow-public-access           Set all public access settings to false\n                                  (allows full public access).\n  --access-key TEXT               AWS access key ID\n  --secret-key TEXT               AWS secret access key\n  --session-token TEXT            AWS session token\n  --endpoint-url TEXT             Custom endpoint URL\n  -a, --auth FILENAME             Path to JSON/INI file containing credentials\n  --help                          Show this message and exit.\n```\n## s3-credentials whoami --help\n\n```\nUsage: 
s3-credentials whoami [OPTIONS]\n\n  Identify currently authenticated user\n\nOptions:\n  --access-key TEXT     AWS access key ID\n  --secret-key TEXT     AWS secret access key\n  --session-token TEXT  AWS session token\n  --endpoint-url TEXT   Custom endpoint URL\n  -a, --auth FILENAME   Path to JSON/INI file containing credentials\n  --help                Show this message and exit.\n```\n<!-- [[[end]]] -->\n"
  },
  {
    "path": "docs/index.md",
    "content": "# s3-credentials\n\n[![PyPI](https://img.shields.io/pypi/v/s3-credentials.svg)](https://pypi.org/project/s3-credentials/)\n[![Changelog](https://img.shields.io/github/v/release/simonw/s3-credentials?include_prereleases&label=changelog)](https://github.com/simonw/s3-credentials/releases)\n[![Tests](https://github.com/simonw/s3-credentials/workflows/Test/badge.svg)](https://github.com/simonw/s3-credentials/actions?query=workflow%3ATest)\n[![Documentation Status](https://readthedocs.org/projects/s3-credentials/badge/?version=latest)](https://s3-credentials.readthedocs.org/)\n[![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/simonw/s3-credentials/blob/master/LICENSE)\n\nA tool for creating credentials for accessing S3 buckets\n\nFor project background, see [s3-credentials: a tool for creating credentials for S3 buckets](https://simonwillison.net/2021/Nov/3/s3-credentials/) on my blog.\n\nWhy would you need this? If you want to read and write to an S3 bucket from an automated script somewhere, you'll need an access key and secret key to authenticate your calls. This tool helps you create those with the most restrictive permissions possible.\n\nIf your code is running in EC2 or Lambda you can likely solve this [using roles instead](https://aws.amazon.com/premiumsupport/knowledge-center/lambda-execution-role-s3-bucket/). 
This tool is mainly useful for when you are interacting with S3 from outside the boundaries of AWS itself.\n\n## Installation\n\nInstall this tool using `pip`:\n\n    $ pip install s3-credentials\n\n## Documentation\n\n```{toctree}\n---\nmaxdepth: 3\n---\nconfiguration\ncreate\nlocalserver\nother-commands\npolicy-documents\nhelp\ncontributing\n```\n\n## Tips\n\nYou can see a log of changes made by this tool using AWS CloudTrail - the following link should provide an Event History interface showing relevant changes made to your AWS account such as `CreateAccessKey`, `CreateUser`, `PutUserPolicy` and more:\n\n<https://console.aws.amazon.com/cloudtrail/home>\n\nYou can view a list of your S3 buckets and confirm that they have the desired permissions and properties here:\n\n<https://console.aws.amazon.com/s3/home>\n\nThe management interface for an individual bucket is at `https://console.aws.amazon.com/s3/buckets/NAME-OF-BUCKET`\n"
  },
  {
    "path": "docs/localserver.md",
    "content": "# Local credential server\n\nThe `s3-credentials localserver` command starts a local HTTP server that serves temporary S3 credentials. This is useful when you need to provide credentials to applications that can fetch them from an HTTP endpoint.\n\n## Basic usage\n\nTo start a server that serves credentials for a bucket:\n\n```bash\ns3-credentials localserver my-bucket --duration 1h\n```\n\nThis starts a server on `localhost:8094` that responds to `GET /` requests with JSON containing temporary AWS credentials.\n\nThe server will output:\n\n```\nGenerating initial credentials...\nServing read-write credentials for bucket 'my-bucket' at http://localhost:8094/\nDuration: 3600 seconds\nPress Ctrl+C to stop\n```\n\n## Fetching credentials\n\nOnce the server is running, fetch credentials with:\n\n```bash\ncurl http://localhost:8094/\n```\n\nThis returns JSON in the [AWS credential_process format](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html):\n\n```json\n{\n  \"Version\": 1,\n  \"AccessKeyId\": \"ASIAWXFXAIOZPAHAYHUG\",\n  \"SecretAccessKey\": \"Nrnoc...\",\n  \"SessionToken\": \"FwoGZXIvYXd...mr9Fjs=\",\n  \"Expiration\": \"2025-12-16T12:00:00+00:00\"\n}\n```\n\n## Options\n\n### Duration (required)\n\nThe `--duration` or `-d` option specifies how long credentials should be valid for. This must be between 15 minutes and 12 hours:\n\n```bash\n# 15 minutes\ns3-credentials localserver my-bucket --duration 15m\n\n# 1 hour\ns3-credentials localserver my-bucket --duration 1h\n\n# 12 hours\ns3-credentials localserver my-bucket --duration 12h\n```\n\n### Port\n\nChange the port with `-p` or `--port`:\n\n```bash\ns3-credentials localserver my-bucket --duration 1h --port 9000\n```\n\n### Host\n\nChange the host to bind to with `--host`:\n\n```bash\ns3-credentials localserver my-bucket --duration 1h --host 0.0.0.0\n```\n\n### Read-only or write-only access\n\nBy default, credentials have read-write access. 
Use `--read-only` or `--write-only` for more restricted access:\n\n```bash\n# Read-only access\ns3-credentials localserver my-bucket --duration 1h --read-only\n\n# Write-only access\ns3-credentials localserver my-bucket --duration 1h --write-only\n```\n\n### Prefix restriction\n\nRestrict access to keys with a specific prefix:\n\n```bash\ns3-credentials localserver my-bucket --duration 1h --prefix \"uploads/\"\n```\n\n### Custom policy statements\n\nAdd custom IAM policy statements with `--statement`:\n\n```bash\ns3-credentials localserver my-bucket --duration 1h \\\n  --statement '{\"Effect\": \"Allow\", \"Action\": \"textract:*\", \"Resource\": \"*\"}'\n```\n\n## Credential caching\n\nThe server caches credentials internally and serves the same credentials until they expire. When the duration elapses, the server automatically generates new credentials.\n\nThis avoids issues with multiple simultaneous requests all triggering credential generation (dogpile effect), and ensures that applications fetching credentials within a short time window all receive the same credentials.\n\n## Example: Using with AWS CLI profiles\n\nYou can configure an AWS CLI profile to fetch credentials from the local server. Add to your `~/.aws/config`:\n\n```ini\n[profile localserver]\ncredential_process = curl -s http://localhost:8094/\n```\n\nThen use:\n\n```bash\naws s3 ls s3://my-bucket/ --profile localserver\n```\n"
  },
  {
    "path": "docs/other-commands.md",
    "content": "# Other commands\n\n```{contents}\n---\nlocal:\nclass: this-will-duplicate-information-and-it-is-still-useful-here\n---\n```\n\n## policy\n\nYou can use the `s3-credentials policy` command to generate the JSON policy document that would be used without applying it. The command takes one or more required bucket names and a subset of the options available on the `create` command:\n\n- `--read-only` - generate a read-only policy\n- `--write-only` - generate a write-only policy\n- `--prefix` - policy should be restricted to keys in the bucket that start with this prefix\n- `--statement json-statement`: Custom JSON statement block\n- `--public-bucket` - generate a bucket policy for a public bucket\n\nWith none of these options it defaults to a read-write policy.\n```bash\ns3-credentials policy my-bucket --read-only\n```\n```\n{\n    \"Version\": \"2012-10-17\",\n...\n```\n\n## whoami\n\nTo see which user you are authenticated as:\n```bash\ns3-credentials whoami\n```\nThis will output JSON representing the currently authenticated user.\n\nUsing this with the `--auth` option is useful for verifying created credentials:\n```bash\ns3-credentials create static.niche-museums.com --read-only > auth.json\n```\n```bash\ns3-credentials whoami --auth auth.json\n```\n```json\n{\n    \"UserId\": \"AIDAWXFXAIOZPIZC6MHAG\",\n    \"Account\": \"462092780466\",\n    \"Arn\": \"arn:aws:iam::462092780466:user/s3.read-only.static.niche-museums.com\"\n}\n```\n## list-users\n\nTo see a list of all users that exist for your AWS account:\n```bash\ns3-credentials list-users\n```\nThis will return a pretty-printed array of JSON objects by default.\n\nAdd `--nl` to collapse these to single lines as valid newline-delimited JSON.\n\nAdd `--csv` or `--tsv` to get back CSV or TSV data.\n\n## list-buckets\n\nShows a list of all buckets in your AWS account.\n\n```bash\ns3-credentials list-buckets\n```\n```json\n[\n  {\n    \"Name\": \"aws-cloudtrail-logs-462092780466-f2c900d3\",\n    
\"CreationDate\": \"2021-03-25 22:19:54+00:00\"\n  },\n  {\n    \"Name\": \"simonw-test-bucket-for-s3-credentials\",\n    \"CreationDate\": \"2021-11-03 21:46:12+00:00\"\n  }\n]\n```\nWith no extra arguments this will show all available buckets - you can also add one or more explicit bucket names to see just those buckets:\n\n```bash\ns3-credentials list-buckets simonw-test-bucket-for-s3-credentials\n```\n```json\n[\n  {\n    \"Name\": \"simonw-test-bucket-for-s3-credentials\",\n    \"CreationDate\": \"2021-11-03 21:46:12+00:00\"\n  }\n]\n```\nThis accepts the same `--nl`, `--csv` and `--tsv` options as `list-users`.\n\nAdd `--details` to include details of the bucket ACL, website configuration and public access block settings. This is useful for running a security audit of your buckets.\n\nUsing `--details` adds several additional API calls for each bucket, so it is advisable to use it with one or more explicit bucket names.\n```bash\ns3-credentials list-buckets simonw-test-public-website-bucket --details\n```\n```json\n[\n  {\n    \"Name\": \"simonw-test-public-website-bucket\",\n    \"CreationDate\": \"2021-11-08 22:53:30+00:00\",\n    \"region\": \"us-east-1\",\n    \"bucket_acl\": {\n      \"Owner\": {\n        \"DisplayName\": \"simon\",\n        \"ID\": \"abcdeabcdeabcdeabcdeabcdeabcde0001\"\n      },\n      \"Grants\": [\n        {\n          \"Grantee\": {\n            \"DisplayName\": \"simon\",\n            \"ID\": \"abcdeabcdeabcdeabcdeabcdeabcde0001\",\n            \"Type\": \"CanonicalUser\"\n          },\n          \"Permission\": \"FULL_CONTROL\"\n        }\n      ]\n    },\n    \"public_access_block\": null,\n    \"bucket_website\": {\n      \"IndexDocument\": {\n        \"Suffix\": \"index.html\"\n      },\n      \"ErrorDocument\": {\n        \"Key\": \"error.html\"\n      },\n      \"url\": \"http://simonw-test-public-website-bucket.s3-website.us-east-1.amazonaws.com/\"\n    }\n  }\n]\n```\nA bucket with `public_access_block` might look like 
this:\n```json\n{\n  \"Name\": \"aws-cloudtrail-logs-462092780466-f2c900d3\",\n  \"CreationDate\": \"2021-03-25 22:19:54+00:00\",\n  \"bucket_acl\": {\n    \"Owner\": {\n      \"DisplayName\": \"simon\",\n      \"ID\": \"abcdeabcdeabcdeabcdeabcdeabcde0001\"\n    },\n    \"Grants\": [\n      {\n        \"Grantee\": {\n          \"DisplayName\": \"simon\",\n          \"ID\": \"abcdeabcdeabcdeabcdeabcdeabcde0001\",\n          \"Type\": \"CanonicalUser\"\n        },\n        \"Permission\": \"FULL_CONTROL\"\n      }\n    ]\n  },\n  \"public_access_block\": {\n    \"BlockPublicAcls\": true,\n    \"IgnorePublicAcls\": true,\n    \"BlockPublicPolicy\": true,\n    \"RestrictPublicBuckets\": true\n  },\n  \"bucket_website\": null\n}\n```\n\n## list-bucket\n\nTo list the contents of a bucket, use `list-bucket`:\n\n```bash\ns3-credentials list-bucket static.niche-museums.com\n```\n```json\n[\n  {\n    \"Key\": \"Griffith-Observatory.jpg\",\n    \"LastModified\": \"2020-01-05 16:51:01+00:00\",\n    \"ETag\": \"\\\"a4cff17d189e7eb0c4d3bf0257e56885\\\"\",\n    \"Size\": 3360040,\n    \"StorageClass\": \"STANDARD\"\n  },\n  {\n    \"Key\": \"IMG_0353.jpeg\",\n    \"LastModified\": \"2019-10-25 02:50:49+00:00\",\n    \"ETag\": \"\\\"d45bab0b65c0e4b03b2ac0359c7267e3\\\"\",\n    \"Size\": 2581023,\n    \"StorageClass\": \"STANDARD\"\n  }\n]\n```\nYou can use the `--prefix myprefix/` option to list only keys that start with a specific prefix.\n\nThe command accepts the same `--nl`, `--csv` and `--tsv` options as `list-users`.\n\nAdd `--urls` to include a `URL` field in the output providing the full URL to each object.\n\n## list-user-policies\n\nTo see a list of inline policies belonging to users:\n\n```bash\ns3-credentials list-user-policies s3.read-write.static.niche-museums.com\n```\n```\nUser: s3.read-write.static.niche-museums.com\nPolicyName: s3.read-write.static.niche-museums.com\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n     
 \"Action\": [\n        \"s3:ListBucket\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::static.niche-museums.com\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": \"s3:*Object\",\n      \"Resource\": [\n        \"arn:aws:s3:::static.niche-museums.com/*\"\n      ]\n    }\n  ]\n}\n```\nYou can pass any number of usernames here. If you don't specify a username the tool will loop through every user belonging to your account:\n```bash\ns3-credentials list-user-policies\n```\n## list-roles\n\nThe `list-roles` command lists all of the roles available for the authenticated account.\n\nAdd `--details` to fetch the inline and attached managed policies for each row as well - this is slower as it needs to make several additional API calls for each role.\n\nYou can optionally add one or more role names to the command to display and fetch details about just those specific roles.\n\nExample usage:\n\n```bash\ns3-credentials list-roles AWSServiceRoleForLightsail --details\n```\n```json\n[\n  {\n    \"Path\": \"/aws-service-role/lightsail.amazonaws.com/\",\n    \"RoleName\": \"AWSServiceRoleForLightsail\",\n    \"RoleId\": \"AROAWXFXAIOZG5ACQ5NZ5\",\n    \"Arn\": \"arn:aws:iam::462092780466:role/aws-service-role/lightsail.amazonaws.com/AWSServiceRoleForLightsail\",\n    \"CreateDate\": \"2021-01-15 21:41:48+00:00\",\n    \"AssumeRolePolicyDocument\": {\n      \"Version\": \"2012-10-17\",\n      \"Statement\": [\n        {\n          \"Effect\": \"Allow\",\n          \"Principal\": {\n            \"Service\": \"lightsail.amazonaws.com\"\n          },\n          \"Action\": \"sts:AssumeRole\"\n        }\n      ]\n    },\n    \"MaxSessionDuration\": 3600,\n    \"inline_policies\": [\n      {\n        \"RoleName\": \"AWSServiceRoleForLightsail\",\n        \"PolicyName\": \"LightsailExportAccess\",\n        \"PolicyDocument\": {\n          \"Version\": \"2012-10-17\",\n          \"Statement\": [\n            {\n              \"Effect\": \"Allow\",\n   
           \"Action\": [\n                \"kms:Decrypt\",\n                \"kms:DescribeKey\",\n                \"kms:CreateGrant\"\n              ],\n              \"Resource\": \"arn:aws:kms:*:451833091580:key/*\"\n            },\n            {\n              \"Effect\": \"Allow\",\n              \"Action\": [\n                \"cloudformation:DescribeStacks\"\n              ],\n              \"Resource\": \"arn:aws:cloudformation:*:*:stack/*/*\"\n            }\n          ]\n        }\n      }\n    ],\n    \"attached_policies\": [\n      {\n        \"PolicyName\": \"LightsailExportAccess\",\n        \"PolicyId\": \"ANPAJ4LZGPQLZWMVR4WMQ\",\n        \"Arn\": \"arn:aws:iam::aws:policy/aws-service-role/LightsailExportAccess\",\n        \"Path\": \"/aws-service-role/\",\n        \"DefaultVersionId\": \"v2\",\n        \"AttachmentCount\": 1,\n        \"PermissionsBoundaryUsageCount\": 0,\n        \"IsAttachable\": true,\n        \"Description\": \"AWS Lightsail service linked role policy which grants permissions to export resources\",\n        \"CreateDate\": \"2018-09-28 16:35:54+00:00\",\n        \"UpdateDate\": \"2022-01-15 01:45:33+00:00\",\n        \"Tags\": [],\n        \"PolicyVersion\": {\n          \"Document\": {\n            \"Version\": \"2012-10-17\",\n            \"Statement\": [\n              {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                  \"iam:DeleteServiceLinkedRole\",\n                  \"iam:GetServiceLinkedRoleDeletionStatus\"\n                ],\n                \"Resource\": \"arn:aws:iam::*:role/aws-service-role/lightsail.amazonaws.com/AWSServiceRoleForLightsail*\"\n              },\n              {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                  \"ec2:CopySnapshot\",\n                  \"ec2:DescribeSnapshots\",\n                  \"ec2:CopyImage\",\n                  \"ec2:DescribeImages\"\n                ],\n                \"Resource\": \"*\"\n         
     },\n              {\n                \"Effect\": \"Allow\",\n                \"Action\": [\n                  \"s3:GetAccountPublicAccessBlock\"\n                ],\n                \"Resource\": \"*\"\n              }\n            ]\n          },\n          \"VersionId\": \"v2\",\n          \"IsDefaultVersion\": true,\n          \"CreateDate\": \"2022-01-15 01:45:33+00:00\"\n        }\n      }\n    ]\n  }\n]\n```\nAdd `--nl` to collapse these to single lines as valid newline-delimited JSON.\n\nAdd `--csv` or `--tsv` to get back CSV or TSV data.\n\n## delete-user\n\nIn trying out this tool it's possible you will create several different user accounts that you later decide to clean up.\n\nDeleting AWS users is a little fiddly: you first need to delete their access keys, then their inline policies and finally the user themselves.\n\nThe `s3-credentials delete-user` handles this for you:\n\n```bash\ns3-credentials delete-user s3.read-write.simonw-test-bucket-10\n```\n```\nUser: s3.read-write.simonw-test-bucket-10\n  Deleted policy: s3.read-write.simonw-test-bucket-10\n  Deleted access key: AKIAWXFXAIOZK3GPEIWR\n  Deleted user\n```\nYou can pass it multiple usernames to delete multiple users at a time.\n\n## put-object\n\nYou can upload a file to a key in an S3 bucket using `s3-credentials put-object`:\n```bash\ns3-credentials put-object my-bucket my-key.txt /path/to/file.txt\n```\nUse `-` as the file name to upload from standard input:\n```bash\necho \"Hello\" | s3-credentials put-object my-bucket hello.txt -\n```\nThis command shows a progress bar by default. Use `-s` or `--silent` to hide the progress bar.\n\nThe `Content-Type` on the uploaded object will be automatically set based on the file extension. 
If you are using standard input, or you want to over-ride the detected type, you can do so using the `--content-type` option:\n```bash\necho \"<h1>Hello World</h1>\" | \\\n  s3-credentials put-object my-bucket hello.html - --content-type \"text/html\"\n```\n## put-objects\n\n`s3-credentials put-objects` can be used to upload more than one file at once.\n\nPass one or more filenames to upload them to the root of your bucket:\n```bash\ns3-credentials put-objects my-bucket one.txt two.txt three.txt\n```\nUse `--prefix my-prefix` to upload them to the specified prefix:\n```bash\ns3-credentials put-objects my-bucket one.txt --prefix my-prefix\n```\nThis will upload the file to `my-prefix/one.txt`.\n\nPass one or more directories to upload the contents of those directories.\n`.` uploads everything in your current directory:\n```bash\ns3-credentials put-objects my-bucket .\n```\nPassing directory names will upload the directory and all of its contents:\n```bash\ns3-credentials put-objects my-bucket my-directory\n```\nIf `my-directory` had files `one.txt` and `two.txt` in it, the result would be:\n```\nmy-directory/one.txt\nmy-directory/two.txt\n```\nA progress bar will be shown by default. Use `-s` or `--silent` to hide it.\n\nAdd `--dry-run` to get a preview of what would be uploaded without uploading anything:\n```bash\ns3-credentials put-objects my-bucket . 
--dry-run\n```\n```\nout/IMG_1254.jpeg => s3://my-bucket/out/IMG_1254.jpeg\nout/alverstone-mead-2.jpg => s3://my-bucket/out/alverstone-mead-2.jpg\nout/alverstone-mead-1.jpg => s3://my-bucket/out/alverstone-mead-1.jpg\n```\n\n## delete-objects\n\n`s3-credentials delete-objects` can be used to delete one or more keys from the bucket.\n\nPass one or more keys to delete them:\n```bash\ns3-credentials delete-objects my-bucket one.txt two.txt three.txt\n```\nUse `--prefix my-prefix` to delete all keys with the specified prefix:\n```bash\ns3-credentials delete-objects my-bucket --prefix my-prefix\n```\nPass `-d` or `--dry-run` to perform a dry-run of the deletion, which will list the keys that would be deleted without actually deleting them.\n```bash\ns3-credentials delete-objects my-bucket --prefix my-prefix --dry-run\n```\n## get-object\n\nTo download a file from a bucket use `s3-credentials get-object`:\n```bash\ns3-credentials get-object my-bucket hello.txt\n```\nThis defaults to outputting the downloaded file to the terminal. 
You can instead direct it to save to a file on disk using the `-o` or `--output` option:\n```bash\ns3-credentials get-object my-bucket hello.txt -o /path/to/hello.txt\n```\n## get-objects\n\n`s3-credentials get-objects` can be used to download multiple files from a bucket at once.\n\nWithout extra arguments, this downloads everything:\n```bash\ns3-credentials get-objects my-bucket\n```\nFiles will be written to the current directory by default, preserving their directory structure from the bucket.\n\nTo write to a different directory use `--output` or `-o`:\n```bash\ns3-credentials get-objects my-bucket -o /path/to/output\n```\nTo download multiple specific files, add them as arguments to the command:\n```bash\ns3-credentials get-objects my-bucket one.txt two.txt path/to/three.txt\n```\nYou can pass one or more `--pattern` or `-p` options to download files matching a specific pattern:\n```bash\ns3-credentials get-objects my-bucket -p \"*.txt\" -p \"static/*.css\"\n```\nHere the `*` wildcard will match any sequence of characters, including `/`. `?` will match a single character.\n\nA progress bar will be shown by default. Use `-s` or `--silent` to hide it.\n\n## set-cors-policy and get-cors-policy\n\nYou can set the [CORS policy](https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html) for a bucket using the `set-cors-policy` command. S3 CORS policies are set at the bucket level - they cannot be set for individual items.\n\nFirst, create the bucket. 
Make sure to make it `--public`:\n```bash\ns3-credentials create my-cors-bucket --public -c\n```\nYou can set a default CORS policy - allowing `GET` requests from any origin - like this:\n```bash\ns3-credentials set-cors-policy my-cors-bucket\n```\nYou can use the `get-cors-policy` command to confirm the policy you have set:\n```bash\ns3-credentials get-cors-policy my-cors-bucket\n```\n```json\n[\n    {\n        \"ID\": \"set-by-s3-credentials\",\n        \"AllowedMethods\": [\n            \"GET\"\n        ],\n        \"AllowedOrigins\": [\n            \"*\"\n        ]\n    }\n]\n```\nTo customize the CORS policy, use the following options:\n\n- `-m/--allowed-method` - Allowed method e.g. `GET`\n- `-h/--allowed-header` - Allowed header e.g. `Authorization`\n- `-o/--allowed-origin` - Allowed origin e.g. `https://www.example.com/`\n- `-e/--expose-header` -  Header to expose e.g. `ETag`\n- `--max-age-seconds` - How long to cache preflight requests\n\nEach of these can be passed multiple times with the exception of `--max-age-seconds`.\n\nThe following example allows GET and PUT methods from code running on `https://www.example.com/`, allows the incoming `Authorization` header and exposes the `ETag` header. 
It also sets the client to cache preflight requests for 60 seconds:\n```bash\ns3-credentials set-cors-policy my-cors-bucket2 \\\n  --allowed-method GET \\\n  --allowed-method PUT \\\n  --allowed-origin https://www.example.com/ \\\n  --expose-header ETag \\\n  --max-age-seconds 60\n```\n## debug-bucket\n\nThe `debug-bucket` command is useful for diagnosing issues with a bucket:\n```bash\ns3-credentials debug-bucket my-bucket\n```\nExample output:\n```\nBucket ACL:\n{\n    \"Owner\": {\n        \"DisplayName\": \"username\",\n        \"ID\": \"cc8ca3a037c6a7c1fa7580076bf7cd1949b3f2f58f01c9df9e53c51f6a249910\"\n    },\n    \"Grants\": [\n        {\n            \"Grantee\": {\n                \"DisplayName\": \"username\",\n                \"ID\": \"cc8ca3a037c6a7c1fa7580076bf7cd1949b3f2f58f01c9df9e53c51f6a249910\",\n                \"Type\": \"CanonicalUser\"\n            },\n            \"Permission\": \"FULL_CONTROL\"\n        }\n    ]\n}\nBucket policy status:\n{\n    \"PolicyStatus\": {\n        \"IsPublic\": true\n    }\n}\nBucket public access block:\n{\n    \"PublicAccessBlockConfiguration\": {\n        \"BlockPublicAcls\": false,\n        \"IgnorePublicAcls\": false,\n        \"BlockPublicPolicy\": false,\n        \"RestrictPublicBuckets\": false\n    }\n}\n```\n## get-bucket-policy\n\nThe `get-bucket-policy` command displays the current bucket policy for a bucket:\n```bash\ns3-credentials get-bucket-policy my-bucket\n```\nExample output:\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"AllowAllGetObject\",\n            \"Effect\": \"Allow\",\n            \"Principal\": \"*\",\n            \"Action\": \"s3:GetObject\",\n            \"Resource\": \"arn:aws:s3:::my-bucket/*\"\n        }\n    ]\n}\n```\n\n## set-bucket-policy\n\nThe `set-bucket-policy` command can be used to set a bucket policy for a bucket:\n```bash\ns3-credentials set-bucket-policy my-bucket --policy-file policy.json\n```\nOr for the 
common case of setting a policy to allow GET access to all buckets:\n```bash\ns3-credentials set-bucket-policy my-bucket --allow-all-get\n```\n\n## get-public-access-block\n\nThe `get-public-access-block` command displays the current public access block configuration for a bucket:\n```bash\ns3-credentials get-public-access-block my-bucket\n```\nExample output:\n\n```json\n{\n    \"BlockPublicAcls\": false,\n    \"IgnorePublicAcls\": false,\n    \"BlockPublicPolicy\": false,\n    \"RestrictPublicBuckets\": false\n}\n```\n\n## set-public-access-block\n\nThe `set-public-access-block` command can be used to set the public access block configuration for a bucket:\n```bash\ns3-credentials set-public-access-block my-bucket \\\n  --block-public-acls true \\\n  --ignore-public-acls true \\\n  --block-public-policy true \\\n  --restrict-public-buckets true\n```\nEach of the above options accepts `true` or `false`.\n\nYou can use the `--allow-public-access` shortcut to set everything to `false` in one go:\n```bash\ns3-credentials set-public-access-block my-bucket \\\n  --allow-public-access\n```\n"
  },
  {
    "path": "docs/policy-documents.md",
    "content": "# Policy documents\n\nThe IAM policies generated by this tool for a bucket called `my-s3-bucket` would look like this:\n\n## read-write (default)\n\n<!-- [[[cog\nimport cog, json\nfrom s3_credentials import cli\nfrom click.testing import CliRunner\nrunner = CliRunner()\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:ListBucket\",\n        \"s3:GetBucketLocation\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetObject\",\n        \"s3:GetObjectAcl\",\n        \"s3:GetObjectLegalHold\",\n        \"s3:GetObjectRetention\",\n        \"s3:GetObjectTagging\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/*\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\",\n        \"s3:DeleteObject\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n## `--read-only`\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--read-only\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:ListBucket\",\n        \"s3:GetBucketLocation\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetObject\",\n        \"s3:GetObjectAcl\",\n        \"s3:GetObjectLegalHold\",\n        \"s3:GetObjectRetention\",\n        \"s3:GetObjectTagging\"\n      ],\n      
\"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n## `--write-only`\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--write-only\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n## `--prefix my-prefix/`\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--prefix\", \"my-prefix/\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetBucketLocation\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:ListBucket\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ],\n      \"Condition\": {\n        \"StringLike\": {\n          \"s3:prefix\": [\n            \"my-prefix/*\"\n          ]\n        }\n      }\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetObject\",\n        \"s3:GetObjectAcl\",\n        \"s3:GetObjectLegalHold\",\n        \"s3:GetObjectRetention\",\n        \"s3:GetObjectTagging\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/my-prefix/*\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\",\n        \"s3:DeleteObject\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/my-prefix/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n## `--prefix my-prefix/ 
--read-only`\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--prefix\", \"my-prefix/\", \"--read-only\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetBucketLocation\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ]\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:ListBucket\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket\"\n      ],\n      \"Condition\": {\n        \"StringLike\": {\n          \"s3:prefix\": [\n            \"my-prefix/*\"\n          ]\n        }\n      }\n    },\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:GetObject\",\n        \"s3:GetObjectAcl\",\n        \"s3:GetObjectLegalHold\",\n        \"s3:GetObjectRetention\",\n        \"s3:GetObjectTagging\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/my-prefix/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n## `--prefix my-prefix/ --write-only`\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--prefix\", \"my-prefix/\", \"--write-only\"])\ncog.out(\n    \"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:PutObject\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/my-prefix/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->\n\n(public_bucket_policy)=\n\n## public bucket policy\n\nBuckets created using the `--public` option will have the following bucket policy attached to them:\n\n<!-- [[[cog\nresult = runner.invoke(cli.cli, [\"policy\", \"my-s3-bucket\", \"--public-bucket\"])\ncog.out(\n    
\"```\\n{}\\n```\".format(json.dumps(json.loads(result.output), indent=2))\n)\n]]] -->\n```\n{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Sid\": \"AllowAllGetObject\",\n      \"Effect\": \"Allow\",\n      \"Principal\": \"*\",\n      \"Action\": [\n        \"s3:GetObject\"\n      ],\n      \"Resource\": [\n        \"arn:aws:s3:::my-s3-bucket/*\"\n      ]\n    }\n  ]\n}\n```\n<!-- [[[end]]] -->"
  },
  {
    "path": "docs/requirements.txt",
    "content": "furo\nsphinx-autobuild\nmyst-parser\ncogapp\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[project]\nname = \"s3-credentials\"\nversion = \"0.17\"\ndescription = \"A tool for creating credentials for accessing S3 buckets\"\nreadme = \"README.md\"\nauthors = [{name = \"Simon Willison\"}]\nlicense = {text = \"Apache-2.0\"}\nrequires-python = \">=3.10\"\ndependencies = [\n    \"click\",\n    \"boto3\",\n]\n\n[project.urls]\nHomepage = \"https://github.com/simonw/s3-credentials\"\nIssues = \"https://github.com/simonw/s3-credentials/issues\"\nCI = \"https://github.com/simonw/s3-credentials/actions\"\nChangelog = \"https://github.com/simonw/s3-credentials/releases\"\n\n[project.scripts]\ns3-credentials = \"s3_credentials.cli:cli\"\n\n[tool.poe.tasks]\ndocs.cmd = \"sphinx-build -M html docs docs/_build\"\ndocs.help = \"Build the docs\"\nlivehtml.cmd = \"sphinx-autobuild -b html docs docs/_build\"\nlivehtml.help = \"Live-reloading docs server\"\ncog.cmd = \"cog -r docs/*.md\"\ncog.help = \"Regenerate cog snippets in the docs\"\n\n[build-system]\nrequires = [\"setuptools\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[dependency-groups]\ntest = [\n    \"pytest\",\n    \"pytest-mock\",\n    \"cogapp\",\n    \"moto>=5.0.4\",\n]\ndocs = [\n    \"furo\",\n    \"sphinx-autobuild\",\n    \"myst-parser\",\n    \"cogapp\",\n]\ndev = [\n    {include-group = \"test\"},\n    {include-group = \"docs\"},\n    \"poethepoet>=0.38.0\",\n]\n"
  },
  {
    "path": "s3_credentials/__init__.py",
    "content": ""
  },
  {
    "path": "s3_credentials/cli.py",
    "content": "from re import A\nimport boto3\nimport botocore\nimport click\nimport configparser\nfrom csv import DictWriter\nimport fnmatch\nimport io\nimport itertools\nimport json\nimport mimetypes\nimport os\nimport pathlib\nimport re\nimport sys\nimport textwrap\nfrom . import policies\n\nPUBLIC_ACCESS_BLOCK_CONFIGURATION = {\n    \"BlockPublicAcls\": False,\n    \"IgnorePublicAcls\": False,\n    \"BlockPublicPolicy\": False,\n    \"RestrictPublicBuckets\": False,\n}\n\n\ndef bucket_exists(s3, bucket):\n    try:\n        s3.head_bucket(Bucket=bucket)\n        return True\n    except botocore.exceptions.ClientError:\n        return False\n\n\ndef user_exists(iam, username):\n    try:\n        iam.get_user(UserName=username)\n        return True\n    except iam.exceptions.NoSuchEntityException:\n        return False\n\n\ndef common_boto3_options(fn):\n    for decorator in reversed(\n        (\n            click.option(\n                \"--access-key\",\n                help=\"AWS access key ID\",\n            ),\n            click.option(\n                \"--secret-key\",\n                help=\"AWS secret access key\",\n            ),\n            click.option(\n                \"--session-token\",\n                help=\"AWS session token\",\n            ),\n            click.option(\n                \"--endpoint-url\",\n                help=\"Custom endpoint URL\",\n            ),\n            click.option(\n                \"-a\",\n                \"--auth\",\n                type=click.File(\"r\"),\n                help=\"Path to JSON/INI file containing credentials\",\n            ),\n        )\n    ):\n        fn = decorator(fn)\n    return fn\n\n\ndef common_output_options(fn):\n    for decorator in reversed(\n        (\n            click.option(\"--nl\", help=\"Output newline-delimited JSON\", is_flag=True),\n            click.option(\"--csv\", help=\"Output CSV\", is_flag=True),\n            click.option(\"--tsv\", help=\"Output TSV\", 
is_flag=True),\n        )\n    ):\n        fn = decorator(fn)\n    return fn\n\n\n@click.group()\n@click.version_option()\ndef cli():\n    \"\"\"\n    A tool for creating credentials for accessing S3 buckets\n\n    Documentation: https://s3-credentials.readthedocs.io/\n    \"\"\"\n\n\nclass PolicyParam(click.ParamType):\n    \"Returns string of guaranteed well-formed JSON\"\n    name = \"policy\"\n\n    def convert(self, policy, param, ctx):\n        if policy.strip().startswith(\"{\"):\n            # Verify policy string is valid JSON\n            try:\n                json.loads(policy)\n            except ValueError:\n                self.fail(\"Invalid JSON string\")\n            return policy\n        else:\n            # Assume policy is a file path or '-'\n            try:\n                with click.open_file(policy) as f:\n                    contents = f.read()\n                    try:\n                        json.loads(contents)\n                        return contents\n                    except ValueError:\n                        self.fail(\n                            \"{} contained invalid JSON\".format(\n                                \"Input\" if policy == \"-\" else \"File\"\n                            )\n                        )\n            except FileNotFoundError:\n                self.fail(\"File not found\")\n\n\nclass DurationParam(click.ParamType):\n    name = \"duration\"\n    pattern = re.compile(r\"^(\\d+)(m|h|s)?$\")\n\n    def convert(self, value, param, ctx):\n        match = self.pattern.match(value)\n        if match is None:\n            self.fail(\"Duration must be of form 3600s or 15m or 2h\")\n        integer_string, suffix = match.groups()\n        integer = int(integer_string)\n        if suffix == \"m\":\n            integer *= 60\n        elif suffix == \"h\":\n            integer *= 3600\n        # Must be between 15 minutes and 12 hours\n        if not (15 * 60 <= integer <= 12 * 60 * 60):\n            
self.fail(\"Duration must be between 15 minutes and 12 hours\")\n        return integer\n\n\nclass StatementParam(click.ParamType):\n    \"Ensures statement is valid JSON with required fields\"\n    name = \"statement\"\n\n    def convert(self, statement, param, ctx):\n        try:\n            data = json.loads(statement)\n        except ValueError:\n            self.fail(\"Invalid JSON string\")\n        if not isinstance(data, dict):\n            self.fail(\"JSON must be an object\")\n        missing_keys = {\"Effect\", \"Action\", \"Resource\"} - data.keys()\n        if missing_keys:\n            self.fail(\n                \"Statement JSON missing required keys: {}\".format(\n                    \", \".join(sorted(missing_keys))\n                )\n            )\n        return data\n\n\n@cli.command()\n@click.argument(\n    \"buckets\",\n    nargs=-1,\n    required=True,\n)\n@click.option(\"--read-only\", help=\"Only allow reading from the bucket\", is_flag=True)\n@click.option(\"--write-only\", help=\"Only allow writing to the bucket\", is_flag=True)\n@click.option(\n    \"--prefix\", help=\"Restrict to keys starting with this prefix e.g. 
foo/\", default=\"*\"\n)\n@click.option(\n    \"extra_statements\",\n    \"--statement\",\n    multiple=True,\n    type=StatementParam(),\n    help=\"JSON statement to add to the policy\",\n)\n@click.option(\n    \"--public-bucket\",\n    help=\"Bucket policy for allowing public access\",\n    is_flag=True,\n)\ndef policy(buckets, read_only, write_only, prefix, extra_statements, public_bucket):\n    \"\"\"\n    Output generated JSON policy for one or more buckets\n\n    Takes the same options as s3-credentials create\n\n    To output a read-only JSON policy for a bucket:\n\n        s3-credentials policy my-bucket --read-only\n    \"\"\"\n    \"Generate JSON policy for one or more buckets\"\n    if public_bucket:\n        if len(buckets) != 1:\n            raise click.ClickException(\n                \"--public-bucket policy can only be generated for a single bucket\"\n            )\n        click.echo(\n            json.dumps(policies.bucket_policy_allow_all_get(buckets[0]), indent=4)\n        )\n        return\n    permission = \"read-write\"\n    if read_only:\n        permission = \"read-only\"\n    if write_only:\n        permission = \"write-only\"\n    statements = []\n    if permission == \"read-write\":\n        for bucket in buckets:\n            statements.extend(policies.read_write_statements(bucket, prefix))\n    elif permission == \"read-only\":\n        for bucket in buckets:\n            statements.extend(policies.read_only_statements(bucket, prefix))\n    elif permission == \"write-only\":\n        for bucket in buckets:\n            statements.extend(policies.write_only_statements(bucket, prefix))\n    else:\n        assert False, \"Unknown permission: {}\".format(permission)\n    if extra_statements:\n        statements.extend(extra_statements)\n    bucket_access_policy = policies.wrap_policy(statements)\n    click.echo(json.dumps(bucket_access_policy, indent=4))\n\n\n@cli.command()\n@click.argument(\n    \"buckets\",\n    nargs=-1,\n    
required=True,\n)\n@click.option(\n    \"format_\",\n    \"-f\",\n    \"--format\",\n    type=click.Choice([\"ini\", \"json\"]),\n    default=\"json\",\n    help=\"Output format for credentials\",\n)\n@click.option(\n    \"-d\",\n    \"--duration\",\n    type=DurationParam(),\n    help=\"How long should these credentials work for? Default is forever, use 3600 for 3600 seconds, 15m for 15 minutes, 1h for 1 hour\",\n)\n@click.option(\"--username\", help=\"Username to create or existing user to use\")\n@click.option(\n    \"-c\",\n    \"--create-bucket\",\n    help=\"Create buckets if they do not already exist\",\n    is_flag=True,\n)\n@click.option(\n    \"--prefix\", help=\"Restrict to keys starting with this prefix\", default=\"*\"\n)\n@click.option(\n    \"--public\",\n    help=\"Make the created bucket public: anyone will be able to download files if they know their name\",\n    is_flag=True,\n)\n@click.option(\n    \"--website\",\n    help=\"Configure bucket to act as a website, using index.html and error.html\",\n    is_flag=True,\n)\n@click.option(\"--read-only\", help=\"Only allow reading from the bucket\", is_flag=True)\n@click.option(\"--write-only\", help=\"Only allow writing to the bucket\", is_flag=True)\n@click.option(\n    \"--policy\",\n    type=PolicyParam(),\n    help=\"Path to a policy.json file, or literal JSON string - $!BUCKET_NAME!$ will be replaced with the name of the bucket\",\n)\n@click.option(\n    \"extra_statements\",\n    \"--statement\",\n    multiple=True,\n    type=StatementParam(),\n    help=\"JSON statement to add to the policy\",\n)\n@click.option(\"--bucket-region\", help=\"Region in which to create buckets\")\n@click.option(\"--silent\", help=\"Don't show performed steps\", is_flag=True)\n@click.option(\"--dry-run\", help=\"Show steps without executing them\", is_flag=True)\n@click.option(\n    \"--user-permissions-boundary\",\n    help=(\n        \"Custom permissions boundary to use for created users, or 'none' to \"\n        
\"create without. Defaults to limiting to S3 based on \"\n        \"--read-only and --write-only options.\"\n    ),\n)\n@common_boto3_options\ndef create(\n    buckets,\n    format_,\n    duration,\n    username,\n    create_bucket,\n    prefix,\n    public,\n    website,\n    read_only,\n    write_only,\n    policy,\n    extra_statements,\n    bucket_region,\n    user_permissions_boundary,\n    silent,\n    dry_run,\n    **boto_options,\n):\n    \"\"\"\n    Create and return new AWS credentials for specified S3 buckets - optionally\n    also creating the bucket if it does not yet exist.\n\n    To create a new bucket and output read-write credentials:\n\n        s3-credentials create my-new-bucket -c\n\n    To create read-only credentials for an existing bucket:\n\n        s3-credentials create my-existing-bucket --read-only\n\n    To create write-only credentials that are only valid for 15 minutes:\n\n        s3-credentials create my-existing-bucket --write-only -d 15m\n    \"\"\"\n    if read_only and write_only:\n        raise click.ClickException(\n            \"Cannot use --read-only and --write-only at the same time\"\n        )\n    extra_statements = list(extra_statements)\n\n    def log(message):\n        if not silent:\n            click.echo(message, err=True)\n\n    permission = \"read-write\"\n    if read_only:\n        permission = \"read-only\"\n    if write_only:\n        permission = \"write-only\"\n\n    if not user_permissions_boundary and (policy or extra_statements):\n        user_permissions_boundary = \"none\"\n\n    if website:\n        public = True\n\n    s3 = None\n    iam = None\n    sts = None\n\n    if not dry_run:\n        s3 = make_client(\"s3\", **boto_options)\n        iam = make_client(\"iam\", **boto_options)\n        sts = make_client(\"sts\", **boto_options)\n\n    # Verify buckets\n    for bucket in buckets:\n        # Create bucket if it doesn't exist\n        if dry_run or (not bucket_exists(s3, bucket)):\n            if 
(not dry_run) and (not create_bucket):\n                raise click.ClickException(\n                    \"Bucket does not exist: {} - try --create-bucket to create it\".format(\n                        bucket\n                    )\n                )\n            if dry_run or create_bucket:\n                kwargs = {}\n                if bucket_region:\n                    kwargs = {\n                        \"CreateBucketConfiguration\": {\n                            \"LocationConstraint\": bucket_region\n                        }\n                    }\n                bucket_policy = {}\n                if public:\n                    bucket_policy = policies.bucket_policy_allow_all_get(bucket)\n\n                if dry_run:\n                    click.echo(\n                        \"Would create bucket: '{}'{}\".format(\n                            bucket,\n                            (\n                                \" with args {}\".format(json.dumps(kwargs, indent=4))\n                                if kwargs\n                                else \"\"\n                            ),\n                        )\n                    )\n                    if public:\n                        click.echo(\n                            \"... then add this public access block configuration:\"\n                        )\n                        click.echo(json.dumps(PUBLIC_ACCESS_BLOCK_CONFIGURATION))\n\n                    if bucket_policy:\n                        click.echo(\"... then attach the following bucket policy to it:\")\n                        click.echo(json.dumps(bucket_policy, indent=4))\n                    if website:\n                        click.echo(\n                            \"... 
then configure index.html and error.html website settings\"\n                        )\n                else:\n                    s3.create_bucket(Bucket=bucket, **kwargs)\n                    info = \"Created bucket: {}\".format(bucket)\n                    if bucket_region:\n                        info += \" in region: {}\".format(bucket_region)\n                    log(info)\n\n                    if public:\n                        s3.put_public_access_block(\n                            Bucket=bucket,\n                            PublicAccessBlockConfiguration=PUBLIC_ACCESS_BLOCK_CONFIGURATION,\n                        )\n                        log(\"Set public access block configuration\")\n\n                    if bucket_policy:\n                        s3.put_bucket_policy(\n                            Bucket=bucket, Policy=json.dumps(bucket_policy)\n                        )\n                        log(\"Attached bucket policy allowing public access\")\n                    if website:\n                        s3.put_bucket_website(\n                            Bucket=bucket,\n                            WebsiteConfiguration={\n                                \"ErrorDocument\": {\"Key\": \"error.html\"},\n                                \"IndexDocument\": {\"Suffix\": \"index.html\"},\n                            },\n                        )\n                        log(\n                            \"Configured website: IndexDocument=index.html, ErrorDocument=error.html\"\n                        )\n\n    # At this point the buckets definitely exist - create the inline policy for assume_role()\n    assume_role_policy = {}\n    if policy:\n        assume_role_policy = json.loads(policy.replace(\"$!BUCKET_NAME!$\", bucket))\n    else:\n        statements = []\n        if permission == \"read-write\":\n            for bucket in buckets:\n                statements.extend(policies.read_write_statements(bucket, prefix))\n        elif permission == 
\"read-only\":\n            for bucket in buckets:\n                statements.extend(policies.read_only_statements(bucket, prefix))\n        elif permission == \"write-only\":\n            for bucket in buckets:\n                statements.extend(policies.write_only_statements(bucket, prefix))\n        else:\n            assert False, \"Unknown permission: {}\".format(permission)\n        statements.extend(extra_statements)\n        assume_role_policy = policies.wrap_policy(statements)\n\n    if duration:\n        # We're going to use sts.assume_role() rather than creating a user\n        if dry_run:\n            click.echo(\"Would ensure role: 's3-credentials.AmazonS3FullAccess'\")\n            click.echo(\n                \"Would assume role using following policy for {} seconds:\".format(\n                    duration\n                )\n            )\n            click.echo(json.dumps(assume_role_policy, indent=4))\n        else:\n            s3_role_arn = ensure_s3_role_exists(iam, sts)\n            log(\"Assume role against {} for {}s\".format(s3_role_arn, duration))\n            credentials_response = sts.assume_role(\n                RoleArn=s3_role_arn,\n                RoleSessionName=\"s3.{permission}.{buckets}\".format(\n                    permission=\"custom\" if (policy or extra_statements) else permission,\n                    buckets=\",\".join(buckets),\n                ),\n                Policy=json.dumps(assume_role_policy),\n                DurationSeconds=duration,\n            )\n            if format_ == \"ini\":\n                click.echo(\n                    (\n                        \"[default]\\naws_access_key_id={}\\n\"\n                        \"aws_secret_access_key={}\\naws_session_token={}\"\n                    ).format(\n                        credentials_response[\"Credentials\"][\"AccessKeyId\"],\n                        credentials_response[\"Credentials\"][\"SecretAccessKey\"],\n                        
credentials_response[\"Credentials\"][\"SessionToken\"],\n                    )\n                )\n            else:\n                click.echo(\n                    json.dumps(\n                        credentials_response[\"Credentials\"], indent=4, default=str\n                    )\n                )\n        return\n    # No duration, so we create a new user so we can issue non-expiring credentials\n    if not username:\n        # Default username is \"s3.read-write.bucket1,bucket2\"\n        username = \"s3.{permission}.{buckets}\".format(\n            permission=\"custom\" if (policy or extra_statements) else permission,\n            buckets=\",\".join(buckets),\n        )\n    if dry_run or (not user_exists(iam, username)):\n        kwargs = {\"UserName\": username}\n        if user_permissions_boundary != \"none\":\n            # This is a user-account level limitation, it does not grant\n            # permissions on its own but is a useful extra level of defense\n            # https://github.com/simonw/s3-credentials/issues/1#issuecomment-958201717\n            if not user_permissions_boundary:\n                # Pick one based on --read-only/--write-only\n                if read_only:\n                    user_permissions_boundary = (\n                        \"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n                    )\n                else:\n                    # Need full access in order to be able to write\n                    user_permissions_boundary = (\n                        \"arn:aws:iam::aws:policy/AmazonS3FullAccess\"\n                    )\n            kwargs[\"PermissionsBoundary\"] = user_permissions_boundary\n        info = \" user: '{}'\".format(username)\n        if user_permissions_boundary != \"none\":\n            info += \" with permissions boundary: '{}'\".format(user_permissions_boundary)\n        if dry_run:\n            click.echo(\"Would create{}\".format(info))\n        else:\n            
iam.create_user(**kwargs)\n            log(\"Created {}\".format(info))\n\n    # Add inline policies to the user so they can access the buckets\n    user_policy = {}\n    for bucket in buckets:\n        policy_name = \"s3.{permission}.{bucket}\".format(\n            permission=\"custom\" if (policy or extra_statements) else permission,\n            bucket=bucket,\n        )\n        if policy:\n            user_policy = json.loads(policy.replace(\"$!BUCKET_NAME!$\", bucket))\n        else:\n            if permission == \"read-write\":\n                user_policy = policies.read_write(bucket, prefix, extra_statements)\n            elif permission == \"read-only\":\n                user_policy = policies.read_only(bucket, prefix, extra_statements)\n            elif permission == \"write-only\":\n                user_policy = policies.write_only(bucket, prefix, extra_statements)\n            else:\n                assert False, \"Unknown permission: {}\".format(permission)\n\n        if dry_run:\n            click.echo(\n                \"Would attach policy called '{}' to user '{}', details:\\n{}\".format(\n                    policy_name,\n                    username,\n                    json.dumps(user_policy, indent=4),\n                )\n            )\n        else:\n            iam.put_user_policy(\n                PolicyDocument=json.dumps(user_policy),\n                PolicyName=policy_name,\n                UserName=username,\n            )\n            log(\"Attached policy {} to user {}\".format(policy_name, username))\n\n    # Retrieve and print out the credentials\n    if dry_run:\n        click.echo(\"Would call create access key for user '{}'\".format(username))\n    else:\n        response = iam.create_access_key(\n            UserName=username,\n        )\n        log(\"Created access key for user: {}\".format(username))\n        if format_ == \"ini\":\n            click.echo(\n                (\"[default]\\naws_access_key_id={}\\n\" 
\"aws_secret_access_key={}\").format(\n                    response[\"AccessKey\"][\"AccessKeyId\"],\n                    response[\"AccessKey\"][\"SecretAccessKey\"],\n                )\n            )\n        elif format_ == \"json\":\n            click.echo(json.dumps(response[\"AccessKey\"], indent=4, default=str))\n\n\n@cli.command()\n@common_boto3_options\ndef whoami(**boto_options):\n    \"Identify currently authenticated user\"\n    sts = make_client(\"sts\", **boto_options)\n    identity = sts.get_caller_identity()\n    identity.pop(\"ResponseMetadata\")\n    click.echo(json.dumps(identity, indent=4, default=str))\n\n\n@cli.command()\n@common_output_options\n@common_boto3_options\ndef list_users(nl, csv, tsv, **boto_options):\n    \"\"\"\n    List all users for this account\n\n        s3-credentials list-users\n\n    Add --csv or --tsv for CSV or TSV format:\n\n        s3-credentials list-users --csv\n    \"\"\"\n    iam = make_client(\"iam\", **boto_options)\n    output(\n        paginate(iam, \"list_users\", \"Users\"),\n        (\n            \"UserName\",\n            \"UserId\",\n            \"Arn\",\n            \"Path\",\n            \"CreateDate\",\n            \"PasswordLastUsed\",\n            \"PermissionsBoundary\",\n            \"Tags\",\n        ),\n        nl,\n        csv,\n        tsv,\n    )\n\n\n@cli.command()\n@click.argument(\"role_names\", nargs=-1)\n@click.option(\"--details\", help=\"Include attached policies (slower)\", is_flag=True)\n@common_output_options\n@common_boto3_options\ndef list_roles(role_names, details, nl, csv, tsv, **boto_options):\n    \"\"\"\n    List roles\n\n    To list all roles for this AWS account:\n\n        s3-credentials list-roles\n\n    Add --csv or --tsv for CSV or TSV format:\n\n        s3-credentials list-roles --csv\n\n    For extra details per role (much slower) add --details\n\n        s3-credentials list-roles --details\n    \"\"\"\n    iam = make_client(\"iam\", **boto_options)\n    headers = (\n  
      \"Path\",\n        \"RoleName\",\n        \"RoleId\",\n        \"Arn\",\n        \"CreateDate\",\n        \"AssumeRolePolicyDocument\",\n        \"Description\",\n        \"MaxSessionDuration\",\n        \"PermissionsBoundary\",\n        \"Tags\",\n        \"RoleLastUsed\",\n    )\n    if details:\n        headers += (\"inline_policies\", \"attached_policies\")\n\n    def iterate():\n        for role in paginate(iam, \"list_roles\", \"Roles\"):\n            if role_names and role[\"RoleName\"] not in role_names:\n                continue\n            if details:\n                role_name = role[\"RoleName\"]\n                role[\"inline_policies\"] = []\n                # Get inline policy names, then policy for each one\n                for policy_name in paginate(\n                    iam, \"list_role_policies\", \"PolicyNames\", RoleName=role_name\n                ):\n                    role_policy_response = iam.get_role_policy(\n                        RoleName=role_name,\n                        PolicyName=policy_name,\n                    )\n                    role_policy_response.pop(\"ResponseMetadata\", None)\n                    role[\"inline_policies\"].append(role_policy_response)\n\n                # Get attached managed policies\n                role[\"attached_policies\"] = []\n                for attached in paginate(\n                    iam,\n                    \"list_attached_role_policies\",\n                    \"AttachedPolicies\",\n                    RoleName=role_name,\n                ):\n                    policy_arn = attached[\"PolicyArn\"]\n                    attached_policy_response = iam.get_policy(\n                        PolicyArn=policy_arn,\n                    )\n                    policy_details = attached_policy_response[\"Policy\"]\n                    # Also need to fetch the policy JSON\n                    version_id = policy_details[\"DefaultVersionId\"]\n                    policy_version_response = 
iam.get_policy_version(\n                        PolicyArn=policy_arn,\n                        VersionId=version_id,\n                    )\n                    policy_details[\"PolicyVersion\"] = policy_version_response[\n                        \"PolicyVersion\"\n                    ]\n                    role[\"attached_policies\"].append(policy_details)\n\n            yield role\n\n    output(iterate(), headers, nl, csv, tsv)\n\n\n@cli.command()\n@click.argument(\"usernames\", nargs=-1)\n@common_boto3_options\ndef list_user_policies(usernames, **boto_options):\n    \"\"\"\n    List inline policies for specified users\n\n        s3-credentials list-user-policies username\n\n    Returns policies for all users if no usernames are provided.\n    \"\"\"\n    iam = make_client(\"iam\", **boto_options)\n    if not usernames:\n        usernames = [user[\"UserName\"] for user in paginate(iam, \"list_users\", \"Users\")]\n    for username in usernames:\n        click.echo(\"User: {}\".format(username))\n        for policy_name in paginate(\n            iam, \"list_user_policies\", \"PolicyNames\", UserName=username\n        ):\n            click.echo(\"PolicyName: {}\".format(policy_name))\n            policy_response = iam.get_user_policy(\n                UserName=username, PolicyName=policy_name\n            )\n            click.echo(\n                json.dumps(policy_response[\"PolicyDocument\"], indent=4, default=str)\n            )\n\n\n@cli.command()\n@click.argument(\"buckets\", nargs=-1)\n@click.option(\"--details\", help=\"Include extra bucket details (slower)\", is_flag=True)\n@common_output_options\n@common_boto3_options\ndef list_buckets(buckets, details, nl, csv, tsv, **boto_options):\n    \"\"\"\n    List buckets\n\n    To list all buckets and their creation time as JSON:\n\n        s3-credentials list-buckets\n\n    Add --csv or --tsv for CSV or TSV format:\n\n        s3-credentials list-buckets --csv\n\n    For extra details per bucket (much slower) 
add --details\n\n        s3-credentials list-buckets --details\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n\n    headers = [\"Name\", \"CreationDate\"]\n    if details:\n        headers += [\"bucket_acl\", \"public_access_block\", \"bucket_website\"]\n\n    def iterator():\n        for bucket in s3.list_buckets()[\"Buckets\"]:\n            if buckets and (bucket[\"Name\"] not in buckets):\n                continue\n            if details:\n                bucket_acl = dict(\n                    (key, value)\n                    for key, value in s3.get_bucket_acl(\n                        Bucket=bucket[\"Name\"],\n                    ).items()\n                    if key != \"ResponseMetadata\"\n                )\n                region = s3.get_bucket_location(Bucket=bucket[\"Name\"])[\n                    \"LocationConstraint\"\n                ]\n                if region is None:\n                    # \"Buckets in Region us-east-1 have a LocationConstraint of null\"\n                    region = \"us-east-1\"\n                try:\n                    pab = s3.get_public_access_block(\n                        Bucket=bucket[\"Name\"],\n                    )[\"PublicAccessBlockConfiguration\"]\n                except s3.exceptions.ClientError:\n                    pab = None\n                try:\n                    bucket_website = dict(\n                        (key, value)\n                        for key, value in s3.get_bucket_website(\n                            Bucket=bucket[\"Name\"],\n                        ).items()\n                        if key != \"ResponseMetadata\"\n                    )\n                    bucket_website[\"url\"] = (\n                        \"http://{}.s3-website.{}.amazonaws.com/\".format(\n                            bucket[\"Name\"], region\n                        )\n                    )\n                except s3.exceptions.ClientError:\n                    bucket_website = None\n                
bucket[\"region\"] = region\n                bucket[\"bucket_acl\"] = bucket_acl\n                bucket[\"public_access_block\"] = pab\n                bucket[\"bucket_website\"] = bucket_website\n            yield bucket\n\n    output(iterator(), headers, nl, csv, tsv)\n\n\n@cli.command()\n@click.argument(\"usernames\", nargs=-1, required=True)\n@common_boto3_options\ndef delete_user(usernames, **boto_options):\n    \"\"\"\n    Delete specified users, their access keys and their inline policies\n\n\n        s3-credentials delete-user username1 username2\n    \"\"\"\n    iam = make_client(\"iam\", **boto_options)\n    for username in usernames:\n        click.echo(\"User: {}\".format(username))\n        # Fetch and delete their policies\n        policy_names_to_delete = list(\n            paginate(iam, \"list_user_policies\", \"PolicyNames\", UserName=username)\n        )\n        for policy_name in policy_names_to_delete:\n            iam.delete_user_policy(\n                UserName=username,\n                PolicyName=policy_name,\n            )\n            click.echo(\"  Deleted policy: {}\".format(policy_name))\n        # Fetch and delete their access keys\n        access_key_ids_to_delete = [\n            access_key[\"AccessKeyId\"]\n            for access_key in paginate(\n                iam, \"list_access_keys\", \"AccessKeyMetadata\", UserName=username\n            )\n        ]\n        for access_key_id in access_key_ids_to_delete:\n            iam.delete_access_key(\n                UserName=username,\n                AccessKeyId=access_key_id,\n            )\n            click.echo(\"  Deleted access key: {}\".format(access_key_id))\n        iam.delete_user(UserName=username)\n        click.echo(\"  Deleted user\")\n\n\ndef make_client(service, access_key, secret_key, session_token, endpoint_url, auth):\n    if auth:\n        if access_key or secret_key or session_token:\n            raise click.ClickException(\n                \"--auth cannot be 
used with --access-key, --secret-key or --session-token\"\n            )\n        auth_content = auth.read().strip()\n        if auth_content.startswith(\"{\"):\n            # Treat as JSON\n            decoded = json.loads(auth_content)\n            access_key = decoded.get(\"AccessKeyId\")\n            secret_key = decoded.get(\"SecretAccessKey\")\n            session_token = decoded.get(\"SessionToken\")\n        else:\n            # Treat as INI\n            config = configparser.ConfigParser()\n            config.read_string(auth_content)\n            # Use the first section that has an aws_access_key_id\n            for section in config.sections():\n                if \"aws_access_key_id\" in config[section]:\n                    access_key = config[section].get(\"aws_access_key_id\")\n                    secret_key = config[section].get(\"aws_secret_access_key\")\n                    session_token = config[section].get(\"aws_session_token\")\n                    break\n    kwargs = {}\n    if access_key:\n        kwargs[\"aws_access_key_id\"] = access_key\n    if secret_key:\n        kwargs[\"aws_secret_access_key\"] = secret_key\n    if session_token:\n        kwargs[\"aws_session_token\"] = session_token\n    if endpoint_url:\n        kwargs[\"endpoint_url\"] = endpoint_url\n    return boto3.client(service, **kwargs)\n\n\ndef ensure_s3_role_exists(iam, sts):\n    \"Create s3-credentials.AmazonS3FullAccess role if not exists, return ARN\"\n    role_name = \"s3-credentials.AmazonS3FullAccess\"\n    account_id = sts.get_caller_identity()[\"Account\"]\n    try:\n        role = iam.get_role(RoleName=role_name)\n        return role[\"Role\"][\"Arn\"]\n    except iam.exceptions.NoSuchEntityException:\n        create_role_response = iam.create_role(\n            Description=(\n                \"Role used by the s3-credentials tool to create time-limited \"\n                \"credentials that are restricted to specific buckets\"\n            ),\n            
RoleName=role_name,\n            AssumeRolePolicyDocument=json.dumps(\n                {\n                    \"Version\": \"2012-10-17\",\n                    \"Statement\": [\n                        {\n                            \"Effect\": \"Allow\",\n                            \"Principal\": {\n                                \"AWS\": \"arn:aws:iam::{}:root\".format(account_id)\n                            },\n                            \"Action\": \"sts:AssumeRole\",\n                        }\n                    ],\n                }\n            ),\n            MaxSessionDuration=12 * 60 * 60,\n        )\n        # Attach AmazonS3FullAccess to it - note that even though we use full access\n        # on the role itself any time we call sts.assume_role() we attach an additional\n        # policy to ensure reduced access for the temporary credentials\n        iam.attach_role_policy(\n            RoleName=\"s3-credentials.AmazonS3FullAccess\",\n            PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3FullAccess\",\n        )\n        return create_role_response[\"Role\"][\"Arn\"]\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.option(\"--prefix\", help=\"List keys starting with this prefix\")\n@click.option(\"--urls\", is_flag=True, help=\"Show URLs for each key\")\n@common_output_options\n@common_boto3_options\ndef list_bucket(bucket, prefix, urls, nl, csv, tsv, **boto_options):\n    \"\"\"\n    List contents of bucket\n\n    To list the contents of a bucket as JSON:\n\n        s3-credentials list-bucket my-bucket\n\n    Add --csv or --tsv for CSV or TSV format:\n\n        s3-credentials list-bucket my-bucket --csv\n\n    Add --urls to get an extra URL field for each key:\n\n        s3-credentials list-bucket my-bucket --urls\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    kwargs = {\"Bucket\": bucket}\n    if prefix:\n        kwargs[\"Prefix\"] = prefix\n\n    fields = [\"Key\", \"LastModified\", \"ETag\", \"Size\", 
\"StorageClass\", \"Owner\"]\n    if urls:\n        fields.append(\"URL\")\n\n    items = paginate(s3, \"list_objects_v2\", \"Contents\", **kwargs)\n    if urls:\n        items = (\n            dict(item, URL=\"https://s3.amazonaws.com/{}/{}\".format(bucket, item[\"Key\"]))\n            for item in items\n        )\n\n    try:\n        output(\n            items,\n            fields,\n            nl,\n            csv,\n            tsv,\n        )\n    except botocore.exceptions.ClientError as e:\n        raise click.ClickException(e)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.argument(\"key\")\n@click.argument(\n    \"path\",\n    type=click.Path(\n        exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=True\n    ),\n)\n@click.option(\n    \"--content-type\",\n    help=\"Content-Type to use (default is auto-detected based on file extension)\",\n)\n@click.option(\"silent\", \"-s\", \"--silent\", is_flag=True, help=\"Don't show progress bar\")\n@common_boto3_options\ndef put_object(bucket, key, path, content_type, silent, **boto_options):\n    \"\"\"\n    Upload an object to an S3 bucket\n\n    To upload a file to /my-key.txt in the my-bucket bucket:\n\n        s3-credentials put-object my-bucket my-key.txt /path/to/file.txt\n\n    Use - to upload content from standard input:\n\n        echo \"Hello\" | s3-credentials put-object my-bucket hello.txt -\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    size = None\n    extra_args = {}\n    if path == \"-\":\n        # boto needs to be able to seek\n        fp = io.BytesIO(sys.stdin.buffer.read())\n        if not silent:\n            size = fp.getbuffer().nbytes\n    else:\n        if not content_type:\n            content_type = mimetypes.guess_type(path)[0]\n        fp = click.open_file(path, \"rb\")\n        if not silent:\n            size = os.path.getsize(path)\n    if content_type is not None:\n        extra_args[\"ContentType\"] = content_type\n    if not 
silent:\n        # Show progress bar\n        with click.progressbar(length=size, label=\"Uploading\", file=sys.stderr) as bar:\n            s3.upload_fileobj(\n                fp, bucket, key, Callback=bar.update, ExtraArgs=extra_args\n            )\n    else:\n        s3.upload_fileobj(fp, bucket, key, ExtraArgs=extra_args)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.argument(\n    \"objects\",\n    nargs=-1,\n    required=True,\n)\n@click.option(\n    \"--prefix\",\n    help=\"Prefix to add to the files within the bucket\",\n)\n@click.option(\"silent\", \"-s\", \"--silent\", is_flag=True, help=\"Don't show progress bar\")\n@click.option(\"--dry-run\", help=\"Show steps without executing them\", is_flag=True)\n@common_boto3_options\ndef put_objects(bucket, objects, prefix, silent, dry_run, **boto_options):\n    \"\"\"\n    Upload multiple objects to an S3 bucket\n\n    Pass one or more files to upload them:\n\n        s3-credentials put-objects my-bucket one.txt two.txt\n\n    These will be saved to the root of the bucket. To save to a different location\n    use the --prefix option:\n\n        s3-credentials put-objects my-bucket one.txt two.txt --prefix my-folder\n\n    This will upload them my-folder/one.txt and my-folder/two.txt.\n\n    If you pass a directory it will be uploaded recursively:\n\n        s3-credentials put-objects my-bucket my-folder\n\n    This will create keys in my-folder/... 
in the S3 bucket.\n\n    To upload all files in a folder to the root of the bucket instead use this:\n\n        s3-credentials put-objects my-bucket my-folder/*\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    if prefix and not prefix.endswith(\"/\"):\n        prefix = prefix + \"/\"\n    total_size = 0\n    # Figure out files to upload and their keys\n    paths = []  # (path, key)\n    for obj in objects:\n        path = pathlib.Path(obj)\n        if path.is_file():\n            # Just use the filename as the key\n            paths.append((path, path.name))\n        elif path.is_dir():\n            # Key is the relative path within the directory\n            for p in path.glob(\"**/*\"):\n                if p.is_file():\n                    paths.append((p, str(p.relative_to(path.parent))))\n\n    def upload(path, key, callback=None):\n        final_key = key\n        if prefix:\n            final_key = prefix + key\n        if dry_run:\n            click.echo(\"{} => s3://{}/{}\".format(path, bucket, final_key))\n        else:\n            s3.upload_file(\n                Filename=str(path), Bucket=bucket, Key=final_key, Callback=callback\n            )\n\n    if not silent and not dry_run:\n        total_size = sum(p[0].stat().st_size for p in paths)\n        with click.progressbar(\n            length=total_size,\n            label=\"Uploading {} ({} file{})\".format(\n                format_bytes(total_size),\n                len(paths),\n                \"s\" if len(paths) != 1 else \"\",\n            ),\n            file=sys.stderr,\n        ) as bar:\n            for path, key in paths:\n                upload(path, key, bar.update)\n    else:\n        for path, key in paths:\n            upload(path, key)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.argument(\"key\")\n@click.option(\n    \"output\",\n    \"-o\",\n    \"--output\",\n    type=click.Path(file_okay=True, dir_okay=False, writable=True, allow_dash=False),\n    
help=\"Write to this file instead of stdout\",\n)\n@common_boto3_options\ndef get_object(bucket, key, output, **boto_options):\n    \"\"\"\n    Download an object from an S3 bucket\n\n    To see the contents of the bucket on standard output:\n\n        s3-credentials get-object my-bucket hello.txt\n\n    To save to a file:\n\n        s3-credentials get-object my-bucket hello.txt -o hello.txt\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    if not output:\n        fp = sys.stdout.buffer\n    else:\n        fp = click.open_file(output, \"wb\")\n    s3.download_fileobj(bucket, key, fp)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.argument(\n    \"keys\",\n    nargs=-1,\n    required=False,\n)\n@click.option(\n    \"output\",\n    \"-o\",\n    \"--output\",\n    type=click.Path(file_okay=False, dir_okay=True, writable=True, allow_dash=False),\n    help=\"Write to this directory instead of one matching the bucket name\",\n)\n@click.option(\n    \"patterns\",\n    \"-p\",\n    \"--pattern\",\n    multiple=True,\n    help=\"Glob patterns for files to download, e.g. '*/*.js'\",\n)\n@click.option(\"silent\", \"-s\", \"--silent\", is_flag=True, help=\"Don't show progress bar\")\n@common_boto3_options\ndef get_objects(bucket, keys, output, patterns, silent, **boto_options):\n    \"\"\"\n    Download multiple objects from an S3 bucket\n\n    To download everything, run:\n\n        s3-credentials get-objects my-bucket\n\n    Files will be saved to a directory called my-bucket. 
Use -o dirname to save to a\n    different directory.\n\n    To download specific keys, list them:\n\n        s3-credentials get-objects my-bucket one.txt path/two.txt\n\n    To download files matching a glob-style pattern, use:\n\n        s3-credentials get-objects my-bucket --pattern '*/*.js'\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n\n    # If user specified keys and no patterns, use the keys they specified\n    keys_to_download = list(keys)\n    key_sizes = {}\n\n    if keys and not silent:\n        # Get sizes of those keys for progress bar\n        for key in keys:\n            try:\n                key_sizes[key] = s3.head_object(Bucket=bucket, Key=key)[\"ContentLength\"]\n            except botocore.exceptions.ClientError:\n                # Ignore errors - they will be reported later\n                key_sizes[key] = 0\n\n    if (not keys) or patterns:\n        # Fetch all keys, then filter them if --pattern\n        all_key_infos = list(paginate(s3, \"list_objects_v2\", \"Contents\", Bucket=bucket))\n        if patterns:\n            filtered = []\n            for pattern in patterns:\n                filtered.extend(\n                    fnmatch.filter((k[\"Key\"] for k in all_key_infos), pattern)\n                )\n            keys_to_download.extend(filtered)\n        else:\n            keys_to_download.extend(k[\"Key\"] for k in all_key_infos)\n        if not silent:\n            key_set = set(keys_to_download)\n            for key in all_key_infos:\n                if key[\"Key\"] in key_set:\n                    key_sizes[key[\"Key\"]] = key[\"Size\"]\n\n    output_dir = pathlib.Path(output or \".\")\n    if not output_dir.exists():\n        output_dir.mkdir(parents=True)\n\n    errors = []\n\n    def download(key, callback=None):\n        # Ensure directory for key exists\n        key_dir = (output_dir / key).parent\n        if not key_dir.exists():\n            key_dir.mkdir(parents=True)\n        try:\n            
s3.download_file(bucket, key, str(output_dir / key), Callback=callback)\n        except botocore.exceptions.ClientError as e:\n            errors.append(\"Not found: {}\".format(key))\n\n    if not silent:\n        total_size = sum(key_sizes.values())\n        with click.progressbar(\n            length=total_size,\n            label=\"Downloading {} ({} file{})\".format(\n                format_bytes(total_size),\n                len(key_sizes),\n                \"s\" if len(key_sizes) != 1 else \"\",\n            ),\n            file=sys.stderr,\n        ) as bar:\n            for key in keys_to_download:\n                download(key, bar.update)\n    else:\n        for key in keys_to_download:\n            download(key)\n\n    if errors:\n        raise click.ClickException(\"\\n\".join(errors))\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.option(\n    \"allowed_methods\",\n    \"-m\",\n    \"--allowed-method\",\n    multiple=True,\n    help=\"Allowed method e.g. GET\",\n)\n@click.option(\n    \"allowed_headers\",\n    \"-h\",\n    \"--allowed-header\",\n    multiple=True,\n    help=\"Allowed header e.g. Authorization\",\n)\n@click.option(\n    \"allowed_origins\",\n    \"-o\",\n    \"--allowed-origin\",\n    multiple=True,\n    help=\"Allowed origin e.g. https://www.example.com/\",\n)\n@click.option(\n    \"expose_headers\",\n    \"-e\",\n    \"--expose-header\",\n    multiple=True,\n    help=\"Header to expose e.g. 
ETag\",\n)\n@click.option(\n    \"max_age_seconds\",\n    \"--max-age-seconds\",\n    type=int,\n    help=\"How long to cache preflight requests\",\n)\n@common_boto3_options\ndef set_cors_policy(\n    bucket,\n    allowed_methods,\n    allowed_headers,\n    allowed_origins,\n    expose_headers,\n    max_age_seconds,\n    **boto_options,\n):\n    \"\"\"\n    Set CORS policy for a bucket\n\n    To allow GET requests from any origin:\n\n        s3-credentials set-cors-policy my-bucket\n\n    To allow GET and PUT from a specific origin and expose ETag headers:\n\n    \\b\n        s3-credentials set-cors-policy my-bucket \\\\\n          --allowed-method GET \\\\\n          --allowed-method PUT \\\\\n          --allowed-origin https://www.example.com/ \\\\\n          --expose-header ETag\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    if not bucket_exists(s3, bucket):\n        raise click.ClickException(\"Bucket {} does not exists\".format(bucket))\n\n    cors_rule = {\n        \"ID\": \"set-by-s3-credentials\",\n        \"AllowedOrigins\": allowed_origins or [\"*\"],\n        \"AllowedHeaders\": allowed_headers,\n        \"AllowedMethods\": allowed_methods or [\"GET\"],\n        \"ExposeHeaders\": expose_headers,\n    }\n    if max_age_seconds:\n        cors_rule[\"MaxAgeSeconds\"] = max_age_seconds\n\n    try:\n        s3.put_bucket_cors(Bucket=bucket, CORSConfiguration={\"CORSRules\": [cors_rule]})\n    except botocore.exceptions.ClientError as e:\n        raise click.ClickException(e)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@common_boto3_options\ndef get_cors_policy(bucket, **boto_options):\n    \"\"\"\n    Get CORS policy for a bucket\n\n       s3-credentials get-cors-policy my-bucket\n\n    Returns the CORS policy for this bucket, if set, as JSON\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    try:\n        response = s3.get_bucket_cors(Bucket=bucket)\n    except botocore.exceptions.ClientError as e:\n        raise 
click.ClickException(e)\n    click.echo(json.dumps(response[\"CORSRules\"], indent=4, default=str))\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@common_boto3_options\ndef get_bucket_policy(bucket, **boto_options):\n    \"\"\"\n    Get bucket policy for a bucket\n\n       s3-credentials get-bucket-policy my-bucket\n\n    Returns the bucket policy for this bucket, if set, as JSON\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    try:\n        response = s3.get_bucket_policy(Bucket=bucket)\n    except botocore.exceptions.ClientError as e:\n        raise click.ClickException(e)\n    click.echo(json.dumps(json.loads(response[\"Policy\"]), indent=4, default=str))\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.option(\"--policy-file\", type=click.File(\"r\"))\n@click.option(\"--allow-all-get\", is_flag=True, help=\"Allow GET requests from all\")\n@common_boto3_options\ndef set_bucket_policy(bucket, policy_file, allow_all_get, **boto_options):\n    \"\"\"\n    Set bucket policy for a bucket\n\n        s3-credentials set-bucket-policy my-bucket --policy-file policy.json\n\n    Or to set a policy that allows GET requests from all:\n\n        s3-credentials set-bucket-policy my-bucket --allow-all-get\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    if allow_all_get and policy_file:\n        raise click.ClickException(\"Cannot pass both --allow-all-get and --policy-file\")\n    if allow_all_get:\n        policy = policies.bucket_policy_allow_all_get(bucket)\n    else:\n        policy = json.load(policy_file)\n    try:\n        s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))\n    except botocore.exceptions.ClientError as e:\n        raise click.ClickException(e)\n    click.echo(\"Policy set:\\n\" + json.dumps(policy, indent=4), err=True)\n\n\ndef without_response_metadata(data):\n    return dict(\n        (key, value) for key, value in data.items() if key != \"ResponseMetadata\"\n    
)\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@common_boto3_options\ndef debug_bucket(bucket, **boto_options):\n    \"\"\"\n    Run a bunch of diagnostics to help debug a bucket\n\n       s3-credentials debug-bucket my-bucket\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n\n    try:\n        bucket_acl = s3.get_bucket_acl(Bucket=bucket)\n        click.echo(\"Bucket ACL:\")\n        click.echo(json.dumps(without_response_metadata(bucket_acl), indent=4))\n    except Exception as ex:\n        print(f\"Error checking bucket ACL: {ex}\")\n\n    try:\n        bucket_policy_status = s3.get_bucket_policy_status(Bucket=bucket)\n        click.echo(\"Bucket policy status:\")\n        click.echo(\n            json.dumps(without_response_metadata(bucket_policy_status), indent=4)\n        )\n    except Exception as ex:\n        print(f\"Error checking bucket policy status: {ex}\")\n\n    try:\n        bucket_public_access_block = s3.get_public_access_block(Bucket=bucket)\n        click.echo(\"Bucket public access block:\")\n        click.echo(\n            json.dumps(without_response_metadata(bucket_public_access_block), indent=4)\n        )\n    except Exception as ex:\n        print(f\"Error checking bucket public access block: {ex}\")\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.argument(\n    \"keys\",\n    nargs=-1,\n)\n@click.option(\n    \"--prefix\",\n    help=\"Delete everything with this prefix\",\n)\n@click.option(\n    \"silent\", \"-s\", \"--silent\", is_flag=True, help=\"Don't show informational output\"\n)\n@click.option(\n    \"dry_run\",\n    \"-d\",\n    \"--dry-run\",\n    is_flag=True,\n    help=\"Show keys that would be deleted without deleting them\",\n)\n@common_boto3_options\ndef delete_objects(bucket, keys, prefix, silent, dry_run, **boto_options):\n    \"\"\"\n    Delete one or more object from an S3 bucket\n\n    Pass one or more keys to delete them:\n\n        s3-credentials delete-objects my-bucket one.txt two.txt\n\n    
To delete all files matching a prefix, pass --prefix:\n\n        s3-credentials delete-objects my-bucket --prefix my-folder/\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    if keys and prefix:\n        raise click.ClickException(\"Cannot pass both keys and --prefix\")\n    if not keys and not prefix:\n        raise click.ClickException(\"Specify one or more keys or use --prefix\")\n    if prefix:\n        # List all keys with this prefix\n        paginator = s3.get_paginator(\"list_objects_v2\")\n        response_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix)\n        keys = []\n        for response in response_iterator:\n            keys.extend([obj[\"Key\"] for obj in response.get(\"Contents\", [])])\n    if not silent:\n        click.echo(\n            \"Deleting {} object{} from {}\".format(\n                len(keys), \"s\" if len(keys) != 1 else \"\", bucket\n            ),\n            err=True,\n        )\n    if dry_run:\n        click.echo(\"The following keys would be deleted:\")\n        for key in keys:\n            click.echo(key)\n        return\n    for batch in batches(keys, 1000):\n        # Remove any rogue \\r characters:\n        batch = [k.strip() for k in batch]\n        response = s3.delete_objects(\n            Bucket=bucket, Delete={\"Objects\": [{\"Key\": key} for key in batch]}\n        )\n        if response.get(\"Errors\"):\n            click.echo(\n                \"Errors deleting objects: {}\".format(response[\"Errors\"]), err=True\n            )\n\n\n@cli.command()\n@click.argument(\"bucket\", required=True)\n@common_boto3_options\ndef get_public_access_block(bucket, **boto_options):\n    \"\"\"\n    Get the public access settings for an S3 bucket\n\n    Example usage:\n\n        s3-credentials get-public-access-block my-bucket\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n    try:\n        response = s3.get_public_access_block(Bucket=bucket)\n    except botocore.exceptions.ClientError as 
e:\n        raise click.ClickException(e)\n    click.echo(json.dumps(response[\"PublicAccessBlockConfiguration\"], indent=4))\n\n\n@cli.command()\n@click.argument(\"bucket\", required=True)\n@click.option(\n    \"--block-public-acls\",\n    type=bool,\n    default=None,\n    help=\"Block public ACLs for the bucket (true/false).\",\n)\n@click.option(\n    \"--ignore-public-acls\",\n    type=bool,\n    default=None,\n    help=\"Ignore public ACLs for the bucket (true/false).\",\n)\n@click.option(\n    \"--block-public-policy\",\n    type=bool,\n    default=None,\n    help=\"Block public bucket policies (true/false).\",\n)\n@click.option(\n    \"--restrict-public-buckets\",\n    type=bool,\n    default=None,\n    help=\"Restrict public buckets (true/false).\",\n)\n@click.option(\n    \"--allow-public-access\",\n    is_flag=True,\n    help=\"Set all public access settings to false (allows full public access).\",\n)\n@common_boto3_options\ndef set_public_access_block(\n    bucket,\n    block_public_acls,\n    ignore_public_acls,\n    block_public_policy,\n    restrict_public_buckets,\n    allow_public_access,\n    **boto_options,\n):\n    \"\"\"\n    Configure public access settings for an S3 bucket.\n\n    Example:\n\n        s3-credentials set-public-access-block my-bucket --block-public-acls false\n\n    To allow full public access to the bucket, use the --allow-public-access flag:\n\n        s3-credentials set-public-access-block my-bucket --allow-public-access\n    \"\"\"\n    s3 = make_client(\"s3\", **boto_options)\n\n    # Default public access block configuration\n    public_access_block_config = {}\n\n    if allow_public_access:\n        # Set all settings to False if --allow-public-access is provided\n        public_access_block_config = {\n            \"BlockPublicAcls\": False,\n            \"IgnorePublicAcls\": False,\n            \"BlockPublicPolicy\": False,\n            \"RestrictPublicBuckets\": False,\n        }\n    else:\n        # Add values only 
if they are explicitly provided\n        if block_public_acls is not None:\n            public_access_block_config[\"BlockPublicAcls\"] = block_public_acls\n        if ignore_public_acls is not None:\n            public_access_block_config[\"IgnorePublicAcls\"] = ignore_public_acls\n        if block_public_policy is not None:\n            public_access_block_config[\"BlockPublicPolicy\"] = block_public_policy\n        if restrict_public_buckets is not None:\n            public_access_block_config[\"RestrictPublicBuckets\"] = (\n                restrict_public_buckets\n            )\n\n    if not public_access_block_config:\n        raise click.ClickException(\n            \"No valid options provided. Use --help to see available options.\"\n        )\n\n    # Apply the public access block configuration to the bucket\n    s3.put_public_access_block(\n        Bucket=bucket, PublicAccessBlockConfiguration=public_access_block_config\n    )\n\n    click.echo(\n        f\"Updated public access block settings for bucket '{bucket}': {public_access_block_config}\",\n        err=True,\n    )\n\n\n@cli.command()\n@click.argument(\"bucket\")\n@click.option(\n    \"-p\",\n    \"--port\",\n    type=int,\n    default=8094,\n    help=\"Port to run the server on (default: 8094)\",\n)\n@click.option(\n    \"--host\",\n    default=\"localhost\",\n    help=\"Host to bind the server to (default: localhost)\",\n)\n@click.option(\"--read-only\", help=\"Only allow reading from the bucket\", is_flag=True)\n@click.option(\"--write-only\", help=\"Only allow writing to the bucket\", is_flag=True)\n@click.option(\n    \"--prefix\", help=\"Restrict to keys starting with this prefix\", default=\"*\"\n)\n@click.option(\n    \"extra_statements\",\n    \"--statement\",\n    multiple=True,\n    type=StatementParam(),\n    help=\"JSON statement to add to the policy\",\n)\n@click.option(\n    \"-d\",\n    \"--duration\",\n    type=DurationParam(),\n    required=True,\n    help=\"How long should 
credentials be valid for, e.g. 15m, 1h, 12h\",\n)\n@common_boto3_options\ndef localserver(\n    bucket,\n    port,\n    host,\n    read_only,\n    write_only,\n    prefix,\n    extra_statements,\n    duration,\n    **boto_options,\n):\n    \"\"\"\n    Start a localhost server that serves S3 credentials.\n\n    The server responds to GET requests on / with JSON containing temporary\n    AWS credentials that allow access to the specified bucket.\n\n    Credentials are cached and refreshed automatically based on the\n    --duration setting.\n\n    To start a server that serves read-only credentials for a bucket,\n    with credentials valid for 1 hour:\n\n        s3-credentials localserver my-bucket --read-only --duration 1h\n\n    To run on a different port:\n\n        s3-credentials localserver my-bucket --duration 1h --port 9000\n    \"\"\"\n    from . import localserver as localserver_module\n\n    if read_only and write_only:\n        raise click.ClickException(\n            \"Cannot use --read-only and --write-only at the same time\"\n        )\n    extra_statements = list(extra_statements)\n\n    permission = \"read-write\"\n    if read_only:\n        permission = \"read-only\"\n    if write_only:\n        permission = \"write-only\"\n\n    # Create AWS clients\n    iam = make_client(\"iam\", **boto_options)\n    sts = make_client(\"sts\", **boto_options)\n    s3 = make_client(\"s3\", **boto_options)\n\n    # Verify bucket exists\n    if not bucket_exists(s3, bucket):\n        raise click.ClickException(\"Bucket does not exist: {}\".format(bucket))\n\n    try:\n        localserver_module.run_server(\n            bucket=bucket,\n            port=port,\n            host=host,\n            permission=permission,\n            prefix=prefix,\n            duration=duration,\n            extra_statements=extra_statements,\n            iam=iam,\n            sts=sts,\n        )\n    except Exception as e:\n        raise click.ClickException(\"Failed to start server: 
{}\".format(e))\n\n\ndef output(iterator, headers, nl, csv, tsv):\n    if nl:\n        for item in iterator:\n            click.echo(json.dumps(item, default=str))\n    elif csv or tsv:\n        writer = DictWriter(\n            sys.stdout, headers, dialect=\"excel-tab\" if tsv else \"excel\"\n        )\n        writer.writeheader()\n        writer.writerows(fix_json(row) for row in iterator)\n    else:\n        for line in stream_indented_json(iterator):\n            click.echo(line)\n\n\ndef stream_indented_json(iterator, indent=2):\n    # We have to iterate two-at-a-time so we can know if we\n    # should output a trailing comma or if we have reached\n    # the last item.\n    current_iter, next_iter = itertools.tee(iterator, 2)\n    next(next_iter, None)\n    first = True\n    for item, next_item in itertools.zip_longest(current_iter, next_iter):\n        is_last = next_item is None\n        data = item\n        line = \"{first}{serialized}{separator}{last}\".format(\n            first=\"[\\n\" if first else \"\",\n            serialized=textwrap.indent(\n                json.dumps(data, indent=indent, default=str), \" \" * indent\n            ),\n            separator=\",\" if not is_last else \"\",\n            last=\"\\n]\" if is_last else \"\",\n        )\n        yield line\n        first = False\n    if first:\n        # We didn't output anything, so yield the empty list\n        yield \"[]\"\n\n\ndef paginate(service, method, list_key, **kwargs):\n    paginator = service.get_paginator(method)\n    for response in paginator.paginate(**kwargs):\n        yield from response.get(list_key) or []\n\n\ndef fix_json(row):\n    # If a key value is list or dict, json encode it\n    return dict(\n        [\n            (\n                key,\n                (\n                    json.dumps(value, indent=2, default=str)\n                    if isinstance(value, (dict, list, tuple))\n                    else value\n                ),\n            )\n            
for key, value in row.items()\n        ]\n    )\n\n\ndef format_bytes(size):\n    for x in (\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"):\n        if size < 1024:\n            return \"{:3.1f} {}\".format(size, x)\n        size /= 1024\n\n    return size\n\n\ndef batches(all, batch_size):\n    return [all[i : i + batch_size] for i in range(0, len(all), batch_size)]\n"
  },
  {
    "path": "s3_credentials/localserver.py",
    "content": "\"\"\"\nLocal server for serving S3 credentials via HTTP.\n\"\"\"\n\nimport datetime\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nimport json\nimport threading\nimport time\n\nimport click\n\nfrom . import policies\nfrom .cli import ensure_s3_role_exists\n\n\nclass CredentialCache:\n    \"\"\"Thread-safe credential cache that regenerates credentials on expiry.\"\"\"\n\n    def __init__(\n        self, iam, sts, bucket, permission, prefix, duration, extra_statements\n    ):\n        self.iam = iam\n        self.sts = sts\n        self.bucket = bucket\n        self.permission = permission\n        self.prefix = prefix\n        self.duration = duration\n        self.extra_statements = extra_statements\n        self._credentials = None\n        self._expiry_time = None\n        self._lock = threading.Lock()\n        self._generating = False\n\n    def _generate_policy(self):\n        \"\"\"Generate the IAM policy for bucket access.\"\"\"\n        statements = []\n        if self.permission == \"read-write\":\n            statements.extend(policies.read_write_statements(self.bucket, self.prefix))\n        elif self.permission == \"read-only\":\n            statements.extend(policies.read_only_statements(self.bucket, self.prefix))\n        elif self.permission == \"write-only\":\n            statements.extend(policies.write_only_statements(self.bucket, self.prefix))\n        if self.extra_statements:\n            statements.extend(self.extra_statements)\n        return policies.wrap_policy(statements)\n\n    def _generate_credentials(self):\n        \"\"\"Generate new temporary credentials using STS assume_role.\"\"\"\n        s3_role_arn = ensure_s3_role_exists(self.iam, self.sts)\n\n        policy_document = self._generate_policy()\n        credentials_response = self.sts.assume_role(\n            RoleArn=s3_role_arn,\n            RoleSessionName=\"s3.{permission}.{bucket}\".format(\n                permission=self.permission,\n          
      bucket=self.bucket,\n            ),\n            Policy=json.dumps(policy_document),\n            DurationSeconds=self.duration,\n        )\n        return credentials_response[\"Credentials\"]\n\n    def get_credentials(self):\n        \"\"\"Get cached credentials, regenerating if expired or about to expire.\"\"\"\n        current_time = time.time()\n\n        # Check if we need new credentials\n        with self._lock:\n            if self._credentials is not None and self._expiry_time is not None:\n                # Return cached credentials if still valid\n                if current_time < self._expiry_time:\n                    return self._credentials\n\n            # Need to generate new credentials\n            # Check if another thread is already generating\n            if self._generating:\n                # Wait for the other thread to finish\n                while self._generating:\n                    self._lock.release()\n                    time.sleep(0.1)\n                    self._lock.acquire()\n                return self._credentials\n\n            # Mark that we're generating\n            self._generating = True\n\n        try:\n            # Generate new credentials outside the lock\n            credentials = self._generate_credentials()\n            with self._lock:\n                self._credentials = credentials\n                # Set expiry time to duration from now\n                self._expiry_time = current_time + self.duration\n                self._generating = False\n            return credentials\n        except Exception:\n            with self._lock:\n                self._generating = False\n            raise\n\n\ndef make_credential_handler(credential_cache):\n    \"\"\"Create an HTTP request handler class with access to the credential cache.\"\"\"\n\n    class CredentialHandler(BaseHTTPRequestHandler):\n        def log_message(self, format, *args):\n            # Log to stderr with timestamp\n            click.echo(\n     
           \"{} - {}\".format(\n                    datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n                    format % args,\n                ),\n                err=True,\n            )\n\n        def do_GET(self):\n            if self.path != \"/\":\n                self.send_response(404)\n                self.send_header(\"Content-Type\", \"application/json\")\n                self.end_headers()\n                self.wfile.write(json.dumps({\"error\": \"Not found\"}).encode())\n                return\n\n            try:\n                credentials = credential_cache.get_credentials()\n                response_data = {\n                    \"Version\": 1,\n                    \"AccessKeyId\": credentials[\"AccessKeyId\"],\n                    \"SecretAccessKey\": credentials[\"SecretAccessKey\"],\n                    \"SessionToken\": credentials[\"SessionToken\"],\n                    \"Expiration\": (\n                        credentials[\"Expiration\"].isoformat()\n                        if hasattr(credentials[\"Expiration\"], \"isoformat\")\n                        else str(credentials[\"Expiration\"])\n                    ),\n                }\n                self.send_response(200)\n                self.send_header(\"Content-Type\", \"application/json\")\n                self.end_headers()\n                self.wfile.write(json.dumps(response_data, indent=2).encode())\n            except Exception as e:\n                self.send_response(500)\n                self.send_header(\"Content-Type\", \"application/json\")\n                self.end_headers()\n                self.wfile.write(json.dumps({\"error\": str(e)}).encode())\n\n    return CredentialHandler\n\n\ndef run_server(\n    bucket,\n    port,\n    host,\n    permission,\n    prefix,\n    duration,\n    extra_statements,\n    iam,\n    sts,\n):\n    \"\"\"Run the credential server.\"\"\"\n    # Create credential cache\n    credential_cache = CredentialCache(\n        
iam=iam,\n        sts=sts,\n        bucket=bucket,\n        permission=permission,\n        prefix=prefix,\n        duration=duration,\n        extra_statements=extra_statements,\n    )\n\n    # Pre-generate credentials to catch any errors early\n    click.echo(\"Generating initial credentials...\", err=True)\n    credential_cache.get_credentials()\n\n    # Create and start server\n    handler = make_credential_handler(credential_cache)\n    server = HTTPServer((host, port), handler)\n\n    click.echo(\n        \"Serving {} credentials for bucket '{}' at http://{}:{}/\".format(\n            permission, bucket, host, port\n        ),\n        err=True,\n    )\n    click.echo(\"Duration: {} seconds\".format(duration), err=True)\n    click.echo(\"Press Ctrl+C to stop\", err=True)\n\n    try:\n        server.serve_forever()\n    except KeyboardInterrupt:\n        click.echo(\"\\nShutting down server...\", err=True)\n        server.shutdown()\n"
  },
  {
    "path": "s3_credentials/policies.py",
    "content": "def read_write(bucket, prefix=\"*\", extra_statements=None):\n    statements = read_write_statements(bucket, prefix=prefix)\n    if extra_statements:\n        statements.extend(extra_statements)\n    return wrap_policy(statements)\n\n\ndef read_write_statements(bucket, prefix=\"*\"):\n    # https://github.com/simonw/s3-credentials/issues/24\n    if not prefix.endswith(\"*\"):\n        prefix += \"*\"\n    return read_only_statements(bucket, prefix) + [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\"s3:PutObject\", \"s3:DeleteObject\"],\n            \"Resource\": [\"arn:aws:s3:::{}/{}\".format(bucket, prefix)],\n        }\n    ]\n\n\ndef read_only(bucket, prefix=\"*\", extra_statements=None):\n    statements = read_only_statements(bucket, prefix=prefix)\n    if extra_statements:\n        statements.extend(extra_statements)\n    return wrap_policy(statements)\n\n\ndef read_only_statements(bucket, prefix=\"*\"):\n    # https://github.com/simonw/s3-credentials/issues/23\n    statements = []\n    if not prefix.endswith(\"*\"):\n        prefix += \"*\"\n    if prefix != \"*\":\n        statements.append(\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\"s3:GetBucketLocation\"],\n                \"Resource\": [\"arn:aws:s3:::{}\".format(bucket)],\n            }\n        )\n        statements.append(\n            {\n                \"Effect\": \"Allow\",\n                \"Action\": [\"s3:ListBucket\"],\n                \"Resource\": [\"arn:aws:s3:::{}\".format(bucket)],\n                \"Condition\": {\n                    \"StringLike\": {\n                        # Note that prefix must end in / if user wants to limit to a folder\n                        \"s3:prefix\": [prefix]\n                    }\n                },\n            }\n        )\n    else:\n        # We can combine s3:GetBucketLocation and s3:ListBucket into one\n        statements.append(\n            {\n                
\"Effect\": \"Allow\",\n                \"Action\": [\"s3:ListBucket\", \"s3:GetBucketLocation\"],\n                \"Resource\": [\"arn:aws:s3:::{}\".format(bucket)],\n            }\n        )\n\n    return statements + [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"s3:GetObject\",\n                \"s3:GetObjectAcl\",\n                \"s3:GetObjectLegalHold\",\n                \"s3:GetObjectRetention\",\n                \"s3:GetObjectTagging\",\n            ],\n            \"Resource\": [\"arn:aws:s3:::{}/{}\".format(bucket, prefix)],\n        },\n    ]\n\n\ndef write_only(bucket, prefix=\"*\", extra_statements=None):\n    statements = write_only_statements(bucket, prefix=prefix)\n    if extra_statements:\n        statements.extend(extra_statements)\n    return wrap_policy(statements)\n\n\ndef write_only_statements(bucket, prefix=\"*\"):\n    # https://github.com/simonw/s3-credentials/issues/25\n    if not prefix.endswith(\"*\"):\n        prefix += \"*\"\n    return [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\"s3:PutObject\"],\n            \"Resource\": [\"arn:aws:s3:::{}/{}\".format(bucket, prefix)],\n        }\n    ]\n\n\ndef wrap_policy(statements):\n    return {\"Version\": \"2012-10-17\", \"Statement\": statements}\n\n\ndef bucket_policy_allow_all_get(bucket):\n    return {\n        \"Version\": \"2012-10-17\",\n        \"Statement\": [\n            {\n                \"Sid\": \"AllowAllGetObject\",\n                \"Effect\": \"Allow\",\n                \"Principal\": \"*\",\n                \"Action\": [\"s3:GetObject\"],\n                \"Resource\": [\"arn:aws:s3:::{}/*\".format(bucket)],\n            }\n        ],\n    }\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "import boto3\nimport logging\nimport os\nimport pytest\nfrom moto import mock_aws\n\n\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--integration\",\n        action=\"store_true\",\n        default=False,\n        help=\"run integration tests\",\n    )\n    parser.addoption(\n        \"--boto-logging\",\n        action=\"store_true\",\n        default=False,\n        help=\"turn on boto3 logging\",\n    )\n\n\ndef pytest_configure(config):\n    config.addinivalue_line(\n        \"markers\",\n        \"integration: mark test as integration test, only run with --integration\",\n    )\n\n\ndef pytest_collection_modifyitems(config, items):\n    if config.getoption(\"--boto-logging\"):\n        boto3.set_stream_logger(\"botocore.endpoint\", logging.DEBUG)\n    if config.getoption(\"--integration\"):\n        # Also run integration tests\n        return\n    skip_slow = pytest.mark.skip(reason=\"use --integration option to run\")\n    for item in items:\n        if \"integration\" in item.keywords:\n            item.add_marker(skip_slow)\n\n\n@pytest.fixture(scope=\"function\")\ndef aws_credentials():\n    \"\"\"Mocked AWS Credentials for moto.\"\"\"\n    os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n    os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n    os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n    os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"\n    os.environ[\"AWS_DEFAULT_REGION\"] = \"us-east-1\"\n\n\n@pytest.fixture(scope=\"function\")\ndef moto_s3(aws_credentials):\n    with mock_aws():\n        client = boto3.client(\"s3\", region_name=\"us-east-1\")\n        client.create_bucket(Bucket=\"my-bucket\")\n        yield client\n\n\n@pytest.fixture(scope=\"function\")\ndef moto_s3_populated(moto_s3):\n    for key in (\"one.txt\", \"directory/two.txt\", \"directory/three.json\"):\n        moto_s3.put_object(Bucket=\"my-bucket\", Key=key, Body=key.encode(\"utf-8\"))\n    yield moto_s3\n"
  },
  {
    "path": "tests/test_dry_run.py",
    "content": "from click.testing import CliRunner\nfrom s3_credentials.cli import cli\nimport pytest\nimport re\nimport textwrap\n\n\ndef assert_match_with_wildcards(pattern, input):\n    # Pattern language is simple: '*' becomes '.*?'\n    bits = pattern.split(\"*\")\n    regex = \"^{}$\".format(\".*?\".join(re.escape(bit) for bit in bits))\n    print(regex)\n    match = re.compile(regex.strip(), re.DOTALL).match(input.strip())\n    if match is None:\n        # Build a useful message\n        message = \"Pattern:\\n{}\\n\\nDoes not match input:\\n\\n{}\".format(pattern, input)\n        bad_bits = [bit for bit in bits if bit not in input]\n        if bad_bits:\n            message += \"\\nThese parts were not found in the input:\\n\\n\"\n            for bit in bad_bits:\n                message += textwrap.indent(\"{}\\n\\n\".format(bit), \"    \")\n        assert False, message\n\n\n@pytest.mark.parametrize(\n    \"options,expected\",\n    (\n        (\n            [],\n            (\n                \"\"\"Would create bucket: 'my-bucket'\nWould create user: 's3.read-write.my-bucket' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess'\nWould attach policy called 's3.read-write.my-bucket' to user 's3.read-write.my-bucket', details:*\nWould call create access key for user 's3.read-write.my-bucket'\"\"\"\n            ),\n        ),\n        (\n            [\"--username\", \"frank\"],\n            (\n                \"\"\"Would create bucket: 'my-bucket'\nWould create user: 'frank' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess'\nWould attach policy called 's3.read-write.my-bucket' to user 'frank', details:*\nWould call create access key for user 'frank'\"\"\"\n            ),\n        ),\n        (\n            [\"--duration\", \"20m\"],\n            (\n                \"\"\"Would create bucket: 'my-bucket'\nWould ensure role: 's3-credentials.AmazonS3FullAccess'\nWould assume role using following policy for 1200 
seconds:*\"\"\"\n            ),\n        ),\n        (\n            [\"--public\"],\n            (\n                \"\"\"Would create bucket: 'my-bucket'\n... then add this public access block configuration:\n{\"BlockPublicAcls\": false, \"IgnorePublicAcls\": false, \"BlockPublicPolicy\": false, \"RestrictPublicBuckets\": false}\n... then attach the following bucket policy to it:*\nWould create user: 's3.read-write.my-bucket' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess'\nWould attach policy called 's3.read-write.my-bucket' to user 's3.read-write.my-bucket', details:*\nWould call create access key for user 's3.read-write.my-bucket'\"\"\"\n            ),\n        ),\n        (\n            [\n                \"--statement\",\n                '{\"Effect\": \"Allow\", \"Action\": \"textract:*\", \"Resource\": \"*\"}',\n            ],\n            (\n                \"\"\"Would create bucket: 'my-bucket'\nWould create user: 's3.custom.my-bucket'\n*\"Action\": \"textract:*\"\"\"\n            ),\n        ),\n    ),\n)\ndef test_dry_run(options, expected):\n    runner = CliRunner()\n    result = runner.invoke(cli, [\"create\", \"my-bucket\", \"--dry-run\"] + options)\n    assert result.exit_code == 0, result.output\n    assert_match_with_wildcards(expected, result.output)\n"
  },
  {
    "path": "tests/test_integration.py",
    "content": "# These integration tests only run with \"pytest --integration\" -\n# they execute live calls against AWS using environment variables\n# and clean up after themselves\nfrom click.testing import CliRunner\nfrom s3_credentials.cli import bucket_exists, cli\nimport botocore\nimport boto3\nimport datetime\nimport json\nimport pytest\nimport secrets\nimport time\nimport urllib\n\n# Mark all tests in this module with \"integration\":\npytestmark = pytest.mark.integration\n\n\n@pytest.fixture(autouse=True)\ndef cleanup():\n    cleanup_any_resources()\n    yield\n    cleanup_any_resources()\n\n\ndef test_create_bucket_with_read_write(tmpdir):\n    bucket_name = \"s3-credentials-tests.read-write.{}\".format(secrets.token_hex(4))\n    # Bucket should not exist\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials = get_output(\"create\", bucket_name, \"-c\")\n    credentials_decoded = json.loads(credentials)\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n    ).client(\"s3\")\n    # Bucket should exist - found I needed to sleep(10) before put-object would work\n    time.sleep(10)\n    assert bucket_exists(s3, bucket_name)\n    # Use the credentials to write a file to that bucket\n    test_write = tmpdir / \"test-write.txt\"\n    test_write.write_text(\"hello\", \"utf-8\")\n    get_output(\"put-object\", bucket_name, \"test-write.txt\", str(test_write))\n    credentials_s3.put_object(\n        Body=\"hello\".encode(\"utf-8\"), Bucket=bucket_name, Key=\"test-write.txt\"\n    )\n    # Use default s3 client to check that the write succeeded\n    get_object_response = s3.get_object(Bucket=bucket_name, Key=\"test-write.txt\")\n    assert get_object_response[\"Body\"].read() == b\"hello\"\n    # Check we can read the file using the credentials too\n    output = get_output(\"get-object\", 
bucket_name, \"test-write.txt\")\n    assert output == \"hello\"\n\n\ndef test_create_bucket_read_only_duration_15():\n    bucket_name = \"s3-credentials-tests.read-only.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\"create\", bucket_name, \"-c\", \"--duration\", \"15m\", \"--read-only\")\n    )\n    assert set(credentials_decoded.keys()) == {\n        \"AccessKeyId\",\n        \"SecretAccessKey\",\n        \"SessionToken\",\n        \"Expiration\",\n    }\n    # Expiration should be ~15 minutes in the future\n    delta = (\n        datetime.datetime.fromisoformat(credentials_decoded[\"Expiration\"])\n        - datetime.datetime.now(datetime.timezone.utc)\n    ).total_seconds()\n    # Should be around about 900 seconds\n    assert 800 < delta < 1000\n    # Wait for everything to exist\n    time.sleep(10)\n    # Create client with these credentials\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n        aws_session_token=credentials_decoded[\"SessionToken\"],\n    ).client(\"s3\")\n    # Client should NOT be allowed to write objects\n    with pytest.raises(botocore.exceptions.ClientError):\n        credentials_s3.put_object(\n            Body=\"hello\".encode(\"utf-8\"), Bucket=bucket_name, Key=\"hello.txt\"\n        )\n    # Write an object using root credentials\n    s3.put_object(\n        Body=\"hello read-only\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"hello-read-only.txt\",\n    )\n    # Client should be able to read this\n    assert (\n        read_file(credentials_s3, bucket_name, \"hello-read-only.txt\")\n        == \"hello read-only\"\n    )\n\n\ndef test_read_write_bucket_prefix_temporary_credentials():\n    bucket_name = \"s3-credentials-tests.read-write-prefix.{}\".format(\n 
       secrets.token_hex(4)\n    )\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\n            \"create\", bucket_name, \"-c\", \"--duration\", \"15m\", \"--prefix\", \"my/prefix/\"\n        )\n    )\n    # Wait for everything to exist\n    time.sleep(10)\n    # Create client with these credentials\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n        aws_session_token=credentials_decoded[\"SessionToken\"],\n    ).client(\"s3\")\n    # Write file with root credentials that I should not be able to see\n    s3.put_object(\n        Body=\"hello\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"should-not-be-visible.txt\",\n    )\n    # I should be able to write to and read from /my/prefix/file.txt\n    credentials_s3.put_object(\n        Body=\"hello\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"my/prefix/file.txt\",\n    )\n    assert read_file(credentials_s3, bucket_name, \"my/prefix/file.txt\") == \"hello\"\n    # Should NOT be able to read should-not-be-visible.txt\n    with pytest.raises(botocore.exceptions.ClientError):\n        read_file(credentials_s3, bucket_name, \"should-not-be-visible.txt\")\n\n\ndef test_read_write_bucket_prefix_permanent_credentials():\n    bucket_name = \"s3-credentials-tests.rw-prefix-perm.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\"create\", bucket_name, \"-c\", \"--prefix\", \"my/prefix-2/\")\n    )\n    # Wait for everything to exist\n    time.sleep(10)\n    # Create client with these credentials\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        
aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n    ).client(\"s3\")\n    # Write file with root credentials that I should not be able to see\n    s3.put_object(\n        Body=\"hello\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"should-not-be-visible.txt\",\n    )\n    # I should be able to write to and read from /my/prefix/file.txt\n    credentials_s3.put_object(\n        Body=\"hello\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"my/prefix-2/file.txt\",\n    )\n    assert read_file(credentials_s3, bucket_name, \"my/prefix-2/file.txt\") == \"hello\"\n    # Should NOT be able to read should-not-be-visible.txt\n    with pytest.raises(botocore.exceptions.ClientError):\n        read_file(credentials_s3, bucket_name, \"should-not-be-visible.txt\")\n\n\ndef test_list_bucket_including_with_prefix():\n    bucket_name = \"s3-credentials-tests.lbucket.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(get_output(\"create\", bucket_name, \"-c\"))\n    time.sleep(10)\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n    ).client(\"s3\")\n    credentials_s3.put_object(\n        Body=\"one\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"one/file.txt\",\n    )\n    credentials_s3.put_object(\n        Body=\"two\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"two/file.txt\",\n    )\n    # Try list-bucket against everything\n    everything = json.loads(\n        get_output(\n            \"list-bucket\",\n            bucket_name,\n            \"--access-key\",\n            credentials_decoded[\"AccessKeyId\"],\n            \"--secret-key\",\n            credentials_decoded[\"SecretAccessKey\"],\n        )\n    )\n    assert [e[\"Key\"] for e in everything] == 
[\"one/file.txt\", \"two/file.txt\"]\n    # Now use --prefix\n    prefix_output = json.loads(\n        get_output(\n            \"list-bucket\",\n            bucket_name,\n            \"--prefix\",\n            \"one/\",\n            \"--access-key\",\n            credentials_decoded[\"AccessKeyId\"],\n            \"--secret-key\",\n            credentials_decoded[\"SecretAccessKey\"],\n        )\n    )\n    assert len(prefix_output) == 1\n    assert prefix_output[0][\"Key\"] == \"one/file.txt\"\n\n\ndef test_prefix_read_only():\n    bucket_name = \"s3-credentials-tests.pre-ro.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\"create\", bucket_name, \"-c\", \"--read-only\", \"--prefix\", \"prefix/\")\n    )\n    time.sleep(10)\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n    ).client(\"s3\")\n    # Should not be able to write objects\n    with pytest.raises(botocore.exceptions.ClientError):\n        credentials_s3.put_object(\n            Body=\"allowed\".encode(\"utf-8\"),\n            Bucket=bucket_name,\n            Key=\"prefix/allowed.txt\",\n        )\n    # So we use root permissions to write these:\n    s3 = boto3.client(\"s3\")\n    s3.put_object(\n        Body=\"denied\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"denied.txt\",\n    )\n    s3.put_object(\n        Body=\"allowed\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"prefix/allowed.txt\",\n    )\n    # list-bucket against everything should error\n    with pytest.raises(GetOutputError):\n        get_output(\n            \"list-bucket\",\n            bucket_name,\n            \"--access-key\",\n            credentials_decoded[\"AccessKeyId\"],\n            \"--secret-key\",\n            
credentials_decoded[\"SecretAccessKey\"],\n        )\n\n    # list-bucket against --prefix prefix/ should work\n    items = json.loads(\n        get_output(\n            \"list-bucket\",\n            bucket_name,\n            \"--prefix\",\n            \"prefix/\",\n            \"--access-key\",\n            credentials_decoded[\"AccessKeyId\"],\n            \"--secret-key\",\n            credentials_decoded[\"SecretAccessKey\"],\n        )\n    )\n    assert [e[\"Key\"] for e in items] == [\"prefix/allowed.txt\"]\n    # Should NOT be able to read \"denied.txt\"\n    with pytest.raises(botocore.exceptions.ClientError):\n        read_file(credentials_s3, bucket_name, \"denied.txt\")\n    # Should be able to read prefix/allowed.txt\n    assert read_file(credentials_s3, bucket_name, \"prefix/allowed.txt\") == \"allowed\"\n\n\ndef test_prefix_write_only():\n    bucket_name = \"s3-credentials-tests.pre-wo.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\"create\", bucket_name, \"-c\", \"--write-only\", \"--prefix\", \"prefix/\")\n    )\n    time.sleep(10)\n    credentials_s3 = boto3.session.Session(\n        aws_access_key_id=credentials_decoded[\"AccessKeyId\"],\n        aws_secret_access_key=credentials_decoded[\"SecretAccessKey\"],\n    ).client(\"s3\")\n    # Should not be able to write objects to root\n    with pytest.raises(botocore.exceptions.ClientError):\n        credentials_s3.put_object(\n            Body=\"denied\".encode(\"utf-8\"),\n            Bucket=bucket_name,\n            Key=\"denied.txt\",\n        )\n    # Should be able to write them to prefix/\n    credentials_s3.put_object(\n        Body=\"allowed\".encode(\"utf-8\"),\n        Bucket=bucket_name,\n        Key=\"prefix/allowed2.txt\",\n    )\n    # Use root permissions to verify the write\n    s3 = boto3.client(\"s3\")\n    assert read_file(s3, bucket_name, 
\"prefix/allowed2.txt\") == \"allowed\"\n    # Should not be able to run list-bucket, even against the prefix\n    for options in ([], [\"--prefix\", \"prefix/\"]):\n        with pytest.raises(GetOutputError):\n            args = [\n                \"list-bucket\",\n                bucket_name,\n                \"--access-key\",\n                credentials_decoded[\"AccessKeyId\"],\n                \"--secret-key\",\n                credentials_decoded[\"SecretAccessKey\"],\n            ] + options\n            get_output(*args)\n    # Should not be able to get-object\n    for key in (\"denied.txt\", \"prefix/allowed2.txt\"):\n        with pytest.raises(botocore.exceptions.ClientError):\n            read_file(credentials_s3, bucket_name, key)\n\n\nclass GetOutputError(Exception):\n    pass\n\n\ndef get_output(*args, input=None):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, args, catch_exceptions=False, input=input)\n    if result.exit_code != 0:\n        raise GetOutputError(result.output)\n    return result.stdout\n\n\ndef read_file(s3, bucket, path):\n    response = s3.get_object(Bucket=bucket, Key=path)\n    return response[\"Body\"].read().decode(\"utf-8\")\n\n\ndef cleanup_any_resources():\n    # Delete any users beginning s3-credentials-tests.\n    users = json.loads(get_output(\"list-users\"))\n    users_to_delete = [\n        user[\"UserName\"]\n        for user in users\n        if \".s3-credentials-tests.\" in user[\"UserName\"]\n    ]\n    if users_to_delete:\n        print(\"Deleting users: \", users_to_delete)\n        get_output(\"delete-user\", *users_to_delete)\n    s3 = boto3.client(\"s3\")\n    # Delete any buckets beginning s3-credentials-tests.\n    buckets = json.loads(get_output(\"list-buckets\"))\n    buckets_to_delete = [\n        bucket[\"Name\"]\n        for bucket in buckets\n        if bucket[\"Name\"].startswith(\"s3-credentials-tests.\")\n    ]\n    for bucket in 
buckets_to_delete:\n        print(\"Deleting bucket: {}\".format(bucket))\n        # Delete all objects in the bucket\n        boto3.resource(\"s3\").Bucket(bucket).objects.all().delete()\n        # Delete the bucket\n        s3.delete_bucket(Bucket=bucket)\n\n\ndef test_public_bucket():\n    bucket_name = \"s3-credentials-tests.public-bucket.{}\".format(secrets.token_hex(4))\n    s3 = boto3.client(\"s3\")\n    assert not bucket_exists(s3, bucket_name)\n    credentials_decoded = json.loads(\n        get_output(\"create\", bucket_name, \"-c\", \"--duration\", \"15m\", \"--public\")\n    )\n    assert set(credentials_decoded.keys()) == {\n        \"AccessKeyId\",\n        \"SecretAccessKey\",\n        \"SessionToken\",\n        \"Expiration\",\n    }\n    # Wait for everything to exist\n    time.sleep(5)\n    # Use those credentials to upload a file\n    content = \"<h1>Hello world</h1>\"\n    get_output(\n        \"put-object\",\n        bucket_name,\n        \"hello.html\",\n        \"-\",\n        \"--content-type\",\n        \"text/html\",\n        \"--access-key\",\n        credentials_decoded[\"AccessKeyId\"],\n        \"--secret-key\",\n        credentials_decoded[\"SecretAccessKey\"],\n        \"--session-token\",\n        credentials_decoded[\"SessionToken\"],\n        input=content,\n    )\n    # It should be publicly accessible\n    url = \"https://s3.amazonaws.com/{}/hello.html\".format(bucket_name)\n    print(url)\n    response = urllib.request.urlopen(url)\n    actual_content = response.read().decode(\"utf-8\")\n    assert response.status == 200\n    assert response.headers[\"content-type\"] == \"text/html\"\n    assert actual_content == content\n"
  },
  {
    "path": "tests/test_localserver.py",
    "content": "\"\"\"Tests for the localserver command and related functionality.\"\"\"\n\nimport botocore\nfrom click.testing import CliRunner\nfrom s3_credentials.cli import cli\nimport datetime\nimport json\nimport pytest\nfrom unittest.mock import Mock\n\n\ndef test_localserver_missing_duration():\n    runner = CliRunner()\n    result = runner.invoke(cli, [\"localserver\", \"my-bucket\"])\n    assert result.exit_code == 2\n    assert \"Missing option\" in result.output\n    assert \"duration\" in result.output.lower()\n\n\ndef test_localserver_invalid_duration():\n    runner = CliRunner()\n    result = runner.invoke(cli, [\"localserver\", \"my-bucket\", \"--duration\", \"5s\"])\n    assert result.exit_code == 2\n    assert \"Duration must be between 15 minutes and 12 hours\" in result.output\n\n\ndef test_localserver_read_only_write_only_conflict():\n    runner = CliRunner()\n    result = runner.invoke(\n        cli,\n        [\n            \"localserver\",\n            \"my-bucket\",\n            \"--duration\",\n            \"15m\",\n            \"--read-only\",\n            \"--write-only\",\n        ],\n    )\n    assert result.exit_code == 1\n    assert \"Cannot use --read-only and --write-only at the same time\" in result.output\n\n\ndef test_localserver_bucket_not_exists(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError(\n        error_response={}, operation_name=\"\"\n    )\n\n    runner = CliRunner()\n    result = runner.invoke(\n        cli, [\"localserver\", \"nonexistent-bucket\", \"--duration\", \"15m\"]\n    )\n    assert result.exit_code == 1\n    assert \"Bucket does not exist: nonexistent-bucket\" in result.output\n\n\ndef test_credential_cache_generates_credentials(mocker):\n    from s3_credentials.localserver import CredentialCache\n\n    mock_iam = Mock()\n    mock_sts = Mock()\n\n    
mock_sts.get_caller_identity.return_value = {\"Account\": \"123456\"}\n    mock_iam.get_role.return_value = {\"Role\": {\"Arn\": \"arn:aws:iam::123456:role/test\"}}\n    mock_sts.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n            \"SecretAccessKey\": \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\",\n            \"SessionToken\": \"session-token\",\n            \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n        }\n    }\n\n    cache = CredentialCache(\n        iam=mock_iam,\n        sts=mock_sts,\n        bucket=\"test-bucket\",\n        permission=\"read-only\",\n        prefix=\"*\",\n        duration=900,  # 15 minutes\n        extra_statements=[],\n    )\n\n    credentials = cache.get_credentials()\n\n    assert credentials[\"AccessKeyId\"] == \"AKIAIOSFODNN7EXAMPLE\"\n    assert credentials[\"SecretAccessKey\"] == \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\n    assert credentials[\"SessionToken\"] == \"session-token\"\n\n    mock_sts.assume_role.assert_called_once()\n    call_kwargs = mock_sts.assume_role.call_args[1]\n    assert call_kwargs[\"RoleArn\"] == \"arn:aws:iam::123456:role/test\"\n    assert call_kwargs[\"RoleSessionName\"] == \"s3.read-only.test-bucket\"\n    assert call_kwargs[\"DurationSeconds\"] == 900\n\n\ndef test_credential_cache_caches_credentials(mocker):\n    from s3_credentials.localserver import CredentialCache\n\n    mock_iam = Mock()\n    mock_sts = Mock()\n\n    mock_sts.get_caller_identity.return_value = {\"Account\": \"123456\"}\n    mock_iam.get_role.return_value = {\"Role\": {\"Arn\": \"arn:aws:iam::123456:role/test\"}}\n    mock_sts.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"token\",\n            \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n        }\n    }\n\n    cache = 
CredentialCache(\n        iam=mock_iam,\n        sts=mock_sts,\n        bucket=\"test-bucket\",\n        permission=\"read-write\",\n        prefix=\"*\",\n        duration=900,\n        extra_statements=[],\n    )\n\n    # Get credentials twice\n    creds1 = cache.get_credentials()\n    creds2 = cache.get_credentials()\n\n    # Should be the same object (cached)\n    assert creds1 is creds2\n    # Should only have called assume_role once\n    assert mock_sts.assume_role.call_count == 1\n\n\ndef test_credential_cache_refreshes_after_duration(mocker):\n    from s3_credentials.localserver import CredentialCache\n    import time\n\n    mock_iam = Mock()\n    mock_sts = Mock()\n\n    mock_sts.get_caller_identity.return_value = {\"Account\": \"123456\"}\n    mock_iam.get_role.return_value = {\"Role\": {\"Arn\": \"arn:aws:iam::123456:role/test\"}}\n    mock_sts.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"token\",\n            \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n        }\n    }\n\n    cache = CredentialCache(\n        iam=mock_iam,\n        sts=mock_sts,\n        bucket=\"test-bucket\",\n        permission=\"read-write\",\n        prefix=\"*\",\n        duration=1,  # 1 second for testing\n        extra_statements=[],\n    )\n\n    # Get credentials first time\n    cache.get_credentials()\n    assert mock_sts.assume_role.call_count == 1\n\n    # Wait for duration to expire\n    time.sleep(1.1)\n\n    # Get credentials again - should regenerate\n    cache.get_credentials()\n    assert mock_sts.assume_role.call_count == 2\n\n\n@pytest.mark.parametrize(\n    \"permission,expected_permission\",\n    (\n        (\"read-write\", \"read-write\"),\n        (\"read-only\", \"read-only\"),\n        (\"write-only\", \"write-only\"),\n    ),\n)\ndef test_credential_cache_permission_in_session_name(\n    mocker, 
permission, expected_permission\n):\n    from s3_credentials.localserver import CredentialCache\n\n    mock_iam = Mock()\n    mock_sts = Mock()\n\n    mock_sts.get_caller_identity.return_value = {\"Account\": \"123456\"}\n    mock_iam.get_role.return_value = {\"Role\": {\"Arn\": \"arn:aws:iam::123456:role/test\"}}\n    mock_sts.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"token\",\n            \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n        }\n    }\n\n    cache = CredentialCache(\n        iam=mock_iam,\n        sts=mock_sts,\n        bucket=\"my-bucket\",\n        permission=permission,\n        prefix=\"*\",\n        duration=900,\n        extra_statements=[],\n    )\n\n    cache.get_credentials()\n\n    call_kwargs = mock_sts.assume_role.call_args[1]\n    assert call_kwargs[\"RoleSessionName\"] == f\"s3.{expected_permission}.my-bucket\"\n\n\ndef test_credential_cache_policy_generation(mocker):\n    from s3_credentials.localserver import CredentialCache\n\n    mock_iam = Mock()\n    mock_sts = Mock()\n\n    mock_sts.get_caller_identity.return_value = {\"Account\": \"123456\"}\n    mock_iam.get_role.return_value = {\"Role\": {\"Arn\": \"arn:aws:iam::123456:role/test\"}}\n    mock_sts.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"token\",\n            \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n        }\n    }\n\n    cache = CredentialCache(\n        iam=mock_iam,\n        sts=mock_sts,\n        bucket=\"test-bucket\",\n        permission=\"read-only\",\n        prefix=\"*\",\n        duration=900,\n        extra_statements=[],\n    )\n\n    cache.get_credentials()\n\n    call_kwargs = mock_sts.assume_role.call_args[1]\n    policy = 
json.loads(call_kwargs[\"Policy\"])\n    assert policy[\"Version\"] == \"2012-10-17\"\n    assert len(policy[\"Statement\"]) == 2\n    # Should have ListBucket and GetObject statements\n    actions = []\n    for stmt in policy[\"Statement\"]:\n        actions.extend(stmt[\"Action\"])\n    assert \"s3:ListBucket\" in actions\n    assert \"s3:GetObject\" in actions\n\n\nVALID_CREDENTIALS = {\n    \"AccessKeyId\": \"AKIAIOSFODNN7EXAMPLE\",\n    \"SecretAccessKey\": \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\",\n    \"SessionToken\": \"session-token\",\n    \"Expiration\": datetime.datetime(2025, 12, 16, 12, 0, 0),\n}\n\n\n@pytest.mark.parametrize(\n    \"path,credentials_return,credentials_error,expected_status,expected_body_contains\",\n    [\n        # Success case: valid path, credentials returned\n        (\n            \"/\",\n            VALID_CREDENTIALS,\n            None,\n            200,\n            ['\"Version\": 1', '\"AccessKeyId\"', '\"SessionToken\"'],\n        ),\n        # 404 case: wrong path\n        (\n            \"/wrong-path\",\n            None,\n            None,\n            404,\n            [\"Not found\"],\n        ),\n        # 500 case: credential generation fails\n        (\n            \"/\",\n            None,\n            Exception(\"AWS Error\"),\n            500,\n            [\"AWS Error\"],\n        ),\n    ],\n    ids=[\"success\", \"wrong-path-404\", \"error-500\"],\n)\ndef test_credential_handler_responses(\n    path,\n    credentials_return,\n    credentials_error,\n    expected_status,\n    expected_body_contains,\n):\n    from s3_credentials.localserver import make_credential_handler\n    import io\n\n    mock_cache = Mock()\n    if credentials_error:\n        mock_cache.get_credentials.side_effect = credentials_error\n    elif credentials_return:\n        mock_cache.get_credentials.return_value = credentials_return\n\n    handler_class = make_credential_handler(mock_cache)\n\n    handler = 
handler_class.__new__(handler_class)\n    handler.path = path\n    handler.wfile = io.BytesIO()\n    handler.request_version = \"HTTP/1.1\"\n\n    response_code = None\n\n    def mock_send_response(code):\n        nonlocal response_code\n        response_code = code\n\n    handler.send_response = mock_send_response\n    handler.send_header = lambda name, value: None\n    handler.end_headers = lambda: None\n\n    handler.do_GET()\n\n    assert response_code == expected_status\n    response_body = handler.wfile.getvalue().decode()\n    for expected in expected_body_contains:\n        assert expected in response_body\n"
  },
  {
    "path": "tests/test_s3_credentials.py",
    "content": "import botocore\nfrom click.testing import CliRunner\nimport s3_credentials\nfrom s3_credentials.cli import cli\nimport json\nimport os\nimport pathlib\nimport pytest\nfrom unittest.mock import call, Mock\nfrom botocore.stub import Stubber\n\n\n@pytest.fixture\ndef stub_iam(mocker):\n    client = botocore.session.get_session().create_client(\"iam\")\n    stubber = Stubber(client)\n    stubber.activate()\n    mocker.patch(\"s3_credentials.cli.make_client\", return_value=client)\n    return stubber\n\n\n@pytest.fixture\ndef stub_s3(mocker):\n    client = botocore.session.get_session().create_client(\"s3\")\n    stubber = Stubber(client)\n    stubber.activate()\n    mocker.patch(\"s3_credentials.cli.make_client\", return_value=client)\n    return stubber\n\n\n@pytest.fixture\ndef stub_sts(mocker):\n    client = botocore.session.get_session().create_client(\"sts\")\n    stubber = Stubber(client)\n    stubber.activate()\n    mocker.patch(\"s3_credentials.cli.make_client\", return_value=client)\n    return stubber\n\n\ndef test_whoami(mocker, stub_sts):\n    stub_sts.add_response(\n        \"get_caller_identity\",\n        {\n            \"UserId\": \"AEONAUTHOUNTOHU\",\n            \"Account\": \"123456\",\n            \"Arn\": \"arn:aws:iam::123456:user/user-name\",\n            \"ResponseMetadata\": {},\n        },\n    )\n\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"whoami\"])\n        assert result.exit_code == 0\n        assert json.loads(result.output) == {\n            \"UserId\": \"AEONAUTHOUNTOHU\",\n            \"Account\": \"123456\",\n            \"Arn\": \"arn:aws:iam::123456:user/user-name\",\n        }\n\n\n@pytest.mark.parametrize(\n    \"option,expected\",\n    (\n        (\n            \"\",\n            \"[\\n\"\n            \"  {\\n\"\n            '    \"Path\": \"/\",\\n'\n            '    \"UserName\": \"NameA\",\\n'\n            '    \"UserId\": 
\"AID000000000000000001\",\\n'\n            '    \"Arn\": \"arn:aws:iam::000000000000:user/NameB\",\\n'\n            '    \"CreateDate\": \"2020-01-01 00:00:00+00:00\"\\n'\n            \"  },\\n\"\n            \"  {\\n\"\n            '    \"Path\": \"/\",\\n'\n            '    \"UserName\": \"NameA\",\\n'\n            '    \"UserId\": \"AID000000000000000000\",\\n'\n            '    \"Arn\": \"arn:aws:iam::000000000000:user/NameB\",\\n'\n            '    \"CreateDate\": \"2020-01-01 00:00:00+00:00\"\\n'\n            \"  }\\n\"\n            \"]\\n\",\n        ),\n        (\n            \"--nl\",\n            '{\"Path\": \"/\", \"UserName\": \"NameA\", \"UserId\": \"AID000000000000000001\", \"Arn\": \"arn:aws:iam::000000000000:user/NameB\", \"CreateDate\": \"2020-01-01 00:00:00+00:00\"}\\n'\n            '{\"Path\": \"/\", \"UserName\": \"NameA\", \"UserId\": \"AID000000000000000000\", \"Arn\": \"arn:aws:iam::000000000000:user/NameB\", \"CreateDate\": \"2020-01-01 00:00:00+00:00\"}\\n',\n        ),\n        (\n            \"--csv\",\n            (\n                \"UserName,UserId,Arn,Path,CreateDate,PasswordLastUsed,PermissionsBoundary,Tags\\n\"\n                \"NameA,AID000000000000000001,arn:aws:iam::000000000000:user/NameB,/,2020-01-01 00:00:00+00:00,,,\\n\"\n                \"NameA,AID000000000000000000,arn:aws:iam::000000000000:user/NameB,/,2020-01-01 00:00:00+00:00,,,\\n\"\n            ),\n        ),\n        (\n            \"--tsv\",\n            (\n                \"UserName\\tUserId\\tArn\\tPath\\tCreateDate\\tPasswordLastUsed\\tPermissionsBoundary\\tTags\\n\"\n                \"NameA\\tAID000000000000000001\\tarn:aws:iam::000000000000:user/NameB\\t/\\t2020-01-01 00:00:00+00:00\\t\\t\\t\\n\"\n                \"NameA\\tAID000000000000000000\\tarn:aws:iam::000000000000:user/NameB\\t/\\t2020-01-01 00:00:00+00:00\\t\\t\\t\\n\"\n            ),\n        ),\n    ),\n)\ndef test_list_users(option, expected, stub_iam):\n    stub_iam.add_response(\n        
\"list_users\",\n        {\n            \"Users\": [\n                {\n                    \"Path\": \"/\",\n                    \"UserName\": \"NameA\",\n                    \"UserId\": \"AID000000000000000001\",\n                    \"Arn\": \"arn:aws:iam::000000000000:user/NameB\",\n                    \"CreateDate\": \"2020-01-01 00:00:00+00:00\",\n                },\n                {\n                    \"Path\": \"/\",\n                    \"UserName\": \"NameA\",\n                    \"UserId\": \"AID000000000000000000\",\n                    \"Arn\": \"arn:aws:iam::000000000000:user/NameB\",\n                    \"CreateDate\": \"2020-01-01 00:00:00+00:00\",\n                },\n            ]\n        },\n    )\n\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-users\"] + ([option] if option else []))\n        assert result.exit_code == 0\n        assert result.output == expected\n\n\n@pytest.mark.parametrize(\n    \"options,expected\",\n    (\n        (\n            [],\n            (\n                \"[\\n\"\n                \"  {\\n\"\n                '    \"Name\": \"bucket-one\",\\n'\n                '    \"CreationDate\": \"2020-01-01 00:00:00+00:00\"\\n'\n                \"  },\\n\"\n                \"  {\\n\"\n                '    \"Name\": \"bucket-two\",\\n'\n                '    \"CreationDate\": \"2020-02-01 00:00:00+00:00\"\\n'\n                \"  }\\n\"\n                \"]\\n\"\n            ),\n        ),\n        (\n            [\"--nl\"],\n            '{\"Name\": \"bucket-one\", \"CreationDate\": \"2020-01-01 00:00:00+00:00\"}\\n'\n            '{\"Name\": \"bucket-two\", \"CreationDate\": \"2020-02-01 00:00:00+00:00\"}\\n',\n        ),\n        (\n            [\"--nl\", \"bucket-one\"],\n            '{\"Name\": \"bucket-one\", \"CreationDate\": \"2020-01-01 00:00:00+00:00\"}\\n',\n        ),\n    ),\n)\ndef test_list_buckets(stub_s3, options, expected):\n    
stub_s3.add_response(\n        \"list_buckets\",\n        {\n            \"Buckets\": [\n                {\n                    \"Name\": \"bucket-one\",\n                    \"CreationDate\": \"2020-01-01 00:00:00+00:00\",\n                },\n                {\n                    \"Name\": \"bucket-two\",\n                    \"CreationDate\": \"2020-02-01 00:00:00+00:00\",\n                },\n            ]\n        },\n    )\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-buckets\"] + options)\n        assert result.exit_code == 0\n        assert result.output == expected\n\n\ndef test_list_buckets_details(stub_s3):\n    stub_s3.add_response(\n        \"list_buckets\",\n        {\n            \"Buckets\": [\n                {\n                    \"Name\": \"bucket-one\",\n                    \"CreationDate\": \"2020-01-01 00:00:00+00:00\",\n                }\n            ]\n        },\n    )\n    stub_s3.add_response(\n        \"get_bucket_acl\",\n        {\n            \"Owner\": {\n                \"DisplayName\": \"swillison\",\n                \"ID\": \"36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441\",\n            },\n            \"Grants\": [\n                {\n                    \"Grantee\": {\n                        \"DisplayName\": \"swillison\",\n                        \"ID\": \"36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441\",\n                        \"Type\": \"CanonicalUser\",\n                    },\n                    \"Permission\": \"FULL_CONTROL\",\n                }\n            ],\n            \"ResponseMetadata\": {},\n        },\n    )\n    stub_s3.add_response(\n        \"get_bucket_location\",\n        {\n            \"LocationConstraint\": \"us-west-2\",\n        },\n    )\n    stub_s3.add_response(\n        \"get_public_access_block\",\n        {\n            \"PublicAccessBlockConfiguration\": {\n                
\"BlockPublicAcls\": True,\n                \"IgnorePublicAcls\": True,\n                \"BlockPublicPolicy\": True,\n                \"RestrictPublicBuckets\": True,\n            },\n        },\n    )\n    stub_s3.add_response(\n        \"get_bucket_website\",\n        {\n            \"IndexDocument\": {\"Suffix\": \"index.html\"},\n            \"ErrorDocument\": {\"Key\": \"error.html\"},\n        },\n    )\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-buckets\", \"--details\"])\n        assert result.exit_code == 0\n        assert result.output == (\n            \"[\\n\"\n            \"  {\\n\"\n            '    \"Name\": \"bucket-one\",\\n'\n            '    \"CreationDate\": \"2020-01-01 00:00:00+00:00\",\\n'\n            '    \"region\": \"us-west-2\",\\n'\n            '    \"bucket_acl\": {\\n'\n            '      \"Owner\": {\\n'\n            '        \"DisplayName\": \"swillison\",\\n'\n            '        \"ID\": \"36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441\"\\n'\n            \"      },\\n\"\n            '      \"Grants\": [\\n'\n            \"        {\\n\"\n            '          \"Grantee\": {\\n'\n            '            \"DisplayName\": \"swillison\",\\n'\n            '            \"ID\": \"36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441\",\\n'\n            '            \"Type\": \"CanonicalUser\"\\n'\n            \"          },\\n\"\n            '          \"Permission\": \"FULL_CONTROL\"\\n'\n            \"        }\\n\"\n            \"      ]\\n\"\n            \"    },\\n\"\n            '    \"public_access_block\": {\\n'\n            '      \"BlockPublicAcls\": true,\\n'\n            '      \"IgnorePublicAcls\": true,\\n'\n            '      \"BlockPublicPolicy\": true,\\n'\n            '      \"RestrictPublicBuckets\": true\\n'\n            \"    },\\n\"\n            '    \"bucket_website\": {\\n'\n            '      \"IndexDocument\": 
{\\n'\n            '        \"Suffix\": \"index.html\"\\n'\n            \"      },\\n\"\n            '      \"ErrorDocument\": {\\n'\n            '        \"Key\": \"error.html\"\\n'\n            \"      },\\n\"\n            '      \"url\": \"http://bucket-one.s3-website.us-west-2.amazonaws.com/\"\\n'\n            \"    }\\n\"\n            \"  }\\n\"\n            \"]\\n\"\n        )\n\n\nCUSTOM_POLICY = '{\"custom\": \"policy\", \"bucket\": \"$!BUCKET_NAME!$\"}'\nREAD_WRITE_POLICY = '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Action\": [\"s3:ListBucket\", \"s3:GetBucketLocation\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:GetObject\", \"s3:GetObjectAcl\", \"s3:GetObjectLegalHold\", \"s3:GetObjectRetention\", \"s3:GetObjectTagging\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:PutObject\", \"s3:DeleteObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}]}'\nREAD_ONLY_POLICY = '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Action\": [\"s3:ListBucket\", \"s3:GetBucketLocation\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:GetObject\", \"s3:GetObjectAcl\", \"s3:GetObjectLegalHold\", \"s3:GetObjectRetention\", \"s3:GetObjectTagging\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}]}'\nWRITE_ONLY_POLICY = '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Action\": [\"s3:PutObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}]}'\nPREFIX_POLICY = '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Action\": [\"s3:GetBucketLocation\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:ListBucket\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1\"], \"Condition\": {\"StringLike\": {\"s3:prefix\": 
[\"my-prefix/*\"]}}}, {\"Effect\": \"Allow\", \"Action\": [\"s3:GetObject\", \"s3:GetObjectAcl\", \"s3:GetObjectLegalHold\", \"s3:GetObjectRetention\", \"s3:GetObjectTagging\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/my-prefix/*\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:PutObject\", \"s3:DeleteObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/my-prefix/*\"]}]}'\nEXTRA_STATEMENTS_POLICY = '{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Action\": [\"s3:ListBucket\", \"s3:GetBucketLocation\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:GetObject\", \"s3:GetObjectAcl\", \"s3:GetObjectLegalHold\", \"s3:GetObjectRetention\", \"s3:GetObjectTagging\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}, {\"Effect\": \"Allow\", \"Action\": [\"s3:PutObject\", \"s3:DeleteObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}, {\"Effect\": \"Allow\", \"Action\": \"textract:*\", \"Resource\": \"*\"}]}'\n\n# Used by both test_create and test_create_duration\nCREATE_TESTS = (\n    # options,use_policy_stdin,expected_policy,expected_name_fragment\n    ([], False, READ_WRITE_POLICY, \"read-write\"),\n    ([\"--read-only\"], False, READ_ONLY_POLICY, \"read-only\"),\n    ([\"--write-only\"], False, WRITE_ONLY_POLICY, \"write-only\"),\n    ([\"--prefix\", \"my-prefix/\"], False, PREFIX_POLICY, \"read-write\"),\n    ([\"--policy\", \"POLICYFILEPATH\"], False, CUSTOM_POLICY, \"custom\"),\n    ([\"--policy\", \"-\"], True, CUSTOM_POLICY, \"custom\"),\n    ([\"--policy\", CUSTOM_POLICY], False, CUSTOM_POLICY, \"custom\"),\n    (\n        [\"--statement\", '{\"Effect\": \"Allow\", \"Action\": \"textract:*\", \"Resource\": \"*\"}'],\n        False,\n        EXTRA_STATEMENTS_POLICY,\n        \"custom\",\n    ),\n)\n\n\n@pytest.mark.parametrize(\n    \"options,use_policy_stdin,expected_policy,expected_name_fragment\",\n    CREATE_TESTS,\n)\ndef 
test_create(\n    mocker, tmpdir, options, use_policy_stdin, expected_policy, expected_name_fragment\n):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.create_access_key.return_value = {\n        \"AccessKey\": {\"AccessKeyId\": \"access\", \"SecretAccessKey\": \"secret\"}\n    }\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        filepath = str(tmpdir / \"policy.json\")\n        open(filepath, \"w\").write(CUSTOM_POLICY)\n        fixed_options = [\n            filepath if option == \"POLICYFILEPATH\" else option for option in options\n        ]\n        args = [\"create\", \"pytest-bucket-simonw-1\", \"-c\"] + fixed_options\n        kwargs = {}\n        if use_policy_stdin:\n            kwargs[\"input\"] = CUSTOM_POLICY\n        result = runner.invoke(cli, args, **kwargs, catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"Attached policy s3.NAME_FRAGMENT.pytest-bucket-simonw-1 to user s3.NAME_FRAGMENT.pytest-bucket-simonw-1\\n\"\n            \"Created access key for user: s3.NAME_FRAGMENT.pytest-bucket-simonw-1\\n\"\n            '{\\n    \"AccessKeyId\": \"access\",\\n    \"SecretAccessKey\": \"secret\"\\n}\\n'\n        ).replace(\"NAME_FRAGMENT\", expected_name_fragment)\n        assert [str(c) for c in boto3.mock_calls] == [\n            \"call('s3')\",\n            \"call('iam')\",\n            \"call('sts')\",\n            \"call().head_bucket(Bucket='pytest-bucket-simonw-1')\",\n            \"call().get_user(UserName='s3.{}.pytest-bucket-simonw-1')\".format(\n                expected_name_fragment\n            ),\n            \"call().put_user_policy(PolicyDocument='{}', PolicyName='s3.{}.pytest-bucket-simonw-1', UserName='s3.{}.pytest-bucket-simonw-1')\".format(\n                expected_policy.replace(\"$!BUCKET_NAME!$\", \"pytest-bucket-simonw-1\"),\n                expected_name_fragment,\n                
expected_name_fragment,\n            ),\n            \"call().create_access_key(UserName='s3.{}.pytest-bucket-simonw-1')\".format(\n                expected_name_fragment\n            ),\n        ]\n\n\n@pytest.mark.parametrize(\n    \"statement,expected_error\",\n    (\n        (\"\", \"Invalid JSON string\"),\n        (\"{}\", \"missing required keys: Action, Effect, Resource\"),\n        ('{\"Action\": 1}', \"missing required keys: Effect, Resource\"),\n        ('{\"Action\": 1, \"Effect\": 2}', \"missing required keys: Resource\"),\n    ),\n)\ndef test_create_statement_error(statement, expected_error):\n    runner = CliRunner()\n    result = runner.invoke(cli, [\"create\", \"--statement\", statement])\n    assert result.exit_code == 2\n    assert expected_error in result.output\n\n\n@pytest.fixture\ndef mocked_for_duration(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.create_access_key.return_value = {\n        \"AccessKey\": {\"AccessKeyId\": \"access\", \"SecretAccessKey\": \"secret\"}\n    }\n    boto3.return_value.get_caller_identity.return_value = {\"Account\": \"1234\"}\n    boto3.return_value.get_role.return_value = {\"Role\": {\"Arn\": \"arn:::role\"}}\n    boto3.return_value.assume_role.return_value = {\n        \"Credentials\": {\n            \"AccessKeyId\": \"access\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"session\",\n        }\n    }\n    return boto3\n\n\n@pytest.mark.parametrize(\n    \"options,use_policy_stdin,expected_policy,expected_name_fragment\",\n    CREATE_TESTS,\n)\ndef test_create_duration(\n    mocked_for_duration,\n    tmpdir,\n    options,\n    use_policy_stdin,\n    expected_policy,\n    expected_name_fragment,\n):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        filepath = str(tmpdir / \"policy.json\")\n        open(filepath, \"w\").write(CUSTOM_POLICY)\n        fixed_options = [\n            filepath 
if option == \"POLICYFILEPATH\" else option for option in options\n        ]\n        args = [\n            \"create\",\n            \"pytest-bucket-simonw-1\",\n            \"-c\",\n            \"--duration\",\n            \"15m\",\n        ] + fixed_options\n        kwargs = {}\n        if use_policy_stdin:\n            kwargs[\"input\"] = CUSTOM_POLICY\n        result = runner.invoke(cli, args, **kwargs, catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"Assume role against arn:::role for 900s\\n\"\n            \"{\\n\"\n            '    \"AccessKeyId\": \"access\",\\n'\n            '    \"SecretAccessKey\": \"secret\",\\n'\n            '    \"SessionToken\": \"session\"\\n'\n            \"}\\n\"\n        )\n        assert mocked_for_duration.mock_calls == [\n            call(\"s3\"),\n            call(\"iam\"),\n            call(\"sts\"),\n            call().head_bucket(Bucket=\"pytest-bucket-simonw-1\"),\n            call().get_caller_identity(),\n            call().get_role(RoleName=\"s3-credentials.AmazonS3FullAccess\"),\n            call().assume_role(\n                RoleArn=\"arn:::role\",\n                RoleSessionName=\"s3.{fragment}.pytest-bucket-simonw-1\".format(\n                    fragment=expected_name_fragment\n                ),\n                Policy=\"{policy}\".format(\n                    policy=expected_policy.replace(\n                        \"$!BUCKET_NAME!$\", \"pytest-bucket-simonw-1\"\n                    ),\n                ),\n                DurationSeconds=900,\n            ),\n        ]\n\n\ndef test_create_public(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.create_access_key.return_value = {\n        \"AccessKey\": {\"AccessKeyId\": \"access\", \"SecretAccessKey\": \"secret\"}\n    }\n    # Fake that the bucket does not exist\n    boto3.return_value.head_bucket.side_effect = 
botocore.exceptions.ClientError(\n        error_response={}, operation_name=\"\"\n    )\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        args = [\"create\", \"pytest-bucket-simonw-1\", \"-c\", \"--public\"]\n        result = runner.invoke(cli, args, catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"Created bucket: pytest-bucket-simonw-1\\n\"\n            \"Set public access block configuration\\n\"\n            \"Attached bucket policy allowing public access\\n\"\n            \"Attached policy s3.read-write.pytest-bucket-simonw-1 to user s3.read-write.pytest-bucket-simonw-1\\n\"\n            \"Created access key for user: s3.read-write.pytest-bucket-simonw-1\\n\"\n            \"{\\n\"\n            '    \"AccessKeyId\": \"access\",\\n'\n            '    \"SecretAccessKey\": \"secret\"\\n'\n            \"}\\n\"\n        )\n        assert [str(c) for c in boto3.mock_calls] == [\n            \"call('s3')\",\n            \"call('iam')\",\n            \"call('sts')\",\n            \"call().head_bucket(Bucket='pytest-bucket-simonw-1')\",\n            \"call().create_bucket(Bucket='pytest-bucket-simonw-1')\",\n            \"call().put_public_access_block(Bucket='pytest-bucket-simonw-1', PublicAccessBlockConfiguration={'BlockPublicAcls': False, 'IgnorePublicAcls': False, 'BlockPublicPolicy': False, 'RestrictPublicBuckets': False})\",\n            'call().put_bucket_policy(Bucket=\\'pytest-bucket-simonw-1\\', Policy=\\'{\"Version\": \"2012-10-17\", \"Statement\": [{\"Sid\": \"AllowAllGetObject\", \"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": [\"s3:GetObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}]}\\')',\n            \"call().get_user(UserName='s3.read-write.pytest-bucket-simonw-1')\",\n            \"call().put_user_policy(PolicyDocument='{}', PolicyName='s3.read-write.pytest-bucket-simonw-1', UserName='s3.read-write.pytest-bucket-simonw-1')\".format(\n   
             READ_WRITE_POLICY.replace(\"$!BUCKET_NAME!$\", \"pytest-bucket-simonw-1\"),\n            ),\n            \"call().create_access_key(UserName='s3.read-write.pytest-bucket-simonw-1')\",\n        ]\n\n\ndef test_create_website(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.create_access_key.return_value = {\n        \"AccessKey\": {\"AccessKeyId\": \"access\", \"SecretAccessKey\": \"secret\"}\n    }\n    # Fake that the bucket does not exist\n    boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError(\n        error_response={}, operation_name=\"\"\n    )\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        args = [\"create\", \"pytest-bucket-simonw-1\", \"-c\", \"--website\"]\n        result = runner.invoke(cli, args, catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"Created bucket: pytest-bucket-simonw-1\\n\"\n            \"Set public access block configuration\\n\"\n            \"Attached bucket policy allowing public access\\n\"\n            \"Configured website: IndexDocument=index.html, ErrorDocument=error.html\\n\"\n            \"Attached policy s3.read-write.pytest-bucket-simonw-1 to user s3.read-write.pytest-bucket-simonw-1\\n\"\n            \"Created access key for user: s3.read-write.pytest-bucket-simonw-1\\n\"\n            \"{\\n\"\n            '    \"AccessKeyId\": \"access\",\\n'\n            '    \"SecretAccessKey\": \"secret\"\\n'\n            \"}\\n\"\n        )\n        assert [str(c) for c in boto3.mock_calls] == [\n            \"call('s3')\",\n            \"call('iam')\",\n            \"call('sts')\",\n            \"call().head_bucket(Bucket='pytest-bucket-simonw-1')\",\n            \"call().create_bucket(Bucket='pytest-bucket-simonw-1')\",\n            \"call().put_public_access_block(Bucket='pytest-bucket-simonw-1', PublicAccessBlockConfiguration={'BlockPublicAcls': 
False, 'IgnorePublicAcls': False, 'BlockPublicPolicy': False, 'RestrictPublicBuckets': False})\",\n            'call().put_bucket_policy(Bucket=\\'pytest-bucket-simonw-1\\', Policy=\\'{\"Version\": \"2012-10-17\", \"Statement\": [{\"Sid\": \"AllowAllGetObject\", \"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": [\"s3:GetObject\"], \"Resource\": [\"arn:aws:s3:::pytest-bucket-simonw-1/*\"]}]}\\')',\n            \"call().put_bucket_website(Bucket='pytest-bucket-simonw-1', WebsiteConfiguration={'ErrorDocument': {'Key': 'error.html'}, 'IndexDocument': {'Suffix': 'index.html'}})\",\n            \"call().get_user(UserName='s3.read-write.pytest-bucket-simonw-1')\",\n            \"call().put_user_policy(PolicyDocument='{}', PolicyName='s3.read-write.pytest-bucket-simonw-1', UserName='s3.read-write.pytest-bucket-simonw-1')\".format(\n                READ_WRITE_POLICY.replace(\"$!BUCKET_NAME!$\", \"pytest-bucket-simonw-1\"),\n            ),\n            \"call().create_access_key(UserName='s3.read-write.pytest-bucket-simonw-1')\",\n        ]\n\n\ndef test_create_format_ini(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.create_access_key.return_value = {\n        \"AccessKey\": {\n            \"AccessKeyId\": \"access\",\n            \"SecretAccessKey\": \"secret\",\n            \"SessionToken\": \"session\",\n        }\n    }\n    runner = CliRunner()\n    result = runner.invoke(\n        cli,\n        [\"create\", \"test-bucket\", \"-c\", \"-f\", \"ini\"],\n    )\n    assert result.exit_code == 0\n    assert (\n        \"[default]\\naws_access_key_id=access\\naws_secret_access_key=secret\\n\"\n        in result.output\n    )\n\n\ndef test_create_format_duration_ini(mocked_for_duration):\n    runner = CliRunner()\n    result = runner.invoke(\n        cli,\n        [\"create\", \"test-bucket\", \"-c\", \"--duration\", \"15m\", \"-f\", \"ini\"],\n        catch_exceptions=False,\n    )\n    assert 
result.exit_code == 0\n    assert (\n        \"[default]\\n\"\n        \"aws_access_key_id=access\\n\"\n        \"aws_secret_access_key=secret\\n\"\n        \"aws_session_token=session\\n\"\n    ) in result.output\n\n\ndef test_list_user_policies(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.get_user_policy.return_value = {\n        \"PolicyDocument\": {\"policy\": \"here\"}\n    }\n\n    def get_paginator(type):\n        m = Mock()\n        if type == \"list_users\":\n            m.paginate.return_value = [\n                {\"Users\": [{\"UserName\": \"one\"}, {\"UserName\": \"two\"}]}\n            ]\n        elif type == \"list_user_policies\":\n            m.paginate.return_value = [{\"PolicyNames\": [\"policy-one\", \"policy-two\"]}]\n        return m\n\n    boto3().get_paginator.side_effect = get_paginator\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-user-policies\"], catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"User: one\\n\"\n            \"PolicyName: policy-one\\n\"\n            \"{\\n\"\n            '    \"policy\": \"here\"\\n'\n            \"}\\n\"\n            \"PolicyName: policy-two\\n\"\n            \"{\\n\"\n            '    \"policy\": \"here\"\\n'\n            \"}\\n\"\n            \"User: two\\n\"\n            \"PolicyName: policy-one\\n\"\n            \"{\\n\"\n            '    \"policy\": \"here\"\\n'\n            \"}\\n\"\n            \"PolicyName: policy-two\\n\"\n            \"{\\n\"\n            '    \"policy\": \"here\"\\n'\n            \"}\\n\"\n        )\n        assert boto3.mock_calls == [\n            call(),\n            call(\"iam\"),\n            call().get_paginator(\"list_users\"),\n            call().get_paginator(\"list_user_policies\"),\n            call().get_user_policy(UserName=\"one\", PolicyName=\"policy-one\"),\n            
call().get_user_policy(UserName=\"one\", PolicyName=\"policy-two\"),\n            call().get_paginator(\"list_user_policies\"),\n            call().get_user_policy(UserName=\"two\", PolicyName=\"policy-one\"),\n            call().get_user_policy(UserName=\"two\", PolicyName=\"policy-two\"),\n        ]\n\n\ndef test_delete_user(mocker):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.get_user_policy.return_value = {\n        \"PolicyDocument\": {\"policy\": \"here\"}\n    }\n\n    def get_paginator(type):\n        m = Mock()\n        if type == \"list_access_keys\":\n            m.paginate.return_value = [\n                {\"AccessKeyMetadata\": [{\"AccessKeyId\": \"one\"}, {\"AccessKeyId\": \"two\"}]}\n            ]\n        elif type == \"list_user_policies\":\n            m.paginate.return_value = [{\"PolicyNames\": [\"policy-one\"]}]\n        return m\n\n    boto3().get_paginator.side_effect = get_paginator\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"delete-user\", \"user-123\"], catch_exceptions=False)\n        assert result.exit_code == 0\n        assert result.output == (\n            \"User: user-123\\n\"\n            \"  Deleted policy: policy-one\\n\"\n            \"  Deleted access key: one\\n\"\n            \"  Deleted access key: two\\n\"\n            \"  Deleted user\\n\"\n        )\n        assert boto3.mock_calls == [\n            call(),\n            call(\"iam\"),\n            call().get_paginator(\"list_user_policies\"),\n            call().delete_user_policy(UserName=\"user-123\", PolicyName=\"policy-one\"),\n            call().get_paginator(\"list_access_keys\"),\n            call().delete_access_key(UserName=\"user-123\", AccessKeyId=\"one\"),\n            call().delete_access_key(UserName=\"user-123\", AccessKeyId=\"two\"),\n            call().delete_user(UserName=\"user-123\"),\n        ]\n\n\ndef test_get_cors_policy(mocker):\n 
   boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.get_bucket_cors.return_value = {\n        \"CORSRules\": [\n            {\n                \"ID\": \"set-by-s3-credentials\",\n                \"AllowedMethods\": [\"GET\"],\n                \"AllowedOrigins\": [\"*\"],\n            }\n        ]\n    }\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(\n            cli, [\"get-cors-policy\", \"my-bucket\"], catch_exceptions=False\n        )\n        assert result.exit_code == 0\n        assert result.output == (\n            \"[\"\n            \"\\n    {\"\n            '\\n        \"ID\": \"set-by-s3-credentials\",'\n            '\\n        \"AllowedMethods\": ['\n            '\\n            \"GET\"'\n            \"\\n        ],\"\n            '\\n        \"AllowedOrigins\": ['\n            '\\n            \"*\"'\n            \"\\n        ]\"\n            \"\\n    }\"\n            \"\\n]\\n\"\n        )\n\n        assert boto3.mock_calls == [\n            call(\"s3\"),\n            call().get_bucket_cors(Bucket=\"my-bucket\"),\n        ]\n\n\n@pytest.mark.parametrize(\n    \"options,expected_json\",\n    (\n        (\n            [],\n            {\n                \"ID\": \"set-by-s3-credentials\",\n                \"AllowedOrigins\": [\"*\"],\n                \"AllowedHeaders\": (),\n                \"AllowedMethods\": [\"GET\"],\n                \"ExposeHeaders\": (),\n            },\n        ),\n        (\n            [\n                \"--allowed-method\",\n                \"GET\",\n                \"--allowed-method\",\n                \"PUT\",\n                \"--allowed-origin\",\n                \"https://www.example.com/\",\n                \"--expose-header\",\n                \"ETag\",\n            ],\n            {\n                \"ID\": \"set-by-s3-credentials\",\n                \"AllowedOrigins\": (\"https://www.example.com/\",),\n                
\"AllowedHeaders\": (),\n                \"AllowedMethods\": (\"GET\", \"PUT\"),\n                \"ExposeHeaders\": (\"ETag\",),\n            },\n        ),\n        (\n            [\"--max-age-seconds\", 60],\n            {\n                \"ID\": \"set-by-s3-credentials\",\n                \"AllowedOrigins\": [\"*\"],\n                \"AllowedHeaders\": (),\n                \"AllowedMethods\": [\"GET\"],\n                \"ExposeHeaders\": (),\n                \"MaxAgeSeconds\": 60,\n            },\n        ),\n    ),\n)\ndef test_set_cors_policy(mocker, options, expected_json):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3.return_value.put_bucket_cors.return_value = {}\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(\n            cli, [\"set-cors-policy\", \"my-bucket\"] + options, catch_exceptions=False\n        )\n        assert result.exit_code == 0\n        assert result.output == \"\"\n        assert boto3.mock_calls == [\n            call(\"s3\"),\n            call().head_bucket(Bucket=\"my-bucket\"),\n            call().put_bucket_cors(\n                Bucket=\"my-bucket\", CORSConfiguration={\"CORSRules\": [expected_json]}\n            ),\n        ]\n\n\n@pytest.mark.parametrize(\n    \"strategy,expected_error\",\n    (\n        (\"stdin\", \"Input contained invalid JSON\"),\n        (\"filepath\", \"File contained invalid JSON\"),\n        (\"string\", \"Invalid JSON string\"),\n    ),\n)\n@pytest.mark.parametrize(\"use_valid_string\", (True, False))\ndef test_verify_create_policy_option(\n    tmpdir, mocker, strategy, expected_error, use_valid_string\n):\n    # Ensure \"bucket does not exist\" error to terminate after verification\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError(\n        error_response={}, operation_name=\"\"\n    )\n    if use_valid_string:\n        content = 
'{\"policy\": \"...\"}'\n    else:\n        content = \"{Invalid JSON\"\n    # Only used by strategy==filepath\n    filepath = str(tmpdir / \"policy.json\")\n    open(filepath, \"w\").write(content)\n\n    runner = CliRunner()\n    args = [\"create\", \"my-bucket\", \"--policy\"]\n    kwargs = {}\n    if strategy == \"stdin\":\n        args.append(\"-\")\n        kwargs[\"input\"] = content\n    elif strategy == \"filepath\":\n        args.append(filepath)\n    elif strategy == \"string\":\n        args.append(content)\n\n    result = runner.invoke(cli, args, **kwargs)\n    if use_valid_string:\n        assert result.exit_code == 1\n        assert (\n            result.output\n            == \"Error: Bucket does not exist: my-bucket - try --create-bucket to create it\\n\"\n        )\n    else:\n        assert result.exit_code\n        assert (\n            \"Error: Invalid value for '--policy': {}\".format(expected_error)\n            in result.output\n        )\n\n\n@pytest.mark.parametrize(\n    \"content\",\n    (\n        '{\"AccessKeyId\": \"access\", \"SecretAccessKey\": \"secret\"}',\n        \"[default]\\naws_access_key_id=access\\naws_secret_access_key=secret\",\n    ),\n)\n@pytest.mark.parametrize(\"use_stdin\", (True, False))\ndef test_auth_option(tmpdir, mocker, content, use_stdin):\n    boto3 = mocker.patch(\"boto3.client\")\n    boto3.return_value = Mock()\n    boto3().get_paginator().paginate.return_value = [{\"Users\": []}]\n\n    filepath = None\n    if use_stdin:\n        input = content\n        arg = \"-\"\n    else:\n        input = None\n        filepath = str(tmpdir / \"input\")\n        open(filepath, \"w\").write(content)\n        arg = filepath\n\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(\n            cli, [\"list-users\", \"-a\", arg], catch_exceptions=False, input=input\n        )\n        assert result.exit_code == 0\n\n    assert boto3.mock_calls == [\n        call(),\n        
call().get_paginator(),\n        call(\"iam\", aws_access_key_id=\"access\", aws_secret_access_key=\"secret\"),\n        call().get_paginator(\"list_users\"),\n        call().get_paginator().paginate(),\n    ]\n\n\n@pytest.mark.parametrize(\n    \"extra_option\", [\"--access-key\", \"--secret-key\", \"--session-token\"]\n)\ndef test_auth_option_errors(extra_option):\n    runner = CliRunner()\n    result = runner.invoke(\n        cli,\n        [\"list-users\", \"-a\", \"-\", extra_option, \"blah\"],\n        catch_exceptions=False,\n        input=\"\",\n    )\n    assert result.exit_code == 1\n    assert (\n        result.output\n        == \"Error: --auth cannot be used with --access-key, --secret-key or --session-token\\n\"\n    )\n\n\n@pytest.mark.parametrize(\n    \"options,expected\",\n    (\n        ([], READ_WRITE_POLICY),\n        ([\"--read-only\"], READ_ONLY_POLICY),\n        ([\"--write-only\"], WRITE_ONLY_POLICY),\n        ([\"--prefix\", \"my-prefix/\"], PREFIX_POLICY),\n        (\n            [\n                \"--statement\",\n                '{\"Effect\": \"Allow\", \"Action\": \"textract:*\", \"Resource\": \"*\"}',\n            ],\n            EXTRA_STATEMENTS_POLICY,\n        ),\n    ),\n)\ndef test_policy(options, expected):\n    runner = CliRunner()\n    result = runner.invoke(\n        cli,\n        [\"policy\", \"pytest-bucket-simonw-1\"] + options,\n        catch_exceptions=False,\n    )\n    assert json.loads(result.output) == json.loads(expected)\n\n\n@pytest.mark.parametrize(\n    \"options,expected\",\n    (\n        (\n            [],\n            (\n                \"[\\n\"\n                \"  {\\n\"\n                '    \"Key\": \"yolo-causeway-1.jpg\",\\n'\n                '    \"LastModified\": \"2019-12-26 17:00:22+00:00\",\\n'\n                '    \"ETag\": \"\\\\\"87abea888b22089cabe93a0e17cf34a4\\\\\"\",\\n'\n                '    \"Size\": 5923104,\\n'\n                '    \"StorageClass\": \"STANDARD\"\\n'\n                
\"  },\\n\"\n                \"  {\\n\"\n                '    \"Key\": \"yolo-causeway-2.jpg\",\\n'\n                '    \"LastModified\": \"2019-12-26 17:00:22+00:00\",\\n'\n                '    \"ETag\": \"\\\\\"87abea888b22089cabe93a0e17cf34a4\\\\\"\",\\n'\n                '    \"Size\": 5923104,\\n'\n                '    \"StorageClass\": \"STANDARD\"\\n'\n                \"  }\\n\"\n                \"]\\n\"\n            ),\n        ),\n        (\n            [\"--nl\"],\n            (\n                '{\"Key\": \"yolo-causeway-1.jpg\", \"LastModified\": \"2019-12-26 17:00:22+00:00\", \"ETag\": \"\\\\\"87abea888b22089cabe93a0e17cf34a4\\\\\"\", \"Size\": 5923104, \"StorageClass\": \"STANDARD\"}\\n'\n                '{\"Key\": \"yolo-causeway-2.jpg\", \"LastModified\": \"2019-12-26 17:00:22+00:00\", \"ETag\": \"\\\\\"87abea888b22089cabe93a0e17cf34a4\\\\\"\", \"Size\": 5923104, \"StorageClass\": \"STANDARD\"}\\n'\n            ),\n        ),\n        (\n            [\"--tsv\"],\n            (\n                \"Key\\tLastModified\\tETag\\tSize\\tStorageClass\\tOwner\\n\"\n                'yolo-causeway-1.jpg\\t2019-12-26 17:00:22+00:00\\t\"\"\"87abea888b22089cabe93a0e17cf34a4\"\"\"\\t5923104\\tSTANDARD\\t\\n'\n                'yolo-causeway-2.jpg\\t2019-12-26 17:00:22+00:00\\t\"\"\"87abea888b22089cabe93a0e17cf34a4\"\"\"\\t5923104\\tSTANDARD\\t\\n'\n            ),\n        ),\n        (\n            [\"--csv\"],\n            (\n                \"Key,LastModified,ETag,Size,StorageClass,Owner\\n\"\n                'yolo-causeway-1.jpg,2019-12-26 17:00:22+00:00,\"\"\"87abea888b22089cabe93a0e17cf34a4\"\"\",5923104,STANDARD,\\n'\n                'yolo-causeway-2.jpg,2019-12-26 17:00:22+00:00,\"\"\"87abea888b22089cabe93a0e17cf34a4\"\"\",5923104,STANDARD,\\n'\n            ),\n        ),\n    ),\n)\ndef test_list_bucket(stub_s3, options, expected):\n    stub_s3.add_response(\n        \"list_objects_v2\",\n        {\n            \"Contents\": [\n                {\n         
           \"Key\": \"yolo-causeway-1.jpg\",\n                    \"LastModified\": \"2019-12-26 17:00:22+00:00\",\n                    \"ETag\": '\"87abea888b22089cabe93a0e17cf34a4\"',\n                    \"Size\": 5923104,\n                    \"StorageClass\": \"STANDARD\",\n                },\n                {\n                    \"Key\": \"yolo-causeway-2.jpg\",\n                    \"LastModified\": \"2019-12-26 17:00:22+00:00\",\n                    \"ETag\": '\"87abea888b22089cabe93a0e17cf34a4\"',\n                    \"Size\": 5923104,\n                    \"StorageClass\": \"STANDARD\",\n                },\n            ]\n        },\n    )\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-bucket\", \"test-bucket\"] + options)\n        assert result.exit_code == 0\n        assert result.output == expected\n\n\ndef test_list_bucket_empty(stub_s3):\n    stub_s3.add_response(\"list_objects_v2\", {})\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-bucket\", \"test-bucket\"])\n        assert result.exit_code == 0\n        assert result.output == \"[]\\n\"\n\n\n@pytest.fixture\ndef stub_iam_for_list_roles(stub_iam):\n    stub_iam.add_response(\n        \"list_roles\",\n        {\n            \"Roles\": [\n                {\n                    \"RoleName\": \"role-one\",\n                    \"Path\": \"/\",\n                    \"Arn\": \"arn:aws:iam::462092780466:role/role-one\",\n                    \"RoleId\": \"36b2eeee501c5952a8ac119f9e521\",\n                    \"CreateDate\": \"2020-01-01 00:00:00+00:00\",\n                }\n            ]\n        },\n    )\n    stub_iam.add_response(\n        \"list_role_policies\",\n        {\"PolicyNames\": [\"policy-one\"]},\n    )\n    stub_iam.add_response(\n        \"get_role_policy\",\n        {\n            \"RoleName\": \"role-one\",\n            \"PolicyName\": \"policy-one\",\n      
      \"PolicyDocument\": '{\"foo\": \"bar}',\n        },\n    )\n    stub_iam.add_response(\n        \"list_attached_role_policies\",\n        {\"AttachedPolicies\": [{\"PolicyArn\": \"arn:123:must-be-at-least-tweny-chars\"}]},\n    )\n    stub_iam.add_response(\n        \"get_policy\",\n        {\"Policy\": {\"DefaultVersionId\": \"v1\"}},\n    )\n    stub_iam.add_response(\n        \"get_policy_version\",\n        {\"PolicyVersion\": {\"CreateDate\": \"2020-01-01 00:00:00+00:00\"}},\n    )\n\n\n@pytest.mark.parametrize(\"details\", (False, True))\ndef test_list_roles_details(stub_iam_for_list_roles, details):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-roles\"] + ([\"--details\"] if details else []))\n        assert result.exit_code == 0\n        expected = {\n            \"RoleName\": \"role-one\",\n            \"Path\": \"/\",\n            \"Arn\": \"arn:aws:iam::462092780466:role/role-one\",\n            \"RoleId\": \"36b2eeee501c5952a8ac119f9e521\",\n            \"CreateDate\": \"2020-01-01 00:00:00+00:00\",\n            \"inline_policies\": [\n                {\n                    \"RoleName\": \"role-one\",\n                    \"PolicyName\": \"policy-one\",\n                    \"PolicyDocument\": '{\"foo\": \"bar}',\n                }\n            ],\n            \"attached_policies\": [\n                {\n                    \"DefaultVersionId\": \"v1\",\n                    \"PolicyVersion\": {\"CreateDate\": \"2020-01-01 00:00:00+00:00\"},\n                }\n            ],\n        }\n        if not details:\n            expected.pop(\"inline_policies\")\n            expected.pop(\"attached_policies\")\n        assert json.loads(result.output) == [expected]\n\n\ndef test_list_roles_csv(stub_iam_for_list_roles):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(cli, [\"list-roles\", \"--csv\", \"--details\"])\n        assert 
result.exit_code == 0\n    assert result.output == (\n        \"Path,RoleName,RoleId,Arn,CreateDate,AssumeRolePolicyDocument,Description,MaxSessionDuration,PermissionsBoundary,Tags,RoleLastUsed,inline_policies,attached_policies\\n\"\n        '/,role-one,36b2eeee501c5952a8ac119f9e521,arn:aws:iam::462092780466:role/role-one,2020-01-01 00:00:00+00:00,,,,,,,\"[\\n'\n        \"  {\\n\"\n        '    \"\"RoleName\"\": \"\"role-one\"\",\\n'\n        '    \"\"PolicyName\"\": \"\"policy-one\"\",\\n'\n        '    \"\"PolicyDocument\"\": \"\"{\\\\\"\"foo\\\\\"\": \\\\\"\"bar}\"\"\\n'\n        \"  }\\n\"\n        ']\",\"[\\n'\n        \"  {\\n\"\n        '    \"\"DefaultVersionId\"\": \"\"v1\"\",\\n'\n        '    \"\"PolicyVersion\"\": {\\n'\n        '      \"\"CreateDate\"\": \"\"2020-01-01 00:00:00+00:00\"\"\\n'\n        \"    }\\n\"\n        \"  }\\n\"\n        ']\"\\n'\n    )\n\n\n@pytest.mark.parametrize(\n    \"files,patterns,expected,error\",\n    (\n        # Without arguments return everything\n        (None, None, {\"one.txt\", \"directory/two.txt\", \"directory/three.json\"}, None),\n        # Positional arguments returns files\n        ([\"one.txt\"], None, {\"one.txt\"}, None),\n        ([\"directory/two.txt\"], None, {\"directory/two.txt\"}, None),\n        ([\"one.txt\"], None, {\"one.txt\"}, None),\n        (\n            [\"directory/two.txt\", \"directory/three.json\"],\n            None,\n            {\"directory/two.txt\", \"directory/three.json\"},\n            None,\n        ),\n        # Invalid positional argument downloads file and shows error\n        (\n            [\"directory/two.txt\", \"directory/bad.json\"],\n            None,\n            {\"directory/two.txt\"},\n            \"Not found: directory/bad.json\",\n        ),\n        # --pattern returns files matching pattern\n        (None, [\"*e.txt\"], {\"one.txt\"}, None),\n        (None, [\"*e.txt\", \"invalid-pattern\"], {\"one.txt\"}, None),\n        (None, [\"directory/*\"], 
{\"directory/two.txt\", \"directory/three.json\"}, None),\n        # positional and patterns can be combined\n        ([\"one.txt\"], [\"directory/*.json\"], {\"one.txt\", \"directory/three.json\"}, None),\n    ),\n)\n@pytest.mark.parametrize(\"output\", (None, \"out\"))\ndef test_get_objects(moto_s3_populated, output, files, patterns, expected, error):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        args = [\"get-objects\", \"my-bucket\"] + (files or [])\n        if patterns:\n            for pattern in patterns:\n                args.extend([\"--pattern\", pattern])\n        if output:\n            args.extend([\"--output\", output])\n        result = runner.invoke(cli, args, catch_exceptions=False)\n        if error:\n            assert result.exit_code != 0\n        else:\n            assert result.exit_code == 0\n        # Build list of all files in output directory using glob\n        output_dir = pathlib.Path(output or \".\")\n        all_files = {\n            str(p.relative_to(output_dir))\n            for p in output_dir.glob(\"**/*\")\n            if p.is_file()\n        }\n        assert all_files == expected\n        if error:\n            assert error in result.output\n\n\n@pytest.mark.parametrize(\n    \"args,expected,expected_output\",\n    (\n        ([\".\"], {\"one.txt\", \"directory/two.txt\", \"directory/three.json\"}, None),\n        ([\"one.txt\"], {\"one.txt\"}, None),\n        ([\"directory\"], {\"directory/two.txt\", \"directory/three.json\"}, None),\n        (\n            [\"directory\", \"--prefix\", \"o\"],\n            {\"o/directory/two.txt\", \"o/directory/three.json\"},\n            None,\n        ),\n        # --dry-run tests\n        (\n            [\"directory\", \"--prefix\", \"o\", \"--dry-run\"],\n            None,\n            (\n                \"directory/two.txt => s3://my-bucket/o/directory/two.txt\\n\"\n                \"directory/three.json => s3://my-bucket/o/directory/three.json\\n\"\n      
      ),\n        ),\n        (\n            [\".\", \"--prefix\", \"p\"],\n            {\"p/one.txt\", \"p/directory/two.txt\", \"p/directory/three.json\"},\n            None,\n        ),\n    ),\n)\ndef test_put_objects(moto_s3, args, expected, expected_output):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        # Create files\n        pathlib.Path(\"one.txt\").write_text(\"one\")\n        pathlib.Path(\"directory\").mkdir()\n        pathlib.Path(\"directory/two.txt\").write_text(\"two\")\n        pathlib.Path(\"directory/three.json\").write_text('{\"three\": 3}')\n        result = runner.invoke(\n            cli, [\"put-objects\", \"my-bucket\"] + args, catch_exceptions=False\n        )\n        assert result.exit_code == 0, result.output\n        if expected_output:\n            # Check all expected output lines are present (order may vary)\n            for line in expected_output.strip().split(\"\\n\"):\n                assert line in result.output\n        # Check files were uploaded\n        keys = {\n            obj[\"Key\"]\n            for obj in moto_s3.list_objects(Bucket=\"my-bucket\").get(\"Contents\") or []\n        }\n        assert keys == (expected or set())\n\n\n@pytest.mark.parametrize(\n    \"args,expected,expected_error\",\n    (\n        ([], None, \"Error: Specify one or more keys or use --prefix\"),\n        (\n            [\"one.txt\", \"--prefix\", \"directory/\"],\n            None,\n            \"Cannot pass both keys and --prefix\",\n        ),\n        ([\"one.txt\"], [\"directory/two.txt\", \"directory/three.json\"], None),\n        ([\"one.txt\", \"directory/two.txt\"], [\"directory/three.json\"], None),\n        ([\"--prefix\", \"directory/\"], [\"one.txt\"], None),\n    ),\n)\ndef test_delete_objects(moto_s3_populated, args, expected, expected_error):\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        result = runner.invoke(\n            cli, [\"delete-objects\", \"my-bucket\"] + 
args, catch_exceptions=False\n        )\n        if expected_error:\n            assert result.exit_code != 0\n            assert expected_error in result.output\n        else:\n            assert result.exit_code == 0, result.output\n            # Check expected files are left in bucket\n            keys = {\n                obj[\"Key\"]\n                for obj in moto_s3_populated.list_objects(Bucket=\"my-bucket\").get(\n                    \"Contents\"\n                )\n                or []\n            }\n            assert keys == set(expected)\n\n\n@pytest.mark.parametrize(\"arg\", (\"-d\", \"--dry-run\"))\ndef test_delete_objects_dry_run(moto_s3_populated, arg):\n    runner = CliRunner()\n\n    def get_keys():\n        return {\n            obj[\"Key\"]\n            for obj in moto_s3_populated.list_objects(Bucket=\"my-bucket\").get(\n                \"Contents\"\n            )\n            or []\n        }\n\n    with runner.isolated_filesystem():\n        before_keys = get_keys()\n        result = runner.invoke(\n            cli, [\"delete-objects\", \"my-bucket\", \"--prefix\", \"directory/\", arg]\n        )\n        assert result.exit_code == 0\n        assert \"The following keys would be deleted:\" in result.output\n        assert \"directory/three.json\" in result.output\n        assert \"directory/two.txt\" in result.output\n        after_keys = get_keys()\n        assert before_keys == after_keys\n"
  }
]