[
  {
    "path": ".circleci/config.yml",
    "content": "#Copyright 2017 Reactive Ops Inc.\n#\n#Licensed under the Apache License, Version 2.0 (the “License”);\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an “AS IS” BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nversion: 2\n\njobs:\n  build:\n    docker:\n      - image: circleci/python:2\n    working_directory: ~/pentagon\n    steps:\n      - run:\n          name: Setup PATH to support pip user installs\n          command: echo 'export PATH=$PATH:/home/circleci/.local/bin' >> $BASH_ENV\n      - checkout\n      - run:\n          name: Test Migration\n          command: |\n            # this fails but that is the intent hence \"|| true\"\n            last_version=$(pip install pentagon==  2>&1 | grep \"Could not find\" | awk -F',' '{ print $(NF -1) }' | sed s/[[:blank:]]/''/g) || true\n            pip install --user pentagon==${last_version}\n            # nohup to get rid of interactive and thus prompts\n            nohup pentagon start-project migration-test --aws-access-key=fake --aws-secret-key=fake\n            cd migration-test-infrastructure\n            export INFRASTRUCTURE_REPO=$(pwd)\n            cd inventory/default/clusters/production\n            nohup pentagon add kops.cluster -f vars.yml -o cluster-config\n            cd $INFRASTRUCTURE_REPO\n            # faking git config. The repo must have at least one commit for the migration to work\n            git add . 
&& git -c user.name='fake' -c user.email='fake@email.org' commit -m 'initial commit'\n            pip install --user ~/pentagon\n            pentagon --version\n            pentagon migrate --yes\n      - run:\n          name: Unit Tests\n          command: |\n            pip install --user -r ${HOME}/pentagon/tests/requirements.txt\n            nosetests\n      - run:\n          name: Test Start Project\n          command: |\n            nohup pentagon start-project circleci-test --aws-access-key=fake --aws-secret-key=fake\n\n  release:\n    docker:\n      - image: circleci/python:2\n    environment:\n      PYPI_USERNAME: ReactiveOps\n      GITHUB_ORGANIZATION: $CIRCLE_PROJECT_USERNAME\n      GITHUB_REPOSITORY: $CIRCLE_PROJECT_REPONAME\n    working_directory: ~/pentagon\n    steps:\n      - checkout\n      - run:\n          name: init .pypirc\n          command: |\n            echo -e \"[pypi]\" >> ~/.pypirc\n            echo -e \"username = $PYPI_USERNAME\" >> ~/.pypirc\n            echo -e \"password = $PYPI_PASSWORD\" >> ~/.pypirc\n      - run:\n          name: create release\n          command: |\n            git fetch --tags\n            curl -O https://raw.githubusercontent.com/reactiveops/release.sh/v0.0.2/release\n            /bin/bash release || true\n      - run:\n          name: package and upload\n          command: |\n            sudo pip install twine\n            python setup.py sdist bdist_wheel\n            twine upload dist/*\n\nworkflows:\n  version: 2\n  build:\n    jobs:\n      - build:\n          filters:\n            tags:\n              only: /.*/\n            branches:\n              only: /.*/\n      - release:\n          requires:\n            - build\n          filters:\n            tags:\n              only: /.*/\n            branches:\n              ignore: /.*/\n\n\n"
  },
  {
    "path": ".github/stale.yml",
    "content": "# Number of days of inactivity before an issue becomes stale\ndaysUntilStale: 60\n# Number of days of inactivity before a stale issue is closed\ndaysUntilClose: 7\n# Issues with these labels will never be considered stale\nexemptLabels:\n  - pinned\n  - security\n# Label to use when marking an issue as stale\nstaleLabel: wontfix\n# Comment to post when marking an issue as stale. Set to `false` to disable\nmarkComment: >\n  This issue has been automatically marked as stale because it has not had\n  recent activity. It will be closed if no further activity occurs. Thank you\n  for your contributions.\n# Comment to post when closing a stale issue. Set to `false` to disable\ncloseComment: false\n"
  },
  {
    "path": ".gitignore",
    "content": ".DS_Store\n.terraform\nconfig/private\n*.pyc\n*.pem\n*.pub\npentagon.egg-info\n.vscode\nvenv\ndist\nbuild\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)\nand this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).\n\n## 3.1.4\n### Fixes\n- A required GCP start-project param was accidentally removed on a refactor of the CLI. Added it back.\n\n## 3.1.3\n### Changed\n- Update dependencies\n\n## 3.1.2\n### Fixed\n- Changed `defaults.node_count` from 3 to 1, so that only 3 total nodes (one per `InstanceGroup`) are created\n\n## 3.1.1\n### Fixed\n- In certain cases a migration would cause duplicate hooks\n- In certain cases, migrations were not run because kops.sh had been deleted\n\n## 3.1.0\n### Fixed\n- issue where prompt=true was not respecting the default values \n- display of option values was munging booleans\n\n### Added\n- Migration to enable kops hook that patches runc\n- validation of prompted valued for click to ensure non-empty strings\n\n## 3.0.2\n### Changed\n- `TILLER_NAMESPACE` is now set to `tiller` by default\n\n## 3.0.1\n### Fixed\n- Non-populating values for kubernetes version in gcp deploys\n- Bucket not required values for gcp deploys\n\n## 3.0.0\n### Added\n- Support for GCP / GKE terraform templates on inventory init\n\n### Changed\n- Now all pentagon runs will confirm all the values that are set and what the values are set to (one step closer to better transparency)\n\n## 2.7.3\n### Fixed\n- missing imports for latest migration\n\n## 2.7.2\n### Added\n- Instructions on how to setup the development environment.\n- revised cli help text\n- migrations for kops settings that were missed in the last migration\n- made `anonymousAuth: false` default for Kops clusters. This currently conflicts with metricserver version > 3.0.0\n\n## 2.7.1\n### Fixed\n- migration\n\n\n## 2.7.0 - 2019-1-3\n\n## Updated\n- add aws-iam-authenticator to kops spec by default\n- Etcd now at version 3 in Kops spec\n- default to multiple az instance groups for Kops\n- updated generated docs\n\n## Fixed\n- kops availability zone calculation\n\n\n## 2.6.1 - 2018-10-30\n\n## Fixed\n- Remove deprecated VPC Terraform module variables.\n\n## 2.6.0 - 2018-10-29\n\n### Updated\n- Bumped default VPC Terraform module to version 3.0.0. Removes AWS provider from module in favor of inferred provider.\n\n## 2.5.0 - 2018-10-26\n\n## Fixed\n- add new inventory now creates a more complete inventory instead of an empty one\n- component arguments may now have '-' or '_'\n\n## Updated\n- Docs\n\n## Added\n- 'project_name' arg to some components and to the `config.yml` that gets written on 'start-project'\n\n## 2.4.3 - 2018-10-16\n\n### Fixed\n- bug where cli -D were not begin passed properly\n\n## 2.4.2 - 2018-10-15\n### Updated\n- Default Kops settings to improve security and auditing\n\n### Fixed\n- Reading from config fil\n- Templating local path for ssh_config\n- Installation requirements\n- Worker and Master variable name for kubernetes arguements\n\n### Added \n\n### Removed\n- Makefiles\n\n## [2.4.1] - 2018-09-21\n### Updated\n\n### Fixed\n\n### Added\n- PyPi upload to circleci config\n\n## [2.4.0] - 2018-8-21\n\n### Updated\n- replaced PyYaml with oyaml and added capability to have multidocument yaml files for component declarations\n- Kops cluster `authorization` default changed to rbac\n- Updated the inventory config to refer to `${INVENTORY}` vs assigning the `{{name}}` statically. `pentagon/component/inventory/files/common/config/local/vars.yml.jinja`\n\n### Fixed\n- `kubernetes_version` parameter value wasn't applying to the kops cluster config from `values.yml` file\n\n## [2.3.1] - 2018-5-30\n\n### Fixed\n- Version dependancies\n\n## [2.3.0] - 2018-5-30\n\n### Added\n- Some better behavior with migrations where a patch is made but not changes in structure was made\n\n### Updated\n- Allowed more value to be optional in the kops templates\n- Updated docs\n- Bumped terraform-vpc module source version\n\n### Fixed\n- Issue where kops clusters were created with the same network cidr\n\n## [2.2.1] - 2018-4-9\n\n## Removed `auto-approve` from terraform Makefile\n\n## [2.2.0] - 2018-3-30\n\n### Added\n- colorful logging\n- bug fixes and better support for GCP infrastructure\n- `--gcp-revion` as part of the above change\n\n### Updated\n- `yaml_source` no longer throws errors when file is empty, just logs a message\n- made the component class location method more flexible\n- reorganized terraform files and made terraform a first class citizen and part of the `inventory.Inventory` component\n- renamed vpc.VPC component to aws_vpc.AWSVpc as part of above change\n- reorganize the defaul `secrets.yml` and removed unnecessary lines\n\n\n## [2.1.0] - 2018-2-27\n\n## Added\n- `--version` flag to output version\n- added cluster auto scaling iam policies by default\n- added `--cloud` flag and supporting flags to create GCP/GKE infrastructure\n\n### Updated\n- Version handling in setup.py\n- Updated yaml loader for config file reading to force string behavior\n- Inventory component will use -D name= as the targe directory instead needing -o. \n- Inventory -D account replaced with -D name\n\n\n## [2.0.0] - 2018-2-1\n### Added\n- `yaml_source` script to replace env-vars.sh\n- Environment variables are now checked in ComponentBase class\n- Defaults to component\n- overwrite to template rendering\n- added inventory component\n- added vpn component\n\n### Removed\n- env-vars.sh script\n- untracked roles directory for ansible\n\n### Updated \n- makefile to support `yaml_source` change\n- added distutil.dir_util to allow overwriting exisint directories\n- added exit on failure for ComponentBase class\n- added default config out file for Pentaong start-project\n- updated config file output to sanitize and not include blank values\n\n## [1.2.0] - 2017-11-8\n### Added\n- Added kops component\n\n### Changed\n- Added VPN name to include project name. Allows multiple VPN instances per VPC\n- Set default versions to ansible roles\n- Updated default kops cluster templates to use new kops component\n- Updated make file to use Terraform outputs and improve robustness of creat and destroy\n- Fixed legacy authorization bug in gcp coponent\n\n### Removed\n- Removed the older kops cluster creation\n\n\n## [1.1.0] - 2017-10-4\n### Added\n- Added Changelog\n- Added `add` method to `pentagon` command line\n- Added component base class\n- Added GCP and VPC components\n- Added Example component\n\n### Changed\n- Changed VPC directory creation to utilize component class instead of \n- Change Click libary usage to \"setup tools\" method\n\n### Removed\n- Section about \"changelog\" vs \"CHANGELOG\".\n\n## [1.0.0]\n\n### Added\n- First open source version of Pentagon\n"
  },
  {
    "path": "CODEOWNERS",
    "content": "*  @ejether @endzyme\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and maintainers pledge to making participation in our project and\nour community a harassment-free experience for everyone, regardless of age, body\nsize, disability, ethnicity, gender identity and expression, level of experience,\nnationality, personal appearance, race, religion, or sexual identity and\norientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment\ninclude:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or\nadvances\n* Trolling, insulting/derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic\n  address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a\n  professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable\nbehavior and are expected to take appropriate and fair corrective action in\nresponse to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or\nreject comments, commits, code, wiki edits, issues, and other contributions\nthat are not aligned to this Code of Conduct, or to ban temporarily or\npermanently any contributor for other behaviors that they deem inappropriate,\nthreatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces\nwhen an individual is representing the project or its community. Examples of\nrepresenting a project or community include using an official project e-mail\naddress, posting via an official social media account, or acting as an appointed\nrepresentative at an online or offline event. Representation of a project may be\nfurther defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be\nreported by contacting the project team at [INSERT EMAIL ADDRESS]. All\ncomplaints will be reviewed and investigated and will result in a response that\nis deemed necessary and appropriate to the circumstances. The project team is\nobligated to maintain confidentiality with regard to the reporter of an incident.\nFurther details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good\nfaith may face temporary or permanent repercussions as determined by other\nmembers of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,\navailable at [http://contributor-covenant.org/version/1/4][version]\n\n[homepage]: http://contributor-covenant.org\n[version]: http://contributor-covenant.org/version/1/4/\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# How to contribute\n\nIssues, whether bugs, tasks, or feature requests are essential for keeping Pentagon (and ReactiveOps in general) great. We believe it should be as easy as possible to contribute changes that\nget things working in your environment. There are a few guidelines that we\nneed contributors to follow so that we can have a chance of keeping on\ntop of things.\n\n## Setting up your development environment\n\n1. Clone this repo and cd into it\n    ```\n    git clone git@github.com:reactiveops/pentagon.git\n    cd pentagon\n    ```\n2. Create a virtual environment and source it. You need to source everytime you want to develop pentagon.\n    ```\n    virtualenv venv \n    source venv/bin/activate\n    ```\n3. Finally, install pentagon into the venv. The `-e` means that it will take any of your file changes into account.\n    ```\n    pip install -e . \n    ```\n4. If you run `which pentagon` it should point at the venv inside the newly created repo.\n    ```\n    $ which pentagon\n    .../pentagon/venv/bin/pentagon\n    ```\n\n\n## Getting Started\n\n* Submit a ticket for your issue, assuming one does not already exist.\n  * Clearly describe the issue including steps to reproduce when it is a bug.\n  * Apply the appropriate labels, whether it is bug, feature, or task.\n\n## Making Changes\n\n* Create a feature branch from where you want to base your work.\n  * This is usually the master branch.\n  * To quickly create a topic branch based on master; `git checkout -b\n    feature master`. Please avoid working directly on the\n    `master` branch.\n* Try to make commits of logical units.\n* Make sure you have added the necessary tests for your changes (coming soon).\n* Make sure you have added any required documentation changes.\n\n## Making Trivial Changes\n\n### Documentation\n\nFor changes of a trivial nature to comments and documentation, it is not\nalways necessary to create a new issue in GitHub. In these cases, a branch with pull request is sufficient.\n\n## Submitting Changes\n\n* Push your changes to a topic branch.\n* Submit a pull request.\n* Update the issue with the `PR-available` label to mark that you have submitted code and are ready for it to be reviewed, and include a link to the pull request in the ticket.\n\n\nAttribution\n===========\nPortions of this text are copied from the [Puppet Contributing](https://github.com/puppetlabs/puppet/blob/master/CONTRIBUTING.md) documentation.\n"
  },
  {
    "path": "DESIGN.md",
    "content": "# Pentagon Design Document:\n\n## Intent\n\nPentagon is a framework for generating an Infrastructure As Code Repository (IACR). It is intended to provide a flexible and meaningful hierarchical structure to manage cloud infrastructure using a common set of tools. At ReactiveOps we use Pentagon generated IACRs to manage and maintain our client's cloud infrastructure. Our practice and experience has driven us to devise a highly flexible, highly repeatable framework that ensures uniformity of process. Pentagon has grown from a series of sensible decisions about how an IACR is “shaped”. It has a strict organization that is intended to enable automation and remain flexible to a wide variety of clouds, network, clusters and to provide a thoughtful structure for external resources.\n\n## Key Design Elements\n\n### Pentagon is a framework for components that are generators.\n\nIt is loosely modeled after Rails or Django and aims to provide an extensible framework for component modules. These component modules may be native or external but when external modules are installed, the interface is transparent to the user. Pentagon generators produce configuration files that should have sensible defaults provided for most values, but can be overridden by configuration.\n\n### Pentagon provides a way to keep your IACRs up to date.\n\nAs new decisions are made, new features are added, and standards or requirements change, it is important to keep your IACR up to date. As Pentagon versions changes, so should your IACRs. Pentagon provides a migration framework so that updating the configuration and content of your IACR is defined in code. Any structure or code change should involve a new versioned migration. Exceptions may be where an update would be a breaking change or where large scale recreation of assets is required.\n\n## Scope\n\n### In Scope:\n\n- Any process or component module that templates or creates files and directories for use within the context of the IACR\n- Migrations to update standards and defaults in an older IACR to a newer version\n- Read only interaction with infrastructure resources\n\n### Out of Scope:\n\n- Deep documentation how to use the supporting tools (terraform, ansible, kops etc)\n- Automations and scripts to support workflows for infrastructure management practices\n- Tooling to support interaction with the infrastructure repository\n- Creating, or modifying any infrastructure resources\n\n## Architecture\n\nTBD\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM ubuntu:16.04\n\nRUN apt-get update && apt-get install software-properties-common -y\nRUN apt-add-repository ppa:ansible/ansible -y && apt-get update\nRUN apt-get install -y ansible git python-dev python-pip python-dev libffi-dev libssl-dev wget vim zip openvpn awscli jq\n\n\nRUN wget https://releases.hashicorp.com/terraform/0.10.0/terraform_0.10.0_linux_amd64.zip && unzip terraform_0.10.0_linux_amd64.zip && mv terraform /usr/local/bin/\n\nRUN wget https://github.com/kubernetes/kops/releases/download/1.6.1/kops-linux-amd64 && \\\n    chmod +x kops-linux-amd64 &&\\\n    mv kops-linux-amd64 /usr/local/bin/kops\n\nRUN mkdir -p /pentagon \nCOPY . /pentagon/\n\nRUN pip install -U -e  ./pentagon"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2017, Reactive Ops Inc.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "recursive-include pentagon/component *"
  },
  {
    "path": "README.md",
    "content": "# Pentagon\n\n# *Pentagon has been deprecated and will no longer be maintained.*\n\n## What is Pentagon?\n\n**Pentagon is a cli tool to generate repeatable, cloud-based [Kubernetes](https://kubernetes.io/) infrastructure.**\nIt can be used as a “batteries included” default which can:\n- provide a network with a cluster\n- Two HA KOPS based Kubernetes clusters\n- Segregated multiple development / non-production environments\n- VPN-based access control\n- A highly-available network, built across multiple Availability Zones\n\n## How does it work?\n **Pentagon produces a directory.** The directory defines a basic set of configurations for [Ansible](https://www.ansible.com/), [Terraform](https://www.terraform.io/) and [kops](https://github.com/kubernetes/kops)). When those tools are run in a specific order the result is a VPC with a VPN and a Kubernetes Cluster in AWS. GKE Support is built in but not default. It is designed to be customizable while at the same time built with defaults that fit the needs of most web application companies.\n\n\n## Getting Started\n\nThe [Getting Started](docs/getting-started.md) has information about installing Pentagon and creating your first project.\n\nTable Of Contents\n=================\n\n* [Requirements](docs/getting-started.md#requirements)\n* [Installation](docs/getting-started.md#installation)\n* [Quick Start Guide](docs/getting-started.md)\n  * [VPC](docs/getting-started.md#vpc-setup)\n  * [VPN](docs/getting-started.md#vpn-setup)\n  * [KOPS](docs/getting-started.md#kops)\n* [Advanced Usage](docs/getting-started.md#advanced-project-initialization)\n* [Infrastrucure Repository Overview](docs/overview.md)\n* [Component](docs/components.md)\n\n\n## AWS Virtual Private Cloud\n\nA VPC configuration is provided with Terraform. Details can be found on the [VPC Setup Page](docs/vpc.md).\n\n## Virtual Private Network\n\nConfiguration is provided for an OpenVPN setup in the VPC. 
Details can be found on the [VPN Setup Page](docs/vpn.md).\n\n\n\n[![CLA assistant](https://cla-assistant.io/readme/badge/reactiveops/pentagon)](https://cla-assistant.io/reactiveops/pentagon)\n"
  },
  {
    "path": "bin/yaml_source",
    "content": "#!/bin/bash -e\n\nusage=\"$0 file [unset] --   Where file.yml is a yml file of key value pairs\n  Sets environment variable where Key is the variable name and Value is its value\n  If unset is used, it unsets the keys in the file\n\"\n\nvars_file=$1\n\nget_keys() {\n  cat $vars_file| shyaml keys\n}\n\nset_vars() {\nfor key in `get_keys`\ndo\n  raw_value=$(cat $vars_file | shyaml get-value $key)\n  # some values in vars.yml use other variables that need to be dereferenced\n  dereferenced_value=$(eval echo $raw_value)\n  export $key=\"${dereferenced_value}\"\ndone\n}\n\nunset_vars() {\n  for key in `get_keys`\n  do\n    unset \"${key}\"\n  done\n}\n\nif  [ -z $1 ]\nthen\n  echo $usage\nelif [ ! -f \"${vars_file}\" ]\nthen\n  echo $vars_file does not exist.\nelif [ ! -s \"${vars_file}\" ]\nthen\n  echo $vars_file is empty\nelse\n  if  [[  $2 == 'unset' ]] ; then\n    unset_vars\n  else\n    set_vars\n  fi\nfi\n"
  },
  {
    "path": "docs/_config.yml",
    "content": "theme: jekyll-theme-dinky"
  },
  {
    "path": "docs/components.md",
    "content": "# Pentagon Components\n\nThe functionality of Pentagon can be extended with components. Currently only two commands are accepted `add` and `get`. Data is passed to the compenent in `Key=Value` pairs and `-D` flag or from a datafile in yml or json format. For some components, environment variables may also be used. See documentation for the particular component.\n\nGlobal options for both `get` and `add` component commands:\n\n```\nUsage: pentagon [add|get] [OPTIONS] COMPONENT_PATH [ADDITIONAL_ARGS]...\n\nOptions:\n  -D, --data TEXT   Individual Key=Value pairs used by the component. There should be no spaces surrounding the `=`\n  -f, --file TEXT   File to read Key=Value pair from (yaml or json are\n                    supported)\n  -o, --out TEXT    Path to output module result, if any\n  --log-level TEXT  Log Level DEBUG,INFO,WARN,ERROR\n  --help            Show this message and exit.\n```\n\n## Built in components\n\n### gcp.cluster\n\n**This component is deprecated and not maintained. We are working on a new Terraform module to manage GKE clusters. Use this at your own risk**\n\n- add:\n    - Creates `./<cluster_name>/create_cluster.sh` compiled from the data passed in.\n    - `bash ./<cluster_name>/create_cluster.sh` will create the cluster as configured.\n    - Argument keys are lower case, underscore separated version of the [gcloud container cluster create](https://cloud.google.com/sdk/gcloud/reference/beta/container/clusters/create) command.\n    - If a `-f` file is passed in, data are merged with `-D` values ovveriding the file values.\n    - Example:\n        ```\n        pentagon --log-level=DEBUG add  gcp.cluster -D cluster_name=\"reactiveopsio-cluster\" -D project=\"reactiveopsio\" -D network=\"temp-network\" -o ./demo -D node_locations=\"us-central1-a,us-central1-b\" -D zone=us-central1-a\n        ```\n\n- get:\n    - Creates `./<cluster_name>/create_cluster.sh` by querying the state of an existing cluster and parsing values. 
For when you have an existing cluster that you want to capture its configuration.\n    - Creates `./<cluster_name>/node_pools/<node_pool_name>/create_nodepool.sh` for any nodepools that are not named `default-pool`. Set `-D get_default_nodepools=true` to capture configuration of `default-pool`. This is typically unnecessary as the `create_cluster.sh` will already contain the configuration of the `default-pool`\n    - `bash ./<cluster_name>/create_cluster.sh` will result in an error indicating the cluster is already present.\n    - Argument keys are lower case, underscore separated version of the [gcloud container cluster describe](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/describe) command.\n    - If `-f` file is passed in, data are merged with `-D` values overriding the file values\n    - If `cluster` is omitted it will act on all clusters in the project\n    - Example:\n      ```\n      pentagon get gcp.cluster -D project=\"pentagon\" -D zone=\"us-central1-a\" -D cluster=\"pentagon-1\" -D get_default_nodepool=\"true\"\n      ```\n\n### gcp.nodepool\n\n**This component is deprecated and not maintained. We are working on a new Terraform module to manage GKE clusters. 
Use this at your own risk**\n\n\n- add:\n    - Creates `./<nodepool_name>/create_nodepool.sh` compiled from the data passed in.\n    - `bash ./<nodepool_name>/create_nodepool.sh` will create the nodepool as configured\n    - Argument keys are lower case, underscore separated version of the [gcloud container node-pools create](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/create) command\n    - If a `-f` file is passed in, data are merged with `-D` values overriding the file values\n    - Example:\n      ```\n      pentagon add gcp.nodepool -D name=\"pentagon-1-nodepool\" -D project=\"pentagon\" -D zone=\"us-central1-a\" -D additional_zones=\"us-central1-b,us-central1-b\" -D machine_type=\"n1-standard-64\" --enable-autoscaling\n      ```\n\n- get:\n    - Creates `./<nodepool_name>/create_nodepool.sh` by querying the state of an existing cluster nodepool and parsing values. For when you have an existing cluster that you want to capture its configuration.\n    - Creates `./<nodepool_name>/create_nodepool.sh`\n    - `bash ./<nodepool_name>/create_nodepool.sh` will result in an error indicating the cluster is already present.\n    - Argument keys are lower case, underscore separated version of the [gcloud container node-pools describe](https://cloud.google.com/sdk/gcloud/reference/beta/container/node-pools/describe) command\n    - If a `-f` file is passed in, data are merged with `-D` values overriding the file values\n    - If `name` is omitted it will act on all nodepools in the cluster\n    - Example:\n        ```\n        pentagon get gcp.nodepool -D project=\"pentagon\" -D zone=\"us-central1-a\" -D cluster=\"pentagon-1\" -D name=\"pentagon-1-nodepool\"\n        ```\n\n\n### vpc\n\n- add:\n    - Creates `./vpc/` directory with Terraform code for the Pentagon default AWS VPC described [here](#network).\n    - `cd ./vpc; make all` will create the vpc as described by the arguments passed in\n    - In the normal course of using Pentagon and the 
infrastructure repository, it is unlikely you'll use this component as it is automatically installed by default.\n    - Arguments:\n        - vpc_name\n        - vpc_cidr_base\n        - aws_availabilty_zones\n        - aws_availability_zone_count\n        - aws_region\n        - infrastructure_bucket\n    - Without the arguments above, the `add` will complete but the output will be missing values required to create the VPC. You must edit the output files to add those values before it will function properly\n    - Example:\n        ```\n        pentagon add vpc -D vpc_name=\"pentagon-vpc\" -D vpc_cidr_base=\"172.20\" -D aws_availability_zones=\"ap-northeast-1a, ap-northeast-1c\" -D aws_availability_zone_count = \"2\" -D aws_region = \"ap-northeast-1\"\n        ```\n\n### kops.cluster\n\n- add:\n    - Creates yml files in  `./<cluster_name>/` compiled from the data passed in.\n    - `bash ./<cluster_name>/kops.sh` will create the cluster as configured.\n    - Argument/ ConfigFile keys:\n      - `additional_policies`: Additional IAM policies to add to masters, nodes, or both\n      - `vpc_id`: AWS VPC Id of VPC to create cluster in (required)\n      - `cluster_name`: Name of the cluster to create (required)\n      - `kops_state_store_bucket`: Name of the s3 bucket where Kops State will be stored (required)\n      - `cluster_dns`: DNS domain for cluster records (required)\n      - `master_availability_zones`:  List of AWS Availability zones to place masters (required)\n      - `availability_zones`:  List of AWS Availability zones to place nodes (required)\n      - `kubernetes_version`: Version of Kubernetes Kops will install (required)\n      - `nat_gateways`: List of AWS ids of the nat-gateways the Private Kops subnets will use as egress. Must be in the same order as the `availability_zones` from above. 
(required)\n      - `master_node_type`: AWS instance type the masters should be (required)\n      - `worker_node_type`: AWS instance type the default node group should be (required)\n      - `ig_max_size`: Max number of instance in the default node group. (default: 3)\n      - `ig_min_size`: Min number of instance in the default node group. (default: 3)\n      - `ssh_key_path`: Path of public key for ssh access to nodes. (required)\n      - `network_cidr`: VPC cidr for Kops created Kubernetes subnetes (default: 172.0.0.0/16)\n      - `network_cidr_base`: First two octects of the network to template subnet cidrs from  (default: 172.0)\n      - `third_octet`: Starting value for the third octet of the subnet cidrs (default: 16)\n      - `network_mask`: Value for network mask in subnet cidrs (defalt: 24)\n      - `third_octet_increment`: Increment to increase third octet by for each of the Kubernetes subnets (default: 1) By default, the cidr of the first three private subnets will be 172.20.16.0/24, 172.20.17.0/24, 172.20.18.0/24\n      - `authorization`: Authorization type for cluster. 
Allowed values are `alwaysAllow` and `rbac` (default: rbac)\n    - Example Config File\n    ```\n    availability_zones: [eu-west-1a, eu-west-1b, eu-west-1c]\n    additional_policies: |\n      {\n          \"Effect\": \"Allow\",\n          \"Action\": [\n              \"autoscaling:DescribeAutoScalingGroups\",\n              \"autoscaling:DescribeAutoScalingInstances\",\n              \"autoscaling:DescribeTags\",\n              \"autoscaling:SetDesiredCapacity\",\n              \"autoscaling:TerminateInstanceInAutoScalingGroup\"\n          ],\n          \"Resource\": \"*\"\n      }\n    cluster_dns: cluster1.reactiveops.io\n    cluster_name: working-1.cluster1.reactiveops.io\n    ig_max_size: 3\n    ig_min_size: 3\n    kops_state_store_bucket: reactiveops.io-infrastructure\n    kubernetes_version: 1.5.7\n    master_availability_zones: [eu-west-1a, eu-west-1b, eu-west-1c]\n    master_node_type: t2.medium\n    node_type: t2.medium\n    ssh_key_path: ${INFRASTRUCTURE_REPO}/config/private//working-kube.pub\n    vpc_id: vpc-4aa3fa2d\n    network_cidr: 172.0.0.0/16\n    network_cidr_base: 172.0\n    third_octet: 16\n    third_octet_increment: 1\n    network_mask: 24\n    nat_gateways:\n      - nat-0c6ef9261d8ebd788\n      - nat-0de4ec4c946e3b7ce\n      - nat-08806276217bae9b5\n    ```\n    - If a `-f` file is passed in, data are merged with `-D` values overiding the file values.\n    - Example:\n        ```\n        pentagon --log-level=DEBUG add kops.cluster -f `pwd`/vars.yml\n        ```\n- get:\n    - Creates yml files in `./<cluster_name>/create_cluster.sh` by querying the state of an existing cluster and parsing values. For when you have an existing cluster that you want to capture its configuration.\n    - Creates `./<cluster_name>/cluster.yml`, `./<cluster_name>/nodes.yml`, `./<cluster_name>/master.yml`, `./<cluster_name>/secret.sh`\n    - `secret.sh` does not have the content of the secret and will be able re-create the cluster secret if needed. 
You will have to transform the key id into a saved public key.\n    - Arguments:\n      - `name`: Kops cluster name you are getting (required). Argument can also be set through an environment variable called \"CLUSTER_NAME\".\n      - `kops_state_store_bucket`: s3 bucket name where cluster state is stored (required). Argument can also be set through an environment variable called \"KOPS_STATE_STORE_BUCKET\"\n    - Example:\n      ```\n      pentagon get kops.cluster -Dname=working-1.cluster.reactiveops.io -Dkops_state_store=reactiveops.io-infrastructure\n      ```\n\n### inventory\n- add:\n    - Creates account configuration directory. Creates all necessary files in `config`, `clusters` and `resources`. Depending on `type` it may also add a `vpc` component and `vpn` component under `resources`. Creates `clusters` directory but does not create cluster configuration. Use the cluster component for that.\n    - Arguments:\n      - `name`: name of account to add to inventory (required)\n      - `type`: type of account to add to inventory aws or gcp (required).\n      - `project_name`: name of the project the inventory is being added to. (required)\n    - If a `-f` file is passed in, data are merged with `-D` values overriding the file values\n\n    - Example:\n        ```\n        pentagon add inventory -Dtype=aws -Dname=prod -Daws_access_key=KEY -Daws_secret_key=SECRETKEY -Daws_default_region=us-east-1\n        ```\n\n\n\n## Writing your own components\n\nComponent modules must be named `pentagon<component_name>`. Classes are subclasses of the `pentagon.component.ComponentBase` class and they must be named <Component> (note the capital first letter).  The `pentagon add <component_name>` command will prefer built in components to external components so ensure your component name is not already in use. The <component_name> argument can be a dot separated module path i.e. `gcp.cluster` where the last parameter is the lowercase class name. For example, 
`gcp.cluster` finds the Cluster class in the cluster module in the gcp module.\n\nExamples of plugin component package module name and use:\n- pentagon_examplecomponent:\n    *  package name: `pentagon-example-component`\n    *  command: `pentagon add component`\n    *  module path: `pentagon_component`\n    *  class: `Component()`\n- pentagon_kops\n    *  package name: `pentagon-kops`\n    *  command: `pentagon add kops`\n    *  module path: `pentagon_kops`\n    *  class: `Kops()`\n- pentagon_kops.cluster\n    *  package name: `pentagon-kops`\n    *  command: `pentagon add kops.cluster`\n    *  module path:  `pentagon_kops.kops`\n    *  class: `Cluster()`\n\n\nSee [example](/example-component)\n"
  },
  {
    "path": "docs/getting-started.md",
    "content": "# What is Pentagon?\n\n**Pentagon is a cli tool to generate repeatable, cloud-based [Kubernetes](https://kubernetes.io/) infrastructure**.\nPentagon is “batteries included”- not only does one get a network with a cluster, but the defaults include these commonly desired features:\n- At it's core, powered by Kubernetes. Configured to be highly-available: masters and nodes are clustered\n- Segregated multiple development / non-production environments\n- VPN-based access control\n- A highly-available network, built across multiple Availability Zones\n\n## How does it work?\n **Pentagon produces a directory.** The directory defines a basic set of configurations for [Ansible](https://www.ansible.com/), [Terraform](https://www.terraform.io/), and [kops](https://github.com/kubernetes/kops). When these tools are run in a specific order the result is a VPC with a VPN and a Kubernetes cluster in AWS. (GKE Support is in the works). Pentagon is designed to be customizable but has defaults that fit most software infrastructure needs.\n\n# Getting Started with Pentagon\n\n## Requirements\n* python2 >= 2.7 [Install Python](https://www.python.org/downloads/)\n* pip [Install Pip](https://pip.pypa.io/en/stable/installing/)\n* git [Install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)\n* Terraform [Install Terraform ](https://www.terraform.io/downloads.html)\n* Ansible [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)\n* Kubectl [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)\n* kops [Install kops](https://github.com/kubernetes/kops#installing)\n* jq [Install JQ](https://stedolan.github.io/jq/download/)\n\n## Installation\n* `pip install pentagon`\n\n# Basic Usage\n## Quick Start\n### Create a AWS Pentagon Project\n* `pentagon start-project <project-name> --aws-access-key <aws-access-key> --aws-secret-key <aws-secret-key> --aws-default-region <aws-default-region> --dns-zone 
<your-dns-zone-name>`\n### Create a GCP/GKE Pentagon Project\n* `pentagon --log-level=DEBUG start-project --cloud=gcp  <project-name> --gcp-zones=<zone_1>,<zone_2>,..,<zone_n> --gcp-project <gcp_project_name> --gcp-region <gcp_region>`\n###\n* With the above basic options set, defaults will be set for you. See [Advanced Project Initialization](#advanced-project-initialization) for more options.\n  * Arguments may also be set using environment variable in the format `PENTAGON_<argument_name_with_underscores>`.\n  * Or using a yaml file with key value pairs where the key is the option  name\n* Enter the directory project`cd <project-name>-infrastructure`\n\n#### Next steps\nThe `pentagon` commands will take no action in your cloud infrastructure. You will need to run these commands to finish creation of a default project\n\n* `export INFRASTRUCTURE_REPO=$(pwd)`\n* `export INVENTORY=default`\n* `. yaml_source inventory/default/config/local/vars.yml`\n* `. yaml_source inventory/default/config/private/secrets.yml`\n  * Sources environment variables required for the following steps. This will be required each time you work with the infrastructure repository or if you move the repository to another location.\n* `bash inventory/default/config/local/local-config-init`\n* `. yaml_source inventory/default/config/local/vars.yml`\n* If using AWS, create an S3 bucket named `<project-name>-infrastructure` in your AWS account. Terraform will store its state file here. Make sure the AWS IAM user has write access to it.\n  * `aws s3 mb s3://<project-name>-infrastructure`\n\n## AWS\n\n### Create a VPC\nThis creates the VPC and private, public, and admin subnets in that VPC for non Kubernetes resources. 
Read more about networking [here](network.md).\n* `cd inventory/default/terraform`\n* Edit `aws_vpc.auto.tfvars` and verify the generated `aws_azs` actually exist in `aws_region`\n* `terraform init`\n* `terraform plan`\n* `terraform apply`\n* In `inventory/default/clusters/*/vars.yml`, set `VPC_ID` using the newly created VPC ID. You can find that ID in Terraform output or using the AWS web console.\n* Also, add the `aws_nat_gateway_ids` from the Terraform output to `inventory/default/clusters/*/vars.yml` as a list `nat_gateways` \n\n### Configure DNS and Route53\nIf you don't already have a Route53 Hosted Zone configured, do that now.\n* Create a Route53 Hosted Zone (e.g. `pentagon.mycompany.com`)\n* In `inventory/default/clusters/*/vars.yml`, set `dns_zone` to your Hosted Zone (e.g. `pentagon.mycompany.com`)\n\n### Setup a VPN\nThis creates a AWS instance running [OpenVPN](https://openvpn.net/). Read more about the VPN [here](vpn.md).\n* `cd $INFRASTRUCTURE_REPO`\n* `ansible-galaxy install -r ansible-requirements.yml`\n* `cd inventory/default/resources/admin-environment`\n* In `env.yml`, set the list of user names that should have access to the VPN under `openvpn_clients`. You can add more later.\n* Run Ansible a few times\n  * Run `ansible-playbook vpn.yml` until it fails on `VPN security groups`\n  * Run `ansible-playbook vpn.yml` a second time and it will succeed \n  * Edit `inventory/default/config/private/ssh_config` and add the IP address from ansible's output to the `#VPN instance` section.\n\n### Configure a Kubernetes Cluster\nPentagon uses Kops to create clusters in AWS. The default layout creates configurations for two Kubernetes clusters: `working` and `production`. See [Overview](overview.md) for a more comprehensive description of the directory layout.\n\n* Make sure your KOPS variables are set correctly with `. yaml_source inventory/default/config/local/vars.yml && . 
yaml_source inventory/default/config/private/secrets.yml`\n* Move into to the path for the cluster you want to work on with `cd inventory/default/clusters/<production|working>`\n* If you are using the `aws_vpc` Terraform provided, ensure you have set `nat_gateways` in the `vars.yml` for each cluster and that they the order of the `nat_gateway` ids matches the order of the subnets listed. This will ensure that the Kops cluster will have a properly configured network with the private subnets associated to the existing NAT gateways.\n\n### Create Kubernetes Cluster\n* Use the [Kops component](components.md#kopscluster) to create your cluster.\n* By default a `vars.yml` will be created at `inventory/default/clusters/working` and `inventory/default/clusters/production`. Those files are sufficient to create a cluster using the kops.cluster but you will need to enter `nat_gateways` and `vpc_id` as described in [kops component documentation](components.md#kopscluster)\n\n* To generate the cluster configs run `pentagon --log-level=DEBUG add kops.cluster -f vars.yml` in the directory of the cluster you wish to create.\n* To actually create the cluster: `cd cluster` then `kops.sh`\n* Use [kops](https://github.com/kubernetes/kops/blob/master/docs/cli/kops.md) to manage the cluster if necessary.\n  * Run `kops edit cluster <clustername>` to view or edit the `cluster.spec`\n  * You may also wish to edit the instance groups prior to cluster creation:\n    * `kops get instancegroups --name <clustername>` to list them (one master group per AZ and one node group)\n    * `kops edit instancegroups --name <clustername> <instancegroupname>` to edit any of them\n* Run `kops update cluster <clustername>` and review the out put to ensure it matches the cluster you wish to create\n* Run `kops update cluster <clustername> --yes` to create the cluster\n* While waiting for the cluster to create, consult the [kops documentation](https://github.com/kubernetes/kops/blob/master/docs/README.md) for 
more information about using Kops and interacting with your new cluster\n\n## GCP/GKE\n\nThis component is deprecated and not maintained. We are working on a new Terraform module to manage GKE clusters. Use this at your own risk\n\n\n### Initialize Terraform\n* Make backend: `gsutil mb gs://<project_name>-infrastructure`\n* `cd inventory/default/terraform/ && terraform init`\n\n### Create Kubernetes Cluster\n* `cd ${INFRASTRUCTURE_REPO}/inventory/default/clusters/*`\n* `bash create_cluster.sh`\n\n## Creating Resources Outside of Kubernetes\n\nTypically infrastructure will be required outside of your Kubernetes cluster. Other EC2, RDS, or Elasticache instances, etc. are often required for an application.\n\nPentagon convention suggests you use Ansible to create these resources and that the Ansible playbooks can be saved in the `inventory/default/resources/` or the `inventory/default/clusters/<cluster>/resources/` directory. This depends on the scope with which the playbook will be utilized. If the resources are not specific to either cluster, then we suggest you save it at the `default/resources/` level. Likewise, if it is a resource that will only be used by one cluster, such as a staging database or a production database, then we suggest writing the Ansible playbook at the `default/cluster/<cluster>/resources/` level. 
Writing Ansible roles can be very helpful to DRY up your resource configurations.\n\n\n# Advanced Project Initialization\n\nIf you wish to utilize the templating ability of the `pentagon start-project` command, but need to modify the defaults, a comprehensive list of command line flags (listed below) should be able to customize the output of the `pentagon start-project` command to your liking.\n\n\n### Start New Project\n* `pentagon start-project <project-name> <options>`\n  * This will create a skeleton repository with placeholder strings in place of the options shown above in the [QUICK START]\n  * Edit the `config/private/secrets.yml` and `config/local/env.yml` before proceeding onto the next step\n\n### Clone Existing Project\n* `pentagon start-project <project-name> --git-repo <repository-of-existing-project> <options>`\n\n### Available Commands\n* `pentagon start-project`\n\n### _start-project_\n\n `pentagon start-project` creates a new project in your workspace directory and creates a matching virtualenv for you. Most values have defaults that should get you up and running very quickly with a new Pentagon project. You may also clone an existing Pentagon project if one exists.  You may set any of these options as environment variables instead by prefixing them with `PENTAGON_`, for example, for security purposes `PENTAGON_aws_access_key` can be used instead of `--aws-access-key`\n\n #### Options\n  * **-f, --config-file**:\n    * File to read configuration options from.\n    * No default\n    * ***File supercedes command line options.***\n  * **-o, --output-file**:\n    * No default\n  * **--cloud**:\n    * Cloud provider to create default inventory.\n    * Defaults to 'aws'. 
[aws,gcp,none]\n  * **--repository-name**:\n    * Name of the folder to initialize the infrastructure repository\n    * Defaults to `<project-name>-infrastructure`\n  * **--configure / --no-configure:**:\n    * Configure project with default settings\n    * Default to True\n    * If you choose `--no-configure`, placeholder values will be used instead of defaults and you will have to manually edit the configuration files\n  * **--force / --no-force**:\n    * Ignore existing directories and copy project anyway\n    * Defaults to False\n  * **--aws-access-key**:\n    * AWS access key\n    * No Default\n  * **--aws-secret-key**:\n    * AWS secret key\n    * No Default\n  * **--aws-default-region**:\n    * AWS default region\n    * No Default\n    * If the `--aws-default-region` option is set it will allow the default to be set for `--aws-availability-zones` and `--aws-availability-zone-count`\n  * **--aws-availability-zones**:\n    * AWS availability zones as a comma delimited list.\n    * Defaults to `<aws-default-region>a`, `<aws-default-region>b`, ... `<aws-default-region>z` when `--aws-default-region` is set calculated using the `--aws-available-zone-count` value. Otherwise, a placeholder string is used.\n  * **--aws-availability-zone-count**:\n    * Number of availability zones to use\n    * Defaults to 3 when a default region is entered. Otherwise, a placeholder string is used\n  * **--dns-zone**:\n    * DNS Zone of the project. Used for VPN instance and Kubernetes api\n    * Kubernetes dns zones can be overriden with arguments found below\n    * Defaults to `<project-name>.com`\n  * **--infrastructure-bucket**:\n    * Name of S3 Bucket to store state\n    * Defaults to `<project-name>-infrastructure`\n    * pentagon start-project does not create this bucket and it will need to be created\n  * **--git-repo**:\n    * Existing git repository to clone\n    * No Default\n    * ***When --git-repo is set, no configuration actions are taken. 
Pentagon will setup the virutualenv and clone the repository only***\n  * **--create-keys / --no-create-keys**:\n    * Create SSH keys or not\n    * Defaults to True\n    * Keys are saved to `<workspace>/<repository-name>/config/private`\n    * 5 keys will be created:\n      * `admin_vpn`: key for the VPN instances\n      * `working_kube`: key for working Kubernetes instances\n      * `production_kube`: key for production Kubernetes instance\n      * `working_private`: key for non-Kubernetes resources in the working private subnets\n      * `production_private`: key for non-Kubernetes resources in the production private subnets\n    * ***Keys are not uploaded to AWS. When needed, this will need to be done manually***\n  * **--admin-vpn-key**:\n    * Name of the SSH key for the admin user of the VPN instance\n    * Defaults to 'admin_vpn'\n  * **--working-kube-key**:\n    * Name of the SSH key for the working Kubernetes cluster\n    * Defaults to 'working_kube'\n  * **--production-kube-key**:\n    * Name of the SSH key for the production Kubernetes cluster\n    * Defaults to 'production_kube'\n  * **--working-private-key**:\n    * Name of the SSH key for the working non-Kubernetes instances\n    * Defaults to 'working_private'\n  * **--production-private-key**:\n    * Name of the SSH key for the production non-Kubernetes instances\n    * Defaults to 'production_private'\n  * **--vpc-name**:\n    * Name of VPC to create\n    * Defaults to date string in the format `<YYYYMMDD>`\n  * **--vpc-cidr-base**\n    * First two octets of the VPC ip space\n    * Defaults to '172.20'\n  * **--working-kubernetes-cluster-name**:\n    * Name of the working Kubernetes cluster nodes\n    * Defaults to `working-1.<project-name>.com`\n  * **--working-kubernetes-node-count**:\n    * Number of the working Kubernetes cluster nodes\n    * Defaults to 3\n  * **--working-kubernetes-master-aws-zone**:\n    * Availability zone to place the Kube master in\n    * Defaults to the first zone in 
--aws-availability-zones\n  * **--working-kubernetes-master-node-type**:\n    * AWS instance type of the Kube master node in the working cluster\n    * Defaults to t2.medium\n  * **--working-kubernetes-worker-node-type**:\n    * AWS instance type of the Kube worker nodes in the working cluster\n    * Defaults to t2.medium\n  * **--working-kubernetes-dns-zone**:\n    * DNS Zone of the Kubernetes working cluster\n    * Defaults to `working.<project-name>.com`\n  * **--working-kubernetes-v-log-level**:\n    * V Log Level Kubernetes working cluster\n    * Defaults to 10\n  * **--working-kubernetes-network-cidr**:\n    * Network cidr of the Kubernetes working cluster\n    * Defaults to `172.20.0.0/16`\n  * **--production-kubernetes-cluster-name**:\n    * Name of the production Kubernetes cluster nodes\n    * Defaults to `production-1.<project-name>.com`\n  * **--production-kubernetes-node-count**:\n    * Number of the production Kubernetes cluster nodes\n    * Defaults to 3\n  * **--production-kubernetes-master-aws-zone**:\n    * Availability zone to place the Kube master in\n    * Defaults to the first zone in --AWS-availability-zones\n  * **--production-kubernetes-master-node-type**:\n    * AWS instance type of the Kube master node in the production cluster\n    * Defaults to t2.medium\n  * **--production-kubernetes-worker-node-type**:\n    * AWS instance type of the Kube worker nodes in the production cluster\n    * Defaults to t2.medium\n  * **--production-kubernetes-dns-zone**:\n    * DNS Zone of the Kubernetes production cluster\n    * Defaults to `production.<project-name>.com`\n  * **--production-kubernetes-v-log-level**:\n    * V Log Level Kubernetes production cluster\n    * Defaults to 10\n  * **--production-kubernetes-network-cidr**:\n    * Network cidr of the Kubernetes production cluster\n    * Defaults to `172.20.0.0/16`\n  * **--configure-vpn/--no-configure-vpn**:\n    * Do, or do not configure the vpn env.yaml file\n    * Defaults to True\n  * 
**--vpn-ami-id**\n    * AWS ami id to use for the VPN instance\n    * Defaults to looking up ami-id from AWS\n  * **--log-level**:\n    * Pentagon CLI Log Level. Accepts DEBUG,INFO,WARN,ERROR\n    * Defaults to INFO\n  * **--help**:\n    * Show help message and exit.\n  * **--gcp-project**\n    * Google Cloud Project to create clusters in\n    * This argument required when --cloud=gcp\n  * **--gcp-zones**\n    * Google Cloud Project zones to create clusters in. Comma separated list.\n    * This argument required when --cloud=gcp\n  * **--gcp-region**\n    * Google Cloud region to create resources in.\n    * This argument required when --cloud=gcp\n"
  },
  {
    "path": "docs/network.md",
    "content": "# VPC Description\nWe create a base VPC with [terraform-vpc](https://github.com/reactiveops/terraform-vpc) that allocates capacity for AWS-based resources that a client needs to host, including `kubernetes`. We then let `kops` work in the same VPC to carve out a dedicated space for itself so that `kubernetes` is self-contained and manageable.\n\nAfter running `pentagon start-project` you can alter the configuration of the VPC by editing the  `default/vpc/terraform.tfvars` and  `default/vpc/main.tf` files in the infrastructure. You can also configure the VPC using command line arguments to `pentagon start-project`\n\n## VPC\nThe VPC is created by Terraform VPC which sets up a standard RO-style network platform. `kops` is then used to configure and deploy `kubernetes` into this existing VPC.\n\n### Subnets\nPer AZ, terraform-vpc creates 4 subnets: 1 `admin`, 1 `public`, and 2 `private` (one `working` and one `production`). Use these subnets to deploy any resources other than those directly associated with `kubernetes`.\n\nLet `kops` create dedicated public and private subnets that run in parallel to those created by terraform-vpc. Each AZ consists of a pair of kops-defined subnets- `public` and `private`. In `kops edit cluster`, allocate CIDRs of available address space.  \n\n### NAT Gateways\nNAT Gateways are created by terraform-vpc and one is needed for each AZ. You can share a NAT Gateway for use by `kubernetes` and your other AWS-based resources simultaneously. This is the only exception to the separation of `kops` and TF. During `kops edit cluster`, specify the NAT Gateway in the private subnet using the keyword `egress` as shown in the [kops Example networking spec](#kops-example-networking-spec). Egress is currently only useful if you are using private subnets as defined in kops.\n\n## Route tables\nterraform-vpc sets up route tables for all of the standard subnets. 
The `private` subnets default route for external traffic is the NAT Gateway in that zone. The `public` subnets default route is through an Internet Gateway.\n\n`kops` manages the subnets for your `kubernetes` resources so it also manages these route tables. Specifying the NAT Gateway that terraform-vpc created in `egress` will configure the default routes for these subnets to its specified NAT Gateway.\n\nBecause NAT Gateways don't have tags on AWS, `kops` keeps track of this NAT Gateway by AWS-tagging the route table with K=V pair `AssociatedNatGateway=nat-05ee835341f099286`. This is for the delete logic in `kops` that likely wouldn't actually be able to delete the Gateway (because it would still be in use by other routes), but it would attempt to delete it as a \"related resource\".\n\n## Tags\nterraform-vpc tags all of the resources that it creates and manages as `Managed By=Terraform`. Likewise, `kops` tags the resources that it creates and manages with `KubernetesCluster=<clustername>`. 
By letting `kops` create its own subnets, `kops` related tags are all restricted to resources that are owned by `kops`, so terraform-vpc doesn't ever need to know about `kops` and vice versa.\n\n# Kops network design\n\n## Network overview diagram\n\n| **Subnet Name (abstracted)**     | **Example Name**                                         | **Private / Public** | **Created / Managed by** |\n| -------------------------------- | -------------------------------------------------------- | -------------------- | ------------------------ |\n| admin_az$n                       | admin_az1                                                | Private              | terraform-vpc            |\n| private_working_az$n             | private_working_az1                                      | Private              | terraform-vpc            |\n| private_prod_az$n                | private_prod_az1                                         | Private              | terraform-vpc            |\n| public_az$n                      | public_az1                                               | Public               | terraform-vpc            |\n| az$n.$cluster_identifier         | us-east-1a.working-1.shareddev.dev.hillghost.com         | Private              | kops                     |\n| utility-az$n.$cluster_identifier | utility-us-east-1a.working-1.shareddev.dev.hillghost.com | Public               | kops                     |\n\n\n\nCIDRs should always be allocated assuming a 4AZ layout for possible future expansion, even if the client doesn't initially need all of the AZs. 
[This Document](https://docs.google.com/spreadsheets/d/1wObSMI8xvgztqYEhUkIALNw8fDBFVx-Xv4a9UC8Z7HE) lays out some potential subnet CIDRs for various types of layouts.\n\n\n## Example of possible network section of the kops cluster.spec\n```\nsubnets:\n- cidr: 172.20.16.0/24\n  egress: nat-05ee835341f099286\n  name: us-east-1a\n  type: Private\n  zone: us-east-1a\n- cidr: 172.20.17.0/24\n  egress: nat-0973eca2e99f9249c\n  name: us-east-1b\n  type: Private\n  zone: us-east-1b\n- cidr: 172.20.18.0/24\n  egress: nat-015aa74ead665693d\n  name: us-east-1c\n  type: Private\n  zone: us-east-1c\n- cidr: 172.20.20.0/24\n  name: utility-us-east-1a\n  type: Utility\n  zone: us-east-1a\n- cidr: 172.20.21.0/24\n  name: utility-us-east-1b\n  type: Utility\n  zone: us-east-1b\n- cidr: 172.20.22.0/24\n  name: utility-us-east-1c\n  type: Utility\n  zone: us-east-1c\n```\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "docs/overview.md",
    "content": "# Infrastructure Repository Overview\nAfter running `pentagon start-project` you will have a directory with a layout similar to:\n```\n.\n├── README.md\n├── ansible-requirements.yml\n├── inventory/\n├── docs/\n├── plugins/\n└── requirements.txt\n```\nSee also [Extended Layout](#extended-layout-description)\n\nGenerally speaking, the layout of the infrastructure repository is hierarchical. That is to say, higher level directories contain scripts, resources, and variables that are intended to be used earlier in the creation of your infrastructure.\n\n## Core Directories\n\n### inventory/\nThe inventory directory is used to store an arbitrary segment of your infrastructure. It can be a separate AWS account, AWS VPC, GCP Project, or GCP Network. It can be as fine grained as you like, but the config directory in each \"inventory item\" is scoped to, at most, one AWS Account+VPC or one GCP Project+Network.  By default, the `inventory` directory includes one `default` directory with configuration for one VPC and two Kops clusters. You can pass `pentagon start-project` the `--no-configure` flag to build your own.\n\n### inventory/(default)/config/\nThe config directory is separated into `local` and `private`. Files, scripts, and templates in `config/local` are checked into source control and should not contain any workstation specific values.\n\n`config/local/env-vars.sh` uses a specific list of variable names, locates the values in `config/local/vars.yml` and `config/private/secrets.yml` and exports them as environment variables. These environment variables are used throughout the infrastructure repository so make sure you `source config/local/env-vars.sh`.\n\nSome configurations require absolute paths which, if checked into source control, can make working with teams challenging. 
The `config/local/local-config-init` script makes this easier by providing a fast way to generate workstation specific configurations from the `ansible.cfg-default` and `ssh_config-default` template files. The generated workstation specific configuration files are written to `config/private`.\n\n`config/private/ssh_config` and `config/private/ansible.cfg` greatly simplify interaction with your cloud VMs. It is configured to automatically use the correct key and user name based on the IP address of the host. You can either use the command `ssh -F '${INFRASTRUCTURE_REPO}/config/local/ssh_config` or alias SSH with `alias ssh=\"ssh -F '${INFRASTRUCTURE_REPO}/config/local/ssh_config'`.\n\n`config/private`, in addition to `secrets.yml` also contains SSH keys generated by `start-project`. Unless you opted to not create the keys, the `admin-vpn` key pair will be uploaded to AWS for you when the VPN instance is created and the `*-kube` keys will automatically be uploaded when `kops` is invoked to create the Kubernetes cluster. The other keys, `production-private`, `working-private` are created as a convenience to be used for any instances that are created in the VPC `private-working` and `private-production` subnets. When `kops` is invoked to create the cluster, the Kubernetes config secret will also be created as `config/private/kube_config`\n\n### inventory/(default)/\nThe `default/` contains most of the moving parts of the infrastructure repository. The name `default` is not important! The contents are. The goal is that the contents of the `default` directory can be deep copied and create parallel (cloud provider, cloud account, vpc) infrastructure in a single repository. *Consider this a guidline, not a rule!*\n\n```\n├── clusters\n├── resources\n└── vpc\n```\n\n### inventory/(default)/clusters/\nContains `working/` and `production/` directories. Both are laid out identically.\n`working` is intended to contain any non-production Kubernetes pods, deployments, services. 
`production` is intended to contain any production Kubernetes objects pods, deployments, services etc.\n```\n├── kops.sh\n├── cluster.yml\n├── nodes.yml\n├── masters.yml\n└── secret.sh\n```\n`kops.sh` is a bash script that uploads the yml files the S3 bucket set in `inventory/(default)/config/local/vars.yml`\n`secrets.sh` creates the secret that is the ssh public key mateial for the the nodes in the cluster\n\n### inventory/(default)/terraform\nThe `terraform/` directory is for the AWS VPC Terraform. It is intended to hold the configuration for all Terraform for the \"inventory item.\" Terraform modules should be used to organize the Terraform code.\n\n### inventory/(default)/resources\nThis `resources/` is the directory into which Ansible playbooks to _non cluster specific_ cloud resources can be stored. The `admin-environment` playbook, which creates and configures the OpenVPN instance, is present \"out of the box\".\n\n## Supporting Directories\n\n### plugins/\nThis is the Ansible plugins directory. The `ec2` infrastructure plugin is enabled by default. Set in `config/private/ansible.cfg`.\n\n### roles/\nThe Ansible roles are installed here by default. Set in `config/private/ansible.cfg`.\nThis is not checked into Git.\n\n## Extended Layout Description\n\n```\n├── README.md\n├── ansible-requirements.yml\n├── config.yml\n├── inventory\n│   └── default                              * Directory for default cloud\n│       ├── clusters                         * Directory for Clusters\n│       │   ├── production                   * Production Cluster Directory\n│       │   │   └── vars.yml                 * Variables specific to production. Used by `pentagon add kops.cluster`\n│       │   └── working                      * Working Cluster Directory\n│       │       └── vars.yml                 * Variables specific to working. 
Used by `pentagon add kops.cluster`\n│       ├── config                           * Configuration Directory\n│       │   ├── local                        * Local, non-secret configuration\n│       │   │   ├── ansible.cfg-default      * templating code to create private configuration\n│       │   │   ├── local-config-init\n│       │   │   ├── ssh_config-default\n│       │   │   └── vars.yml\n│       │   └── private                      * Private, secret configs. ignored by git\n│       │       ├── admin-vpn                * SSH key pairs generated by at `start-project`\n│       │       ├── admin-vpn.pub\n│       │       ├── production-kube\n│       │       ├── production-kube.pub\n│       │       ├── production-private\n│       │       ├── production-private.pub\n│       │       ├── secrets.yml              * Secret values in yaml config file\n│       │       ├── working-kube\n│       │       ├── working-kube.pub\n│       │       ├── working-private\n│       │       └── working-private.pub\n│       ├── kubernetes                       * You can store kubernetes manifests here\n│       ├── resources                        * Ansible playbook for creating the OpenVPN instance\n│       │   └── admin-environment\n│       │       ├── destroy.yml\n│       │       ├── env.yml\n│       │       └── vpn.yml\n│       └── terraform                        * Terraform for entire inventory item\n│           ├── aws_vpc.auto.tfvars\n│           ├── aws_vpc.tf\n│           ├── aws_vpc_variables.tf\n│           ├── backend.tf\n│           └── provider.tf\n├── plugins                                  * Ansible plugins\n└── requirements.txt\n```\n"
  },
  {
    "path": "docs/vpn.md",
    "content": "# VPN\n\n## Setup\nThe VPN allows ssh access to instances in the private subnets in the VPC. This includes the KOPS created subnets and the private subnets created during VPC creation. \nThis can be done before or after configuring and deploying your kubernetes cluster(s). It is required to have your VPC setup prior to starting VPN setup. By default an ssh key is created for the vpn instance during `pentagon start-project`. The playbook will upload the key and associate it with the new AWS instance.\n\n* Review `account/vars.yml` and ensure that `vpc_tag_name`, `org_name`, `canonical_zone` and `vpn_bucket` are set.\n* Ensure `config/local/ssh_config` has the key path and subnets set for ssh access\n* In `default/resources/admin-environment/env.yml` verify the following are set properly\n  - `aws_key_name` : name of the key pair created earlier\n  - `default_ami` : If not preset, see the [Ubuntu AMI locator](https://cloud-images.ubuntu.com/locator/). Use Ubuntu Trusty and make sure it is located in the correct region, instance type: `hvm:ebs-ssd`.\n  - Edit other variables as needed. VPN users to be created, aka VPN clients, are contained in the Ansible array, `openvpn_clients`\n* If you haven't already, in the project directory, install ansible requirements:\n\n```\nansible-galaxy install -r ansible-requirements.yml\n```\n\n* Run the VPN playbook:\n\n```\nansible-playbook default/resources/admin-environment/vpn.yml\n```\n\n* Even when all the inputs are correct, sometimes you will need to re-run ansible a couple times to get through all of the steps.\n\n\n## Usage\n\nThe VPN playbook will create an instance with OpenVPN software that you can connect to using a VPN client. On OSX, one possible alternative is Tunnelblick. 
See: [How to connect to access server from OSX](https://openvpn.net/index.php/access-server/docs/admin-guides/183-how-to-connect-to-access-server-from-a-mac.html)\n\n\nNo matter the client you choose to use, the keys for each of the users will be deposited into the s3 bucket specified earlier in `default/resources/admin-environment/env.yml`. Download these and use them to access your cluster.\n"
  },
  {
    "path": "example-component/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2017, Reactive Ops Inc.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "example-component/MANIFEST.in",
    "content": "recursive-include pentagon_component/files/ *\n\n"
  },
  {
    "path": "example-component/README.md",
    "content": ""
  },
  {
    "path": "example-component/pentagon_component/__init__.py",
    "content": "from pentagon.component import ComponentBase\nimport os\n\n\nclass Component(ComponentBase):\n    _path = os.path.dirname(__file__)\n"
  },
  {
    "path": "example-component/pentagon_component/files/__init__.py",
    "content": ""
  },
  {
    "path": "example-component/pentagon_component/files/example_template.jinja",
    "content": "# blank"
  },
  {
    "path": "example-component/requirement.txt",
    "content": ""
  },
  {
    "path": "example-component/setup.py",
    "content": "#!/usr/bin/env python\n# -- coding: utf-8 --\n# Copyright 2017 Reactive Ops Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the “License”);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an “AS IS” BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\ntry:\n    from setuptools import setup, find_packages\nexcept ImportError:\n    print(\"setup tools required. Please run: \"\n          \"pip install setuptools).\")\n    sys.exit(1)\n\nsetup(name='pentagon-example-component',\n      version='0.0.1',\n      description='Example Pentagon Component',\n      author='ReactiveOp Inc.',\n      author_email='reactive@reactiveops.com',\n      url='http://reactiveops.com/',\n      license='Apache2.0',\n      include_package_data=True,\n      install_requires=[],\n      data_files=[],\n      packages=find_packages()\n      )\n"
  },
  {
    "path": "pentagon/__init__.py",
    "content": ""
  },
  {
    "path": "pentagon/cli.py",
    "content": "#!/usr/bin/env python\nimport os\nimport click\nimport logging\nimport coloredlogs\nimport traceback\nimport oyaml as yaml\nimport json\nimport migration\nfrom pydoc import locate\nfrom .pentagon import PentagonException\nfrom .pentagon import GCPPentagonProject, AWSPentagonProject, PentagonProject\nfrom helpers import merge_dict\nfrom meta import __version__\n\n\nclass RequiredIf(click.Option):\n\n    def __init__(self, *args, **kwargs):\n        self.required_if = kwargs.pop('required_if').split('=')\n        self.required_option = self.required_if[0]\n        self.required_value = self.required_if[1]\n        assert self.required_if, \"'required_if' parameter required\"\n        kwargs['help'] = (kwargs.get('help', '') +\n                          ' NOTE: This argument required when --%s=%s' %\n                          (self.required_option, self.required_value)\n                          ).strip()\n        super(RequiredIf, self).__init__(*args, **kwargs)\n\n    def handle_parse_result(self, ctx, opts, args):\n        other_present = self.required_option in ctx.params\n        if not other_present or ctx.params[self.required_option] != self.required_value:\n            self.prompt = None\n\n        return super(RequiredIf, self).handle_parse_result(\n            ctx, opts, args)\n\n\ndef validate_not_empty_string(ctx, param, value):\n    ''' Validates that value of prompted entry is not and empty string '''\n    try:\n        if value is not None and value.strip() == '':\n            raise click.BadParameter('{} cannot be empty'.format(param.name))\n        else:\n            return value\n    except click.BadParameter as e:\n        click.echo(e)\n        value = click.prompt(param.prompt)\n        return validate_not_empty_string(ctx, param, value)\n\n\n@click.group()\n@click.version_option(__version__)\n@click.option('--log-level', default=\"INFO\", help=\"Log Level DEBUG,INFO,WARN,ERROR\")\n@click.pass_context\ndef cli(ctx, log_level, 
*args, **kwargs):\n    coloredlogs.install(level=log_level)\n\n\n@click.command()\n@click.pass_context\n@click.argument('name')\n# General directory and file name options\n@click.option('-f', '--config-file', help='File to read configuration options from. File supercedes command line options.')\n@click.option('-o', '--output-file', default='config.yml', help='File to write output after completion.')\n@click.option('--workspace-directory', help='Directory to place new project. Defaults to ./')\n@click.option('--configure/--no-configure', default=True, help='Configure project with default settings.')\n@click.option('--force/--no-force', help=\"Ignore existing directories and copy project.\")\n@click.option('--cloud', default=\"aws\", help=\"Cloud provider to create default inventory. Defaults to 'aws'. [aws,gcp,none]\")\n# Currently only AWS but maybe we can/should add GCP later\n@click.option('--configure-vpn/--no-configure-vpn', default=True, help=\"Whether or not to configure a vpn. Default True.\")\n@click.option('--vpc-name', help=\"Name of VPC to create.\")\n@click.option('--vpc-cidr-base', help=\"First two octets of the VPC ip space.\")\n@click.option('--vpc-id', help=\"AWS VPC id where the clusters are going to be created.\")\n@click.option('--admin-vpn-key', help=\"Name of the ssh key for the admin user of the VPN instance.\")\n@click.option('--vpn-ami-id', help=\"ami-id to use for the VPN instance.\")\n# General Kubernetes options\n@click.option('--kubernetes-version', help=\"Version of kubernetes to use for cluster nodes.\")\n@click.option('--disk-size', help=\"Size disk to provision on the kubernetes vms.\")\n# Working\n@click.option('--working-kubernetes-cluster-name', help=\"Name of the working kubernetes cluster nodes.\")\n@click.option('--working-kubernetes-node-count', help=\"Number of nodes for the working kubernetes cluster.\")\n@click.option('--working-kubernetes-worker-node-type', help=\"Node type of the kube 
workers.\")\n@click.option('--working-kubernetes-network-cidr', help=\"Network CIDR of the kubernetes working cluster.\")\n# Production\n@click.option('--production-kubernetes-cluster-name', help=\"Name of the production kubernetes cluster nodes.\")\n@click.option('--production-kubernetes-node-count', help=\"Number of nodes for the production kubernetes cluster nodes.\")\n@click.option('--production-kubernetes-worker-node-type', help=\"Node type of the kube workers.\")\n@click.option('--production-kubernetes-network-cidr', help=\"Network CIDR of the kubernetes working cluster.\")\n# AWS Cloud options\n@click.option('--aws-access-key', prompt=True, callback=validate_not_empty_string, default=lambda: os.environ.get('PENTAGON_aws_access_key'), help=\"AWS access key.\", cls=RequiredIf, required_if='cloud=aws')\n@click.option('--aws-secret-key', prompt=True, callback=validate_not_empty_string, default=lambda: os.environ.get('PENTAGON_aws_secret_key'), help=\"AWS secret key.\", cls=RequiredIf, required_if='cloud=aws')\n@click.option('--aws-default-region', help=\"AWS default region.\", cls=RequiredIf, required_if='cloud=aws')\n@click.option('--aws-availability-zones', help=\"[Deprecated] Use \\\"--availability-zones\\\". AWS availability zones as a comma delimited with spaces. Default to region a, region b, ... region z.\")\n@click.option('--aws-availability-zone-count', help=\"Number of availability zones to use.\")\n@click.option('--infrastructure-bucket', help=\"Name of S3 Bucket to store state.\")\n@click.option('--dns-zone', help=\"DNS zone to configure DNS records in.\")\n@click.option('--create-keys/--no-create-keys', default=True, help=\"Create ssh keys or not.\")\n# AWS only Kubernetes options\n# Working\n@click.option('--working-kubernetes-master-aws-zone', help=\"Availability zone to place the kube master in.\")\n@click.option('--working-kubernetes-master-node-type', help=\"AWS only. 
Node type of the kube master.\")\n@click.option('--working-kube-key', help=\"Name of the ssh key for the working kubernetes cluster.\")\n@click.option('--working-private-key', help=\"Name of the ssh key for the working non kubernetes instances.\")\n@click.option('--working-kubernetes-dns-zone', help=\"DNS Zone of the kubernetes working cluster.\")\n@click.option('--working-kubernetes-v-log-level', help=\"V Log Level kubernetes working cluster.\")\n# Production\n@click.option('--production-kubernetes-master-aws-zone', help=\"Availability zone to place the kube master in.\")\n@click.option('--production-kubernetes-master-node-type', help=\" AWS only. Node type of the kube master.\")\n@click.option('--production-kube-key', help=\"Name of the ssh key for the production kubernetes cluster.\")\n@click.option('--production-private-key', help=\"Name of the ssh key for the production non kubernetes instances.\")\n@click.option('--production-kubernetes-dns-zone', help=\"DNS Zone of the kubernetes production cluster.\")\n@click.option('--production-kubernetes-v-log-level', help=\"V Log Level kubernetes production cluster.\")\n# GCP Cloud options\n@click.option('--gcp-project', prompt=True, callback=validate_not_empty_string, help=\"Google Cloud Project to create clusters in.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-region', prompt=True, callback=validate_not_empty_string, help=\"Google Cloud Project Region to use for Cluster.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-cluster-name', prompt=True, callback=validate_not_empty_string, help=\"Google GKE Cluster Name.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-nodes-cidr', prompt=True, callback=validate_not_empty_string, help=\"Google GKE Nodes CIDR.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-services-cidr', prompt=True, callback=validate_not_empty_string, help=\"Google GKE services CIDR.\", cls=RequiredIf, 
required_if='cloud=gcp')\n@click.option('--gcp-pods-cidr', prompt=True, callback=validate_not_empty_string, help=\"Google GKE pods CIDR.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-kubernetes-version', prompt=True, callback=validate_not_empty_string, help=\"Version of kubernetes to use for cluster nodes.\", cls=RequiredIf, required_if='cloud=gcp')\n@click.option('--gcp-infra-bucket', prompt=True, callback=validate_not_empty_string, help=\"The bucket where terraform will store its state for GCP.\", cls=RequiredIf, required_if='cloud=gcp')\ndef start_project(ctx, name, **kwargs):\n    \"\"\" Create an infrastructure project from scratch with the configured options \"\"\"\n\n    try:\n\n        logging.basicConfig(level=kwargs.get('log_level'))\n        file_data = {}\n        if kwargs.get('config_file'):\n            file_data = parse_in_file(kwargs.get('config_file'))[0]\n        kwargs.update(file_data)\n        logging.debug(kwargs)\n        cloud = kwargs.get('cloud')\n        if cloud.lower() == 'aws':\n            project = AWSPentagonProject(name, kwargs)\n        elif cloud.lower() == 'gcp':\n            project = GCPPentagonProject(name, kwargs)\n        elif cloud.lower() == 'none':\n            project = PentagonProject(name, kwargs)\n        else:\n            raise PentagonException(\n                \"Value passed for option --cloud not 'aws' or 'gcp'\")\n        logging.debug('Creating {} project {} with {}'.format(\n            cloud.upper(), name, kwargs))\n        project.start()\n    except Exception as e:\n        logging.error(e)\n        logging.debug(traceback.format_exc(e))\n\n\n@click.command()\n@click.pass_context\n@click.argument('component_path')\n@click.option('--data', '-D', multiple=True, help='Individual Key=Value pairs used by the component. 
There should be no spaces surrounding the `=`')\n@click.option('--file', '-f', help='File to read Key=Value pair from (yaml or json are supported)')\n@click.option('--out', '-o', default='./', help=\"Path to output module result, if any\")\n@click.argument('additional-args', nargs=-1, default=None)\ndef add(ctx, component_path, additional_args, **kwargs):\n    _run('add', component_path, additional_args, kwargs)\n\n\n@click.command()\n@click.pass_context\n@click.argument('component_path')\n@click.option('--data', '-D', multiple=True, help='Individual Key=Value pairs used by the component.')\n@click.option('--file', '-f', help='File to read Key=Value pair from (yaml or json are supported).')\n@click.option('--out', '-o', default='./', help=\"Path to output module result, if any.\")\n@click.argument('additional-args', nargs=-1, default=None)\ndef get(ctx, component_path, additional_args, **kwargs):\n    _run('get', component_path, additional_args, kwargs)\n\n\n@cli.command()\n@click.pass_context\n@click.option(\"--dry-run/--no-dry-run\", default=False, help=\"Test migration before applying.\")\n@click.option('--log-level', default=\"INFO\", help=\"Log Level DEBUG,INFO,WARN,ERROR.\")\n@click.option('--branch', default=\"migration\", help=\"Name of branch to create for migration. 
Default='migration'\")\n@click.option('--yes/--no', default=False, help=\"Confirm to run migration.\")\ndef migrate(ctx, **kwargs):\n    \"\"\" Update Infrastructure Repository to the latest configuration \"\"\"\n    logging.basicConfig(level=kwargs.get('log_level'))\n    migration.migrate(kwargs['branch'], kwargs['yes'])\n\n\ndef _run(action, component_path, additional_args, options):\n    logging.basicConfig(level=options.get('log_level'))\n    logging.debug(\"Importing module Pentagon {}\".format(component_path))\n    logging.debug(\"with options: {}\".format(options))\n    logging.debug(\"and additional arguments: {}\".format(additional_args))\n\n    documents = [{}]\n    data = parse_data(options.get('data', {}))\n\n    try:\n        file = options.get('file', None)\n        if file is not None:\n            documents = parse_in_file(file)\n    except Exception as e:\n        logging.error(\"Error parsing data from file or -D arguments\")\n        logging.error(e)\n\n    component_class = get_component_class(component_path)\n    try:\n        for doc in documents:\n            if callable(component_class):\n                data = merge_dict(doc, data, clobber=True)\n                data['prompt'] = options.get('prompt', True)\n                # Making data keys more flexible and allowing keys with\n                # - to be be corrected in place\n                data_copy = data.copy()\n                for key, value in data.iteritems():\n                    flex_key = key.replace('-', '_')\n                    if flex_key != key:\n                        data_copy[flex_key] = value\n                data = data_copy\n\n                getattr(component_class(data, additional_args),\n                        action)(options.get('out'))\n            else:\n                logging.error(\n                    \"Error locating module or class: {}\".format(component_path))\n    except Exception, e:\n        logging.error(e)\n        
logging.debug(traceback.format_exc(e))\n\n\n# Making names more terminal friendly\ncli.add_command(start_project, \"start-project\")\ncli.add_command(add, \"add\")\ncli.add_command(get, \"get\")\n\n\ndef get_component_class(component_path):\n    \"\"\" Construct Class path from component input \"\"\"\n\n    component_path_list = component_path.split(\".\")\n    possible_component_paths = []\n    if len(component_path_list) > 1:\n        component_name = \".\".join(component_path.split(\".\")[0:-1])\n        component_class_name = component_path.split(\".\")[-1]\n    else:\n        component_name = component_path\n        component_class_name = component_path\n\n    # Compile list of possible class paths\n    possible_component_paths.append(\n        '{}.{}'.format(component_name, component_class_name))\n    possible_component_paths.append('{}.{}'.format(\n        component_name, component_class_name.title()))\n    possible_component_paths.append(\n        'pentagon.component.{}.{}'.format(component_name, component_class_name))\n    possible_component_paths.append('pentagon.component.{}.{}'.format(\n        component_name, component_class_name.title()))\n    possible_component_paths.append(\n        'pentagon_{}.{}'.format(component_name, component_class_name))\n    possible_component_paths.append('pentagon_{}.{}'.format(\n        component_name, component_class_name.title()))\n\n    # Find Class if it exists\n    for class_path in possible_component_paths:\n        logging.debug('Seeking {}'.format(class_path))\n        component_class = locate(class_path)\n        if component_class is not None:\n            logging.debug(\"Found {}\".format(component_class))\n            return component_class\n\n        logging.debug('{} Not found'.format(class_path))\n\n\ndef parse_in_file(file):\n    \"\"\" Parse data structure from file into dictionary for component use \"\"\"\n    with open(file, 'r') as data_file:\n        try:\n            data = json.load(data_file)\n     
       logging.debug(\"Data parsed from file {}: {}\".format(file, data))\n            return data\n        except ValueError as json_error:\n            pass\n\n        data_file.seek(0)\n\n        try:\n            data = list(yaml.load_all(\n                data_file, Loader=yaml.loader.FullLoader))\n            logging.debug(\"Data parsed from file {}: {}\".format(file, data))\n            return data\n        except yaml.YAMLError as yaml_error:\n            pass\n\n    logging.error(\"Unable to parse in file. {} {} \".format(\n        json_error, yaml_error))\n\n\ndef parse_data(data, d=None):\n    \"\"\" Function to parse the incoming -D options into a dict \"\"\"\n    if d is None:\n        d = {}\n\n    for kv in data:\n        key = kv.split('=')[0]\n        try:\n            val = kv.split('=', 1)[1]\n        except IndexError:\n            val = True\n\n        d[key] = val\n\n    return d\n"
  },
  {
    "path": "pentagon/component/__init__.py",
    "content": "import os\nimport glob\nimport shutil\nimport logging\nimport traceback\nimport sys\nimport re\nimport click\n\nfrom pentagon.helpers import render_template\nfrom pentagon.defaults import AWSPentagonDefaults as PentagonDefaults\n\n\nclass ComponentBase(object):\n    \"\"\" Base class for Pentagon Components. \"\"\"\n    _required_parameters = []\n\n    # List of environment variables to use.\n    # If set, they should override other data sources.\n    # Lower Case here will find upper case environment variables.\n    # If a dictionary is passed, the key is the variable name used in context,\n    # and the value is the environment variable name.\n    _environment = []\n    _defaults = {}\n\n    def __init__(self, data, additional_args=None, **kwargs):\n\n        self._data = data\n        self._additional_args = additional_args\n        self._process_env_vars()\n        self._process_defaults()\n\n        missing_parameters = []\n        for item in self._required_parameters:\n            if item not in self._data.keys():\n                missing_parameters.append(item)\n\n        if missing_parameters:\n            logging.error(\"Missing required data parameters: {}\".format(\n                \", \".join(missing_parameters)))\n            logging.error(\"You can set parameters with '-Dparam_name=value'.\")\n            sys.exit(1)\n\n    @property\n    def _destination_directory_name(self):\n        if self._destination != './':\n            return self._destination\n        return self._data.get('name', self.__class__.__name__.lower())\n\n    @property\n    def _files_directory(self):\n        return sys.modules[self.__module__].__path__[0] + \"/files\"\n\n    def _process_env_vars(self):\n        logging.debug('Fetching environment variables')\n        environ_data = {}\n        for item in self._environment:\n            if type(item) is dict:\n                context_var = item.keys()[0]\n                env_var = 
os.environ.get(item.values()[0])\n            else:\n                context_var = item.lower()\n                env_var = os.environ.get(item.upper())\n\n            environ_data[context_var] = env_var\n\n        self._merge_data(environ_data)\n\n    def _process_defaults(self):\n        \"\"\" Use _defaults from global pentagon defaults, then class and add them to missing values on the _data dict \"\"\"\n\n        logging.debug('Processing Defaults')\n        self._merge_data(self._defaults)\n\n        try:\n            class_name = self.__class__.__name__.lower()\n            pentagon_defaults = getattr(PentagonDefaults, class_name)\n            logging.debug(\n                \"Adding Pentagon Defaults Last {}\".format(pentagon_defaults))\n            self._merge_data(pentagon_defaults)\n        except AttributeError, e:\n            logging.info(\"No top level defaults for Pentagon component {} \".format(\n                class_name.lower()))\n\n    def _render_directory_templates(self):\n        \"\"\" Loop and use render_template helper method on all templates in destination directory  \"\"\"\n        template_location = self._destination_directory_name\n        if os.path.isfile(template_location):\n            template_location = os.path.dirname(template_location)\n            logging.debug(\"{} is a file. 
Using the directory {} instead.\".format(\n                self._destination_directory_name, template_location))\n        logging.debug(\"Rendering Templates in {}\".format(template_location))\n        for folder, dirnames, files in os.walk(template_location):\n            for template in glob.glob(folder + \"/*.jinja\"):\n                logging.debug(\"Rendering {}\".format(template))\n                template_file_name = template.split('/')[-1]\n                path = '/'.join(template.split('/')[0:-1])\n                target_file_name = re.sub(r'\\.jinja$', '', template_file_name)\n                target = folder + \"/\" + target_file_name\n                render_template(template_file_name, path, target,\n                                self._data, overwrite=self._overwrite)\n\n    def _remove_init_file(self):\n        \"\"\" delete init file, if it exists from template target directory \"\"\"\n\n        for root, dirs, files in os.walk(self._destination_directory_name):\n            for name in files:\n                if \"__init__.py\" == name or \"__init__.pyc\" == name:\n                    logging.debug('Removing: {}'.format(\n                        os.path.join(root, name)))\n                    os.remove(os.path.join(root, name))\n\n    def _merge_data(self, new_data, clobber=False):\n        \"\"\" accepts new_data (dict) and clobber (boolean). Merges dictionary with existing instance dictionary _data. If clobber is True, overwrites value. 
Defaults to false \"\"\"\n        for key, value in new_data.items():\n            if self._data.get(key) is None or clobber:\n                logging.debug(\n                    \"Setting component data {}: {}\".format(key, value))\n                self._data[key] = value\n\n    def add(self, destination, overwrite=False):\n        self._destination = destination\n        self._overwrite = overwrite\n\n        self._display_settings_to_user()\n        try:\n            # Add all files from the component templates to the destination directory\n            self._add_files()\n            # Remove any __init__ files in the destination that were copied from the component templates\n            self._remove_init_file()\n            # For all the jinja templates in the destination directory, render them\n            self._render_directory_templates()\n            logging.info(\"New component added. Source your environment before \"\n                         \"proceeding or unexpected behavior may result.\")\n        except Exception as e:\n            logging.error(\"Error occurred configuring component\")\n            logging.error(e)\n            logging.debug(traceback.format_exc(e))\n            sys.exit(1)\n\n    def _display_settings_to_user(self):\n        logging.info(\"Pentagon will write to the following directory: \"\n                     \"(set with '-o ./path')\")\n        logging.info(\"  Path: \\\"{}\\\"\".format(self._destination_directory_name))\n        logging.info(\"Displaying provided and default values for this component: \"\n                     \"(e.g. 
'-Dparam_name=abcd')\")\n        for key in sorted(self._data):\n            value = self._data[key]\n            using_defaults = False\n            if key in self._defaults.keys():\n                if self._data[key] == self._defaults[key]:\n                    using_defaults = True\n\n            is_default = \"(Default Value)\" if using_defaults else \"\"\n            logging.info(\"  {0:40} = {1:20} {2}\".format(\n                key,\n                str(value),\n                is_default,\n            ))\n\n        if sys.stdin.isatty():\n            if click.confirm('This look ok to proceed?'):\n                return\n            else:\n                logging.info(\"Exiting because you did not accept the inputs.\")\n                exit()\n\n    def _add_files(self, sub_path=None):\n        \"\"\" Copies files and templates from <component>/files \"\"\"\n        if self._overwrite:\n            from distutils.dir_util import copy_tree\n        else:\n            from shutil import copytree as copy_tree\n        if sub_path is not None:\n            source = ('{}/{}').format(self._files_directory, sub_path)\n        else:\n            source = self._files_directory\n\n        logging.debug(\"Adding file: {} -> {}\".format(source,\n                                                     self._destination_directory_name))\n        if os.path.isfile(source):\n            shutil.copy(source, self._destination_directory_name)\n        elif os.path.isdir(source):\n            copy_tree(source, self._destination_directory_name)\n"
  },
  {
    "path": "pentagon/component/aws_vpc/__init__.py",
    "content": "import os\n\nfrom pentagon.component import ComponentBase\nfrom pentagon.defaults import AWSPentagonDefaults as PentagonDefaults\nfrom pentagon.helpers import allege_aws_availability_zones\n\n\nclass AWSVpc(ComponentBase):\n\n    _required_parameters = ['aws_region']\n\n    def add(self, destination, overwrite):\n        for key, value in PentagonDefaults.vpc.iteritems():\n            if not self._data.get(key):\n                self._data[key] = value\n\n        if self._data.get('aws_availability_zones') is None:\n            self._data['aws_availability_zones'] = allege_aws_availability_zones(self._data['aws_region'], self._data['aws_availability_zone_count'])\n\n        return super(AWSVpc, self).add(destination, overwrite=overwrite)\n"
  },
  {
    "path": "pentagon/component/aws_vpc/files/aws_vpc.auto.tfvars.jinja",
    "content": "aws_vpc_name  = \"{{ vpc_name }}\"\nvpc_cidr_base = \"{{ vpc_cidr_base }}\"\naws_azs = \"{{ aws_availability_zones }}\"\naz_count = \"{{ aws_availability_zone_count }}\"\naws_inventory_path = \"$INFRASTRUCTURE_REPO/plugins/inventory\"\naws_region = \"{{ aws_region }}\"\n\nadmin_subnet_parent_cidr = \".0.0/22\"\nadmin_subnet_cidrs = {\n    zone0 = \".0.0/24\"\n    zone1 = \".1.0/24\"\n    zone2 = \".2.0/24\"\n    zone3 = \".3.0/24\"\n  }\n\npublic_subnet_parent_cidr = \".4.0/22\"\npublic_subnet_cidrs = {\n    zone0 = \".4.0/24\"\n    zone1 = \".5.0/24\"\n    zone2 = \".6.0/24\"\n    zone3 = \".7.0/24\"\n  }\n\nprivate_prod_subnet_parent_cidr = \".8.0/22\"\nprivate_prod_subnet_cidrs = {\n    zone0 = \".8.0/24\"\n    zone1 = \".9.0/24\"\n    zone2 = \".10.0/24\"\n    zone3 = \".11.0/24\"\n  }\n\nprivate_working_subnet_parent_cidr = \".12.0/22\"\nprivate_working_subnet_cidrs = {\n    zone0 = \".12.0/24\"\n    zone1 = \".13.0/24\"\n    zone2 = \".14.0/24\"\n    zone3 = \".15.0/24\"\n  }\n"
  },
  {
    "path": "pentagon/component/aws_vpc/files/aws_vpc.tf.jinja",
    "content": "\nmodule \"vpc\" {\n  source                             = \"git::https://github.com/reactiveops/terraform-vpc.git?ref=v3.0.0\"\n  aws_vpc_name                       = \"${var.aws_vpc_name}\"\n  aws_region                         = \"${var.aws_region}\"\n\n  az_count                           = \"${var.az_count}\"\n  aws_azs                            = \"${var.aws_azs}\"\n\n  vpc_cidr_base                      = \"${var.vpc_cidr_base}\"\n\n  admin_subnet_parent_cidr           = \"${var.admin_subnet_parent_cidr}\"\n  admin_subnet_cidrs                 = \"${var.admin_subnet_cidrs}\"\n\n  public_subnet_parent_cidr          = \"${var.public_subnet_parent_cidr}\"\n  public_subnet_cidrs                = \"${var.public_subnet_cidrs}\"\n\n  private_prod_subnet_parent_cidr    = \"${var.private_prod_subnet_parent_cidr}\"\n  private_prod_subnet_cidrs          = \"${var.private_prod_subnet_cidrs}\"\n\n  private_working_subnet_parent_cidr = \"${var.private_working_subnet_parent_cidr}\"\n  private_working_subnet_cidrs       = \"${var.private_working_subnet_cidrs}\"\n\n}\n\n\n// Output VPC values to allow this statefile to be used as a datasource\n\noutput \"aws_vpc_subnet_admin_ids\" {\n  value = \"${module.vpc.aws_subnet_admin_ids}\"\n}\n\noutput \"aws_vpc_subnet_private_working_ids\" {\n  value = \"${module.vpc.aws_subnet_private_working_ids}\"\n}\n\noutput \"aws_vpc_subnet_private_prod_ids\" {\n  value = \"${module.vpc.aws_subnet_private_prod_ids}\"\n}\n\noutput \"aws_vpc_subnet_public_ids\" {\n  value = \"${module.vpc.aws_subnet_public_ids}\"\n}\n\noutput \"aws_vpc_id\" {\n  value = \"${module.vpc.aws_vpc_id}\"\n}\n\noutput \"aws_vpc_cidr\" {\n  value = \"${module.vpc.aws_vpc_cidr}\"\n}\n\noutput \"aws_nat_gateway_ids\" {\n  value = \"${module.vpc.aws_nat_gateway_ids}\"\n}\n\n"
  },
  {
    "path": "pentagon/component/aws_vpc/files/aws_vpc_variables.tf",
    "content": "variable \"aws_region\" {}\nvariable \"aws_azs\" {}\n\nvariable \"aws_vpc_name\" {}\n\nvariable \"az_count\" {}\nvariable \"vpc_cidr_base\" {}\n\nvariable \"admin_subnet_parent_cidr\" {}\n\nvariable \"admin_subnet_cidrs\" {\n  default = {}\n}\n\nvariable \"public_subnet_parent_cidr\" {}\n\nvariable \"public_subnet_cidrs\" {\n  default = {}\n}\n\nvariable \"private_prod_subnet_parent_cidr\" {}\n\nvariable \"private_prod_subnet_cidrs\" {\n  default = {}\n}\n\nvariable \"private_working_subnet_parent_cidr\" {}\n\nvariable \"private_working_subnet_cidrs\" {\n  default = {}\n}\n"
  },
  {
    "path": "pentagon/component/core/__init__.py",
    "content": "from pentagon.component import ComponentBase\n\n\nclass Core(ComponentBase):\n    pass\n"
  },
  {
    "path": "pentagon/component/core/files/.gitignore",
    "content": ".DS_Store\n.terraform\n*.pyc\n*.pem\n*.pub\n*secret*.yml\nroles\nhelm\n"
  },
  {
    "path": "pentagon/component/core/files/README.md",
    "content": "# Documentation\n\n## Getting Started\n\n### System Requirements\n\nThis repository relies on system tools and Python libraries for your system.\n\nSystem Tools:\n* [Terraform](https://www.terraform.io)\n* [kops](https://github.com/kubernetes/kops)\n* [kubectl](https://kubernetes.io/docs/user-guide/kubectl-overview/)\n\nKubectl should try to match the cluster version.\n\nPython Libraries:\n\nLibraries required can be installed with `pip install -r requirements.txt`. These can be installed into a [virtualenv](https://virtualenv.pypa.io/en/stable/) to isolate the installation from your system.\n\n### Shell environment\n\nShell variables are used extensively for configuration of the above tools. Where possible these are checked into the repository. Secrets/credentials must be obtained separately.\n\nSome shell variables are stored as YAML and require [shyaml](https://github.com/0k/shyaml) installed as a Python requirement above.\n\nFirst create the `config/private/secrets.yml` file and supply values:\n\n```\nAWS_ACCESS_KEY:\nAWS_ACCESS_KEY_ID:\nTF_VAR_aws_access_key:\nAWS_SECRET_KEY:\nAWS_SECRET_ACCESS_KEY:\nTF_VAR_aws_secret_key:\n```\n\nSet `INFRASTRUCTURE_REPO` to the location of this project then source the environment variable setup script:\n\n```\nINFRASTRUCTURE_REPO=\"$(pwd)\"\nsource config/local/env-vars.sh\nsource default/account/vars.sh\n```\n\nPer AWS account variables are in `default/account/vars.sh` used for creating the Kubernetes cluster but not needed for most other operations. When creating a cluster the `kops.sh` file\n\nPer user configuration needs to be generated. These files cannot directly use the `$INFRASTRUCTURE_REPO` environment variable for various reasons. 
The `config/local/local-config-init` script will generate the files needed from templates in that same folder.\n\n```\n./config/local/local-config-init\n```\n\n### VPC\n\nThe VPC Terraform code is in `default/vpc`.\n\n### Kubernetes\n\nIf you have an existing Kubernetes Config you can place the file as `config/private/kube_config`\n\nIf you are creating a cluster then the `default/clusters/*/cluster-config/kops.sh` file has steps to do so.\n"
  },
  {
    "path": "pentagon/component/core/files/ansible-requirements.yml",
    "content": "---\n\n##\n# Dependents not located in galaxy.ansible.com need to precede their parents\n##\n- src: \"git+https://github.com/reactiveops/ansible-get-vpc-facts.git\"\n  name: reactiveops.get-vpc-facts\n  version: 1.1.3\n\n##\n# End dependents not located in galaxy.ansible.com\n##\n\n- src: \"git+https://github.com/reactiveops/ansible-vpn-stack.git\"\n  name: reactiveops.vpn-stack\n  version: 1.2.0\n\n- src: \"https://github.com/Stouts/Stouts.users.git\"\n  version: 1.2.0\n  name: Stouts.users-master\n\n- src: \"https://github.com/reactiveops/Stouts.openvpn.git\"\n  version: 3.0.0\n  name: Stouts.openvpn-master\n\n- src: \"git+https://git@github.com/reactiveops/ansible-iam-role.git\"\n  version: 1.0.0\n  name: reactiveops.iam-role\n"
  },
  {
    "path": "pentagon/component/core/files/inventory/__init__.py",
    "content": ""
  },
  {
    "path": "pentagon/component/core/files/plugins/filter_plugins/flatten.py",
    "content": "# This function will take an irregular list composed of lists \n# and flatten it\n\nfrom compiler.ast import flatten\n\nclass FilterModule (object):\n    def filters(self):\n        return {\n            \"flatten\": flatten\n        }\n"
  },
  {
    "path": "pentagon/component/core/files/plugins/inventory/base",
    "content": "# https://github.com/ansible/ansible-modules-core/issues/2601#issuecomment-189503881\n[all:vars]\nansible_python_interpreter = /usr/bin/env python\n\n[localhost]\n127.0.0.1\n"
  },
  {
    "path": "pentagon/component/core/files/plugins/inventory/ec2.ini",
    "content": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these lines\n# and edit edit eucalyptus_host to be the host name of your cloud controller\n#eucalyptus = True\n#eucalyptus_host = clc.cloud.domain.org\n\n# AWS regions to make calls to. Set this to 'all' to make request to all regions\n# in AWS and merge the results together. Alternatively, set this to a comma\n# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'\nregions = all\nregions_exclude = us-gov-west-1,cn-north-1\n\n# When generating inventory, Ansible needs to know how to address a server.\n# Each EC2 instance has a lot of variables associated with it. Here is the list:\n#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance\n# Below are 2 variables that are used as the address of a server:\n#   - destination_variable\n#   - vpc_destination_variable\n\n# This is the normal destination variable to use. If you are running Ansible\n# from outside EC2, then 'public_dns_name' makes the most sense. If you are\n# running Ansible from within EC2, then perhaps you want to use the internal\n# address, and should set this to 'private_dns_name'. The key of an EC2 tag\n# may optionally be used; however the boto instance variables hold precedence\n# in the event of a collision.\n#destination_variable = public_dns_name\ndestination_variable = private_dns_name\n\n# For server inside a VPC, using DNS names may not make sense. When an instance\n# has 'subnet_id' set, this variable is used. If the subnet is public, setting\n# this to 'ip_address' will return the public IP address. For instances in a\n# private subnet, this should be set to 'private_ip_address', and Ansible must\n# be run from within EC2. The key of an EC2 tag may optionally be used; however\n# the boto instance variables hold precedence in the event of a collision.\n# WARNING: - instances that are in the private vpc, _without_ public ip address\n# will not be listed in the inventory until You set:\n#vpc_destination_variable = ip_address\nvpc_destination_variable = private_ip_address\n\n# To tag instances on EC2 with the resource records that point to them from\n# Route53, uncomment and set 'route53' to True.\nroute53 = False\n\n# To exclude RDS instances from the inventory, uncomment and set to False.\nrds = False\n\n# To exclude ElastiCache instances from the inventory, uncomment and set to False.\nelasticache = False\n\n# Additionally, you can specify the list of zones to exclude looking up in\n# 'route53_excluded_zones' as a comma-separated list.\n# route53_excluded_zones = samplezone1.com, samplezone2.com\n\n# By default, only EC2 instances in the 'running' state are returned. Set\n# 'all_instances' to True to return all instances regardless of state.\nall_instances = False\n\n# By default, only EC2 instances in the 'running' state are returned. Specify\n# EC2 instance states to return as a comma-separated list. This\n# option is overriden when 'all_instances' is True.\n# instance_states = pending, running, shutting-down, terminated, stopping, stopped\n\n# By default, only RDS instances in the 'available' state are returned.  Set\n# 'all_rds_instances' to True return all RDS instances regardless of state.\nall_rds_instances = False\n\n# By default, only ElastiCache clusters and nodes in the 'available' state\n# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'\n# to True return all ElastiCache clusters and nodes, regardless of state.\n#\n# Note that all_elasticache_nodes only applies to listed clusters. That means\n# if you set all_elastic_clusters to false, no node will be return from\n# unavailable clusters, regardless of the state and to what you set for\n# all_elasticache_nodes.\nall_elasticache_replication_groups = False\nall_elasticache_clusters = False\nall_elasticache_nodes = False\n\n# API calls to EC2 are slow. For this reason, we cache the results of an API\n# call. Set this to the path you want cache files to be written to. Two files\n# will be written to this directory:\n#   - ansible-ec2.cache\n#   - ansible-ec2.index\ncache_path = ~/.ansible/tmp\n\n# The number of seconds a cache file is considered valid. After this many\n# seconds, a new API call will be made, and the cache file will be updated.\n# To disable the cache, set this value to 0\ncache_max_age = 300\n\n# Organize groups into a nested/hierarchy instead of a flat namespace.\nnested_groups = False\n\n# The EC2 inventory output can become very large. To manage its size,\n# configure which groups should be created.\ngroup_by_instance_id = True\ngroup_by_region = True\ngroup_by_availability_zone = True\ngroup_by_ami_id = True\ngroup_by_instance_type = True\ngroup_by_key_pair = True\ngroup_by_vpc_id = True\ngroup_by_security_group = True\ngroup_by_tag_keys = True\ngroup_by_tag_none = True\ngroup_by_route53_names = True\ngroup_by_rds_engine = True\ngroup_by_rds_parameter_group = True\ngroup_by_elasticache_engine = True\ngroup_by_elasticache_cluster = True\ngroup_by_elasticache_parameter_group = True\ngroup_by_elasticache_replication_group = True\n\n# If you only want to include hosts that match a certain regular expression\n# pattern_include = staging-*\n\n# If you want to exclude any hosts that match a certain regular expression\n# pattern_exclude = staging-*\n\n# Instance filters can be used to control which instances are retrieved for\n# inventory. For the full list of possible filters, please read the EC2 API\n# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters\n# Filters are key/value pairs separated by '=', to list multiple filters use\n# a list separated by commas. See examples below.\n\n# Retrieve only instances with (key=value) env=staging tag\n# instance_filters = tag:env=staging\n\n# Retrieve only instances with role=webservers OR role=dbservers tag\n# instance_filters = tag:role=webservers,tag:role=dbservers\n\n# Retrieve only t1.micro instances OR instances with tag env=staging\n# instance_filters = instance-type=t1.micro,tag:env=staging\n\n# You can use wildcards in filter values also. Below will list instances which\n# tag Name value matches webservers1*\n# (ex. webservers15, webservers1a, webservers123 etc)\n# instance_filters = tag:Name=webservers1*\n"
  },
  {
    "path": "pentagon/component/core/files/plugins/inventory/ec2.py",
    "content": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ansible can understand by making API request to\nAWS EC2 using the Boto library.\n\nNOTE: This script assumes Ansible is being executed where the environment\nvariables needed for Boto have already been set:\n    export AWS_ACCESS_KEY_ID='AK123'\n    export AWS_SECRET_ACCESS_KEY='abc123'\n\nThis script also assumes there is an ec2.ini file alongside it.  To specify a\ndifferent path to ec2.ini, define the EC2_INI_PATH environment variable:\n\n    export EC2_INI_PATH=/path/to/my_ec2.ini\n\nIf you're using eucalyptus you need to set the above variables and\nyou need to define:\n\n    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus\n\nFor more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html\n\nWhen run against a specific host, this script returns the following variables:\n - ec2_ami_launch_index\n - ec2_architecture\n - ec2_association\n - ec2_attachTime\n - ec2_attachment\n - ec2_attachmentId\n - ec2_client_token\n - ec2_deleteOnTermination\n - ec2_description\n - ec2_deviceIndex\n - ec2_dns_name\n - ec2_eventsSet\n - ec2_group_name\n - ec2_hypervisor\n - ec2_id\n - ec2_image_id\n - ec2_instanceState\n - ec2_instance_type\n - ec2_ipOwnerId\n - ec2_ip_address\n - ec2_item\n - ec2_kernel\n - ec2_key_name\n - ec2_launch_time\n - ec2_monitored\n - ec2_monitoring\n - ec2_networkInterfaceId\n - ec2_ownerId\n - ec2_persistent\n - ec2_placement\n - ec2_platform\n - ec2_previous_state\n - ec2_private_dns_name\n - ec2_private_ip_address\n - ec2_publicIp\n - ec2_public_dns_name\n - ec2_ramdisk\n - ec2_reason\n - ec2_region\n - ec2_requester_id\n - ec2_root_device_name\n - ec2_root_device_type\n - ec2_security_group_ids\n - ec2_security_group_names\n - ec2_shutdown_state\n - ec2_sourceDestCheck\n - ec2_spot_instance_request_id\n - ec2_state\n - ec2_state_code\n - ec2_state_reason\n - ec2_status\n - ec2_subnet_id\n - ec2_tenancy\n - ec2_virtualization_type\n - ec2_vpc_id\n\nThese variables are pulled out of a boto.ec2.instance object. There is a lack of\nconsistency with variable spellings (camelCase and underscores) since this\njust loops through all variables the object exposes. It is preferred to use the\nones with underscores when multiple exist.\n\nIn addition, if an instance has AWS Tags associated with it, each tag is a new\nvariable named:\n - ec2_tag_[Key] = [Value]\n\nSecurity groups are comma-separated in 'ec2_security_group_ids' and\n'ec2_security_group_names'.\n'''\n\n# (c) 2012, Peter Sankauskas\n#\n# This file is part of Ansible,\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.\n\n######################################################################\n\nimport sys\nimport os\nimport argparse\nimport re\nfrom time import time\nimport boto\nfrom boto import ec2\nfrom boto import rds\nfrom boto import elasticache\nfrom boto import route53\nimport six\n\nfrom six.moves import configparser\nfrom collections import defaultdict\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n\nclass Ec2Inventory(object):\n    def _empty_inventory(self):\n        return {\"_meta\" : {\"hostvars\" : {}}}\n\n    def __init__(self):\n        ''' Main execution path '''\n\n        # Inventory grouped by instance IDs, tags, security groups, regions,\n        # and availability zones\n        self.inventory = self._empty_inventory()\n\n        # Index of hostname (address) to instance ID\n        self.index = {}\n\n        # Read settings and parse CLI arguments\n        self.read_settings()\n        self.parse_cli_args()\n\n        # Cache\n        if self.args.refresh_cache:\n            self.do_api_calls_update_cache()\n        elif not self.is_cache_valid():\n            self.do_api_calls_update_cache()\n\n        # Data to print\n        if self.args.host:\n            data_to_print = self.get_host_info()\n\n        elif self.args.list:\n            # Display list of instances for inventory\n            if self.inventory == self._empty_inventory():\n                data_to_print = self.get_inventory_from_cache()\n            else:\n                data_to_print = self.json_format_dict(self.inventory, True)\n\n        print(data_to_print)\n\n\n    def is_cache_valid(self):\n        ''' Determines if the cache files have expired, or if it is still valid '''\n\n        if os.path.isfile(self.cache_path_cache):\n            mod_time = os.path.getmtime(self.cache_path_cache)\n            current_time = time()\n            if (mod_time + self.cache_max_age) > current_time:\n                if os.path.isfile(self.cache_path_index):\n                    return True\n\n        return False\n\n\n    def read_settings(self):\n        ''' Reads the settings from the ec2.ini file '''\n        if six.PY2:\n            config = configparser.SafeConfigParser()\n        else:\n            config = configparser.ConfigParser()\n        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')\n        ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))\n        config.read(ec2_ini_path)\n\n        # is eucalyptus?\n        self.eucalyptus_host = None\n        self.eucalyptus = False\n        if config.has_option('ec2', 'eucalyptus'):\n            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')\n        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):\n            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')\n\n        # Regions\n        self.regions = []\n        configRegions = config.get('ec2', 'regions')\n        configRegions_exclude = config.get('ec2', 'regions_exclude')\n        if (configRegions == 'all'):\n            if self.eucalyptus_host:\n                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)\n            else:\n                for regionInfo in ec2.regions():\n                    if regionInfo.name not in configRegions_exclude:\n                        self.regions.append(regionInfo.name)\n        else:\n            self.regions = configRegions.split(\",\")\n\n        # Destination addresses\n        self.destination_variable = config.get('ec2', 'destination_variable')\n        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')\n\n        # Route53\n        self.route53_enabled = config.getboolean('ec2', 'route53')\n        self.route53_excluded_zones = []\n        if config.has_option('ec2', 'route53_excluded_zones'):\n            self.route53_excluded_zones.extend(\n                config.get('ec2', 'route53_excluded_zones', '').split(','))\n\n        # Include RDS instances?\n        self.rds_enabled = True\n        if config.has_option('ec2', 'rds'):\n            self.rds_enabled = config.getboolean('ec2', 'rds')\n\n        # Include ElastiCache instances?\n        self.elasticache_enabled = True\n        if config.has_option('ec2', 'elasticache'):\n            self.elasticache_enabled = config.getboolean('ec2', 'elasticache')\n\n        # Return all EC2 instances?\n        if config.has_option('ec2', 'all_instances'):\n            self.all_instances = config.getboolean('ec2', 'all_instances')\n        else:\n            self.all_instances = False\n\n        # Instance states to be gathered in inventory. Default is 'running'.\n        # Setting 'all_instances' to 'yes' overrides this option.\n        ec2_valid_instance_states = [\n            'pending',\n            'running',\n            'shutting-down',\n            'terminated',\n            'stopping',\n            'stopped'\n        ]\n        self.ec2_instance_states = []\n        if self.all_instances:\n            self.ec2_instance_states = ec2_valid_instance_states\n        elif config.has_option('ec2', 'instance_states'):\n          for instance_state in config.get('ec2', 'instance_states').split(','):\n            instance_state = instance_state.strip()\n            if instance_state not in ec2_valid_instance_states:\n              continue\n            self.ec2_instance_states.append(instance_state)\n        else:\n          self.ec2_instance_states = ['running']\n\n        # Return all RDS instances? (if RDS is enabled)\n        if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:\n            self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')\n        else:\n            self.all_rds_instances = False\n\n        # Return all ElastiCache replication groups? (if ElastiCache is enabled)\n        if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:\n            self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')\n        else:\n            self.all_elasticache_replication_groups = False\n\n        # Return all ElastiCache clusters? (if ElastiCache is enabled)\n        if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:\n            self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')\n        else:\n            self.all_elasticache_clusters = False\n\n        # Return all ElastiCache nodes? (if ElastiCache is enabled)\n        if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:\n            self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')\n        else:\n            self.all_elasticache_nodes = False\n\n        # Cache related\n        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))\n        if not os.path.exists(cache_dir):\n            os.makedirs(cache_dir)\n\n        self.cache_path_cache = cache_dir + \"/ansible-ec2.cache\"\n        self.cache_path_index = cache_dir + \"/ansible-ec2.index\"\n        self.cache_max_age = config.getint('ec2', 'cache_max_age')\n\n        # Configure nested groups instead of flat namespace.\n        if config.has_option('ec2', 'nested_groups'):\n            self.nested_groups = config.getboolean('ec2', 'nested_groups')\n        else:\n            self.nested_groups = False\n\n        # Configure which groups should be created.\n        group_by_options = [\n            'group_by_instance_id',\n            'group_by_region',\n            'group_by_availability_zone',\n            'group_by_ami_id',\n            'group_by_instance_type',\n            'group_by_key_pair',\n            'group_by_vpc_id',\n            'group_by_security_group',\n            'group_by_tag_keys',\n            'group_by_tag_none',\n            'group_by_route53_names',\n            'group_by_rds_engine',\n            'group_by_rds_parameter_group',\n            'group_by_elasticache_engine',\n            'group_by_elasticache_cluster',\n            'group_by_elasticache_parameter_group',\n            'group_by_elasticache_replication_group',\n        ]\n        for option in group_by_options:\n            if config.has_option('ec2', option):\n                setattr(self, option, config.getboolean('ec2', option))\n            else:\n                setattr(self, option, True)\n\n        # Do we need to just include hosts that match a pattern?\n        try:\n            pattern_include = config.get('ec2', 'pattern_include')\n            if pattern_include and len(pattern_include) > 0:\n                self.pattern_include = re.compile(pattern_include)\n            else:\n                self.pattern_include = None\n        except configparser.NoOptionError as e:\n            self.pattern_include = None\n\n        # Do we need to exclude hosts that match a pattern?\n        try:\n            pattern_exclude = config.get('ec2', 'pattern_exclude');\n            if pattern_exclude and len(pattern_exclude) > 0:\n                self.pattern_exclude = re.compile(pattern_exclude)\n            else:\n                self.pattern_exclude = None\n        except configparser.NoOptionError as e:\n            self.pattern_exclude = None\n\n        # Instance filters (see boto and EC2 API docs). Ignore invalid filters.\n        self.ec2_instance_filters = defaultdict(list)\n        if config.has_option('ec2', 'instance_filters'):\n            for instance_filter in config.get('ec2', 'instance_filters', '').split(','):\n                instance_filter = instance_filter.strip()\n                if not instance_filter or '=' not in instance_filter:\n                    continue\n                filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]\n                if not filter_key:\n                    continue\n                self.ec2_instance_filters[filter_key].append(filter_value)\n\n    def parse_cli_args(self):\n        ''' Command line argument processing '''\n\n        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')\n        parser.add_argument('--list', action='store_true', default=True,\n                           help='List instances (default: True)')\n        parser.add_argument('--host', action='store',\n                           help='Get all the variables about a specific instance')\n        parser.add_argument('--refresh-cache', action='store_true', default=False,\n                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')\n        self.args = parser.parse_args()\n\n\n    def do_api_calls_update_cache(self):\n        ''' Do API calls to each region, and save data in cache files '''\n\n        if self.route53_enabled:\n            self.get_route53_records()\n\n        for region in self.regions:\n            self.get_instances_by_region(region)\n            if self.rds_enabled:\n                self.get_rds_instances_by_region(region)\n            if self.elasticache_enabled:\n                self.get_elasticache_clusters_by_region(region)\n                self.get_elasticache_replication_groups_by_region(region)\n\n        self.write_to_cache(self.inventory, self.cache_path_cache)\n        self.write_to_cache(self.index, self.cache_path_index)\n\n    def connect(self, region):\n        ''' create connection to api server'''\n        if self.eucalyptus:\n            conn = boto.connect_euca(host=self.eucalyptus_host)\n            conn.APIVersion = '2010-08-31'\n        else:\n            conn = ec2.connect_to_region(region)\n        # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n        if conn is None:\n            self.fail_with_error(\"region name: %s likely not supported, or AWS is down.  connection to region failed.\" % region)\n        return conn\n\n    def get_instances_by_region(self, region):\n        ''' Makes an AWS EC2 API call to the list of instances in a particular\n        region '''\n\n        try:\n            conn = self.connect(region)\n            reservations = []\n            if self.ec2_instance_filters:\n                for filter_key, filter_values in self.ec2_instance_filters.items():\n                    reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))\n            else:\n                reservations = conn.get_all_instances()\n\n            for reservation in reservations:\n                for instance in reservation.instances:\n                    self.add_instance(instance, region)\n\n        except boto.exception.BotoServerError as e:\n            if e.error_code == 'AuthFailure':\n                error = self.get_auth_error_message()\n            else:\n                backend = 'Eucalyptus' if self.eucalyptus else 'AWS' \n                error = \"Error connecting to %s backend.\\n%s\" % (backend, e.message)\n            self.fail_with_error(error, 'getting EC2 instances')\n\n    def get_rds_instances_by_region(self, region):\n        ''' Makes an AWS API call to the list of RDS instances in a particular\n        region '''\n\n        try:\n            conn = rds.connect_to_region(region)\n            if conn:\n                instances = conn.get_all_dbinstances()\n                for instance in instances:\n                    self.add_rds_instance(instance, region)\n        except boto.exception.BotoServerError as e:\n            error = e.reason\n\n            if e.error_code == 'AuthFailure':\n                error = self.get_auth_error_message()\n            if not e.reason == \"Forbidden\":\n                error = \"Looks like AWS RDS is down:\\n%s\" % e.message\n            self.fail_with_error(error, 'getting RDS instances')\n\n    def get_elasticache_clusters_by_region(self, region):\n        ''' Makes an AWS API call to the list of ElastiCache clusters (with\n        nodes' info) in a particular region.'''\n\n        # ElastiCache boto module doesn't provide a get_all_intances method,\n        # that's why we need to call describe directly (it would be called by\n        # the shorthand method anyway...)\n        try:\n            conn = elasticache.connect_to_region(region)\n            if conn:\n                # show_cache_node_info = True\n                # because we also want nodes' information\n                response = conn.describe_cache_clusters(None, None, None, True)\n\n        except boto.exception.BotoServerError as e:\n            error = e.reason\n\n            if e.error_code == 'AuthFailure':\n                error = self.get_auth_error_message()\n            if not e.reason == \"Forbidden\":\n                error = \"Looks like AWS ElastiCache is down:\\n%s\" % e.message\n            self.fail_with_error(error, 'getting ElastiCache clusters')\n\n        try:\n            # Boto also doesn't provide wrapper classes to CacheClusters or\n            # CacheNodes. Because of that wo can't make use of the get_list\n            # method in the AWSQueryConnection. Let's do the work manually\n            clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']\n\n        except KeyError as e:\n            error = \"ElastiCache query to AWS failed (unexpected format).\"\n            self.fail_with_error(error, 'getting ElastiCache clusters')\n\n        for cluster in clusters:\n            self.add_elasticache_cluster(cluster, region)\n\n    def get_elasticache_replication_groups_by_region(self, region):\n        ''' Makes an AWS API call to the list of ElastiCache replication groups\n        in a particular region.'''\n\n        # ElastiCache boto module doesn't provide a get_all_intances method,\n        # that's why we need to call describe directly (it would be called by\n        # the shorthand method anyway...)\n        try:\n            conn = elasticache.connect_to_region(region)\n            if conn:\n                response = conn.describe_replication_groups()\n\n        except boto.exception.BotoServerError as e:\n            error = e.reason\n\n            if e.error_code == 'AuthFailure':\n                error = self.get_auth_error_message()\n            if not e.reason == \"Forbidden\":\n                error = \"Looks like AWS ElastiCache [Replication Groups] is down:\\n%s\" % e.message\n            self.fail_with_error(error, 'getting ElastiCache clusters')\n\n        try:\n            # Boto also doesn't provide wrapper classes to ReplicationGroups\n            # Because of that wo can't make use of the get_list method in the\n            # AWSQueryConnection. Let's do the work manually\n            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']\n\n        except KeyError as e:\n            error = \"ElastiCache [Replication Groups] query to AWS failed (unexpected format).\"\n            self.fail_with_error(error, 'getting ElastiCache clusters')\n\n        for replication_group in replication_groups:\n            self.add_elasticache_replication_group(replication_group, region)\n\n    def get_auth_error_message(self):\n        ''' create an informative error message if there is an issue authenticating'''\n        errors = [\"Authentication error retrieving ec2 inventory.\"]\n        if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:\n            errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')\n        else:\n            errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')\n\n        boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']\n        boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))\n        if len(boto_config_found) > 0:\n            errors.append(\" - Boto configs found at '%s', but the credentials contained may not be correct\" % ', '.join(boto_config_found))\n        else:\n            errors.append(\" - No Boto config found at any expected location '%s'\" % ', '.join(boto_paths))\n\n        return '\\n'.join(errors)\n\n    def fail_with_error(self, err_msg, err_operation=None):\n        '''log an error to std err for ansible-playbook to consume and exit'''\n        if err_operation:\n            err_msg = 'ERROR: \"{err_msg}\", while: {err_operation}'.format(\n                err_msg=err_msg, err_operation=err_operation)\n        sys.stderr.write(err_msg)\n        sys.exit(1)\n\n    def get_instance(self, region, instance_id):\n        conn = self.connect(region)\n\n        reservations = conn.get_all_instances([instance_id])\n        for reservation in reservations:\n            for instance in reservation.instances:\n                return instance\n\n    def add_instance(self, instance, region):\n        ''' Adds an instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only return instances with desired instance states\n        if instance.state not in self.ec2_instance_states:\n            return\n\n        # Select the best destination address\n        if instance.subnet_id:\n            dest = getattr(instance, self.vpc_destination_variable, None)\n            if dest is None:\n                dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)\n        else:\n            dest = getattr(instance, self.destination_variable, None)\n            if dest is None:\n                dest = getattr(instance, 'tags').get(self.destination_variable, None)\n\n        if not dest:\n            # Skip instances we cannot address (e.g. private VPC subnet)\n            return\n\n        # if we only want to include hosts that match a pattern, skip those that don't\n        if self.pattern_include and not self.pattern_include.match(dest):\n            return\n\n        # if we need to exclude hosts that match a pattern, skip those\n        if self.pattern_exclude and self.pattern_exclude.match(dest):\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        if self.group_by_instance_id:\n            self.inventory[instance.id] = [dest]\n            if self.nested_groups:\n                self.push_group(self.inventory, 'instances', instance.id)\n\n        # Inventory: Group by region\n        if self.group_by_region:\n            self.push(self.inventory, region, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'regions', region)\n\n        # Inventory: Group by availability zone\n        if self.group_by_availability_zone:\n            self.push(self.inventory, instance.placement, dest)\n            if self.nested_groups:\n                if self.group_by_region:\n                    self.push_group(self.inventory, region, instance.placement)\n                self.push_group(self.inventory, 'zones', instance.placement)\n\n        # Inventory: Group by Amazon Machine Image (AMI) ID\n        if self.group_by_ami_id:\n            ami_id = self.to_safe(instance.image_id)\n            self.push(self.inventory, ami_id, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'images', ami_id)\n\n        # Inventory: Group by instance type\n        if self.group_by_instance_type:\n            type_name = self.to_safe('type_' + instance.instance_type)\n            self.push(self.inventory, type_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'types', type_name)\n\n        # Inventory: Group by key pair\n        if self.group_by_key_pair and instance.key_name:\n            key_name = self.to_safe('key_' + instance.key_name)\n            self.push(self.inventory, key_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'keys', key_name)\n\n        # Inventory: Group by VPC\n        if self.group_by_vpc_id and instance.vpc_id:\n            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)\n            self.push(self.inventory, vpc_id_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'vpcs', vpc_id_name)\n\n        # Inventory: Group by security group\n        if self.group_by_security_group:\n            try:\n                for group in instance.groups:\n                    key = self.to_safe(\"security_group_\" + group.name)\n                    self.push(self.inventory, key, dest)\n                    if self.nested_groups:\n                        self.push_group(self.inventory, 'security_groups', key)\n            except AttributeError:\n                self.fail_with_error('\\n'.join(['Package boto seems a bit older.', \n                                            'Please upgrade boto >= 2.3.0.']))\n\n        # Inventory: Group by tag keys\n        if self.group_by_tag_keys:\n            for k, v in instance.tags.items():\n                if v:\n                    key = self.to_safe(\"tag_\" + k + \"=\" + v)\n                else:\n                    key = self.to_safe(\"tag_\" + k)\n                self.push(self.inventory, key, dest)\n                if self.nested_groups:\n                    self.push_group(self.inventory, 'tags', self.to_safe(\"tag_\" + k))\n                    self.push_group(self.inventory, self.to_safe(\"tag_\" + k), key)\n\n        # Inventory: Group by Route53 domain names if enabled\n        if self.route53_enabled and self.group_by_route53_names:\n            route53_names = self.get_instance_route53_names(instance)\n            for name in route53_names:\n                self.push(self.inventory, name, dest)\n                if self.nested_groups:\n                    self.push_group(self.inventory, 'route53', name)\n\n        # Global Tag: instances without tags\n        if self.group_by_tag_none and len(instance.tags) == 0:\n            self.push(self.inventory, 'tag_none', dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'tags', 'tag_none')\n\n        # Global Tag: tag all EC2 instances\n        self.push(self.inventory, 'ec2', dest)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = self.get_host_info_dict_from_instance(instance)\n\n\n    def add_rds_instance(self, instance, region):\n        ''' Adds an RDS instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want available instances unless all_rds_instances is True\n        if not self.all_rds_instances and instance.status != 'available':\n            return\n\n        # Select the best destination address\n        dest = instance.endpoint[0]\n\n        if not dest:\n            # Skip instances we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        if self.group_by_instance_id:\n            self.inventory[instance.id] = [dest]\n            if self.nested_groups:\n                self.push_group(self.inventory, 'instances', instance.id)\n\n        # Inventory: Group by region\n        if self.group_by_region:\n            self.push(self.inventory, region, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'regions', region)\n\n        # Inventory: Group by availability zone\n        if self.group_by_availability_zone:\n            self.push(self.inventory, instance.availability_zone, dest)\n            if self.nested_groups:\n                if self.group_by_region:\n                    self.push_group(self.inventory, region, instance.availability_zone)\n                self.push_group(self.inventory, 'zones', instance.availability_zone)\n\n        # Inventory: Group by instance type\n        if self.group_by_instance_type:\n            type_name = self.to_safe('type_' + instance.instance_class)\n            self.push(self.inventory, type_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'types', type_name)\n\n        # Inventory: Group by VPC\n        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:\n            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)\n            self.push(self.inventory, vpc_id_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'vpcs', vpc_id_name)\n\n        # Inventory: Group by security group\n        if self.group_by_security_group:\n            try:\n                if instance.security_group:\n                    key = self.to_safe(\"security_group_\" + instance.security_group.name)\n                    
self.push(self.inventory, key, dest)\n                    if self.nested_groups:\n                        self.push_group(self.inventory, 'security_groups', key)\n\n            except AttributeError:\n                self.fail_with_error('\\n'.join(['Package boto seems a bit older.', \n                                            'Please upgrade boto >= 2.3.0.']))\n\n\n        # Inventory: Group by engine\n        if self.group_by_rds_engine:\n            self.push(self.inventory, self.to_safe(\"rds_\" + instance.engine), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'rds_engines', self.to_safe(\"rds_\" + instance.engine))\n\n        # Inventory: Group by parameter group\n        if self.group_by_rds_parameter_group:\n            self.push(self.inventory, self.to_safe(\"rds_parameter_group_\" + instance.parameter_group.name), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe(\"rds_parameter_group_\" + instance.parameter_group.name))\n\n        # Global Tag: all RDS instances\n        self.push(self.inventory, 'rds', dest)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = self.get_host_info_dict_from_instance(instance)\n\n    def add_elasticache_cluster(self, cluster, region):\n        ''' Adds an ElastiCache cluster to the inventory and index, as long as\n        it's nodes are addressable '''\n\n        # Only want available clusters unless all_elasticache_clusters is True\n        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':\n            return\n\n        # Select the best destination address\n        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:\n            # Memcached cluster\n            dest = cluster['ConfigurationEndpoint']['Address']\n            is_redis = False\n        else:\n            # Redis sigle node cluster\n            # Because all Redis 
clusters are single nodes, we'll merge the\n            # info from the cluster with info about the node\n            dest = cluster['CacheNodes'][0]['Endpoint']['Address']\n            is_redis = True\n\n        if not dest:\n            # Skip clusters we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, cluster['CacheClusterId']]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        if self.group_by_instance_id:\n            self.inventory[cluster['CacheClusterId']] = [dest]\n            if self.nested_groups:\n                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])\n\n        # Inventory: Group by region\n        if self.group_by_region and not is_redis:\n            self.push(self.inventory, region, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'regions', region)\n\n        # Inventory: Group by availability zone\n        if self.group_by_availability_zone and not is_redis:\n            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)\n            if self.nested_groups:\n                if self.group_by_region:\n                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])\n                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])\n\n        # Inventory: Group by node type\n        if self.group_by_instance_type and not is_redis:\n            type_name = self.to_safe('type_' + cluster['CacheNodeType'])\n            self.push(self.inventory, type_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'types', type_name)\n\n        # Inventory: Group by VPC (information not available in the current\n        # AWS API version for ElastiCache)\n\n        # Inventory: Group by security group\n        if self.group_by_security_group and not is_redis:\n\n          
  # Check for the existence of the 'SecurityGroups' key and also if\n            # this key has some value. When the cluster is not placed in a SG\n            # the query can return None here and cause an error.\n            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:\n                for security_group in cluster['SecurityGroups']:\n                    key = self.to_safe(\"security_group_\" + security_group['SecurityGroupId'])\n                    self.push(self.inventory, key, dest)\n                    if self.nested_groups:\n                        self.push_group(self.inventory, 'security_groups', key)\n\n        # Inventory: Group by engine\n        if self.group_by_elasticache_engine and not is_redis:\n            self.push(self.inventory, self.to_safe(\"elasticache_\" + cluster['Engine']), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))\n\n        # Inventory: Group by parameter group\n        if self.group_by_elasticache_parameter_group:\n            self.push(self.inventory, self.to_safe(\"elasticache_parameter_group_\" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))\n\n        # Inventory: Group by replication group\n        if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:\n            self.push(self.inventory, self.to_safe(\"elasticache_replication_group_\" + cluster['ReplicationGroupId']), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))\n\n        # Global Tag: all ElastiCache clusters\n        self.push(self.inventory, 
'elasticache_clusters', cluster['CacheClusterId'])\n\n        host_info = self.get_host_info_dict_from_describe_dict(cluster)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = host_info\n\n        # Add the nodes\n        for node in cluster['CacheNodes']:\n            self.add_elasticache_node(node, cluster, region)\n\n    def add_elasticache_node(self, node, cluster, region):\n        ''' Adds an ElastiCache node to the inventory and index, as long as\n        it is addressable '''\n\n        # Only want available nodes unless all_elasticache_nodes is True\n        if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':\n            return\n\n        # Select the best destination address\n        dest = node['Endpoint']['Address']\n\n        if not dest:\n            # Skip nodes we cannot address (e.g. private VPC subnet)\n            return\n\n        node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])\n\n        # Add to index\n        self.index[dest] = [region, node_id]\n\n        # Inventory: Group by node ID (always a group of 1)\n        if self.group_by_instance_id:\n            self.inventory[node_id] = [dest]\n            if self.nested_groups:\n                self.push_group(self.inventory, 'instances', node_id)\n\n        # Inventory: Group by region\n        if self.group_by_region:\n            self.push(self.inventory, region, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'regions', region)\n\n        # Inventory: Group by availability zone\n        if self.group_by_availability_zone:\n            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)\n            if self.nested_groups:\n                if self.group_by_region:\n                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])\n                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])\n\n        # 
Inventory: Group by node type\n        if self.group_by_instance_type:\n            type_name = self.to_safe('type_' + cluster['CacheNodeType'])\n            self.push(self.inventory, type_name, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'types', type_name)\n\n        # Inventory: Group by VPC (information not available in the current\n        # AWS API version for ElastiCache)\n\n        # Inventory: Group by security group\n        if self.group_by_security_group:\n\n            # Check for the existence of the 'SecurityGroups' key and also if\n            # this key has some value. When the cluster is not placed in a SG\n            # the query can return None here and cause an error.\n            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:\n                for security_group in cluster['SecurityGroups']:\n                    key = self.to_safe(\"security_group_\" + security_group['SecurityGroupId'])\n                    self.push(self.inventory, key, dest)\n                    if self.nested_groups:\n                        self.push_group(self.inventory, 'security_groups', key)\n\n        # Inventory: Group by engine\n        if self.group_by_elasticache_engine:\n            self.push(self.inventory, self.to_safe(\"elasticache_\" + cluster['Engine']), dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'elasticache_engines', self.to_safe(\"elasticache_\" + cluster['Engine']))\n\n        # Inventory: Group by parameter group (done at cluster level)\n\n        # Inventory: Group by replication group (done at cluster level)\n\n        # Inventory: Group by ElastiCache Cluster\n        if self.group_by_elasticache_cluster:\n            self.push(self.inventory, self.to_safe(\"elasticache_cluster_\" + cluster['CacheClusterId']), dest)\n\n        # Global Tag: all ElastiCache nodes\n        self.push(self.inventory, 'elasticache_nodes', dest)\n\n      
  host_info = self.get_host_info_dict_from_describe_dict(node)\n\n        if dest in self.inventory[\"_meta\"][\"hostvars\"]:\n            self.inventory[\"_meta\"][\"hostvars\"][dest].update(host_info)\n        else:\n            self.inventory[\"_meta\"][\"hostvars\"][dest] = host_info\n\n    def add_elasticache_replication_group(self, replication_group, region):\n        ''' Adds an ElastiCache replication group to the inventory and index '''\n\n        # Only want available clusters unless all_elasticache_replication_groups is True\n        if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':\n            return\n\n        # Select the best destination address (PrimaryEndpoint)\n        dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']\n\n        if not dest:\n            # Skip clusters we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, replication_group['ReplicationGroupId']]\n\n        # Inventory: Group by ID (always a group of 1)\n        if self.group_by_instance_id:\n            self.inventory[replication_group['ReplicationGroupId']] = [dest]\n            if self.nested_groups:\n                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])\n\n        # Inventory: Group by region\n        if self.group_by_region:\n            self.push(self.inventory, region, dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'regions', region)\n\n        # Inventory: Group by availability zone (doesn't apply to replication groups)\n\n        # Inventory: Group by node type (doesn't apply to replication groups)\n\n        # Inventory: Group by VPC (information not available in the current\n        # AWS API version for replication groups\n\n        # Inventory: Group by security group (doesn't apply to replication groups)\n        # Check this value in 
cluster level\n\n        # Inventory: Group by engine (replication groups are always Redis)\n        if self.group_by_elasticache_engine:\n            self.push(self.inventory, 'elasticache_redis', dest)\n            if self.nested_groups:\n                self.push_group(self.inventory, 'elasticache_engines', 'redis')\n\n        # Global Tag: all ElastiCache clusters\n        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])\n\n        host_info = self.get_host_info_dict_from_describe_dict(replication_group)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = host_info\n\n    def get_route53_records(self):\n        ''' Get and store the map of resource records to domain names that\n        point to them. '''\n\n        r53_conn = route53.Route53Connection()\n        all_zones = r53_conn.get_zones()\n\n        route53_zones = [ zone for zone in all_zones if zone.name[:-1]\n                          not in self.route53_excluded_zones ]\n\n        self.route53_records = {}\n\n        for zone in route53_zones:\n            rrsets = r53_conn.get_all_rrsets(zone.id)\n\n            for record_set in rrsets:\n                record_name = record_set.name\n\n                if record_name.endswith('.'):\n                    record_name = record_name[:-1]\n\n                for resource in record_set.resource_records:\n                    self.route53_records.setdefault(resource, set())\n                    self.route53_records[resource].add(record_name)\n\n\n    def get_instance_route53_names(self, instance):\n        ''' Check if an instance is referenced in the records we have from\n        Route53. If it is, return the list of domain names pointing to said\n        instance. If nothing points to it, return an empty list. 
'''\n\n        instance_attributes = [ 'public_dns_name', 'private_dns_name',\n                                'ip_address', 'private_ip_address' ]\n\n        name_list = set()\n\n        for attrib in instance_attributes:\n            try:\n                value = getattr(instance, attrib)\n            except AttributeError:\n                continue\n\n            if value in self.route53_records:\n                name_list.update(self.route53_records[value])\n\n        return list(name_list)\n\n    def get_host_info_dict_from_instance(self, instance):\n        instance_vars = {}\n        for key in vars(instance):\n            value = getattr(instance, key)\n            key = self.to_safe('ec2_' + key)\n\n            # Handle complex types\n            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518\n            if key == 'ec2__state':\n                instance_vars['ec2_state'] = instance.state or ''\n                instance_vars['ec2_state_code'] = instance.state_code\n            elif key == 'ec2__previous_state':\n                instance_vars['ec2_previous_state'] = instance.previous_state or ''\n                instance_vars['ec2_previous_state_code'] = instance.previous_state_code\n            elif type(value) in [int, bool]:\n                instance_vars[key] = value\n            elif isinstance(value, six.string_types):\n                instance_vars[key] = value.strip()\n            elif type(value) == type(None):\n                instance_vars[key] = ''\n            elif key == 'ec2_region':\n                instance_vars[key] = value.name\n            elif key == 'ec2__placement':\n                instance_vars['ec2_placement'] = value.zone\n            elif key == 'ec2_tags':\n                for k, v in value.items():\n                    key = self.to_safe('ec2_tag_' + k)\n                    instance_vars[key] = v\n            elif key == 'ec2_groups':\n        
        group_ids = []\n                group_names = []\n                for group in value:\n                    group_ids.append(group.id)\n                    group_names.append(group.name)\n                instance_vars[\"ec2_security_group_ids\"] = ','.join([str(i) for i in group_ids])\n                instance_vars[\"ec2_security_group_names\"] = ','.join([str(i) for i in group_names])\n            else:\n                pass\n                # TODO Product codes if someone finds them useful\n                #print key\n                #print type(value)\n                #print value\n\n        return instance_vars\n\n    def get_host_info_dict_from_describe_dict(self, describe_dict):\n        ''' Parses the dictionary returned by the API call into a flat list\n            of parameters. This method should be used only when 'describe' is\n            used directly because Boto doesn't provide specific classes. '''\n\n        # I really don't agree with prefixing everything with 'ec2'\n        # because EC2, RDS and ElastiCache are different services.\n        # I'm just following the pattern used until now to not break any\n        # compatibility.\n\n        host_info = {}\n        for key in describe_dict:\n            value = describe_dict[key]\n            key = self.to_safe('ec2_' + self.uncammelize(key))\n\n            # Handle complex types\n\n            # Target: Memcached Cache Clusters\n            if key == 'ec2_configuration_endpoint' and value:\n                host_info['ec2_configuration_endpoint_address'] = value['Address']\n                host_info['ec2_configuration_endpoint_port'] = value['Port']\n\n            # Target: Cache Nodes and Redis Cache Clusters (single node)\n            if key == 'ec2_endpoint' and value:\n                host_info['ec2_endpoint_address'] = value['Address']\n                host_info['ec2_endpoint_port'] = value['Port']\n\n            # Target: Redis Replication Groups\n            if key == 
'ec2_node_groups' and value:\n                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']\n                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']\n                replica_count = 0\n                for node in value[0]['NodeGroupMembers']:\n                    if node['CurrentRole'] == 'primary':\n                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']\n                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']\n                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']\n                    elif node['CurrentRole'] == 'replica':\n                        host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']\n                        host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']\n                        host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']\n                        replica_count += 1\n\n            # Target: Redis Replication Groups\n            if key == 'ec2_member_clusters' and value:\n                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])\n\n            # Target: All Cache Clusters\n            elif key == 'ec2_cache_parameter_group':\n                host_info[\"ec2_cache_node_ids_to_reboot\"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])\n                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']\n                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']\n\n            # Target: Almost everything\n            elif key == 'ec2_security_groups':\n\n                # Skip if SecurityGroups is None\n                # (it is possible to have the key defined but no value in it).\n                if value is not None:\n                    sg_ids = []\n                
    for sg in value:\n                        sg_ids.append(sg['SecurityGroupId'])\n                    host_info[\"ec2_security_group_ids\"] = ','.join([str(i) for i in sg_ids])\n\n            # Target: Everything\n            # Preserve booleans and integers\n            elif type(value) in [int, bool]:\n                host_info[key] = value\n\n            # Target: Everything\n            # Sanitize string values\n            elif isinstance(value, six.string_types):\n                host_info[key] = value.strip()\n\n            # Target: Everything\n            # Replace None by an empty string\n            elif type(value) == type(None):\n                host_info[key] = ''\n\n            else:\n                # Remove non-processed complex types\n                pass\n\n        return host_info\n\n    def get_host_info(self):\n        ''' Get variables about a specific host '''\n\n        if len(self.index) == 0:\n            # Need to load index from cache\n            self.load_index_from_cache()\n\n        if not self.args.host in self.index:\n            # try updating the cache\n            self.do_api_calls_update_cache()\n            if not self.args.host in self.index:\n                # host might not exist anymore\n                return self.json_format_dict({}, True)\n\n        (region, instance_id) = self.index[self.args.host]\n\n        instance = self.get_instance(region, instance_id)\n        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)\n\n    def push(self, my_dict, key, element):\n        ''' Push an element onto an array that may not have been defined in\n        the dict '''\n        group_info = my_dict.setdefault(key, [])\n        if isinstance(group_info, dict):\n            host_list = group_info.setdefault('hosts', [])\n            host_list.append(element)\n        else:\n            group_info.append(element)\n\n    def push_group(self, my_dict, key, element):\n        ''' Push a group as a 
child of another group. '''\n        parent_group = my_dict.setdefault(key, {})\n        if not isinstance(parent_group, dict):\n            parent_group = my_dict[key] = {'hosts': parent_group}\n        child_groups = parent_group.setdefault('children', [])\n        if element not in child_groups:\n            child_groups.append(element)\n\n    def get_inventory_from_cache(self):\n        ''' Reads the inventory from the cache file and returns it as a JSON\n        object '''\n\n        cache = open(self.cache_path_cache, 'r')\n        json_inventory = cache.read()\n        return json_inventory\n\n\n    def load_index_from_cache(self):\n        ''' Reads the index from the cache file sets self.index '''\n\n        cache = open(self.cache_path_index, 'r')\n        json_index = cache.read()\n        self.index = json.loads(json_index)\n\n\n    def write_to_cache(self, data, filename):\n        ''' Writes data in JSON format to a file '''\n\n        json_data = self.json_format_dict(data, True)\n        cache = open(filename, 'w')\n        cache.write(json_data)\n        cache.close()\n\n    def uncammelize(self, key):\n        temp = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', key)\n        return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', temp).lower()\n\n    def to_safe(self, word):\n        ''' Converts 'bad' characters in a string to underscores so they can be\n        used as Ansible groups '''\n\n        return re.sub(\"[^A-Za-z0-9\\_]\", \"_\", word)\n\n    def json_format_dict(self, data, pretty=False):\n        ''' Converts a dict to a JSON object and dumps it as a formatted\n        string '''\n\n        if pretty:\n            return json.dumps(data, sort_keys=True, indent=2)\n        else:\n            return json.dumps(data)\n\n\n# Run the script\nEc2Inventory()\n"
  },
  {
    "path": "pentagon/component/core/files/requirements.txt",
    "content": ""
  },
  {
    "path": "pentagon/component/gcp/__init__.py",
    "content": "import cluster\n"
  },
  {
    "path": "pentagon/component/gcp/cluster.py",
    "content": "\"\"\"\ncluster.py\nThis class has a lot of magic in ComponentBase from pentagon. It can be\ndifficult to discern what properties, and class vars are needed to make this\nrun correctly. Best advice I can give to future time travelers is use a\ndebugger or trial and error.\n\"\"\"\n\nfrom pentagon.component import ComponentBase\nimport pkg_resources\n\n\nclass Public(ComponentBase):\n    \"\"\"\n    Adds all the terraform modules that create a single public cluster with\n    one node pool. This includes the network, cluster and node pool.\n    \"\"\"\n\n    _required_parameters = [\n        'cluster_id',\n        'cluster_name',\n        'kubernetes_version',\n        'network_name',\n        'nodes_cidr',\n        'nodes_subnetwork_name',\n        'pods_cidr',\n        'project',\n        'region',\n        'services_cidr',\n        'tf_module_gcp_vpc_native_version',\n        'tf_module_gke_module_version',\n        'tf_module_nodepool_module_version',\n    ]\n\n    _defaults = {\n        'cluster_id': '1',\n        'network_name': 'kube',\n        'nodes_subnetwork_name': 'kube-nodes',\n        'region': 'us-central1',\n        'tf_module_gcp_vpc_native_version': 'default-v1.0.0',\n        'tf_module_gke_module_version': 'public-vpc-native-v1.0.0',\n        'tf_module_nodepool_module_version': 'node-pool-v1.0.0',\n    }\n\n    @property\n    def _files_directory(self):\n        _template_path = 'files/public_cluster'\n        if pkg_resources.resource_isdir(__name__, _template_path):\n            return pkg_resources.resource_filename(__name__, _template_path)\n        else:\n            raise StandardError(\n                'Could not find template path ({})'.format(_template_path))\n"
  },
  {
    "path": "pentagon/component/gcp/files/public_cluster/cluster.tf.jinja",
    "content": "# These local variables can be used as inputs to both a network and this GKE VPC Native cluster module.                                                                                                                                       \nlocals {\n  project            = \"{{ project }}\"\n  region             = \"{{ region }}\"\n  network_name       = \"{{ network_name }}\"\n  kubernetes_version = \"{{ kubernetes_version }}\"\n}\n\nmodule \"network_{{ cluster_id }}\" {\n  source = \"git@github.com:reactiveops/terraform-gcp-vpc-native.git//default?ref={{ tf_module_gcp_vpc_native_version }}\"\n\n  // base network parameters\n  network_name     = \"${local.network_name}\"\n  subnetwork_name  = \"{{ nodes_subnetwork_name }}\"\n  region           = \"${local.region}\"\n  enable_flow_logs = \"false\"\n\n  //specify the staging subnetwork primary and secondary CIDRs for IP aliasing\n  subnetwork_range    = \"{{ nodes_cidr }}\"\n  subnetwork_pods     = \"{{ pods_cidr }}\"\n  subnetwork_services = \"{{ services_cidr }}\"\n}\n\n# Ref: https://github.com/reactiveops/terraform-gcp-vpc-native\nmodule \"cluster_{{ cluster_id }}\" {\n  # Change the ref below to use a vX.Y.Z release instead of master.\n  source = \"git@github.com:/reactiveops/terraform-gke//public-vpc-native?ref={{ tf_module_gke_module_version }}\"\n\n  name                             = \"{{ cluster_name }}-{{ cluster_id }}\"\n  region                           = \"${local.region}\"\n  project                          = \"${local.project}\"\n  kubernetes_version               = \"${local.kubernetes_version}\"\n  network_name                     = \"${local.network_name}\"\n  nodes_subnetwork_name            = \"${module.network_{{ cluster_id }}.subnetwork}\"\n  pods_secondary_ip_range_name     = \"${module.network_{{ cluster_id }}.gke_pods_1}\"\n  services_secondary_ip_range_name = \"${module.network_{{ cluster_id }}.gke_services_1}\"\n\n  master_authorized_network_cidrs = [\n    {\n      # This 
is the module default, but demonstrates specifying this input.\n      cidr_block   = \"0.0.0.0/0\"\n      display_name = \"from the Internet\"\n    },\n  ]\n}\n\nmodule \"node_pool_{{ cluster_id }}\" {\n  source = \"git@github.com:/reactiveops/terraform-gke//node_pool?ref={{ tf_module_nodepool_module_version }}\"\n\n  name             = \"node-pool-1\"\n  region           = \"${module.cluster_{{ cluster_id }}.region}\"\n  gke_cluster_name = \"${module.cluster_{{ cluster_id }}.name}\"\n  machine_type     = \"n1-standard-2\"\n  min_node_count   = \"1\"\n  max_node_count   = \"1\"\n\n  # Match the Kubernetes version from the GKE cluster!\n  kubernetes_version = \"${module.cluster_{{ cluster_id }}.kubernetes_version}\"\n}\n"
  },
  {
    "path": "pentagon/component/inventory/__init__.py",
    "content": "import os\nimport json\nimport sys\nimport logging\nimport traceback\n\nfrom pentagon.component import ComponentBase\nfrom pentagon.component.aws_vpc import AWSVpc as Vpc\nfrom pentagon.component.vpn import Vpn\nfrom pentagon.component import gcp\nfrom pentagon.helpers import create_rsa_key\nfrom pentagon.defaults import AWSPentagonDefaults as PentagonDefaults\n\n\nclass Inventory(ComponentBase):\n\n    _defaults = {'cloud': 'aws'}\n    _required_parameters = [\n        'name',\n        'infrastructure_bucket',\n        'aws_access_key',\n        'aws_secret_key',\n        'project_name'\n    ]\n\n    def __init__(self, data, additional_args=None, **kwargs):\n        # HACK this if is to support start-project workflow\n        if 'cloud' in data.keys():\n            # HACK satisfy AWS requirements above in _required_parameters\n            if data['cloud'] == 'gcp':\n                data['aws_access_key'] = 'shouldneverbeused'\n                data['aws_secret_key'] = 'shouldneverbeused'\n        super(Inventory, self).__init__(data, additional_args, **kwargs)\n        self._ssh_keys = {\n            'admin_vpn_key': self._data.get('admin_vpn_key', PentagonDefaults.ssh['admin_vpn_key']),\n            'working_kube_key': self._data.get('working_kube_key', PentagonDefaults.ssh['working_kube_key']),\n            'production_kube_key': self._data.get('production_kube_key', PentagonDefaults.ssh['production_kube_key']),\n            'working_private_key': self._data.get('working_private_key', PentagonDefaults.ssh['working_private_key']),\n            'production_private_key': self._data.get('production_private_key', PentagonDefaults.ssh['production_private_key']),\n        }\n\n    @property\n    def _files_directory(self):\n        return sys.modules[self.__module__].__path__[0] + \"/files/common\"\n\n    def add(self, destination, overwrite=False):\n        \"\"\"Inventory version of Component.add Copies files and templates from <component>/files and 
templates the *.jinja files \"\"\"\n        if destination == './':\n            self._destination = self._data.get('name', './default')\n        else:\n            self._destination = destination\n\n        self._overwrite = overwrite\n        self._display_settings_to_user()\n\n        try:\n            self._add_files()\n\n            if self._data['cloud'].lower() == 'aws':\n                self._data['aws_region'] = self._data.get('aws_default_region')\n                self._data['account'] = os.path.basename(self._destination)\n                self._merge_data(self._ssh_keys)\n                self.__create_keys()\n\n                Aws(self._data).add(\"{}/terraform\".format(self._destination))\n                if self._data.get('configure_vpn', True):\n                    Vpn(self._data).add(\n                        \"{}/resources\".format(self._destination), overwrite=True)\n\n            if self._data['cloud'].lower() == 'gcp':\n                Gcp(self._data).add('{}/terraform/'.format(self._destination))\n\n            self._remove_init_file()\n            self._render_directory_templates()\n        except Exception as e:\n            logging.error(\"Error occurred configuring component\")\n            logging.error(e)\n            logging.debug(traceback.format_exc(e))\n            sys.exit(1)\n\n    def __create_keys(self):\n        key_path = \"{}/{}/\".format(self._destination, \"config/private\")\n\n        for key in self._ssh_keys:\n            logging.debug(\"Creating ssh key {}\".format(key))\n            key_name = \"{}\".format(self._ssh_keys[key])\n            if not os.path.isfile(\"{}{}\".format(key_path, key_name)):\n                create_rsa_key(key_name, key_path)\n            else:\n                logging.warn(\"Key {}{} exist!\".format(key_path, key_name))\n\n\nclass Aws(ComponentBase):\n\n    def add(self, destination):\n        Vpc(self._data).add(\"./{}\".format(destination), overwrite=True)\n\n\nclass Gcp(ComponentBase):\n\n    
def add(self, destination):\n        gcp.cluster.Public(self._data).add(\n            \"./{}\".format(destination), overwrite=True)\n"
  },
  {
    "path": "pentagon/component/inventory/files/__init__.py",
    "content": ""
  },
  {
    "path": "pentagon/component/inventory/files/common/clusters/__init__.py",
    "content": "#__init__.py\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/config/local/ansible.cfg-default.jinja",
    "content": "[defaults]\ninventory = $INFRASTRUCTURE_REPO/plugins/inventory\nroles_path = $INFRASTRUCTURE_REPO/roles\nfilter_plugins = $INFRASTRUCTURE_REPO/plugins/filter_plugins\nretry_files_save_path = ~/.ansible-retry\nhash_behavior = merge\n\n[ssh_connection]\n# this needs the path defined without the use of ENV variables\nssh_args = -F __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/ssh_config\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/config/local/local-config-init.jinja",
    "content": "#!/usr/bin/env bash\n\n# this script creates personalized copies of *-default files\n# the scripts primary purpose is to create config files which\n# populate paths for items which do not leverage the\n# $INFRASTRUCTURE_REPO environment\n# The script basically replaces all instances of the string\n# __INFRA_REPO_PATH__ with the contents of $INFRASTRUCTURE_REPO\n# and stores the output in the ../private directory (which are .gitignored)\n\nOUT_DIR=\"../private\"\n\nif [ -z \"${INFRASTRUCTURE_REPO}\" ]; then\n  echo \"INFRASTRUCTURE_REPO environment variable must be set\"\n  exit 1\nelif [ ! -d \"${INFRASTRUCTURE_REPO}\" ]; then\n  echo \"${INFRASTRUCTURE_REPO} doesn't exist or isn't a directory\"\n  exit 1\nfi\n\ncd \"${INFRASTRUCTURE_REPO}/inventory/{{ name }}/config/local\" || exit 1\n\nfor default_file in *-default; do\n  out_file=\"${OUT_DIR}/${default_file//-default}\"\n  echo -n \"${default_file} -> ${out_file} \"\n  if [ -e \"${out_file}\" ]; then\n    echo \"already exists. skipping.\"\n    continue\n  else\n    cat \"${default_file}\" | sed -e \"s@__INFRA_REPO_PATH__@$INFRASTRUCTURE_REPO@g\" > \"${out_file}\"\n    echo \"created.\"\n  fi\ndone\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/config/local/ssh_config-default.jinja",
    "content": "# for the kube / kops working instances\nHost 172.20.64.* 172.20.65.* 172.20.66.* 172.20.67.* 172.20.68.* 172.20.69.* 172.20.70.* 172.20.71.* 172.20.72.* 172.20.73.* 172.20.74.* 172.20.75.*\n  User admin\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ working_kube_key }}\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for the kube / kops prod instances\nHost 172.20.96.* 172.20.97.* 172.20.98.* 172.20.99.* 172.20.100.* 172.20.101.* 172.20.102.* 172.20.103.* 172.20.104.* 172.20.105.* 172.20.106.* 172.20.107.*\n  User admin\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ production_kube_key }}\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for instances in private_working\nHost 172.20.48.* 172.20.49.* 172.20.50.* 172.20.51.* 172.20.52.* 172.20.53.* 172.20.54.* 172.20.55.* 172.20.56.* 172.20.57.* 172.20.58.* 172.20.59.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ working_private_key }}\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for instances in private_prod\nHost 172.20.32.* 172.20.33.* 172.20.34.* 172.20.35.* 172.20.36.* 172.20.37.* 172.20.38.* 172.20.39.* 172.20.40.* 172.20.41.* 172.20.42.* 172.20.43.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ production_private_key }}\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for instances in admin\nHost 172.20.0.* 172.20.1.* 172.20.2.* 172.20.3.* 172.20.4.* 172.20.5.* 172.20.6.* 172.20.7.* 172.20.8.* 172.20.9.* 172.20.10.* 172.20.11.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ admin_vpn_key }}\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# VPN instance\n# Replace the '*' with the IP address of the VPN instance\nHost *\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{{ account }}/config/private/{{ 
admin_vpn_key }}\n  IdentitiesOnly yes\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null"
  },
  {
    "path": "pentagon/component/inventory/files/common/config/local/vars.yml.jinja",
    "content": "ANSIBLE_CONFIG: '${INFRASTRUCTURE_REPO}/inventory/${INVENTORY}/config/private/ansible.cfg'\nKUBECONFIG: '${INFRASTRUCTURE_REPO}/inventory/${INVENTORY}/config/private/kube_config'\nHELM_HOME: \"${INFRASTRUCTURE_REPO}/helm\"\nTILLER_NAMESPACE: \"tiller\"\n\n{%- if cloud | lower == 'aws' %}\nVPC_NAME: \"{{ vpc_name }}\"\n\nINFRASTRUCTURE_BUCKET: \"{{ infrastructure_bucket }}\"\n\nAWS_DEFAULT_REGION:  \"{{ aws_default_region }}\"\nAWS_AVAILABILITY_ZONES: \"{{ aws_availability_zones }}\"\nAWS_AVAILABILITY_ZONE_COUNT: \"{{ aws_availability_zone_count }}\"\n\nAWS_INVENTORY_PATH: '${INFRASTRUCTURE_REPO}/plugins/'\n\nKOPS_STATE_STORE_BUCKET: \"{{ infrastructure_bucket }}\"\nKOPS_STATE_STORE: \"s3://${KOPS_STATE_STORE_BUCKET}\"\n\nvpc_tag_name: \"{{ vpc_name }}\"\norg: \"{{ project_name }}\"\n\ncanonical_zone: \"{{ dns_zone }}\"\nvpn_bucket: \"{{project_name}}-vpn\"\n\n{%- elif cloud == 'gcp' %}\n\nCLOUDSDK_CORE_PROJECT: \"{{ gcp_project }}\"\nCLOUDSDK_COMPUTE_ZONE: \"{{ gcp_zone }}\"\nCLOUDSDK_COMPUTE_REGION: \"{{ gcp_region }}\"\n\n{%- endif %}\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/config/private/.gitignore",
    "content": "*\n!.gitignore\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/kubernetes/__init__.py",
    "content": "#__init__.py\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/terraform/.gitignore",
    "content": "*.tfplan\n*.tfstate\n*.tfstate.backup\n.terraform/\n.DS_Store\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/terraform/backend.tf.jinja",
    "content": "// terraform backend config\n\nterraform {\n{%- if cloud | lower == 'aws' %}\n  backend \"s3\" {\n    bucket = \"{{ infrastructure_bucket }}\"\n    key    = \"{{ name }}/terraform/tf.state\"\n    region = \"{{ aws_region }}\"\n  }\n{%- elif cloud == 'gcp' %}\n    backend \"gcs\" {\n        bucket  = \"{{ infrastructure_bucket }}\"\n        prefix  = \"{{ name }}/terraform/tf.state\"\n    }\n{%- endif %}\n}\n"
  },
  {
    "path": "pentagon/component/inventory/files/common/terraform/provider.tf.jinja",
    "content": "\n{%- if cloud | lower == 'aws' %}\nprovider \"aws\" {\n    # Configuration set in env vars $AWS_ACCESS_KEY_ID, $AWS_SECRET_ACCESS_KEY and, $AWS_DEFAULT_REGION\n}\n{%- elif cloud == 'gcp' %}\nprovider \"google\" {\n    version = \"~> 2.0\"\n    # Configuration set in env vars $GOOGLE_PROJECT, $GCLOUD_PROJECT\n}\n{%- endif %}\n"
  },
  {
    "path": "pentagon/component/kops/__init__.py",
    "content": "import os\nimport glob\nimport shutil\nimport logging\nimport traceback\nimport sys\nimport re\nimport subprocess\nimport yaml\n\nfrom pentagon.component import ComponentBase\nfrom pentagon.helpers import render_template\nfrom pentagon.defaults import AWSPentagonDefaults as PentagonDefaults\n\n\nclass Cluster(ComponentBase):\n    _path = os.path.dirname(__file__)\n\n    def add(self, destination):\n        for key in PentagonDefaults.kubernetes:\n            if not self._data.get(key):\n                self._data[key] = PentagonDefaults.kubernetes[key]\n\n        if not self._data.get('network_cidr_base'):\n            self._data['network_cidr_base'] = PentagonDefaults.vpc['vpc_cidr_base']\n\n        for key in ['authorization', 'networking']:\n            self._data[key] = yaml.dump(self._data[key])\n\n        return super(Cluster, self).add(destination)\n\n    def get(self, destination):\n\n        self._cluster_name = self._data.get('name', os.environ.get('CLUSTER_NAME'))\n        self._bucket = self._data.get('kops_state_store', os.environ.get('KOPS_STATE_STORE'))\n        self._destination = destination\n\n        if self._bucket is None:\n            logging.error(\"kops_state_store required.\")\n            sys.exit(1)\n\n        if self._cluster_name is None:\n            logging.error(\"name is required.\")\n            sys.exit(1)\n\n        os.mkdir(self._cluster_name)\n        os.chdir(self._cluster_name)\n\n        self._get_cluster_yaml()\n\n        for ig in self._cluster_instance_groups:\n            self._get_instance_group_yaml(ig)\n\n        self._get_cluster_admin_secret()\n\n    @property\n    def _cluster_instance_groups(self):\n        # get igs yaml\n        logging.debug(\"Getting instance groups.\")\n        args = ['kops',\n                'get',\n                'ig',\n                '--name={}'.format(self._cluster_name),\n                '--state=s3://{}'.format(self._bucket)]\n\n        return [ig.split(\"\\t\")[0] 
for ig in subprocess.check_output(args).split(\"\\n\")][1:-1]\n\n    def _get_instance_group_yaml(self, ig):\n        args = ['kops',\n                'get',\n                'ig',\n                ig,\n                '--name={}'.format(self._cluster_name),\n                '--state=s3://{}'.format(self._bucket),\n                '-oyaml']\n\n        ig_yaml = subprocess.check_output(args)\n\n        file_mode = 'w'\n        if \"master\" in ig:\n            ig_file_name = \"master.yml\"\n            file_mode = 'a'\n        else:\n            ig_file_name = \"{}.yml\".format(ig)\n\n        with open(ig_file_name, file_mode) as ig_file:\n            ig_file.write(\"---\\n\")\n            ig_file.write(\"{}\\n\".format(ig_yaml))\n            ig_file.close()\n\n    def _get_cluster_admin_secret(self):\n        # get secret sorta\n        logging.debug(\"Getting ssh key secret. This will require transformation before a new cluster can be created\")\n        with open('secret.sh', 'w') as sf:\n            args = ['kops',\n                    'get',\n                    'secret',\n                    'admin',\n                    '--name={}'.format(self._cluster_name),\n                    '--state=s3://{}'.format(self._bucket)]\n\n            subprocess.Popen(args, stdout=sf)\n\n    def _get_cluster_yaml(self):\n        # get cluster yaml\n        logging.debug(\"Getting cluster.\")\n        with open('cluster.yml', 'w') as cf:\n            args = ['kops',\n                    'get',\n                    'cluster',\n                    '--name={}'.format(self._cluster_name),\n                    '--state=s3://{}'.format(self._bucket),\n                    '-oyaml']\n\n            p = subprocess.Popen(args, stdout=cf)\n            stdout, stderr = p.communicate()\n            if p.returncode != 0:\n                logging.error(\"Error getting cluster: {}\".format(stderr))\n                sys.exit(1)\n"
  },
  {
    "path": "pentagon/component/kops/files/cluster.yml.jinja",
    "content": "apiVersion: kops/v1alpha2\nkind: Cluster\nmetadata:\n  name: {{ cluster_name }}\nspec:\n  kubelet:\n    anonymousAuth: false\n  {%- if additional_policies %}\n  additionalPolicies:\n   {{ additional_policies|indent(4) }}\n  {%- endif %}\n  api:\n    loadBalancer:\n      type: Public\n  authorization:\n    {{ authorization|indent(2) }}\n  channel: stable\n  cloudProvider: aws\n  configBase: s3://{{ kops_state_store_bucket }}/{{ cluster_name }}\n  dnsZone: {{ cluster_dns }}\n  etcdClusters:\n  - name: main\n    enableEtcdTLS: true\n    version: 3.2.24\n    etcdMembers:\n    {%- for az in master_availability_zones %}\n    - instanceGroup: master-{{ az }}\n      name: {{ az }}\n      encryptedVolume: true\n    {%- endfor %}\n  - name: events\n    enableEtcdTLS: true\n    version: 3.2.24\n    etcdMembers:\n    {%- for az in master_availability_zones %}\n    - instanceGroup: master-{{ az }}\n      name: {{ az }}\n      encryptedVolume: true\n    {%- endfor %}\n  kubeAPIServer:\n    authenticationTokenWebhookConfigFile: /srv/kubernetes/aws-iam-authenticator/kubeconfig.yaml\n    auditLogPath: /var/log/kube-apiserver-audit.log\n    auditLogMaxAge: 10\n    auditLogMaxBackups: 1\n    auditLogMaxSize: 100\n    auditPolicyFile: /srv/kubernetes/audit.yaml\n  kubernetesApiAccess:\n  - 0.0.0.0/0\n  kubernetesVersion: {{ kubernetes_version }}\n  masterPublicName: api.{{ cluster_name }}\n  networkCIDR: {{ network_cidr }}\n  {%- if vpc_id %}\n  networkID: {{ vpc_id }}\n  {%- endif %}\n  networking:\n    {{ networking|indent(2) }}\n  nonMasqueradeCIDR: 100.64.0.0/10\n  sshAccess:\n  - 0.0.0.0/0\n  subnets:\n  {%- for az in availability_zones %}\n  - cidr: {{ network_cidr_base|string + \".\" + ((third_octet|int) + (loop.index - 1) * third_octet_increment)|string + \".0/\" + network_mask|string }}\n    name: {{ az }}\n    type: Private\n    zone: {{ az }}\n    {%- if nat_gateways %}\n    egress: {{ nat_gateways[loop.index-1] }}\n    {%- endif %}\n  {%- endfor -%}\n  {%- 
for az in availability_zones %}\n  - cidr: {{ network_cidr_base|string + \".\" + ((third_octet|int + 4 * third_octet_increment) + (loop.index - 1) * third_octet_increment)|string + \".0/\" + network_mask|string }}\n    name: utility-{{ az }}\n    type: Utility\n    zone: {{ az }}\n  {%- endfor %}\n  topology:\n    dns:\n      type: Public\n    masters: private\n    nodes: private\n  fileAssets:\n  - name: auditPolicyFile\n    path: /srv/kubernetes/audit.yaml\n    roles: [Master]\n    content: |\n      apiVersion: audit.k8s.io/v1beta1\n      kind: Policy\n      rules:\n        # The following requests were manually identified as high-volume and low-risk,\n        # so drop them.\n        - level: None\n          users: [\"system:kube-proxy\"]\n          verbs: [\"watch\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"endpoints\", \"services\", \"services/status\"]\n        - level: None\n          # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.\n          # TODO(#46983): Change this to the ingress controller service account.\n          users: [\"system:unsecured\"]\n          namespaces: [\"kube-system\"]\n          verbs: [\"get\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"configmaps\"]\n        - level: None\n          users: [\"kubelet\"] # legacy kubelet identity\n          verbs: [\"get\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"nodes\", \"nodes/status\"]\n        - level: None\n          userGroups: [\"system:nodes\"]\n          verbs: [\"get\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"nodes\", \"nodes/status\"]\n        - level: None\n          users:\n            - system:kube-controller-manager\n            - system:kube-scheduler\n            - system:serviceaccount:kube-system:endpoint-controller\n          verbs: [\"get\", \"update\"]\n          
namespaces: [\"kube-system\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"endpoints\"]\n        - level: None\n          users: [\"system:apiserver\"]\n          verbs: [\"get\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n        # Don't log HPA fetching metrics.\n        - level: None\n          users:\n            - system:kube-controller-manager\n          verbs: [\"get\", \"list\"]\n          resources:\n            - group: \"metrics.k8s.io\"\n        # Don't log these read-only URLs.\n        - level: None\n          nonResourceURLs:\n            - /healthz*\n            - /version\n            - /swagger*\n        # Don't log events requests.\n        - level: None\n          resources:\n            - group: \"\" # core\n              resources: [\"events\"]\n        # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes\n        - level: Request\n          users: [\"kubelet\", \"system:node-problem-detector\", \"system:serviceaccount:kube-system:node-problem-detector\"]\n          verbs: [\"update\",\"patch\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"nodes/status\", \"pods/status\"]\n          omitStages:\n            - \"RequestReceived\"\n        - level: Request\n          userGroups: [\"system:nodes\"]\n          verbs: [\"update\",\"patch\"]\n          resources:\n            - group: \"\" # core\n              resources: [\"nodes/status\", \"pods/status\"]\n          omitStages:\n            - \"RequestReceived\"\n        # deletecollection calls can be large, don't log responses for expected namespace deletions\n        - level: Request\n          users: [\"system:serviceaccount:kube-system:namespace-controller\"]\n          verbs: [\"deletecollection\"]\n          omitStages:\n            - 
\"RequestReceived\"\n        # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,\n        # so only log at the Metadata level.\n        - level: Metadata\n          resources:\n            - group: \"\" # core\n              resources: [\"secrets\", \"configmaps\"]\n            - group: authentication.k8s.io\n              resources: [\"tokenreviews\"]\n          omitStages:\n            - \"RequestReceived\"\n        # Get repsonses can be large; skip them.\n        - level: Request\n          verbs: [\"get\", \"list\", \"watch\"]\n          resources:\n            - group: \"\" # core\n            - group: \"admissionregistration.k8s.io\"\n            - group: \"apiextensions.k8s.io\"\n            - group: \"apiregistration.k8s.io\"\n            - group: \"apps\"\n            - group: \"authentication.k8s.io\"\n            - group: \"authorization.k8s.io\"\n            - group: \"autoscaling\"\n            - group: \"batch\"\n            - group: \"certificates.k8s.io\"\n            - group: \"extensions\"\n            - group: \"metrics.k8s.io\"\n            - group: \"networking.k8s.io\"\n            - group: \"policy\"\n            - group: \"rbac.authorization.k8s.io\"\n            - group: \"scheduling.k8s.io\"\n            - group: \"settings.k8s.io\"\n            - group: \"storage.k8s.io\"\n          omitStages:\n            - \"RequestReceived\"\n        # Default level for known APIs\n        - level: RequestResponse\n          resources:\n            - group: \"\" # core\n            - group: \"admissionregistration.k8s.io\"\n            - group: \"apiextensions.k8s.io\"\n            - group: \"apiregistration.k8s.io\"\n            - group: \"apps\"\n            - group: \"authentication.k8s.io\"\n            - group: \"authorization.k8s.io\"\n            - group: \"autoscaling\"\n            - group: \"batch\"\n            - group: \"certificates.k8s.io\"\n            - group: \"extensions\"\n            - group: 
\"metrics.k8s.io\"\n            - group: \"networking.k8s.io\"\n            - group: \"policy\"\n            - group: \"rbac.authorization.k8s.io\"\n            - group: \"scheduling.k8s.io\"\n            - group: \"settings.k8s.io\"\n            - group: \"storage.k8s.io\"\n          omitStages:\n            - \"RequestReceived\"\n        # Default level for all other requests.\n        - level: Metadata\n          omitStages:\n            - \"RequestReceived\"\n  hooks:\n  - name: kops-hook-authenticator-config.service\n    before:\n      - kubelet.service\n    roles: [Master]\n    manifest: |\n      [Unit]\n        Description=Initialize AWS IAM Authenticator cert and Kube API Server config\n      [Service]\n        Type=oneshot\n        ExecStartPre=/bin/mkdir -p /srv/kubernetes/aws-iam-authenticator\n        ExecStartPre=/bin/sh -c '/usr/bin/test -r /srv/kubernetes/aws-iam-authenticator/README || /bin/echo These files were created by the kops-hook-authenticator-config service, which ran aws-iam-authenticator init via a temporary Docker container. 
>/srv/kubernetes/aws-iam-authenticator/README'\n        ExecStartPre=/bin/chown 10000:10000 /srv/kubernetes/aws-iam-authenticator\n        ExecStartPost=/bin/sh -c '(/usr/bin/id -u aws-iam-authenticator >/dev/null 2>&1 || /usr/sbin/groupadd -g 10000 aws-iam-authenticator) ; (/usr/bin/id -u aws-iam-authenticator >/dev/null 2>&1 || /usr/sbin/useradd -s /usr/sbin/nologin -c \"AWS IAM Authenticator configs\" -d /srv/kubernetes/aws-iam-authenticator -u 10000 -g aws-iam-authenticator aws-iam-authenticator)'\n        ExecStart=/bin/sh -c '(set -x ; /usr/bin/docker run --net=host --rm -w /srv/kubernetes/aws-iam-authenticator -v /srv/kubernetes/aws-iam-authenticator:/srv/kubernetes/aws-iam-authenticator --name aws-iam-authenticator-initialize gcr.io/heptio-images/authenticator:v0.3.0 init -i clustername ; /bin/mv /srv/kubernetes/aws-iam-authenticator/heptio-authenticator-aws.kubeconfig /srv/kubernetes/aws-iam-authenticator/kubeconfig.yaml)'\n"
  },
  {
    "path": "pentagon/component/kops/files/kops.sh",
    "content": "#!/bin/bash\nset -x\nset -e\n\nkops create -f cluster.yml\nkops create -f masters.yml\nkops create -f nodes.yml\nbash ./secret.sh\n"
  },
  {
    "path": "pentagon/component/kops/files/masters.yml.jinja",
    "content": "{% for az in master_availability_zones -%}\n---\napiVersion: kops/v1alpha2\nkind: InstanceGroup\nmetadata:\n  labels:\n    kops.k8s.io/cluster: {{ cluster_name }}\n  name: master-{{ az }}\nspec:\n  machineType: {{ master_node_type }}\n  image: {{ master_node_image }}\n  maxSize: 1\n  minSize: 1\n  role: Master\n  subnets:\n  - {{ az }}\n{% endfor %}\n"
  },
  {
    "path": "pentagon/component/kops/files/nodes.yml.jinja",
    "content": "{% for az in availability_zones -%}\n---\napiVersion: kops/v1alpha2\nkind: InstanceGroup\nmetadata:\n  labels:\n    kops.k8s.io/cluster: {{ cluster_name }}\n  name: nodes-{{ az }}\nspec:\n  machineType: {{ worker_node_type }}\n  image: {{ worker_node_image }}\n  maxSize: {{ ig_max_size if ig_max_size else node_count }}\n  minSize: {{ ig_min_size if ig_min_size else node_count }}\n  role: Node\n  subnets:\n  - {{ az }}\n  rootVolumeSize: {{ node_root_volume_size }}\n  rootVolumeType: gp2\n  cloudLabels:\n    k8s.io/cluster-autoscaler/enabled: ''\n    kubernetes.io/cluster/{{ cluster_name }}: ''\n{% endfor -%}\n"
  },
  {
    "path": "pentagon/component/kops/files/secret.sh.jinja",
    "content": "kops create secret sshpublickey admin -i {{ ssh_key_path }} --name {{ cluster_name }}\n"
  },
  {
    "path": "pentagon/component/vpn/__init__.py",
    "content": "\nimport os\nimport logging\nimport boto3\n\nfrom pentagon.component import ComponentBase\n\n\nclass Vpn(ComponentBase):\n\n    _required_parameters = [\n        'aws_access_key',\n        'aws_secret_key',\n        'project_name'\n    ]\n\n    _ami_owners = ['099720109477']  # Amazon AMI owner\n    _vpn_ami_id_placeholder = \"<ami_id>\"\n    _vpn_ami_filters = [{'Name': 'virtualization-type', 'Values': ['hvm']},\n                        {'Name': 'architecture', 'Values': ['x86_64']},\n                        {'Name': 'name', 'Values': ['ubuntu/images/hvm-ssd/ubuntu-trusty*']}]\n\n    def add(self, destination, overwrite=False):\n        self._get_vpn_ami_id()\n        return super(Vpn, self).add(destination, overwrite=overwrite)\n\n    def _get_vpn_ami_id(self):\n\n        if self._data.get('vpn_ami_id'):\n            self._data['vpn_ami_id'] = self._data.get('vpn_ami_id')\n        else:\n            logging.info(\"Getting VPN ami-id from AWS\")\n\n            try:\n                client = boto3.client('ec2',\n                                      aws_access_key_id=self._data['aws_access_key'],\n                                      aws_secret_access_key=self._data['aws_secret_key'],\n                                      region_name=self._data['aws_default_region']\n                                      )\n                images = client.describe_images(Owners=self._ami_owners, Filters=self._vpn_ami_filters)\n                self._data['vpn_ami_id'] = images['Images'][-1]['ImageId']\n            except Exception, e:\n                logging.error(\"Encountered \\\" {} \\\" getting ami-id. VPN not configured fully. See docs/vpn.md for more information\".format(e))\n"
  },
  {
    "path": "pentagon/component/vpn/files/admin-environment/destroy.yml",
    "content": "---\n- name: remove admin ssh key\n  hosts: localhost\n  connection: local\n  gather_facts: False\n\n  pre_tasks:\n    - include_vars: \"{{ item }}\"\n      with_items:\n        - ../../config/local/vars.yml\n        - env.yml\n      tags: always\n  \n  tasks:\n    - ec2_key:\n        name: \"{{ aws_key_name }}\"\n        state: absent\n\n- name:  set up vpn prerequisites\n  hosts: localhost\n  vars:\n    destroy: true\n  connection: local\n  gather_facts: False\n\n  pre_tasks:\n    - include_vars: \"{{ item }}\"\n      with_items:\n        - ../../config/local/vars.yml\n        - env.yml\n      tags: always\n\n  roles:\n    - reactiveops.vpn-stack\n"
  },
  {
    "path": "pentagon/component/vpn/files/admin-environment/env.yml.jinja",
    "content": "---\n{% raw -%}\nenv: \"admin-{{ org }}\"\n{%- endraw %}\naws_key_name: '{{ admin_vpn_key }}'\ndefault_ami: '{{ vpn_ami_id }}'\n\n# VPN\nopenvpn_key_country: US\nopenvpn_key_province: NY\nopenvpn_key_city: New York\nopenvpn_key_org: KO\nopenvpn_key_email: admin-dev@example.net\nopenvpn_use_pam: no\n\n{% raw -%}\nopenvpn_host: \"vpn-{{ org }}.{{ canonical_zone }}\"\nopenvpn_client_create_gateway_config: no\n\nvpn_bucket: \"{{ org }}-vpn\"\n\nopenvpn_s3_conf_path: \"s3://{{ vpn_bucket }}/stacks/vpn\"\n{%- endraw %}\n# the pool of IP addresses that the VPN server manages\nopenvpn_server: 172.16.137.0 255.255.255.0\n\n# the second line is the route from a VPN client to the VPC\nopenvpn_server_options:\n  - 'up /etc/openvpn/server.up.sh'\n  - 'push \"route 172.20.0.0 255.255.0.0\"'\n\nopenvpn_create_server_up: yes\n\nopenvpn_clients:\n  - 'vpn-user1'\n  - 'vpn-user2'\n  - 'vpn-user3'\n"
  },
  {
    "path": "pentagon/component/vpn/files/admin-environment/vpn.yml",
    "content": "---\n- name: upload admin ssh key\n  hosts: localhost\n  connection: local\n  gather_facts: False\n\n  pre_tasks:\n    - include_vars: \"{{ item }}\"\n      with_items:\n        - ../../config/local/vars.yml\n        - env.yml\n      tags: always\n  \n  tasks:\n    - ec2_key:\n        name: \"{{ aws_key_name }}\"\n        key_material: \"{{ item }}\"\n      with_file: \"../../config/private/{{ aws_key_name }}.pub\"\n\n- name:  set up vpn prerequisites\n  hosts: localhost\n  connection: local\n  gather_facts: False\n\n  pre_tasks:\n    - include_vars: \"{{ item }}\"\n      with_items:\n        - ../../config/local/vars.yml\n        - env.yml\n      tags: always\n\n  roles:\n    - reactiveops.vpn-stack\n\n- name: configure vpn instance\n  hosts: tag_Layer_vpn_public_ip\n  become: yes\n  become_user: root\n\n  pre_tasks:\n    - include_vars: \"{{ item }}\"\n      with_items:\n        - ../../config/local/vars.yml\n        - env.yml\n\n  roles:\n    - role: Stouts.openvpn-master\n      vpn_role: 'primary'\n      openvpn_first_run: \"{{ hostvars['localhost']['openvpn_first_run'] }}\"\n"
  },
  {
    "path": "pentagon/defaults.py",
    "content": "from datetime import datetime\n\n\nclass AWSPentagonDefaults(object):\n    ssh = {\n        'admin_vpn_key': 'admin-vpn',\n        'production_kube_key': 'production-kube',\n        'production_private_key': 'production-private',\n        'working_kube_key': 'working-kube',\n        'working_private_key': 'working-private',\n    }\n\n    kubernetes = {\n        'authorization': {'rbac': {}},\n        'kubernetes_version': '1.10.8',\n        'master_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',\n        'master_node_type': 't2.medium',\n        'network_cidr': '172.20.0.0/16',\n        'network_mask': 24,\n        'networking': {'flannel': {'backend': 'vxlan'}},\n        'node_additional_policies': '[{\"Effect\": \"Allow\",\"Action\": [\"autoscaling:DescribeAutoScalingGroups\", \"autoscaling:DescribeAutoScalingInstances\", \"autoscaling:DescribeTags\", \"autoscaling:SetDesiredCapacity\", \"autoscaling:TerminateInstanceInAutoScalingGroup\"],\"Resource\": \"*\"}]',\n        'node_count': 1,\n        'node_root_volume_size': 200,\n        'production_third_octet': 16,\n        'ssh_key_path': '~/.ssh/id_rsa.pub',\n        'third_octet_increment': 1,\n        'third_octet': 16,\n        'v_log_level': 10,\n        'worker_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',\n        'worker_node_type': 't2.medium',\n        'working_third_octet': 24,\n    }\n\n    vpc = {\n        'aws_availability_zone_count': 3,\n        'vpc_cidr_base': '172.20',\n        'vpc_name': datetime.today().strftime('%Y%m%d'),\n    }\n"
  },
  {
    "path": "pentagon/filters.py",
    "content": "import re\n\n\ndef register_filters():\n\t\"\"\"Register a function with decorator\"\"\"\n\tregistry = {}\n\tdef registrar(func):\n\t\tregistry[func.__name__] = func\n\t\treturn func \n\tregistrar.all = registry\n\treturn registrar\n\n\nfilter = register_filters()\n\n\ndef get_jinja_filters():\n\t\"\"\"Return all registered custom jinja filters\"\"\"\n\treturn filter.all\n\n\n@filter\ndef regex_trim(input, regex, replace=''):\n\t\"\"\"\n\tTrims or replaces the regex match in an input string.\n\tinput (string): the input string to search for matches\n\tregex (string): regex to match\n\treplace (string - optional): a string to replace any matches with.  Defaults to trimming the match.\n\t\"\"\"\n\treturn re.sub(regex, replace, input)"
  },
  {
    "path": "pentagon/helpers.py",
    "content": "import logging\nimport os\nimport traceback\nimport jinja2\nimport string\n\nimport oyaml as yaml\nfrom Crypto.PublicKey import RSA\nfrom stat import *\nfrom collections import OrderedDict\nimport filters as jinja_filters\n\n\ndef render_template(template_name, template_path, target, context, delete_template=True, overwrite=False):\n    \"\"\"\n    Helper function to write out DRY up templating. Accepts template name (string),\n    path (string), target directory (string), context (dictionary) and delete_template (boolean)\n    Default behavior is to use the key of the dictionary as the template variable names, replace\n    them with the value in the tempalate and delete the template if delete_template is\n    True\n    \"\"\"\n\n    logging.info(\"Writing {}\".format(target))\n    logging.debug(\"Template Context: {}\".format(context))\n    logging.debug(\"Overwrite is {}\".format(overwrite))\n    if os.path.isfile(target) and overwrite is not True:\n        logging.warn(\"Cowardly refusing to overwrite existing file {}\".format(target))\n        return False\n\n    logging.debug(\"Attempting to write {} from template {}/{}\".format(target, template_path, template_name))\n\n    template_path = os.path.normpath(template_path)\n    template_name = os.path.normpath(template_name)\n    template_permissions = os.stat(template_path + '/' + template_name).st_mode\n\n    with open(target, 'w+') as vars_file:\n        try:\n            env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path))\n            for k,v in jinja_filters.get_jinja_filters().items():\n                env.filters[k] = v\n\n            template = env.get_template(template_name)\n            vars_file.write(template.render(context))\n        except Exception, e:\n            logging.error(\"Error writing {}. 
{}\".format(target, traceback.print_exc(e)))\n            return False\n\n    os.chmod(target, template_permissions)\n\n    if delete_template:\n        logging.debug(\"Removing {}/{}\".format(template_path, template_name))\n        os.remove(\"{}/{}\".format(template_path, template_name))\n\n\ndef write_yaml_file(filename, d, overwrite=False):\n    \"\"\" Accepts  filepath,  dictionary. Writes dictionary in yaml to file path, recursively creating path if necessary \"\"\"\n    if not os.path.exists(os.path.dirname(filename)) and overwrite is False:\n        try:\n            os.makedirs(os.path.dirname(filename))\n        except OSError as exc:\n            if exc.errno != errno.EEXIST:\n                raise\n    logging.debug(\"Writing yaml file {}\".format(filename))\n    logging.debug(d)\n    with open(filename, 'w+') as f:\n        yaml.safe_dump(d, f, default_flow_style=False)\n\n\ndef create_rsa_key(name, path, bits=2048):\n    \"\"\" creates an ssh key pair. Accepts name, path and bits. Name is the name of the key pair to generate at Path. Bits defaults to 2048 \"\"\"\n\n    key = RSA.generate(bits)\n\n    private_key = \"{}{}\".format(path, name)\n    public_key = \"{}{}.pub\".format(path, name)\n\n    with open(private_key, 'w') as content_file:\n        os.chmod(private_key, 0600)\n        content_file.write(key.exportKey('PEM'))\n\n    pubkey = key.publickey()\n    with open(public_key, 'w') as content_file:\n        content_file.write(pubkey.exportKey('OpenSSH'))\n\n\ndef merge_dict(d, new_data, clobber=False):\n    \"\"\" accepts new_data (dict) and clobbber (boolean). Merges dictionary with dictionary 'd'. If clobber is True, overwrites value. 
Defaults to false \"\"\"\n    for key, value in new_data.items():\n        if d.get(key) is None or clobber:\n            logging.debug(\"Setting component data {}: {}\".format(key, value))\n            d[key] = value\n    return d\n\n\ndef allege_aws_availability_zones(region, count):\n    \"\"\"\n    Accepts a region (string) and count (int) and returns a list of potential aws availability zones\n    It does no verification that the region is correct or that the az actually exists\n    Ex: for region 'us-west-1' and count '3' it will return ['us-west-1a', 'us-west-1b', 'us-west-1c']\n    \"\"\"\n    azs = []\n    logging.info(\"Guessing at default AWS AZs\")\n    for i in range(0, count):\n        azs += [\"{}{}\".format(region, list(string.ascii_lowercase)[i])]\n\n    return (\", \").join(azs)\n"
  },
  {
    "path": "pentagon/meta.py",
    "content": "__version__ = \"3.1.4\"\n__author__ = 'ReactiveOps, Inc.'\n"
  },
  {
    "path": "pentagon/migration/__init__.py",
    "content": "\nimport logging\nimport os\nimport shutil\nimport sys\nimport glob\nimport git\nimport oyaml as yaml\nimport semver\nimport fnmatch\n\nfrom collections import OrderedDict\n\nfrom pentagon.migration import migrations\nfrom pentagon.pentagon import PentagonException\nfrom pentagon.meta import __version__ as pentagon_version\n\nfrom pydoc import locate\nfrom distutils.version import StrictVersion\n\ndefault_version = \"1.2.0\"\nversion_file = '.version'\nmigration_readme_file = 'migrations.md'\n\n\ndef migrate(branch='migration', yes=False):\n    \"\"\" Find applicable migrations and run them \"\"\"\n    logging.info(\"Pentagon Version: {}\".format(installed_version()))\n    logging.info(\"Starting Repository Version: {}\".format(current_version()))\n\n    migrations = migrations_to_run(current_version(), available_migrations())\n    if migrations:\n        logging.info(\"There are Migrations to run: \")\n        logging.info(migrations)\n        if yes:\n            for migration in migrations:\n                logging.info('Starting migration: {}'.format(migration))\n                migration_name = \"migration_{}\".format(migration.replace('.', '_'))\n                migration_class = locate(\"pentagon.migration.migrations.{}\".format(migration_name))\n                migration_class.Migration(branch).start()\n            logging.info(\"Migrations complete. Use git to merge the migration branch.\")\n            logging.info(\"Current Repository Version: {}\".format(current_version()))\n        else:\n            logging.info(\"Use: `pentagon migrate --yes` to run migrations\")\n    else:\n        logging.info(\"No Migrations to run.\")\n        compare_value = semver.compare(installed_version(), current_version())\n        if compare_value == -1:\n            logging.error(\"Repository Version > Installed Version. 
Upgrade Pentagon\")\n            sys.exit(1)\n        elif compare_value == 1:\n            logging.info(\"Installed Version > Repository Version.\")\n            logging.info(\" Use `pentagon migrate --yes` to update Repository Version\")\n            if yes:\n                Migration(None).version_only()\n        elif compare_value == 0:\n            logging.info(\"You are at the latest version!\")\n\n\ndef migrations_to_run(current_version, available_migrations):\n    m = [v for v in available_migrations if StrictVersion(v) >= StrictVersion(current_version)]\n    logging.debug(\"Migrations to run: {}\".format(m))\n    return m\n\n\ndef available_migrations():\n    \"\"\" Gets and returns a list of migration modules \"\"\"\n    m = []\n    for file in glob.glob(\"{}/migration_*.py\".format(migrations.__path__[0])):\n        m.append(os.path.basename(os.path.splitext(file)[0]).replace('migration_', '').replace('_', '.'))\n        logging.debug(\"Migration Found: {}\".format(file))\n    m.sort(key=StrictVersion)\n    logging.debug(\"Available Migrations: {}\".format(m))\n    return m\n\n\ndef installed_version():\n    \"\"\" get installed version of pentagon \"\"\"\n    return pentagon_version\n\n\ndef infrastructure_repository():\n    infrastructure_repo = os.environ.get('INFRASTRUCTURE_REPO')\n    if infrastructure_repo is None:\n        raise PentagonException('Required environment variable INFRASTRUCTURE_REPO is not set.')\n    return infrastructure_repo\n\n\ndef current_version(version_file=version_file):\n    \"\"\" get current version of the infrastructure_repo \"\"\"\n    try:\n        with open(\"{}/{}\".format(infrastructure_repository(), version_file)) as vf:\n            version = vf.readline()\n    except IOError:\n        logging.warn(\"{} not found. 
Using default version {}\".format(version_file, default_version))\n        version = default_version\n\n    return version\n\n\nclass Migration(object):\n    \"\"\" Parent class for pentagon migrations \"\"\"\n\n    class YamlEditor(object):\n\n        def __init__(self, file=None):\n            # Fetch yaml file as ordered dict\n            self.file = file \n            self.data = {}\n            if self.file:\n                with open(self.file) as yf:\n                    self.data = yaml.load(yf.read())\n                logging.debug(self.data)\n            else:\n                logging.debug(\"YamlEditor initialized with no file\")\n\n        def update(self, new_data, clobber=False):\n            \"\"\" accepts a dict and appends keys to ordered dict. Updates keys if clobber is True\"\"\"\n            nd = OrderedDict(new_data)\n            self.data.update(nd)\n\n        def remove(self, keys):\n            \"\"\" accepts a list of keys to remove from yaml \"\"\"\n            for key in keys:\n                if key in self.data.keys():\n                    del self.data[key]\n\n        def get_data(self):\n            \"\"\" return ordered dict of yaml \"\"\"\n            return self.data\n\n        def write(self, file=None):\n            if file is not None:\n                self.file = file\n                with open(self.file, 'w') as yf:\n                    yf.write(yaml.dump(self.data))\n\n        def get(self, key, default=None):\n            return self.data.get(key, default)\n\n        def __getitem__(self, key):\n            return self.data[key]\n\n        def __setitem__(self, key, value):\n            self.data[key] = value\n\n        def __str__(self):\n            str(self.data)\n\n        def __enter__(self):\n            return self\n\n        def __exit__(self, type, value, traceback):\n            pass\n\n    def __init__(self, branch_name):\n        logging.debug(\"This got run\")\n        self._infrastructure_repository = 
infrastructure_repository()\n        self.branch = branch_name\n\n    def start(self):\n        \"\"\" run migration \"\"\"\n        self._run()\n\n    def version_only(self):\n        \"\"\" Only increase version in .version_file \"\"\"\n        self.overwrite_file(version_file, installed_version())\n\n    def real_path(self, path):\n        return os.path.normpath(\"{}/{}\".format(self._infrastructure_repository, path))\n\n    def _branch(self):\n        repo = git.Repo(self._infrastructure_repository)\n        try:\n            repo.create_head(self.branch)\n        except OSError as e:\n            logging.error(\"OSError %s\", e)\n            logging.error(\"Most likely the migration branch still exists.  Please delete it and try again.\")\n            sys.exit(1)\n\n        repo.git.checkout(self.branch)\n\n    def _run(self):\n        os.chdir(self._infrastructure_repository)\n        self._branch()\n        self.run()\n        self._write_new_version(installed_version())\n        self._append_migration_readme()\n\n    def _write_new_version(self, version):\n        \"\"\" write new file with new version following the migration \"\"\"\n        self.overwrite_file(version_file, version)\n\n    def _append_migration_readme(self):\n        if hasattr(self, \"_readme_string\"):\n            with open(migration_readme_file, 'a+') as mrf:\n                mrf.write(self._readme_string)\n\n    def move(self, source, destination):\n        \"\"\" move files and directories with extreme prejudice \"\"\"\n        logging.info(\"Moving {} -> {}\".format(self.real_path(source), self.real_path(destination)))\n\n        if os.path.isfile(source):\n            _move = shutil.move\n        elif os.path.exists(destination):\n            from distutils.dir_util import copy_tree as _move\n        else:\n            from shutil import copytree as _move\n\n        _move(self.real_path(source), self.real_path(destination))\n        self.delete(source)\n\n    def 
overwrite_file(self, path, content, executable=False):\n        \"\"\" alias create_file \"\"\"\n        self.create_file(path, content, executable)\n\n    def create_file(self, path, content, executable=False):\n        \"\"\" Create a new file \"\"\"\n        path = \"{}/{}\".format(self._infrastructure_repository, path)\n        with open(path, 'w') as f:\n            f.write(content)\n\n        if executable is True:\n            mode = os.stat(path).st_mode\n            mode |= (mode & 0o444) >> 2    # copy R bits to X\n            os.chmod(path, mode)\n\n    def create_dir(self, path):\n        \"\"\" Recursively create a directory \"\"\"\n        path = \"{}/{}\".format(self._infrastructure_repository, path)\n        try:\n            os.makedirs(path)\n        except OSError:\n            if not os.path.isdir(path):\n                raise\n\n    def get_file_content(self, path):\n        \"\"\" Retreive file contents in a string \"\"\"\n        with open(self.real_path(path), 'r') as f:\n            return f.read()\n\n    @property\n    def inventory(self, exclude=[]):\n        \"\"\" Returns list of inventory item, excluding list 'exclude' \"\"\"\n        return [d for d in os.walk(\"{}/inventory\".format(self._infrastructure_repository)).next()[1] if d not in exclude]\n\n    def delete(self, path):\n        \"\"\" deletes file or directory \"\"\"\n        logging.info(\"Deleting {}\".format(path))\n        if os.path.isfile(self.real_path(path)):\n            return os.remove(self.real_path(path))\n\n        if os.path.isdir(self.real_path(path)):\n            return shutil.rmtree(self.real_path(path))\n\n        return False\n\n    def find_files(self, path='./', file_pattern=None):\n        matches = []\n        for root, dirnames, filenames in os.walk(path):\n            for filename in fnmatch.filter(filenames, file_pattern):\n                matches.append(os.path.join(root, filename))\n\n        if len(matches) == 0:\n            logging.warn(\"No 
{} files found!\".format(file_pattern))\n        return matches\n"
  },
  {
    "path": "pentagon/migration/migrations/__init__.py",
    "content": "\nfrom pentagon.migration.migrations import *\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_1_2_0.py",
    "content": "\nimport pentagon\nfrom pentagon import migration\nfrom collections import OrderedDict\nfrom pentagon.migration import *\n\n\nclass Migration(migration.Migration):\n    _starting_version = '1.2.0'\n    _ending_version = '2.0.0'\n\n    def run(self):\n\n        # Create inventory directory if it exists\n        inventory_dir = '{}/inventory'.format(self._infrastructure_repository)\n        if not os.path.isdir(inventory_dir):\n            os.mkdir(inventory_dir)\n\n        # Move default\n        if os.path.exists('{}/default'.format(self._infrastructure_repository)):\n            self.move('default', \"inventory/default\")\n\n        self.delete('config/local/env-vars.sh')\n        if os.path.exists('{}/config'.format(self._infrastructure_repository)):\n            self.move('config/', 'inventory/default/config')\n\n        for item in self.inventory:\n\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug('Inventory Path: {}'.format(inventory_path))\n\n            if os.path.exists('{}/account/vars.yml'.format(self._infrastructure_repository)):\n                account_vars_yml = self.YamlEditor('{}/account/vars.yml'.format(inventory_path)).get()\n            else:\n                account_vars_yml = OrderedDict()\n\n            if os.path.exists('{}/account/vars.sh'.format(self._infrastructure_repository)):\n                account_vars_sh = self.get_file_content('{}/account/vars.sh'.format(inventory_path)).get()\n                account_vars = OrderedDict()\n\n                for line in account_vars_sh.split('\\n'):\n                    line = line.replace('export ', '')\n                    llist = line.split('=', 1)\n                    account_vars[llist[0]] = llist[1]\n            else:\n                account_vars = OrderedDict()\n            if os.path.isfile('{}/config/local/vars.yml'.format(inventory_path)):\n                config_vars_yml = 
self.YamlEditor('{}/config/local/vars.yml'.format(inventory_path))\n                config_vars_yml.update(account_vars_yml)\n                config_vars_yml.update(account_vars)\n\n                config_vars_yml['ANSIBLE_CONFIG'] = '${{INFRASTRUCTURE_REPO}}/inventory/{}/config'.format(item)\n                config_vars_yml['KUBECONFIG'] = \"${{INFRASTRUCTURE_REPO}}/inventory/{item}/config/private/kubeconfig\".format(item=item)\n\n                config_vars_yml.write()\n            self.delete('{}/account'.format(inventory_path))\n\n            if os.path.exists(\"inventory/{item}/config/local/1password.yml\".format(item=item)):\n                with self.YamlEditor(\"inventory/{item}/config/local/1password.yml\".format(item=item)) as secrets_yml:\n                    secrets_yml['path'] = \"inventory/{item}/config/private/\".format(item=item)\n                    secrets_yml.write()\n\n            # fix ansible path vars\n            for file in ['vpn.yml', 'destroy.yml']:\n                p = \"{}/resources/admin-environment/{}\".format(inventory_path, file)\n                if os.path.exists(\"{}/{}\".format(self._infrastructure_repository, p)):\n                    c = self.get_file_content(p)\n                    new_c = c.replace('../../account/vars.yml', '../../config/local/vars.yml')\n                    self.overwrite_file(p, new_c)\n\n            local_config_init = '''\nif [ -z \"${{INFRASTRUCTURE_REPO}}\" ]; then\n  echo \"INFRASTRUCTURE_REPO environment variable must be set\"\n  exit 1\nelif [ ! -d \"${{INFRASTRUCTURE_REPO}}\" ]; then\n  echo \"${{INFRASTRUCTURE_REPO}} doesn't exist or isn't a directory\"\n  exit 1\nfi\n\ncd \"${{INFRASTRUCTURE_REPO}}/inventory/{item}/config/local\" || exit 1\n\nfor default_file in *-default; do\n  out_file=\"../private/${{default_file//-default}}\"\n  echo -n \"${{default_file}} -> ${{out_file}} \"\n  if [ -e \"${{out_file}}\" ]; then\n    echo \"already exists. 
skipping.\"\n    continue\n  else\n    cat \"${{default_file}}\" | sed -e \"s@__INFRA_REPO_PATH__@$INFRASTRUCTURE_REPO@g\" > \"${{out_file}}\"\n    echo \"created.\"\n  fi\ndone\n'''.format(item=item)\n\n            self.overwrite_file('{}/config/local/local-config-init'.format(inventory_path), local_config_init, True)\n\n            ansible_cfg_default = '''\n[defaults]\ninventory = $INFRASTRUCTURE_REPO/plugins/inventory\nroles_path = $INFRASTRUCTURE_REPO/roles\nfilter_plugins = $INFRASTRUCTURE_REPO/plugins/filter_plugins\nretry_files_save_path = ~/.ansible-retry\nhash_behavior = merge\n\n[ssh_connection]\n# this needs the path defined without the use of ENV variables\nssh_args = -F __INFRA_REPO_PATH__/inventory/{item}/config/private/ssh_config\n'''.format(item=item)\n            self.overwrite_file('{}/config/local/ansible.cfg-default'.format(inventory_path), ansible_cfg_default)\n\n            ssh_config_default = '''\n# for the kube / kops working instances\nHost 172.20.64.* 172.20.65.* 172.20.66.* 172.20.67.* 172.20.68.* 172.20.69.* 172.20.70.* 172.20.71.* 172.20.72.* 172.20.73.* 172.20.74.* 172.20.75.*\n  User admin\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/working_kube\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for the kube / kops prod instances\nHost 172.20.96.* 172.20.97.* 172.20.98.* 172.20.99.* 172.20.100.* 172.20.101.* 172.20.102.* 172.20.103.* 172.20.104.* 172.20.105.* 172.20.106.* 172.20.107.*\n  User admin\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/production_kube\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for instances in private_working\nHost 172.20.48.* 172.20.49.* 172.20.50.* 172.20.51.* 172.20.52.* 172.20.53.* 172.20.54.* 172.20.55.* 172.20.56.* 172.20.57.* 172.20.58.* 172.20.59.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/working_private\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for 
instances in private_prod\nHost 172.20.32.* 172.20.33.* 172.20.34.* 172.20.35.* 172.20.36.* 172.20.37.* 172.20.38.* 172.20.39.* 172.20.40.* 172.20.41.* 172.20.42.* 172.20.43.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/production_private\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# for instances in admin\nHost 172.20.0.* 172.20.1.* 172.20.2.* 172.20.3.* 172.20.4.* 172.20.5.* 172.20.6.* 172.20.7.* 172.20.8.* 172.20.9.* 172.20.10.* 172.20.11.*\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/admin_private\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n\n# VPN instance\n# Replace the '*' with the IP address of the VPN instance\nHost *\n  User ubuntu\n  IdentityFile __INFRA_REPO_PATH__/inventory/{item}/config/private/admin_vpn\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n'''.format(item=item)\n        self.overwrite_file('{}/config/local/ssh_config-default'.format(inventory_path), ssh_config_default)\n        # update core\n\n        self.delete('{}/config/local/ansible.cfg'.format(inventory_path))\n        self.delete('{}/config/local/ssh_config'.format(inventory_path))\n        self.delete('tests')\n        self.delete('tasks')\n        self.delete('components')\n        pentagon.component.core.Core({}).add(self._infrastructure_repository, overwrite=True)\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_0_0.py",
    "content": "\nfrom pentagon import migration\nfrom pentagon.migration import *\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.0.0'\n    _ending_version = '2.1.0'\n\n    def run(self):\n\n        for item in self.inventory:\n\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug('Inventory Path: {}'.format(inventory_path))\n\n            if os.path.isfile('{}/config/local/vars.yml'.format(inventory_path)):\n                with self.YamlEditor('{}/config/local/vars.yml'.format(inventory_path)) as vars_yml:\n                    if not vars_yml.get('HELM_HOME'):\n                        vars_yml['HELM_HOME'] = '${INFRASTRUCTURE_REPO}/helm'\n                        vars_yml.write()\n\n            self.delete('roles')\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_1_0.py",
    "content": "\nfrom pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.1.0'\n    _ending_version = '2.2.0'\n\n    def run(self):\n\n        # Add new versions of files\n        c = core.Core({'cloud': 'aws'})\n        c._overwrite = True\n        c._destination = \"./Makefile.jinja\"\n        c._add_files('Makefile.jinja')\n        c._render_directory_templates()\n\n\n        for item in self.inventory:\n\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug('Inventory Path: {}'.format(inventory_path))\n\n            with self.YamlEditor('{}/config/local/vars.yml'.format(inventory_path)) as vars_yml:\n                inventory_vars = vars_yml.get_data()\n\n            template_context  = {\n                'aws_region': inventory_vars.get('AWS_DEFAULT_REGION'),\n                'infrastructure_bucket': inventory_vars.get('INFRASTRUCTURE_BUCKET')\n            }\n\n            if os.path.exists(\"{}/vpc\".format(inventory_path)):\n                # Move files around\n                self.move(\"{}/vpc/\".format(inventory_path), \"{}/terraform/\".format(inventory_path))\n                self.move(\"{}/terraform/terraform.tfvars\".format(inventory_path), \"{}/terraform/aws_vpc.auto.tfvars\".format(inventory_path))\n                self.move(\"{}/terraform/variables.tf\".format(inventory_path), \"{}/terraform/aws_vpc_variables.tf\".format(inventory_path))\n                self.move(\"{}/terraform/main.tf\".format(inventory_path), \"{}/terraform/aws_vpc.tf\".format(inventory_path))\n\n                # Mutate files\n                aws_vpc_file_content = self.get_file_content(\"{}/terraform/aws_vpc.tf\".format(inventory_path)).split('\\n')\n                new_aws_vpc_file_content = aws_vpc_file_content[:]\n                if '// terraform backend config' in 
aws_vpc_file_content:\n                    i = aws_vpc_file_content.index(\"// terraform backend config\")\n                    # Should remove provider and backend config if present\n                    new_aws_vpc_file_content = \\\n                        aws_vpc_file_content[0:i-1] + \\\n                        aws_vpc_file_content[i+9:]\n\n                self.delete(\"{}/terraform/terraform-remote.sh\".format(inventory_path))\n\n                new_aws_vpc_file_content = ('\\n').join(new_aws_vpc_file_content[6:-1])\n                self.overwrite_file(\"{}/terraform/aws_vpc.tf\".format(inventory_path), new_aws_vpc_file_content)\n\n                # Add new versions of files\n                i = inventory.Inventory(merge_dict(template_context, {'cloud': 'aws', 'name': item}))\n                i._overwrite = True\n                i._destination = \"{}/terraform/\".format(inventory_path)\n                i._add_files('terraform/backend.tf.jinja')\n                i._add_files('terraform/Makefile.jinja')\n                i._add_files('terraform/provider.tf.jinja')\n                i._render_directory_templates()\n\n                logging.warn(\"####### IMPORTANT: Your s3 backend configuration as changed ######\")\n                logging.warn(\"Move your state path in s3:\")\n                logging.warn(\"Command example: (only for example purposes) \")\n                logging.warn(\"aws s3 sync s3://{bucket}/{vpc_tag_name}/{old_path}/ s3://{bucket}/{item}/{new_path}/\".format(\n                    bucket=inventory_vars.get('INFRASTRUCTURE_BUCKET'),\n                    vpc_tag_name=inventory_vars.get('vpc_tag_name'),\n                    item=item,\n                    old_path='terraform-vpc',\n                    new_path='terraform')\n                )\n                logging.warn(\"aws s3 rm s3://{bucket}/{org}/{old_path}/\".format(\n                    bucket=inventory_vars.get('INFRASTRUCTURE_BUCKET'),\n                    
org=inventory_vars.get('org_name'),\n                    old_path='terraform-vpc')\n                )\n\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_2_0.py",
    "content": "\nfrom pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.2.0'\n    _ending_version = '2.3.0'\n\n    def run(self):\n\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug('Inventory Path: {}'.format(inventory_path))\n            self.delete('{}/config/local/local-config-init'.format(inventory_path))\n            self.delete('{}/terraform/Makefile'.format(inventory_path))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_3_1.py",
    "content": "from pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.3.1'\n    _ending_version = '2.4.0'\n\n    def run(self):\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug(\n                'Processing Inventory Item: {}'\n                .format(inventory_path)\n            )\n            with self.YamlEditor('{}/config/local/vars.yml'.format(inventory_path)) as vars_yml:\n                vars_yml_dict = vars_yml.get_data()\n\n            logging.info('Found KUBECONFIG in vars.yml = {}'\n                         .format(vars_yml_dict.get('KUBECONFIG')))\n            logging.info('Found ANSIBLE_CONFIGin vars.yml = {}'\n                         .format(vars_yml_dict.get('ANSIBLE_CONFIG')))\n            vars_yml_dict['KUBECONFIG'] = '${INFRASTRUCTURE_REPO}/inventory/${INVENTORY}/config/private/kube_config'\n            vars_yml_dict['ANSIBLE_CONFIG'] = '${INFRASTRUCTURE_REPO}/inventory/${INVENTORY}/config/private/ansible.cfg'\n            logging.info('Changed KUBECONFIG to be {}'\n                         .format(vars_yml_dict.get('KUBECONFIG')))\n            logging.info('Changed ANSIBLE_CONFIG to be {}'\n                         .format(vars_yml_dict.get('ANSIBLE_CONFIG')))\n\n            logging.warn(\n                '####### IMPORTANT: Your kube and ansible config paths have changed.')\n\n            vars_yml.write()\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_4_1.py",
    "content": "from pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.4.1'\n    _ending_version = '2.4.2'\n\n    def run(self):\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            self.delete(\"{}/terraform/Makefile\".format(inventory_path))\n\n        self.delete('Makefile')\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_4_3.py",
    "content": "import oyaml as yaml\n\nfrom pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.4.3'\n    _ending_version = '2.5.0'\n\n    def run(self):\n\n        # Remove Orgname from vars.yml\n        # Replace org-name with org in all vpn files\n        # Remove 'org' arg for vpn role call  in vpn.yml\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            with self.YamlEditor('{}/config/local/vars.yml'.format(inventory_path)) as vars_yml:\n                vars_yml.remove('org-name')\n                vars_yml.remove('secrets_bucket')\n                vars_yml.write()\n\n            if os.path.exists(\"{}/resources/admin-environment/\".format(inventory_path)):\n                with self.YamlEditor(\"{}/resources/admin-environment/vpn.yml\".format(inventory_path)) as vpn_yml:\n                    data = vpn_yml.get_data()\n                    try:\n                        del data[2]['roles'][0]['org']\n                    except (KeyError, IndexError) as e:\n                        logging.error(e)\n\n                    self.overwrite_file(\"{}/resources/admin-environment/vpn.yml\".format(inventory_path), yaml.safe_dump(data, default_flow_style=False))\n\n                with self.YamlEditor(\"{}/resources/admin-environment/env.yml\".format(inventory_path)) as env_yml:\n                    env_yml['env'] = \"{{ org }}\"\n                    env_yml['open_vpn_host'] = \"vpn-{{ org }}.{{ canonical_zone }}\"\n                    env_yml.write()\n\n                with self.YamlEditor(\"{}/resources/admin-environment/env.yml\".format(inventory_path)) as env_yml:\n                    data = env_yml.get_data()\n                    try:\n                        del data['vpn_bucket']\n                    except KeyError, e:\n                        
pass\n\n                    self.overwrite_file(\"{}/resources/admin-environment/env.yml\".format(inventory_path), yaml.safe_dump(data, default_flow_style=False))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_5_0.py",
    "content": "from pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\nimport re\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.5.0'\n    _ending_version = '2.6.0'\n\n    def run(self):\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug(\n                'Processing Inventory Item: {}'\n                .format(inventory_path)\n            )\n\n            # Update version of VPC TF module\n            aws_vpc_file = \"{}/terraform/aws_vpc.tf\".format(inventory_path)\n            if os.path.exists(aws_vpc_file):\n                aws_vpc_file_content = self.get_file_content(aws_vpc_file)\n                aws_vpc_file_content = re.sub(r'terraform-vpc.git\\?ref=v\\d+\\.\\d+.\\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)\n                aws_vpc_file_content = re.sub(r'\\n\\s*aws_secret_key\\s+=.+', '', aws_vpc_file_content)\n                aws_vpc_file_content = re.sub(r'\\n\\s*aws_access_key\\s+=.+', '', aws_vpc_file_content)\n                self.overwrite_file(aws_vpc_file, aws_vpc_file_content)\n                logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))\n\n            # Remove TF AWS provider variables from secrets. 
No longer referenced directly in VPC module.\n            secret_file = \"{}/config/private/secrets.yml\".format(inventory_path)\n            if os.path.exists(secret_file):\n                secrets_file_content = self.get_file_content(secret_file)\n                original_secrets_content = secrets_file_content\n                secrets_file_content = re.sub(r'# Terraform.*\\n', '', secrets_file_content)\n                secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\\n', '', secrets_file_content)\n                secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\\n\\n?', '', secrets_file_content)\n                self.overwrite_file(secret_file, secrets_file_content)\n                \n                if original_secrets_content != secrets_file_content:\n                    logging.warn(\"####### IMPORTANT: Secrets file has been updated #######\")\n                    logging.warn(\"  Update changed secrets file in 1Password: {}\".format(secret_file))\n                    logging.warn(\"  Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.\")\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_6_0.py",
    "content": "from pentagon import migration\nfrom pentagon.migration import *\nfrom pentagon.component import core, inventory\nfrom pentagon.helpers import merge_dict\n\nimport re\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.6.0'\n    _ending_version = '2.6.1'\n\n    def run(self):\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            logging.debug(\n                'Processing Inventory Item: {}'\n                .format(inventory_path)\n            )\n\n            # Remove deprecated variables from VPC TF module usage\n            aws_vpc_vars_file = \"{}/terraform/aws_vpc_variables.tf\".format(inventory_path)\n            if os.path.exists(aws_vpc_vars_file):\n                aws_vpc_vars_file_content = self.get_file_content(aws_vpc_vars_file)\n                aws_vpc_vars_file_content = re.sub(r'\\n\\s*variable \"aws_access_key\" {}', '', aws_vpc_vars_file_content)\n                aws_vpc_vars_file_content = re.sub(r'\\n\\s*variable \"aws_secret_key\" {}', '', aws_vpc_vars_file_content)\n                self.overwrite_file(aws_vpc_vars_file, aws_vpc_vars_file_content)\n                logging.info('Deprecated Terraform VPC module variables removed in {}'.format(item))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_6_2.py",
    "content": "from copy import deepcopy\n\n\nfrom pentagon import migration\nfrom pentagon.migration import *\nimport yaml\n\nig_message = \"\"\"\n# This file has been updated to contain a new set of InstanceGroups with one per Subnet\n# The min/max size should be the original size divided by the number of subnets\n# In order to put the new InstanceGroups into service you will need to:\n# 1. Ensure that the Min/Max size values are reasonable and double check the specs\n# 2. `kops replace -f` this file\n# 3. `kops update --yes` this cluster\n# 4. ensure the InstanceGroups come up properly\n# 5. cordon and drain the old nodes\n# 6. Update the ClusterAutoScaler configuration. You should take this opportunity to\n#    make it auto discover if appropriate: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup\n# 7. `kops delete` the old, multi-subnet InstanceGroup and delete it from this file\n# 8. Delete this comment and check the whole shebang into Git\n# 9. 
Go engage in the relaxing activity of your choice\n\n\"\"\"\n\nreadme = \"\"\"\n\n# Migration 2.6.2 -> 2.7.1\n\n## This migration:\n- removes older artifacts like the `post-kops.sh` if they exist\n- renames `inventory/<inventory>/clusters/<cluster>/cluster` -> `inventory/<inventory>/clusters/<cluster>/cluster-config` to match the current standard\n- splits any Kops instance group with more than one subnet into multiple instance groups with a single subnet.\n  * it attempts to guess on the correct min/max size of the instance groups by `current min/max / number of subnets` as an integer.\n  * it leaves the existing instance group in place to ease the migration\n  * there are instructions in each `inventory/<inventory>/clusters/<cluster>/cluster-config/nodes.yml`\n- adds audit logging to all kops clusters if not already there\n- adds cloud labels to allow cluster-autoscaler auto detect\n  * you may still need to add the iam policy to the nodes for it to function properly\n- updates the aws-iam-authenticator hook to not require any cloud storage\n\n## Risks:\n- the manifold update to the kops clusters will be a multi step process and may incur some risk.\n\n## Follow up tasks:\n- the update to the aws-iam-authenticator config no longer requires any cloud storage. Delete the bucket if it exists.\n- this version update changes the standards for the EtcD version. 
This is a breaking change so it is not handled automatically in this migration.\n\n\"\"\"\n\naws_iam_kops_hook = \"\"\"\nname: kops-hook-authenticator-config.service\nbefore:\n  - kubelet.service\nroles: [Master]\nmanifest: |\n  [Unit]\n    Description=Initialize AWS IAM Authenticator cert and Kube API Server config\n  [Service]\n    Type=oneshot\n    ExecStartPre=/bin/mkdir -p /srv/kubernetes/aws-iam-authenticator\n    ExecStartPre=/bin/sh -c '/usr/bin/test -r /srv/kubernetes/aws-iam-authenticator/README || /bin/echo These files were created by the kops-hook-authenticator-config service, which ran aws-iam-authenticator init via a temporary Docker container. >/srv/kubernetes/aws-iam-authenticator/README'\n    ExecStartPre=/bin/chown 10000:10000 /srv/kubernetes/aws-iam-authenticator\n    ExecStartPost=/bin/sh -c '(/usr/bin/id -u aws-iam-authenticator >/dev/null 2>&1 || /usr/sbin/groupadd -g 10000 aws-iam-authenticator) ; (/usr/bin/id -u aws-iam-authenticator >/dev/null 2>&1 || /usr/sbin/useradd -s /usr/sbin/nologin -c \"AWS IAM Authenticator configs\" -d /srv/kubernetes/aws-iam-authenticator -u 10000 -g aws-iam-authenticator aws-iam-authenticator)'\n    ExecStart=/bin/sh -c '(set -x ; /usr/bin/docker run --net=host --rm -w /srv/kubernetes/aws-iam-authenticator -v /srv/kubernetes/aws-iam-authenticator:/srv/kubernetes/aws-iam-authenticator --name aws-iam-authenticator-initialize gcr.io/heptio-images/authenticator:v0.3.0 init -i clustername ; /bin/mv /srv/kubernetes/aws-iam-authenticator/heptio-authenticator-aws.kubeconfig /srv/kubernetes/aws-iam-authenticator/kubeconfig.yaml)'\n\"\"\"\n\naudit_log_api_server_settings = {\n    'auditLogPath': '/var/log/kube-apiserver-audit.log',\n    'auditLogMaxAge': 10,\n    'auditLogMaxBackups': 1,\n    'auditLogMaxSize': 100,\n    'auditPolicyFile': '/srv/kubernetes/audit.yaml'\n}\n\naudit_log_file_assets_string = \"\"\"\nname: auditPolicyFile\npath: /srv/kubernetes/audit.yaml\nroles: [Master]\ncontent: |\n  apiVersion: 
audit.k8s.io/v1beta1\n  kind: Policy\n  rules:\n    # The following requests were manually identified as high-volume and low-risk,\n    # so drop them.\n    - level: None\n      users: [\"system:kube-proxy\"]\n      verbs: [\"watch\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"endpoints\", \"services\", \"services/status\"]\n    - level: None\n      # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.\n      # TODO(#46983): Change this to the ingress controller service account.\n      users: [\"system:unsecured\"]\n      namespaces: [\"kube-system\"]\n      verbs: [\"get\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"configmaps\"]\n    - level: None\n      users: [\"kubelet\"] # legacy kubelet identity\n      verbs: [\"get\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"nodes\", \"nodes/status\"]\n    - level: None\n      userGroups: [\"system:nodes\"]\n      verbs: [\"get\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"nodes\", \"nodes/status\"]\n    - level: None\n      users:\n        - system:kube-controller-manager\n        - system:kube-scheduler\n        - system:serviceaccount:kube-system:endpoint-controller\n      verbs: [\"get\", \"update\"]\n      namespaces: [\"kube-system\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"endpoints\"]\n    - level: None\n      users: [\"system:apiserver\"]\n      verbs: [\"get\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n    # Don't log HPA fetching metrics.\n    - level: None\n      users:\n        - system:kube-controller-manager\n      verbs: [\"get\", \"list\"]\n      resources:\n        - group: \"metrics.k8s.io\"\n    # Don't log these read-only URLs.\n    - level: None\n      nonResourceURLs:\n        - /healthz*\n        - /version\n        - /swagger*\n  
  # Don't log events requests.\n    - level: None\n      resources:\n        - group: \"\" # core\n          resources: [\"events\"]\n    # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes\n    - level: Request\n      users: [\"kubelet\", \"system:node-problem-detector\", \"system:serviceaccount:kube-system:node-problem-detector\"]\n      verbs: [\"update\",\"patch\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"nodes/status\", \"pods/status\"]\n      omitStages:\n        - \"RequestReceived\"\n    - level: Request\n      userGroups: [\"system:nodes\"]\n      verbs: [\"update\",\"patch\"]\n      resources:\n        - group: \"\" # core\n          resources: [\"nodes/status\", \"pods/status\"]\n      omitStages:\n        - \"RequestReceived\"\n    # deletecollection calls can be large, don't log responses for expected namespace deletions\n    - level: Request\n      users: [\"system:serviceaccount:kube-system:namespace-controller\"]\n      verbs: [\"deletecollection\"]\n      omitStages:\n        - \"RequestReceived\"\n    # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,\n    # so only log at the Metadata level.\n    - level: Metadata\n      resources:\n        - group: \"\" # core\n          resources: [\"secrets\", \"configmaps\"]\n        - group: authentication.k8s.io\n          resources: [\"tokenreviews\"]\n      omitStages:\n        - \"RequestReceived\"\n    # Get responses can be large; skip them.\n    - level: Request\n      verbs: [\"get\", \"list\", \"watch\"]\n      resources:\n        - group: \"\" # core\n        - group: \"admissionregistration.k8s.io\"\n        - group: \"apiextensions.k8s.io\"\n        - group: \"apiregistration.k8s.io\"\n        - group: \"apps\"\n        - group: \"authentication.k8s.io\"\n        - group: \"authorization.k8s.io\"\n        - group: \"autoscaling\"\n        - group: \"batch\"\n        - 
group: \"certificates.k8s.io\"\n        - group: \"extensions\"\n        - group: \"metrics.k8s.io\"\n        - group: \"networking.k8s.io\"\n        - group: \"policy\"\n        - group: \"rbac.authorization.k8s.io\"\n        - group: \"scheduling.k8s.io\"\n        - group: \"settings.k8s.io\"\n        - group: \"storage.k8s.io\"\n      omitStages:\n        - \"RequestReceived\"\n    # Default level for known APIs\n    - level: RequestResponse\n      resources:\n        - group: \"\" # core\n        - group: \"admissionregistration.k8s.io\"\n        - group: \"apiextensions.k8s.io\"\n        - group: \"apiregistration.k8s.io\"\n        - group: \"apps\"\n        - group: \"authentication.k8s.io\"\n        - group: \"authorization.k8s.io\"\n        - group: \"autoscaling\"\n        - group: \"batch\"\n        - group: \"certificates.k8s.io\"\n        - group: \"extensions\"\n        - group: \"metrics.k8s.io\"\n        - group: \"networking.k8s.io\"\n        - group: \"policy\"\n        - group: \"rbac.authorization.k8s.io\"\n        - group: \"scheduling.k8s.io\"\n        - group: \"settings.k8s.io\"\n        - group: \"storage.k8s.io\"\n      omitStages:\n        - \"RequestReceived\"\n    # Default level for all other requests.\n    - level: Metadata\n      omitStages:\n        - \"RequestReceived\"\n\"\"\"\n\n# Magic to make block formatting in yaml.dump work as expected\n\n\nclass folded_unicode(unicode):\n    pass\n\n\nclass literal_unicode(unicode):\n    pass\n\n\ndef folded_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')\n\n\ndef literal_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n\n\nyaml.add_representer(folded_unicode, folded_unicode_representer)\nyaml.add_representer(literal_unicode, literal_unicode_representer)\n# 
https://stackoverflow.com/questions/6432605/any-yaml-libraries-in-python-that-support-dumping-of-long-strings-as-block-liter\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.6.2'\n    _ending_version = '2.7.1'\n\n    _readme_string = readme\n\n    def run(self):\n\n        old_nodes_file_name = \"nodes.yml\"\n\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            # If there are no clusters, move on.\n            if not os.path.isdir('{}/clusters/'.format(inventory_path)):\n                continue\n\n            for cluster_item in os.listdir('{}/clusters/'.format(inventory_path)):\n                item_path = '{}/clusters/{}'.format(inventory_path, cluster_item)\n                # Is this a kops cluster?\n\n                # There is a small amount of variation here where some cluster config\n                # directories are `cluster` and some are `cluster-config`\n                # Align these\n                if os.path.isdir(\"{}/cluster\".format(item_path)):\n                    self.move(\"{}/cluster\".format(item_path), \"{}/cluster-config\".format(item_path))\n\n                # Remove Post Kops if it exists\n                try:\n                    os.remove(\"{}/cluster-config/post-kops.sh\".format(item_path))\n                except Exception:\n                    pass\n\n                if os.path.isdir(item_path) and os.path.exists(\"{}/cluster-config/cluster.yml\".format(item_path)):\n                    logging.info(\"Migrating {} {}.\".format(item, cluster_item))\n\n                    # Start node rejiggering, `if` here for code folding in IDE\n                    if True:\n                        old_node_groups = \"{}/cluster-config/{}\".format(item_path, old_nodes_file_name)\n                        # Align file names:\n                        if not os.path.isfile(old_node_groups):\n                            masters = []\n                            nodes = 
[]\n                            for f in os.listdir(\"{}/cluster-config/\".format(item_path)):\n                                if f.endswith('yml') and f != 'cluster.yml':\n                                    with open(\"{}/cluster-config/{}\".format(item_path, f)) as yaml_file:\n                                        for document in yaml.load_all(yaml_file.read()):\n                                            if document.get('kind') == 'InstanceGroup':\n                                                if document['spec']['role'] == 'Node':\n                                                    for hook in document['spec'].get('hooks', []):\n                                                        if hook.get('manifest') is not None:\n                                                            hook['manifest'] = literal_unicode(hook['manifest'])\n                                                    nodes.append(document)\n                                                elif document['spec']['role'] == 'Master':\n                                                    for hook in document['spec'].get('hooks', []):\n                                                        if hook.get('manifest') is not None:\n                                                            hook['manifest'] = literal_unicode(hook['manifest'])\n                                                    masters.append(document)\n                                                else:\n                                                    continue\n\n                                    os.remove(\"{}/cluster-config/{}\".format(item_path, f))\n                            with open(\"{}/cluster-config/{}\".format(item_path, 'nodes.yml'), 'w') as nodes_file:\n                                nodes_file.write(yaml.dump_all(nodes, default_flow_style=False))\n\n                            with open(\"{}/cluster-config/{}\".format(item_path, 'masters.yml'), 'w') as masters_file:\n                                
masters_file.write(yaml.dump_all(masters, default_flow_style=False))\n\n                        # Because the nodes.yml may have multiple documents, we need to abuse the YamlEditor class a little bit\n                        with open(old_node_groups) as oig:\n                            new_node_groups = []\n                            for node_group in yaml.load_all(oig.read()):\n\n                                # Keep exisiting node group in the file to ease manual steps\n                                for hook in node_group['spec'].get('hooks', []):\n                                    if hook.get('manifest') is not None:\n                                        hook['manifest'] = literal_unicode(hook['manifest'])\n\n                                new_node_groups.append(node_group)\n\n                                sn_count = len(node_group['spec']['subnets'])\n                                cluster_name = node_group['metadata']['labels']['kops.k8s.io/cluster']\n\n                                # Add cloud labels to the existing Node Group\n                                # Don't clobber existing cloud labels\n                                clabels = node_group['spec'].get('cloudLabels')\n\n                                as_clabels = {\n                                    'k8s.io/cluster-autoscaler/enabled': \"\",\n                                    'kubernetes.io/cluster/{}'.format(cluster_name): \"\"\n                                }\n\n                                if clabels:\n                                    for key in as_clabels:\n                                        if not clabels.get('k8s.io/cluster-autoscaler/enabled'):\n                                            clabels[key] = \"\"\n                                else:\n                                    node_group['spec']['cloudLabels'] = as_clabels\n\n                                write_message = False\n                                if sn_count > 1:\n                         
           write_message = True\n                                    max_size = node_group['spec']['maxSize'] / sn_count\n                                    min_size = node_group['spec']['minSize'] / sn_count\n                                    name = node_group['metadata']['name']\n\n                                    logging.warn(\"Creating New Kops Instance Groups for {} group {}. This will require manual intervention.\"\n                                                 .format(cluster_item, node_group['metadata']['name']))\n                                    logging.warn(\"Best guess group sizing: MinSize = {} and MaxSize = {}\".format(min_size, max_size))\n\n                                    # Create new instance groups from existing instance group\n\n                                    for subnet in node_group['spec']['subnets']:\n                                        new_node_group = deepcopy(node_group)\n                                        new_node_group['spec']['subnets'] = [subnet]\n                                        new_node_group['spec']['minSize'] = min_size\n                                        new_node_group['spec']['maxSize'] = max_size\n                                        new_node_group['metadata']['name'] = \"{}-{}\".format(name, subnet)\n                                        new_node_groups.append(new_node_group)\n\n                                with open(\"{}/cluster-config/{}\".format(item_path, 'nodes.yml'), 'w') as nodes_file:\n                                    if write_message:\n                                        nodes_file.write(ig_message)\n                                    nodes_file.write(yaml.dump_all(new_node_groups, default_flow_style=False))\n                            # Stop node rejiggering\n\n                    # Setup cluster spec with aws-iam auth and audit logging\n                    if True:\n                        cluster_spec_file = 
\"{}/cluster-config/cluster.yml\".format(item_path)\n                        with open(cluster_spec_file) as yaml_file:\n                            cluster_config = yaml.load(yaml_file.read())\n                            cluster_spec = cluster_config['spec']\n\n                            hooks = cluster_spec.get(\"hooks\")\n                            if hooks:\n                                logging.debug(hooks)\n                                for hook in hooks:\n                                    if hook['name'] == 'kops-hook-authenticator-config.service':\n                                        kops_hook_index = hooks.index(hook)\n                                        logging.debug(\"Found kops auth hook at index %d\", kops_hook_index)\n                                        hooks.pop(kops_hook_index)\n                                        logging.debug(\"Removing existing kops-hook-authenticator-config.service at %d\", kops_hook_index)\n                                    else:\n                                        logging.debug(\"Found other existing hook %s\", hook['name'])\n                                        hook['manifest'] = literal_unicode(hook['manifest'])\n\n                            else:\n                                logging.debug(\"No hooks found in cluster spec.\")\n                                cluster_spec['hooks'] = []\n\n                            for policy_type in cluster_spec.get('additionalPolicies', {}):\n                                cluster_spec['additionalPolicies'][policy_type] = literal_unicode(cluster_spec['additionalPolicies'][policy_type])\n\n                            hook = yaml.load(aws_iam_kops_hook)\n                            hook['manifest'] = literal_unicode(hook['manifest'])\n                            cluster_spec['hooks'].append(hook)\n\n                            file_assets = cluster_spec.get('fileAssets')\n                            if not file_assets:\n                                
cluster_spec['fileAssets'] = []\n                                file_assets = cluster_spec['fileAssets']\n\n                            audit_policy_file_assets = yaml.load(audit_log_file_assets_string)\n\n                            existing_audit_file_assets = [fa for fa in file_assets if fa['name'] == audit_policy_file_assets['name']]\n\n                            if len(existing_audit_file_assets) == 0:\n                                file_assets.append(audit_policy_file_assets)\n\n                            for fa in file_assets:\n                                if fa.get('content'):\n                                    fa['content'] = literal_unicode(fa['content'])\n\n                            if not cluster_spec.get('kubeAPIServer'):\n                                cluster_spec['kubeAPIServer'] = {}\n\n                            for setting in audit_log_api_server_settings:\n                                if cluster_spec['kubeAPIServer'].get(setting) is None:\n                                    cluster_spec['kubeAPIServer'][setting] = audit_log_api_server_settings[setting]\n\n                            if cluster_spec['kubeAPIServer'].get('authenticationTokenWebhookConfigFile') != '/srv/kubernetes/aws-iam-authenticator/kubeconfig.yaml':\n                                cluster_spec['kubeAPIServer']['authenticationTokenWebhookConfigFile'] = '/srv/kubernetes/aws-iam-authenticator/kubeconfig.yaml'\n\n                        with open(cluster_spec_file, 'w') as yaml_file:\n                            yaml_file.write(yaml.dump(cluster_config, default_flow_style=False))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_7_1.py",
    "content": "from copy import deepcopy\n\n\nfrom pentagon import migration\nimport yaml\nimport os\nimport logging\n\nreadme = \"\"\"\n\n# Migration 2.7.1 -> 2.7.2\n\n## This migration:\n- adds kubelet flags that were missing in the last migration to take advantage of the audit policy\n- made `anonymousAuth: false` default for Kops clusters. This currently conflicts with metricserver version > 3.0.0\n\n\n## Risks:\n- this requires you to roll the cluster\n- metrics-server version compatibility \n\n## Follow up tasks:\n- roll the cluster\n\n\"\"\"\n\n# Magic to make block formatting in yaml.dump work as expected\n\n\nclass folded_unicode(unicode):\n    pass\n\n\nclass literal_unicode(unicode):\n    pass\n\n\ndef folded_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')\n\n\ndef literal_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n\n\nyaml.add_representer(folded_unicode, folded_unicode_representer)\nyaml.add_representer(literal_unicode, literal_unicode_representer)\n# https://stackoverflow.com/questions/6432605/any-yaml-libraries-in-python-that-support-dumping-of-long-strings-as-block-liter\n\naudit_settings = {\n    'auditLogPath': '/var/log/kube-apiserver-audit.log',\n    'auditLogMaxAge': 10,\n    'auditLogMaxBackups': 1,\n    'auditLogMaxSize': 100,\n    'auditPolicyFile': '/srv/kubernetes/audit.yaml'\n}\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.7.1'\n    _ending_version = '2.7.2'\n\n    _readme_string = readme\n\n    def run(self):\n\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            # If there are no clusters, move on.\n            if not os.path.isdir('{}/clusters/'.format(inventory_path)):\n                continue\n\n            for cluster_item in os.listdir('{}/clusters/'.format(inventory_path)):\n                item_path = 
'{}/clusters/{}'.format(inventory_path, cluster_item)\n                # Is this a kops cluster?\n\n                # There is a small amount of variation here where some cluster config\n                # directories are `cluster` and some are `cluster-config`\n                # Align these\n                \n                if os.path.isdir(\"{}/cluster\".format(item_path)):\n                    logging.info(\"Moving {item_path}/cluster to {item_path}/cluster-config\".format(item_path=item_path))\n                    self.move(\"{}/cluster\".format(item_path), \"{}/cluster-config\".format(item_path))\n\n                if os.path.isdir(item_path) and os.path.exists(\"{}/cluster-config/cluster.yml\".format(item_path)):\n                    logging.info(\"Migrating {} {}.\".format(item, cluster_item))\n\n                    # Setup cluster spec with aws-iam auth and audit logging\n                    if True:\n                        cluster_spec_file = \"{}/cluster-config/cluster.yml\".format(item_path)\n                        with open(cluster_spec_file) as yaml_file:\n                            cluster_config = yaml.load(yaml_file.read())\n                            cluster_spec = cluster_config['spec']\n\n                            if cluster_spec.get('kubelet') is None:\n                                cluster_spec['kubelet'] = {}\n                            cluster_spec['kubelet']['anonymousAuth'] = False\n\n                            hooks = cluster_spec.get(\"hooks\")\n                            if hooks:\n                                logging.debug(hooks)\n                                for hook in hooks:\n                                    hook['manifest'] = literal_unicode(hook['manifest'])\n\n                            for policy_type in cluster_spec.get('additionalPolicies', {}):\n                                cluster_spec['additionalPolicies'][policy_type] = literal_unicode(cluster_spec['additionalPolicies'][policy_type])\n\n                        
    for fa in cluster_spec.get('fileAssets', []):\n                                if fa.get('content'):\n                                    fa['content'] = literal_unicode(fa['content'])\n\n                            kube_api_server = cluster_spec['kubeAPIServer']\n\n                            for setting, value in audit_settings.items():\n                                if kube_api_server.get(setting) != value:\n                                    kube_api_server[setting] = value\n\n                        with open(cluster_spec_file, 'w') as yaml_file:\n                            yaml_file.write(yaml.dump(cluster_config, default_flow_style=False))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_2_7_3.py",
    "content": "from copy import deepcopy\n\n\nfrom pentagon import migration\nimport yaml\nimport os\nimport logging\n\nreadme = \"\"\"\n\n# Migration 2.7.2 -> 3.1.0\n\n## This migration:\n- adds an updated kops hook to patch docker-runc\n\n\n## Risks:\n- this requires you to roll the cluster\n\n## Follow up tasks:\n- roll the cluster\n\n\"\"\"\n\n# Magic to make block formatting in yaml.dump work as expected\n\n\nclass folded_unicode(unicode):\n    pass\n\n\nclass literal_unicode(unicode):\n    pass\n\n\ndef folded_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')\n\n\ndef literal_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n\n\nyaml.add_representer(folded_unicode, folded_unicode_representer)\nyaml.add_representer(literal_unicode, literal_unicode_representer)\n# https://stackoverflow.com/questions/6432605/any-yaml-libraries-in-python-that-support-dumping-of-long-strings-as-block-liter\n\nrunCHookContents = \"\"\"\nname: patch-runc\nbefore:\n- docker.service\nmanifest: |\n  Type=oneshot\n  ExecStart=/bin/bash -c \"wget https://artifacts.reactiveops.com/runc-cve/releases/download/CVE-2019-5736-build2/runc-v17.03.2-amd64 && chattr -i /usr/bin/docker-runc && mv runc-v17.03.2-amd64 /usr/bin/docker-runc && chmod +x /usr/bin/docker-runc && docker-runc --version && echo done || sudo shutdown now 'Patching docker-runc failed'\"\n  ExecStartPost=/bin/bash -c \"docker-runc --version | grep -q ae16ac34cda712253fdf199632fd6b5ec5645e27 || sudo shutdown now\"\nroles:\n- Node\n- Master\n\"\"\"\n\nrunCHook = yaml.load(runCHookContents)\nrunCHook['manifest'] = literal_unicode(runCHook['manifest'])\n\nclass Migration(migration.Migration):\n    _starting_version = '2.7.2'\n    _ending_version = '3.1.0'\n\n    _readme_string = readme\n\n    def run(self):\n\n        for item in self.inventory:\n            inventory_path = 
\"inventory/{}\".format(item)\n            # If there are no clusters, move on.\n            if not os.path.isdir('{}/clusters/'.format(inventory_path)):\n                continue\n\n            for cluster_item in os.listdir('{}/clusters/'.format(inventory_path)):\n                item_path = '{}/clusters/{}'.format(inventory_path, cluster_item)\n\n                if os.path.isdir(item_path) and os.path.exists(\"{}/cluster-config/cluster.yml\".format(item_path)):\n                    logging.info(\"Migrating {} {}.\".format(item, cluster_item))\n\n                    # Setup cluster spec with patch-runc hook\n                    if True:\n                        cluster_spec_file = \"{}/cluster-config/cluster.yml\".format(item_path)\n                        with open(cluster_spec_file) as yaml_file:\n                            cluster_config = yaml.load(yaml_file.read())\n                            cluster_spec = cluster_config['spec']\n\n                            for policy_type in cluster_spec.get('additionalPolicies', {}):\n                                cluster_spec['additionalPolicies'][policy_type] = literal_unicode(cluster_spec['additionalPolicies'][policy_type])\n\n                            for fa in cluster_spec.get('fileAssets', []):\n                                if fa.get('content'):\n                                    fa['content'] = literal_unicode(fa['content'])\n\n                            hooks = cluster_spec.get(\"hooks\")\n                            runCHookIndex = None\n                            if hooks:\n                                logging.debug(hooks)\n                                for index, hook in enumerate(hooks):\n                                    hook['manifest'] = literal_unicode(hook['manifest'])\n                                    if hook['name'] == 'patch-runc':\n                                        logging.info(\"Found patch-runc hook at index %s\", index)\n                                        
runCHookIndex = index\n                                if runCHookIndex:\n                                    hooks[runCHookIndex] = runCHook\n                                else:\n                                    hooks.append(runCHook)\n                            else:\n                                cluster_spec[\"hooks\"] = []\n                                cluster_spec[\"hooks\"].append(runCHook)\n\n\n                        with open(cluster_spec_file, 'w') as yaml_file:\n                            yaml_file.write(yaml.dump(cluster_config, default_flow_style=False))\n"
  },
  {
    "path": "pentagon/migration/migrations/migration_3_1_0.py",
    "content": "from copy import deepcopy\n\n\nfrom pentagon import migration\nimport yaml\nimport os\nimport logging\n\nreadme = \"\"\"\n\n# Migration 3.1.0 -> 3.1.1\n\n## This migration:\n- Fixes the bug introduced with the last migration.  This bug caused the migration to break if the patch-runc\n  hook existed at index 0.  This migration will fix any repos that have not been merged since then.\n\n## Risks:\n- this requires you to roll the cluster if you have not already adopted the newest patch-runc hook.\n\n## Follow up tasks:\n- roll the cluster if you have not already adopted the newest patch-runc hook.\n\"\"\"\n\n# Magic to make block formatting in yaml.dump work as expected\n\n\nclass folded_unicode(unicode):\n    pass\n\n\nclass literal_unicode(unicode):\n    pass\n\n\ndef folded_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')\n\n\ndef literal_unicode_representer(dumper, data):\n    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')\n\n\nyaml.add_representer(folded_unicode, folded_unicode_representer)\nyaml.add_representer(literal_unicode, literal_unicode_representer)\n# https://stackoverflow.com/questions/6432605/any-yaml-libraries-in-python-that-support-dumping-of-long-strings-as-block-liter\n\nrunCHookContents = \"\"\"\nname: patch-runc\nbefore:\n- docker.service\nmanifest: |\n  Type=oneshot\n  ExecStart=/bin/bash -c \"wget https://artifacts.reactiveops.com/runc-cve/releases/download/CVE-2019-5736-build2/runc-v17.03.2-amd64 && chattr -i /usr/bin/docker-runc && mv runc-v17.03.2-amd64 /usr/bin/docker-runc && chmod +x /usr/bin/docker-runc && docker-runc --version && echo done || sudo shutdown now 'Patching docker-runc failed'\"\n  ExecStartPost=/bin/bash -c \"docker-runc --version | grep -q ae16ac34cda712253fdf199632fd6b5ec5645e27 || sudo shutdown now\"\nroles:\n- Node\n- Master\n\"\"\"\n\nrunCHook = yaml.load(runCHookContents)\nrunCHook['manifest'] = 
literal_unicode(runCHook['manifest'])\n\n\nclass Migration(migration.Migration):\n    _starting_version = '2.7.2'\n    _ending_version = '3.1.0'\n\n    _readme_string = readme\n\n    def run(self):\n\n        for item in self.inventory:\n            inventory_path = \"inventory/{}\".format(item)\n            # If there are no clusters, move on.\n            if not os.path.isdir('{}/clusters/'.format(inventory_path)):\n                continue\n\n            for cluster_item in os.listdir('{}/clusters/'.format(inventory_path)):\n                item_path = '{}/clusters/{}'.format(inventory_path, cluster_item)\n\n                if os.path.isdir(item_path) and os.path.exists(\"{}/cluster-config/cluster.yml\".format(item_path)):\n                    logging.info(\"Migrating {} {}.\".format(item, cluster_item))\n\n                    # Setup cluster spec with patch-runc hook\n                    if True:\n                        cluster_spec_file = \"{}/cluster-config/cluster.yml\".format(item_path)\n                        with open(cluster_spec_file) as yaml_file:\n                            cluster_config = yaml.load(yaml_file.read())\n                            cluster_spec = cluster_config['spec']\n\n                            for policy_type in cluster_spec.get('additionalPolicies', {}):\n                                cluster_spec['additionalPolicies'][policy_type] = literal_unicode(cluster_spec['additionalPolicies'][policy_type])\n\n                            for fa in cluster_spec.get('fileAssets'):\n                                if fa.get('content'):\n                                    fa['content'] = literal_unicode(fa['content'])\n\n                            hooks = cluster_spec.get(\"hooks\")\n                            new_hooks = []\n                            if hooks:\n                                logging.debug(hooks)\n                                for hook in hooks:\n                                    hook['manifest'] = 
literal_unicode(hook['manifest'])\n                                    if hook['name'] == 'patch-runc':\n                                        logging.info(\"Found patch-runc hook. Removing it.\")\n                                    else:\n                                        new_hooks.append(hook)\n\n                            cluster_spec[\"hooks\"] = new_hooks\n                            cluster_spec[\"hooks\"].append(runCHook)\n\n                        with open(cluster_spec_file, 'w') as yaml_file:\n                            yaml_file.write(yaml.dump(cluster_config, default_flow_style=False))\n"
  },
  {
    "path": "pentagon/pentagon.py",
    "content": "# from __future__ import (absolute_import, division, print_function)\n# __metaclass__ = type\n\nimport datetime\nimport shutil\nimport logging\nimport os\nimport re\nimport sys\nimport traceback\nimport oyaml as yaml\nimport boto3\n\nfrom git import Repo, Git\nfrom shutil import copytree, ignore_patterns\n\nimport component.kops as kops\nimport component.inventory as inventory\nimport component.core as core\nimport component.gcp as gcp\nfrom helpers import render_template, write_yaml_file, create_rsa_key, merge_dict, allege_aws_availability_zones\nfrom meta import __version__, __author__\n\n\nclass PentagonException(Exception):\n    pass\n\n\nclass PentagonProject(object):\n    from defaults import AWSPentagonDefaults as PentagonDefaults\n    keys_to_sanitize = ['aws_access_key', 'aws_secret_key', 'output_file']\n\n    def __init__(self, name, data={}):\n        self._data = data\n        self._name = name\n        logging.debug(self._data)\n        self._force = self.get_data('force')\n        self._configure_project = self.get_data('configure')\n\n        # Set this before it gets overridden by the config file\n        self._outfile = self.get_data('output_file')\n\n        # Setting local path info\n        self._repository_name = os.path.expanduser(\n            \"{}-infrastructure\".format(name))\n        self._repository_directory = \"{}\".format(\n            self._repository_name)\n\n        self._infrastructure_bucket = self.get_data(\n            'infrastructure_bucket', self._repository_name)\n\n        self._private_path = \"inventory/default/config/private\"\n\n    def get_data(self, name, default=None):\n        \"\"\" Get argument name from click arguments, if it exists, or return default.\n            Builtin .get method is inadequate because click defaults to a value of None\n            which fools the .get() method \"\"\"\n        if self._data.get(name) is not None:\n            return self._data.get(name)\n        return 
default\n\n    def __git_init(self):\n        \"\"\" Initialize git repository in the project infrastructure path \"\"\"\n        Repo.init(self._repository_directory)\n\n    def __write_config_file(self):\n        \"\"\" Write sanitized yaml file of starting arguments \"\"\"\n        logging.info(\n            \"Writing arguments to file for Posterity: {}\".format(self._outfile))\n        config = {}\n\n        for key, value in self._data.items():\n            if value and key not in self.keys_to_sanitize:\n                config[key] = value\n        config['project_name'] = self._name\n\n        logging.debug(config)\n        try:\n            write_yaml_file(self._repository_directory +\n                            \"/\" + self._outfile, config)\n        except Exception as e:\n            logging.debug(traceback.format_exc(e))\n            logging.error(\"Failed to write arguments to file\")\n            logging.error(e)\n\n    def __repository_directory_exists(self):\n        \"\"\" Tests if the repository directory already exists \"\"\"\n        logging.debug(\"Checking for repository {}\".format(\n            self._repository_directory))\n        if os.path.isdir(self._repository_directory):\n            return True\n            logging.debug(\"Already Exists\")\n        logging.debug(\"Does not exist\")\n        return False\n\n    def start(self):\n        if not self.__repository_directory_exists() or self._force:\n            logging.info(\"Copying project files...\")\n            self.__create_repo_core()\n            self.__git_init()\n            self.__write_config_file()\n            with open('{}/.version'.format(self._repository_directory), 'w') as f:\n                f.write(__version__)\n\n            if self._configure_project is not False:\n                self.configure_default_project()\n        else:\n            raise PentagonException(\n                'Project path exists. 
Cowardly refusing to overwrite existing project.')\n\n    def __create_repo_core(self):\n        logging.debug(self._repository_directory)\n        core.Core(self._data).add('{}'.format(self._repository_directory))\n\n\nclass AWSPentagonProject(PentagonProject):\n    # Placeholders for when there is not sensible default\n\n    # AWS and VPC\n    _aws_access_key_placeholder = '<aws-access-key>'\n    _aws_secret_key_placeholder = '<aws-secret-key>'\n    _aws_default_region_placeholder = '<aws-default-region>'\n    _aws_availability_zone_count_placeholder = '<aws-availability-zone-count>'\n    _aws_availability_zones_placeholder = '<aws-availability-zones>'\n\n    # VPC\n    _vpc_name = '<vpc_name>'\n    _vpc_cidr_base = '<vpc_cidr_base>'\n    _vpc_id = '<vpc_id>'\n\n    # Working Kubernetes\n    _working_kubernetes_cluster_name = '<working_kubernetes_cluster_name>'\n    _working_kubernetes_dns_zone = '<working_kubernetes_dns_zone>'\n    _working_kubernetes_master_aws_zone = '<working_kubernetes_master_aws_zone>'\n\n    # Production Kubernetes\n    _production_kubernetes_cluster_name = '<production_kubernetes_cluster_name>'\n    _production_kubernetes_dns_zone = '<production_kubernetes_dns_zone>'\n    _production_kubernetes_node_count = '<production_kubernetes_node_count>'\n    _production_kubernetes_master_aws_zone = '<production_kubernetes_master_aws_zone>'\n\n    def __init__(self, name, data={}):\n        super(AWSPentagonProject, self).__init__(name, data)\n        self._create_keys = self.get_data('create_keys')\n\n        self._ssh_keys = {\n            'admin_vpn_key': self.get_data('admin_vpn_key', self.PentagonDefaults.ssh['admin_vpn_key']),\n            'working_kube_key': self.get_data('working_kube_key', self.PentagonDefaults.ssh['working_kube_key']),\n            'production_kube_key': self.get_data('production_kube_key', self.PentagonDefaults.ssh['production_kube_key']),\n            'working_private_key': self.get_data('working_private_key', 
self.PentagonDefaults.ssh['working_private_key']),\n            'production_private_key': self.get_data('production_private_key', self.PentagonDefaults.ssh['production_private_key']),\n        }\n\n        # AWS Specific Stuff\n        self._aws_access_key = self.get_data(\n            'aws_access_key', self._aws_access_key_placeholder)\n        self._aws_secret_key = self.get_data(\n            'aws_secret_key', self._aws_secret_key_placeholder)\n        if self.get_data('aws_default_region'):\n            self._aws_default_region = self.get_data('aws_default_region')\n            self._aws_availability_zone_count = int(self.get_data(\n                'aws_availability_zone_count', self.PentagonDefaults.vpc['aws_availability_zone_count']))\n            self._aws_availability_zones = self.get_data('aws_availability_zones') or allege_aws_availability_zones(\n                self._aws_default_region, self._aws_availability_zone_count)\n\n        else:\n            self._aws_default_region = self._aws_default_region_placeholder\n            self._aws_availability_zone_count = self._aws_availability_zone_count_placeholder\n            self._aws_availability_zones = self._aws_availability_zones_placeholder\n\n        # VPC information\n        self._vpc_name = self.get_data(\n            'vpc_name', self.PentagonDefaults.vpc['vpc_name'])\n        self._vpc_cidr_base = self.get_data(\n            'vpc_cidr_base', self.PentagonDefaults.vpc['vpc_cidr_base'])\n        self._vpc_id = self.get_data('vpc_id', self._vpc_id)\n\n        # DNS\n        self._dns_zone = self.get_data('dns_zone', '{}.com'.format(self._name))\n\n        # Kubernetes version\n        self._kubernetes_version = self.get_data(\n            'kubernetes_version', self.PentagonDefaults.kubernetes['kubernetes_version'])\n\n        # Working Kubernetes\n        self._working_kubernetes_cluster_name = self.get_data(\n            'working_kubernetes_cluster_name', 'working-1.{}'.format(self._dns_zone))\n       
 self._working_kubernetes_dns_zone = self.get_data(\n            'working_kubernetes_dns_zone', '{}'.format(self._dns_zone))\n\n        self._working_kubernetes_node_count = self.get_data(\n            'working_kubernetes_node_count', self.PentagonDefaults.kubernetes['node_count'])\n        self._working_kubernetes_master_aws_zones = self.get_data(\n            'working_kubernetes_master_aws_zones', self._aws_availability_zones)\n        self._working_kubernetes_master_node_type = self.get_data(\n            'working_kubernetes_master_node_type', self.PentagonDefaults.kubernetes['master_node_type'])\n        self._working_kubernetes_worker_node_type = self.get_data(\n            'working_kubernetes_worker_node_type', self.PentagonDefaults.kubernetes['worker_node_type'])\n        self._working_kubernetes_v_log_level = self.get_data(\n            'working_kubernetes_v_log_level', self.PentagonDefaults.kubernetes['v_log_level'])\n        self._working_kubernetes_network_cidr = self.get_data(\n            'working_kubernetes_network_cidr', self.PentagonDefaults.kubernetes['network_cidr'])\n        self._working_third_octet = self.get_data(\n            'working_third_octet', self.PentagonDefaults.kubernetes['working_third_octet'])\n\n        # Production Kubernetes\n        self._production_kubernetes_cluster_name = self.get_data(\n            'production_kubernetes_cluster_name', 'production-1.{}'.format(self._dns_zone))\n        self._production_kubernetes_dns_zone = self.get_data(\n            'production_kubernetes_dns_zone', '{}'.format(self._dns_zone))\n\n        self._production_kubernetes_node_count = self.get_data(\n            'production_kubernetes_node_count', self.PentagonDefaults.kubernetes['node_count'])\n        self._production_kubernetes_master_aws_zones = self.get_data(\n            'production_kubernetes_master_aws_zones', self._aws_availability_zones)\n        self._production_kubernetes_master_node_type = self.get_data(\n            
'production_kubernetes_master_node_type', self.PentagonDefaults.kubernetes['master_node_type'])\n        self._production_kubernetes_worker_node_type = self.get_data(\n            'production_kubernetes_worker_node_type', self.PentagonDefaults.kubernetes['worker_node_type'])\n        self._production_kubernetes_v_log_level = self.get_data(\n            'production_kubernetes_v_log_level', self.PentagonDefaults.kubernetes['v_log_level'])\n        self._production_kubernetes_network_cidr = self.get_data(\n            'production_kubernetes_network_cidr', self.PentagonDefaults.kubernetes['network_cidr'])\n        self._production_third_octet = self.get_data(\n            'production_third_octet', self.PentagonDefaults.kubernetes['production_third_octet'])\n\n        self._vpn_ami_id = self.get_data('vpn_ami_id')\n\n    @property\n    def context(self):\n        self._context = {\n            'aws_secret_key': self._aws_secret_key,\n            'aws_access_key': self._aws_access_key,\n            'org_name': self._name,\n            'vpc_name': self._vpc_name,\n            'aws_default_region': self._aws_default_region,\n            'aws_availability_zones': self._aws_availability_zones,\n            'aws_availability_zone_count': self._aws_availability_zone_count,\n            'infrastructure_bucket': self._infrastructure_bucket,\n            'vpc_name': self._vpc_name,\n            'vpc_cidr_base': self._vpc_cidr_base,\n            'aws_availability_zones': self._aws_availability_zones,\n            'aws_availability_zone_count': self._aws_availability_zone_count,\n            'infrastructure_bucket': self._infrastructure_bucket,\n            'vpc_name': self._vpc_name,\n            'infrastructure_bucket': self._infrastructure_bucket,\n            'aws_region': self._aws_default_region,\n            'dns_zone': self._dns_zone,\n            'vpn_ami_id': self._vpn_ami_id,\n            'production_kube_key': self._ssh_keys['production_kube_key'],\n            
'working_kube_key': self._ssh_keys['working_kube_key'],\n            'production_private_key': self._ssh_keys['production_private_key'],\n            'working_private_key': self._ssh_keys['working_private_key'],\n            'admin_vpn_key': self._ssh_keys['admin_vpn_key'],\n            'name': 'default',\n            'project_name': self._name,\n            'configure_vpn': self.get_data('configure_vpn'),\n        }\n        logging.debug(self._context)\n        return self._context\n\n    def __add_kops_working_cluster(self):\n        context = {\n            'cluster_name': self._working_kubernetes_cluster_name,\n            'availability_zones': re.sub(\" \", \"\", self._aws_availability_zones).split(\",\"),\n            'vpc_id': self._vpc_id,\n            'ssh_key_path': \"${{INFRASTRUCTURE_REPO}}/{}/{}.pub\".format(self._private_path, self._ssh_keys['working_kube_key']),\n            'kubernetes_version': self._kubernetes_version,\n            'ig_max_size': self._working_kubernetes_node_count,\n            'ig_min_size': self._working_kubernetes_node_count,\n            'master_availability_zones': [zone.strip() for zone in self._working_kubernetes_master_aws_zones.split(',')],\n            'master_node_type': self._working_kubernetes_master_node_type,\n            'worker_node_type': self._working_kubernetes_worker_node_type,\n            'cluster_dns': self._working_kubernetes_dns_zone,\n            'kubernetes_v_log_level': self._working_kubernetes_v_log_level,\n            'network_cidr': self._working_kubernetes_network_cidr,\n            'network_cidr_base': self._vpc_cidr_base,\n            'kops_state_store_bucket': self._infrastructure_bucket,\n            'third_octet': self._working_third_octet,\n        }\n        write_yaml_file(\n            \"{}/inventory/default/clusters/working/vars.yml\".format(self._repository_directory), context)\n\n    def __add_kops_production_cluster(self):\n        context = {\n            'cluster_name': 
self._production_kubernetes_cluster_name,\n            'availability_zones': re.sub(\" \", \"\", self._aws_availability_zones).split(\",\"),\n            'vpc_id': self._vpc_id,\n            'ssh_key_path': \"${{INFRASTRUCTURE_REPO}}/{}/{}.pub\".format(self._private_path, self._ssh_keys['production_kube_key']),\n            'kubernetes_version': self._kubernetes_version,\n            'ig_max_size': self._production_kubernetes_node_count,\n            'ig_min_size': self._production_kubernetes_node_count,\n            'master_availability_zones': [zone.strip() for zone in self._production_kubernetes_master_aws_zones.split(',')],\n            'master_node_type': self._production_kubernetes_master_node_type,\n            'worker_node_type': self._production_kubernetes_worker_node_type,\n            'cluster_dns': self._production_kubernetes_dns_zone,\n            'kubernetes_v_log_level': self._production_kubernetes_v_log_level,\n            'network_cidr': self._production_kubernetes_network_cidr,\n            'network_cidr_base': self._vpc_cidr_base,\n            'kops_state_store_bucket': self._infrastructure_bucket,\n            'third_octet': self._production_third_octet,\n        }\n        write_yaml_file(\n            \"{}/inventory/default/clusters/production/vars.yml\".format(self._repository_directory), context)\n\n    def configure_default_project(self):\n        inventory.Inventory(self.context).add(\n            '{}/inventory/default'.format(self._repository_directory))\n        self.__add_kops_working_cluster()\n        self.__add_kops_production_cluster()\n\n\nclass GCPPentagonProject(PentagonProject):\n\n    def __init__(self, name, data={}):\n        # Build translated data for inventory input\n        self._gcp_inventory_context = self._build_inv_params(name, data)\n        # Add the project_name since it isn't passed in from click\n        self._gcp_inventory_context['project_name'] = name\n        self._gcp_inventory_context['project'] = 
self._gcp_inventory_context['gcp_project']\n        self._gcp_inventory_context['name'] = name\n        super(GCPPentagonProject, self).__init__(name, data)\n\n    @staticmethod\n    def _build_inv_params(name, input_context):\n        gcp_context = input_context.copy()\n        inventory_map = {\n            'gcp_nodes_cidr': 'nodes_cidr',\n            'gcp_services_cidr': 'services_cidr',\n            'gcp_pods_cidr': 'pods_cidr',\n            'gcp_cluster_name': 'cluster_name',\n            'gcp_kubernetes_version': 'kubernetes_version',\n            'gcp_infra_bucket': 'infrastructure_bucket',\n        }\n\n        for old_key, new_key in inventory_map.iteritems():\n            if new_key in gcp_context.keys():\n                if gcp_context[new_key]:\n                    raise KeyError(\n                        'Key already exists, this should not happen.')\n            gcp_context[new_key] = gcp_context.pop(old_key)\n\n        # for two levels downstream into gke generator tf\n        gcp_context['region'] = gcp_context['gcp_region']\n\n        return gcp_context\n\n    def configure_default_project(self):\n        inventory.Inventory(self._gcp_inventory_context).add(\n            '{}/inventory/{}'.format(\n                self._repository_directory,\n                self._gcp_inventory_context['project']\n            )\n        )\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\n# -- coding: utf-8 --\n# Copyright 2017 Reactive Ops Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the “License”);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an “AS IS” BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\nfrom pentagon import meta\n\ntry:\n    from setuptools import setup, find_packages\nexcept ImportError:\n    print(\"setup tools required. Please run: \"\n          \"pip install setuptools).\")\n    sys.exit(1)\n\n\ndef package_files(directory):\n    paths = []\n    for (path, directories, filenames) in os.walk(directory):\n        for filename in filenames:\n            paths.append(os.path.join('..', path, filename))\n    return paths\n\nextra_files = package_files('pentagon/component')\n\nsetup(name='pentagon',\n      version=meta.__version__,\n      description='Radically simple kubernetes',\n      author=meta.__author__,\n      author_email='services@reactiveops.com',\n      url='http://reactiveops.com/',\n      license='Apache2.0',\n      include_package_data=True,\n      install_requires=[\n        \"click==6.7\",\n        \"GitPython==2.1.3\",\n        \"Jinja2==2.9.5\",\n        \"pycrypto==2.6.1\",\n        \"oyaml>=0.8\",\n        \"PyYAML>=5.0\",\n        \"shyaml==0.6.1\",\n        \"ansible==2.5.2\",\n        \"awscli>=1.16.0\",\n        \"boto3>=1.9.0\",\n        \"botocore>=1.12.0\",\n        \"boto==2.49.0\",\n        \"google-api-python-client==1.6.2\",\n        \"coloredlogs==9.0\",\n        \"semver>=2.8.0\",\n      ],\n      
classifiers=[\n          'Development Status :: 5 - Production/Stable',\n          'Environment :: Console',\n          'Intended Audience :: Developers',\n          'Intended Audience :: Information Technology',\n          'Intended Audience :: System Administrators',\n          'License :: OSI Approved :: Apache Software License',\n          'Natural Language :: English',\n          'Operating System :: POSIX',\n          'Programming Language :: Python :: 2.6',\n          'Programming Language :: Python :: 2.7',\n          'Topic :: System :: Installation/Setup',\n          'Topic :: System :: Systems Administration',\n          'Topic :: Utilities',\n      ],\n      entry_points=''' #for click integration\n          [console_scripts]\n          pentagon=pentagon.cli:cli\n      ''',\n      packages=find_packages(exclude=['tests', 'example-component']),\n      scripts=[\n         'bin/yaml_source',\n         ],\n      #package_data={'': extra_files},\n      data_files=[],\n      )\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/requirements.txt",
    "content": "nose==1.3.7\npytest\nflake8\nautopep8\n"
  },
  {
    "path": "tests/test_args.py",
    "content": "import unittest\nimport pentagon.pentagon as pentagon\nimport os\nimport logging\nfrom tests.test_base import TestPentagonProject\n\n\nclass TestPentagonProjectWithoutArgs(TestPentagonProject):\n    name = 'test_pentagon_without_args'\n\n    def setUp(self):\n        self.p = pentagon.AWSPentagonProject(self.name)\n\n    def tearDown(self):\n        self.p = None\n\n\nclass TestPentagonProjectWithAllArgs(TestPentagonProject):\n    name = 'test_pentagon_with_all_args'\n    args = {\n        'configure': True,\n\n        # 'repository_name': 'test-repository-name',\n\n        # need to test some of these without all of them\n        'aws_access_key': 'test-aws-key',\n        'aws_secret_key': 'test-aws-secret-key',\n        'aws_default_region': 'test-aws-region',\n        'aws_availability_zone_count': 5,\n        'aws_availability_zones': 'test-aws-regiona,test-aws-regionb,test-aws-regionc',\n        'vpc_name': 'test_vpc_name',\n        'vpc_cidr_base': 'test_vpc_cidr_base',\n        'vpc_id': 'test_vpc_id',\n        # KOPS:\n        'infrastructure_bucket': 'test-statestore-bucket',\n        # DNS:\n        'dns_zone': 'test_dns_zone',\n        # Working Kubernetes\n        'working_kubernetes_cluster_name': 'test-working-cluster-name',\n        'working_kubernetes_dns_zone': 'test-working-cluster-dns-zone',\n        'working_kubernetes_node_count': 3,\n        'working_kubernetes_master_aws_zones': 'test-working-aws-master-zone',\n        'working_kubernetes_master_node_type': 'test-working-master-node-type',\n        'working_kubernetes_worker_node_type': 'test-working-worker-node-type',\n        'working_kubernetes_v_log_level': 'test-working-v-log-level',\n        'working_kubernetes_network_cidr': 'test-working-netwwork-cidr',\n        # Production Kubernetes\n        'production_kubernetes_cluster_name': 'test-production-cluster-name',\n        'production_kubernetes_dns_zone': 'test-production-cluster-dns-zone',\n        
'production_kubernetes_node_count': 3,\n        'production_kubernetes_master_aws_zones': 'test-production-aws-master-zone',\n        'production_kubernetes_master_node_type': 'test-production-master-node-type',\n        'production_kubernetes_worker_node_type': 'test-production-worker-node-type',\n        'production_kubernetes_v_log_level': 'test-production-v-log-level',\n        'production_kubernetes_network_cidr': 'test-production-netwwork-cidr',\n        # ssh keys\n        'admin_vpn_key': 'test-admin-vpn-key',\n        'working_kube_key': 'test-working-kube-key',\n        'production_kube_key': 'test-production-kube-key',\n        'working_private_key': 'test-working-private-key',\n        'production_private_key': 'test-production-private-key',\n        }\n\n    def setUp(self):\n        self.p = pentagon.AWSPentagonProject(self.name, self.args)\n\n    def tearDown(self):\n        self.p = None\n\n    def test_configure_project(self):\n        self.assertEqual(self.p._configure_project, True)\n\n    def test_aws_availability_zones(self):\n        logging.debug(self.p._aws_availability_zone_count)\n        self.assertIsInstance(self.p._aws_availability_zone_count, int)\n        self.assertEqual(self.p._aws_default_region, self.args['aws_default_region'])\n        self.assertEqual(self.p._aws_availability_zones, self.args['aws_availability_zones'])\n\n    def test_vpc_name(self):\n        self.assertEqual(self.p._vpc_name, self.args['vpc_name'])\n\n    def test_kops_args(self):\n        self.assertEqual(self.p._infrastructure_bucket, self.args['infrastructure_bucket'])\n\n    def test_kubernetes_args(self):\n        base_kube_args = [\n            '_kubernetes_cluster_name',\n            '_kubernetes_dns_zone',\n            '_kubernetes_node_count',\n            '_kubernetes_master_aws_zones',\n            '_kubernetes_master_node_type',\n            '_kubernetes_worker_node_type',\n            '_kubernetes_v_log_level',\n            
'_kubernetes_network_cidr'\n        ]\n\n        for env in ['working', 'production']:\n            for arg in base_kube_args:\n                arg_name = '{}{}'.format(env, arg)\n                attr_name = '_{}'.format(arg_name)\n                pentagon_attribute = getattr(self.p, attr_name)\n                self.assertEqual(pentagon_attribute, self.args.get(arg_name))\n\n        self.assertEqual(getattr(self.p, '_dns_zone'), self.args['dns_zone'])\n\n\nclass TestPentagonProjectWithMinimalArgs(TestPentagonProject):\n    name = 'test_pentagon_with_minimal_args'\n\n    args = {\n        'configure': True,\n        # need to test some of these without all of them\n        'aws_access_key': 'test-aws-key',\n        'aws_secret_key': 'test-aws-secret-key',\n        'aws_default_region': 'test-aws-region',\n        'aws_availability_zone_count': 5,\n        }\n\n    def setUp(self):\n        self.p = pentagon.AWSPentagonProject(self.name, self.args)\n\n    def tearDown(self):\n        self.p = None\n\n    def test_configure_project(self):\n        self.assertEqual(self.p._configure_project, self.args['configure'])\n\n    def test_aws_availability_zones(self):\n        azs = \"test-aws-regiona, test-aws-regionb, test-aws-regionc, test-aws-regiond, test-aws-regione\"\n        from pentagon.helpers import allege_aws_availability_zones\n        self.assertIsInstance(self.p._aws_availability_zone_count, int)\n        self.assertEqual(self.p._aws_default_region, self.args['aws_default_region'])\n        alleged_azs = allege_aws_availability_zones(self.p._aws_default_region, self.p._aws_availability_zone_count)\n        self.assertEqual(alleged_azs, azs)\n\n\nclass TestPentagon(TestPentagonProject):\n\n    def test_noninteget_az_count(self):\n        args = {\n            'configure': True,\n            'aws_default_region': 'test_default_region',\n            'aws_availability_zone_count': 'not_an_integer'\n        }\n        with self.assertRaises(ValueError):\n            
p = pentagon.AWSPentagonProject(self.name, args)\n"
  },
  {
    "path": "tests/test_base.py",
    "content": "\nimport unittest\nimport pentagon.pentagon as pentagon\nimport os\nimport logging\n\n\nclass TestPentagonProject(unittest.TestCase):\n    name = \"test-pentagon-base\"\n\n    def setUp(self):\n        self.p = pentagon.AWSPentagonProject(self.name)\n\n    def tearDown(self):\n        self.p = None\n\n    def test_instance(self):\n        self.assertIsInstance(self.p, pentagon.PentagonProject)\n\n    def test_name(self):\n        print ('test')\n        self.assertEqual(self.p._name, self.name)\n\n    def test_repository_name(self):\n        self.assertEqual(self.p._repository_name, '{}-infrastructure'.format(self.name))\n\n    def test_repository_directory(self):\n        self.assertEqual(self.p._repository_directory, self.p._repository_name)\n"
  }
]