[
  {
    "path": ".circleci/config.yml",
    "content": "defaults: &defaults\n  docker:\n    - image: 087285199408.dkr.ecr.us-east-1.amazonaws.com/circle-ci-test-image-base:go1.16-tf1.0-tg31.1-pck1.7\nversion: 2\njobs:\n  test:\n    <<: *defaults\n    steps:\n      - checkout\n      - run:\n          # Fail the build if the pre-commit hooks don't pass. Note: if you run $ pre-commit install locally within this repo, these hooks will\n          # execute automatically every time before you commit, ensuring the build never fails at this step!\n          name: run pre-commit hooks\n          command: |\n            pip install pre-commit==1.21.0 cfgv==2.0.1\n            pre-commit install\n            pre-commit run --all-files\n      - run:\n          name: create log directory\n          command: mkdir -p /tmp/logs\n      - run:\n          name: run tests\n          command: run-go-tests --path test --timeout 2h | tee /tmp/logs/all.log\n          no_output_timeout: 3600s\n      - store_artifacts:\n          path: /tmp/logs\n      - store_test_results:\n          path: /tmp/logs\n  deploy:\n    <<: *defaults\n    steps:\n      - checkout\n      - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV\n      - run: sudo -E gruntwork-install --module-name \"aws-helpers\" --repo \"https://github.com/gruntwork-io/module-ci\" --tag \"v0.29.0\"\n      - run: sudo -E gruntwork-install --module-name \"git-helpers\" --repo \"https://github.com/gruntwork-io/module-ci\" --tag \"v0.29.0\"\n      - run: sudo -E gruntwork-install --module-name \"build-helpers\" --repo \"https://github.com/gruntwork-io/module-ci\" --tag \"v0.29.0\"\n      # We generally only want to build AMIs on new releases, but when we are setting up AMIs in a new account for the\n      # first time, we want to build the AMIs but NOT run automated tests, since those tests will fail without an existing\n      # AMI already in the AWS Account.\n      - run: _ci/publish-amis.sh \"ubuntu16-ami\"\n      - run: _ci/publish-amis.sh 
\"ubuntu18-ami\"\n      - run: _ci/publish-amis.sh \"amazon-linux-2-amd64-ami\"\n      - run: _ci/publish-amis.sh \"amazon-linux-2-arm64-ami\"\nworkflows:\n  version: 2\n  build-and-test:\n    jobs:\n      - test:\n          filters:\n            branches:\n              ignore: publish-amis\n      - deploy:\n          requires:\n            - test\n          filters:\n            branches:\n              only: publish-amis\n            tags:\n              only: /^v.*/\n  nightly-test:\n    triggers:\n      - schedule:\n          cron: \"0 0 * * *\"\n          filters:\n            branches:\n              only:\n                - master\n    jobs:\n      - test\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Create a bug report to help us improve.\ntitle: ''\nlabels: bug\nassignees: ''\n\n---\n\n<!--\nHave any questions? Check out the contributing docs at https://gruntwork.notion.site/Gruntwork-Coding-Methodology-02fdcd6e4b004e818553684760bf691e,\nor ask in this issue and a Gruntwork core maintainer will be happy to help :)\n-->\n\n**Describe the bug**\nA clear and concise description of what the bug is.\n\n**To Reproduce**\nSteps to reproduce the behavior including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used.\n\n```hcl\n// paste code snippets here\n```\n\n**Expected behavior**\nA clear and concise description of what you expected to happen.\n\n**Nice to have**\n- [ ] Terminal output\n- [ ] Screenshots\n\n**Additional context**\nAdd any other context about the problem here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Submit a feature request for this repo.\ntitle: ''\nlabels: enhancement\nassignees: ''\n\n---\n\n<!--\nHave any questions? Check out the contributing docs at https://gruntwork.notion.site/Gruntwork-Coding-Methodology-02fdcd6e4b004e818553684760bf691e,\nor ask in this issue and a Gruntwork core maintainer will be happy to help :)\n-->\n\n**Describe the solution you'd like**\nA clear and concise description of what you want to happen.\n\n**Describe alternatives you've considered**\nA clear and concise description of any alternative solutions or features you've considered.\n\n**Additional context**\nAdd any other context or screenshots about the feature request here.\n"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "<!--\nHave any questions? Check out the contributing docs at https://gruntwork.notion.site/Gruntwork-Coding-Methodology-02fdcd6e4b004e818553684760bf691e,\nor ask in this Pull Request and a Gruntwork core maintainer will be happy to help :)\nNote: Remember to add '[WIP]' to the beginning of the title if this PR is still a work-in-progress. Remove it when it is ready for review!\n-->\n\n## Description\n\n<!-- Write a brief description of the changes introduced by this PR -->\n\n### Documentation\n\n<!--\n  If this is a feature PR, then where is it documented?\n\n  - If docs exist:\n    - Update any references, if relevant.\n  - If no docs exist:\n    - Create a stub for documentation including bullet points for how to use the feature, code snippets (including from happy path tests), etc.\n-->\n\n<!-- Important: Did you make any backward incompatible changes? If yes, then you must write a migration guide! -->\n\n## TODOs\n\nPlease ensure all of these TODOs are completed before asking for a review.\n\n- [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`.\n- [ ] Update the docs.\n- [ ] Keep the changes backward compatible where possible.\n- [ ] Run the pre-commit checks successfully.\n- [ ] Run the relevant tests successfully.\n- [ ] Ensure any 3rd party code adheres with our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if its not applicable.\n\n\n## Related Issues\n\n<!--\n  Link to related issues, and issues fixed or partially addressed by this PR.\n  e.g. Fixes #1234\n  e.g. Addresses #1234\n  e.g. Related to #1234\n-->\n"
  },
  {
    "path": ".gitignore",
    "content": "# Terraform files\n.terraform\nterraform.tfstate\nterraform.tfvars\n*.tfstate*\n\n# OS X files\n.history\n.DS_Store\n\n# IntelliJ files\n.idea_modules\n*.iml\n*.iws\n*.ipr\n.idea/\nbuild/\n*/build/\nout/\n\n# Go best practices dictate that libraries should not include the vendor directory\nvendor\n\n# Folder used to store temporary test data by Terratest\n.test-data\n# Ignore Terraform lock files, as we want to test the Terraform code in these repos with the latest provider\n# versions.\n.terraform.lock.hcl\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n  - repo: https://github.com/gruntwork-io/pre-commit\n    rev:  v0.1.10\n    hooks:\n      - id: terraform-fmt\n      - id: gofmt"
  },
  {
    "path": "CODEOWNERS",
    "content": "* @robmorgan @Etiene @anouarchattouna\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2017 HashiCorp, Inc.\n\n\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, 
whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License."
  },
  {
    "path": "NOTICE",
    "content": "terraform-aws-nomad\nCopyright 2017 Gruntwork, Inc.\n\nThis product includes software developed at Gruntwork (http://www.gruntwork.io/)."
  },
  {
    "path": "README.md",
    "content": "# DISCLAIMER: This is no longer supported.\nMoving forward in the future this repository will be no longer supported and eventually lead to\ndeprecation. Please use our latest versions of our products moving forward or alternatively you\nmay fork the repository to continue use and development for your personal/business use.\n\n---\n<!--\n:type: service\n:name: HashiCorp Nomad\n:description: Deploy a Nomad cluster. Supports automatic bootstrapping, discovery of Consul servers, automatic recovery of failed servers.\n:icon: /_docs/nomad-icon.png\n:category: docker-orchestration\n:cloud: aws\n:tags: docker, orchestration, containers\n:license: gruntwork\n:built-with: terraform, bash\n-->\n\n# Nomad AWS Module\n\n![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.0-blue.svg)\n\nThis repo contains a set of modules for deploying a [Nomad](https://www.nomadproject.io/) cluster on\n[AWS](https://aws.amazon.com/) using [Terraform](https://www.terraform.io/). Nomad is a distributed, highly-available\ndata-center aware scheduler. A Nomad cluster typically includes a small number of server nodes, which are responsible\nfor being part of the [consensus protocol](https://www.nomadproject.io/docs/internals/consensus.html), and a larger\nnumber of client nodes, which are used for running jobs.\n\n![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture.png)\n\n\n\n\n## Features\n\n* Deploy server nodes for managing jobs and client nodes running jobs\n* Supports colocated clusters and separate clusters\n* Least privilege security group rules for servers\n* Auto scaling and Auto healing\n\n\n\n\n## Learn\n\nThis repo was created by [Gruntwork](https://www.gruntwork.io?ref=repo_aws_nomad), and follows the same patterns as [the Gruntwork\nInfrastructure as Code Library](https://gruntwork.io/infrastructure-as-code-library/), a collection of reusable,\nbattle-tested, production ready infrastructure code. 
You can read [How to use the Gruntwork Infrastructure as Code\nLibrary](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/) for an overview\nof how to use modules maintained by Gruntwork!\n\n### Core concepts\n\n* [Nomad Use Cases](https://www.nomadproject.io/intro/use-cases.html): overview of various use cases that Nomad is\n  optimized for.\n* [Nomad Guides](https://www.nomadproject.io/guides/index.html): official guide on how to configure and setup Nomad\n  clusters as well as how to use Nomad to schedule services on to the workers.\n* [Nomad Security](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster#security): overview of how to secure your Nomad clusters.\n\n### Repo organization\n\n* [modules](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules.\n* [examples](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples): This folder contains working examples of how to use the submodules.\n* [test](https://github.com/hashicorp/terraform-aws-nomad/tree/master/test): Automated tests for the modules and examples.\n* [root](https://github.com/hashicorp/terraform-aws-nomad/tree/master): The root folder is *an example* of how to use the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) module to deploy a [Nomad](https://www.nomadproject.io/) cluster in [AWS](https://aws.amazon.com/). The Terraform Registry requires the root of every repo to contain Terraform code, so we've put one of the examples there. 
This example is great for learning and experimenting, but for production use, please use the underlying modules in the [modules folder](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules) directly.\n\n\n\n\n\n\n## Deploy\n\n### Non-production deployment (quick start for learning)\n\nIf you just want to try this repo out for experimenting and learning, check out the following resources:\n\n* [examples folder](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage).\n\n### Production deployment\n\nIf you want to deploy this repo in production, check out the following resources:\n\n* [Nomad Production Setup Guide](https://www.nomadproject.io/guides/install/production/index.html):\n  detailed guide covering how to setup a production deployment of Nomad.\n\n\n\n## Manage\n\n### Day-to-day operations\n\n* [How to deploy Nomad and Consul in the same\n  cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/core-concepts.md#deploy-nomad-and-consul-in-the-same-cluster)\n* [How to deploy Nomad and Consul in separate\n  clusters](https://github.com/hashicorp/terraform-aws-nomad/tree/master/core-concepts.md#deploy-nomad-and-consul-in-separate-clusters)\n* [How to connect to the Nomad cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-connect-to-the-nomad-cluster)\n* [What happens if a node crashes](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#what-happens-if-a-node-crashes)\n* [How to connect load balancers to the ASG](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-connect-load-balancers-to-the-auto-scaling-group-asg)\n\n### Major changes\n\n* [How to upgrade a Nomad 
cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-roll-out-updates)\n\n## Who created this Module?\n\nThese modules were created by [Gruntwork](http://www.gruntwork.io/?ref=repo_aws_nomad), in partnership with HashiCorp, in 2017 and maintained through 2021. They were deprecated in 2022, see the top of the README for details.\n\n## License\n\nPlease see [LICENSE](https://github.com/hashicorp/terraform-aws-nomad/tree/master/LICENSE) for details on how the code in this repo is licensed.\n\n\nCopyright &copy; 2019 [Gruntwork](https://www.gruntwork.io?ref=repo_aws_nomad), Inc.\n"
  },
  {
    "path": "_ci/publish-amis-in-new-account.md",
    "content": "# How to Publish AMIs in a New Account\n\nSee the [canonical page](https://github.com/hashicorp/terraform-aws-consul/blob/master/_ci/publish-amis-in-new-account.md)\nin the [Consul AWS Module](https://github.com/hashicorp/terraform-aws-consul) repo."
  },
  {
    "path": "_ci/publish-amis.sh",
    "content": "#!/bin/bash\n#\n# Build the example AMI, copy it to all AWS regions, and make all AMIs public.\n#\n# This script is meant to be run in a CircleCI job.\n#\n\nset -e\n\nreadonly SCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nreadonly PACKER_TEMPLATE_PATH=\"$SCRIPT_DIR/../examples/nomad-consul-ami/nomad-consul.json\"\nreadonly PACKER_TEMPLATE_DEFAULT_REGION=\"us-east-1\"\nreadonly AMI_PROPERTIES_FILE=\"/tmp/ami.properties\"\n\n# In CircleCI, every build populates the branch name in CIRCLE_BRANCH except builds triggered by a new tag, for which\n# the CIRCLE_BRANCH env var is empty. We assume tags are only issued against the master branch.\nreadonly BRANCH_NAME=\"${CIRCLE_BRANCH:-master}\"\n\nreadonly PACKER_BUILD_NAME=\"$1\"\n\nif [[ -z \"$PACKER_BUILD_NAME\" ]]; then\n  echo \"ERROR: You must pass in the Packer build name as the first argument to this function.\"\n  exit 1\nfi\n\nif [[ -z \"$PUBLISH_AMI_AWS_ACCESS_KEY_ID\" || -z \"$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY\" ]]; then\n  echo \"The PUBLISH_AMI_AWS_ACCESS_KEY_ID and PUBLISH_AMI_AWS_SECRET_ACCESS_KEY environment variables must be set to the AWS credentials to use to publish the AMIs.\"\n  exit 1\nfi\n\necho \"Checking out branch $BRANCH_NAME to make sure we do all work in a branch and not in detached HEAD state\"\ngit checkout \"$BRANCH_NAME\"\n\n# We publish the AMIs to a different AWS account, so set those credentials\nexport AWS_ACCESS_KEY_ID=\"$PUBLISH_AMI_AWS_ACCESS_KEY_ID\"\nexport AWS_SECRET_ACCESS_KEY=\"$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY\"\n\n# Build the example AMI. WARNING! In a production setting, you should build your own AMI to ensure it has exactly the\n# configuration you want. 
We build this example AMI solely to make initial use of this Module as easy as possible.\nbuild-packer-artifact \\\n  --packer-template-path \"$PACKER_TEMPLATE_PATH\" \\\n  --build-name \"$PACKER_BUILD_NAME\" \\\n  --output-properties-file \"$AMI_PROPERTIES_FILE\"\n\n# Copy the AMI to all regions and make it public in each\nsource \"$AMI_PROPERTIES_FILE\"\npublish-ami \\\n  --all-regions \\\n  --source-ami-id \"$ARTIFACT_ID\" \\\n  --source-ami-region \"$PACKER_TEMPLATE_DEFAULT_REGION\"\n"
  },
  {
    "path": "core-concepts.md",
    "content": "# Background\n\nTo run a production Nomad cluster, you need to deploy a small number of server nodes (typically 3), which are responsible\nfor being part of the [consensus protocol](https://www.nomadproject.io/docs/internals/consensus.html), and a larger\nnumber of client nodes, which are used for running jobs. You must also have a [Consul](https://www.consul.io/) cluster\ndeployed (see the [Consul AWS Module](https://github.com/hashicorp/terraform-aws-consul)) in one of the following\nconfigurations:\n\n1. [Deploy Nomad and Consul in the same cluster](#deploy-nomad-and-consul-in-the-same-cluster)\n1. [Deploy Nomad and Consul in separate clusters](#deploy-nomad-and-consul-in-separate-clusters)\n\n\n## Deploy Nomad and Consul in the same cluster\n\n1. Use the [install-consul\n   module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) from the Consul AWS\n   Module and the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) from this Module in a Packer template to create\n   an AMI with Consul and Nomad.\n\n   If you are just experimenting with this Module, you may find it more convenient to use one of our official public AMIs:\n   - [Latest Ubuntu 16 AMIs](https://github.com/hashicorp/terraform-aws-nomad/tree/master/_docs/ubuntu16-ami-list.md).\n   - [Latest Amazon Linux AMIs](https://github.com/hashicorp/terraform-aws-nomad/tree/master/_docs/amazon-linux-ami-list.md).\n\n   **WARNING! Do NOT use these AMIs in your production setup. In production, you should build your own AMIs in your own\n   AWS account.**\n\n1. Deploy a small number of server nodes (typically, 3) using the [consul-cluster\n   module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster). 
Execute the\n   [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) and the\n   [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--server` flag in both\n   scripts.\n1. Deploy as many client nodes as you need using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). Execute the\n   [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) and the\n   [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--client` flag in both\n   scripts.\n\nCheck out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example) for working\t\nsample code.\n\n## Deploy Nomad and Consul in separate clusters\n\n1. Deploy a standalone Consul cluster by following the instructions in the [Consul AWS\n   Module](https://github.com/hashicorp/terraform-aws-consul).\n1. Use the scripts from the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) in a Packer template to create a Nomad AMI.\n1. Deploy a small number of server nodes (typically, 3) using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). Execute the\n   [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--server` flag. You will\n   need to configure each node with the connection details for your standalone Consul cluster.\n1. Deploy as many client nodes as you need using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). 
Execute the\t\n   [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--client` flag.\n\nCheck out the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) for working sample code.\n"
  },
  {
    "path": "examples/nomad-consul-ami/README.md",
    "content": "# Nomad and Consul AMI\n\nThis folder shows an example of how to use the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) from this Module and\nthe [install-consul module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul)\nfrom the Consul AWS Module with [Packer](https://www.packer.io/) to create [Amazon Machine Images\n(AMIs)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that have Nomad and Consul installed on top of:\n\n1. Ubuntu 16.04\n1. Ubuntu 18.04\n1. Amazon Linux 2\n\nThese AMIs will have [Consul](https://www.consul.io/) and [Nomad](https://www.nomadproject.io/) installed and\nconfigured to automatically join a cluster during boot-up.\n\nTo see how to deploy this AMI, check out the [nomad-consul-colocated-cluster\nexample](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example). For more info on Nomad installation and configuration, check out\nthe [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) documentation.\n\n\n\n## Quick start\n\nTo build the Nomad and Consul AMI:\n\n1. `git clone` this repo to your computer.\n1. Install [Packer](https://www.packer.io/).\n1. Configure your AWS credentials using one of the [options supported by the AWS\n   SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to\n   set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.\n1. Update the `variables` section of the `nomad-consul.json` Packer template to configure the AWS region and Nomad version\n   you wish to use.\n1. Run `packer build nomad-consul.json`.\n\nWhen the build finishes, it will output the IDs of the new AMIs. 
To see how to deploy one of these AMIs, check out the\n[nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example).\n\n\n\n\n## Creating your own Packer template for production usage\n\nWhen creating your own Packer template for production usage, you can copy the example in this folder more or less\nexactly, except for one change: we recommend replacing the `file` provisioner with a call to `git clone` in the `shell`\nprovisioner. Instead of:\n\n```json\n{\n  \"provisioners\": [{\n    \"type\": \"file\",\n    \"source\": \"{{template_dir}}/../../../terraform-aws-nomad\",\n    \"destination\": \"/tmp\"\n  },{\n    \"type\": \"shell\",\n    \"inline\": [\n      \"/tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version {{user `nomad_version`}}\"\n    ],\n    \"pause_before\": \"30s\"\n  }]\n}\n```\n\nYour code should look more like this:\n\n```json\n{\n  \"provisioners\": [{\n    \"type\": \"shell\",\n    \"inline\": [\n      \"git clone --branch <module_VERSION> https://github.com/hashicorp/terraform-aws-nomad.git /tmp/terraform-aws-nomad\",\n      \"/tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version {{user `nomad_version`}}\"\n    ],\n    \"pause_before\": \"30s\"\n  }]\n}\n```\n\nYou should replace `<module_VERSION>` in the code above with the version of this module that you want to use (see\nthe [Releases Page](../../releases) for all available versions). That's because for production usage, you should always\nuse a fixed, known version of this Module, downloaded from the official Git repo. On the other hand, when you're\njust experimenting with the Module, it's OK to use a local checkout of the Module, uploaded from your own\ncomputer.\n"
  },
  {
    "path": "examples/nomad-consul-ami/nomad-consul-docker.json",
    "content": "{\n  \"min_packer_version\": \"0.12.0\",\n  \"variables\": {\n    \"aws_region\": \"us-east-1\",\n    \"nomad_version\": \"1.1.1\",\n    \"consul_module_version\": \"v0.10.1\",\n    \"consul_version\": \"1.9.6\",\n    \"ami_name_prefix\": \"nomad-consul\"\n  },\n  \"builders\": [\n    {\n      \"name\": \"ubuntu18-ami\",\n      \"ami_name\": \"{{user `ami_name_prefix`}}-docker-ubuntu18-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An example of how to build an Ubuntu 18.04 AMI that has Nomad, Consul and Docker\",\n      \"instance_type\": \"t2.micro\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n       \"filters\": {\n         \"virtualization-type\": \"hvm\",\n         \"architecture\": \"x86_64\",\n         \"name\": \"ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*\",\n         \"block-device-mapping.volume-type\": \"gp2\",\n         \"root-device-type\": \"ebs\"\n       },\n       \"owners\": [\n         \"099720109477\"\n       ],\n       \"most_recent\": true\n      },\n      \"ssh_username\": \"ubuntu\"\n    },\n    {\n      \"name\": \"ubuntu16-ami\",\n      \"ami_name\": \"{{user `ami_name_prefix`}}-docker-ubuntu16-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Ubuntu 16.04 AMI that has Nomad, Consul and Docker installed.\",\n      \"instance_type\": \"t2.micro\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"x86_64\",\n          \"name\": \"*ubuntu-xenial-16.04-amd64-server-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"099720109477\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ubuntu\"\n    },\n    {\n      
\"ami_name\": \"{{user `ami_name_prefix`}}-docker-amazon-linux-2-amd64-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Amazon Linux 2 x86_64 AMI that has Nomad, Consul and Docker installed.\",\n      \"instance_type\": \"t2.micro\",\n      \"name\": \"amazon-linux-2-amd64-ami\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"x86_64\",\n          \"name\": \"*amzn2-ami-hvm-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"amazon\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ec2-user\"\n    },\n    {\n      \"ami_name\": \"{{user `ami_name_prefix`}}-docker-amazon-linux-2-arm64-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Amazon Linux 2 ARM64 AMI that has Nomad, Consul and Docker installed.\",\n      \"instance_type\": \"t4g.micro\",\n      \"name\": \"amazon-linux-2-arm64-ami\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"arm64\",\n          \"name\": \"*amzn2-ami-hvm-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"amazon\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ec2-user\"\n    }\n  ],\n  \"provisioners\": [\n    {\n      \"type\": \"shell\",\n      \"inline\": [\"mkdir -p /tmp/terraform-aws-nomad/modules\"]\n    },\n    {\n      \"type\": \"shell\",\n      \"script\": \"{{template_dir}}/setup_ubuntu.sh\",\n      \"only\": [\n        \"ubuntu16-ami\",\n        \"ubuntu18-ami\"\n      ]\n    },\n    {\n      \"type\": 
\"shell\",\n      \"script\": \"{{template_dir}}/setup_amazon-linux-2.sh\",\n      \"only\": [\n        \"amazon-linux-2-amd64-ami\",\n        \"amazon-linux-2-arm64-ami\"\n      ]\n    },\n    {\n      \"type\": \"file\",\n      \"source\": \"{{template_dir}}/../../modules/\",\n      \"destination\": \"/tmp/terraform-aws-nomad/modules\",\n      \"pause_before\": \"30s\"\n    },\n    {\n      \"type\": \"shell\",\n      \"environment_vars\": [\n        \"NOMAD_VERSION={{user `nomad_version`}}\",\n        \"CONSUL_VERSION={{user `consul_version`}}\",\n        \"CONSUL_MODULE_VERSION={{user `consul_module_version`}}\"\n      ],\n      \"script\": \"{{template_dir}}/setup_nomad_consul.sh\"\n    }\n  ]\n}\n\n"
  },
  {
    "path": "examples/nomad-consul-ami/nomad-consul.json",
    "content": "{\n  \"min_packer_version\": \"0.12.0\",\n  \"variables\": {\n    \"aws_region\": \"us-east-1\",\n    \"nomad_version\": \"1.1.1\",\n    \"consul_module_version\": \"v0.10.1\",\n    \"consul_version\": \"1.9.6\",\n    \"ami_name_prefix\": \"nomad-consul\"\n  },\n  \"builders\": [\n    {\n      \"name\": \"ubuntu18-ami\",\n      \"ami_name\": \"{{user `ami_name_prefix`}}-ubuntu18-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An example of how to build an Ubuntu 18.04 AMI that has Nomad and Consul installed\",\n      \"instance_type\": \"t2.micro\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n       \"filters\": {\n         \"virtualization-type\": \"hvm\",\n         \"architecture\": \"x86_64\",\n         \"name\": \"ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*\",\n         \"block-device-mapping.volume-type\": \"gp2\",\n         \"root-device-type\": \"ebs\"\n       },\n       \"owners\": [\n         \"099720109477\"\n       ],\n       \"most_recent\": true\n      },\n      \"ssh_username\": \"ubuntu\"\n    },\n    {\n      \"ami_name\": \"{{user `ami_name_prefix`}}-ubuntu-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Ubuntu 16.04 AMI that has Nomad and Consul installed.\",\n      \"instance_type\": \"t2.micro\",\n      \"name\": \"ubuntu16-ami\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"x86_64\",\n          \"name\": \"*ubuntu-xenial-16.04-amd64-server-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"099720109477\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ubuntu\"\n    },\n    {\n      \"ami_name\": \"{{user 
`ami_name_prefix`}}-amazon-linux-2-amd64-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Amazon Linux 2 x86_64 AMI that has Nomad and Consul installed.\",\n      \"instance_type\": \"t2.micro\",\n      \"name\": \"amazon-linux-2-amd64-ami\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"x86_64\",\n          \"name\": \"*amzn2-ami-hvm-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"amazon\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ec2-user\"\n    },\n    {\n      \"ami_name\": \"{{user `ami_name_prefix`}}-amazon-linux-2-arm64-{{isotime | clean_resource_name}}\",\n      \"ami_description\": \"An Amazon Linux 2 ARM64 AMI that has Nomad and Consul installed.\",\n      \"instance_type\": \"t4g.micro\",\n      \"name\": \"amazon-linux-2-arm64-ami\",\n      \"region\": \"{{user `aws_region`}}\",\n      \"type\": \"amazon-ebs\",\n      \"source_ami_filter\": {\n        \"filters\": {\n          \"virtualization-type\": \"hvm\",\n          \"architecture\": \"arm64\",\n          \"name\": \"*amzn2-ami-hvm-*\",\n          \"block-device-mapping.volume-type\": \"gp2\",\n          \"root-device-type\": \"ebs\"\n        },\n        \"owners\": [\n          \"amazon\"\n        ],\n        \"most_recent\": true\n      },\n      \"ssh_username\": \"ec2-user\"\n    }\n  ],\n  \"provisioners\": [\n    {\n      \"type\": \"shell\",\n      \"inline\": [\n        \"sudo apt-get install -y git\"\n      ],\n      \"only\": [\n        \"ubuntu16-ami\",\n        \"ubuntu18-ami\"\n      ]\n    },\n    {\n      \"type\": \"shell\",\n      \"inline\": [\n        \"sudo yum install -y git\"\n      ],\n      \"only\": [\n        \"amazon-linux-2-amd64-ami\",\n        
\"amazon-linux-2-arm64-ami\"\n      ]\n    },\n    {\n      \"type\": \"shell\",\n      \"inline\": [\"mkdir -p /tmp/terraform-aws-nomad\"],\n      \"pause_before\": \"30s\"\n    },\n    {\n      \"type\": \"file\",\n      \"source\": \"{{template_dir}}/../../\",\n      \"destination\": \"/tmp/terraform-aws-nomad\"\n    },\n      {\n      \"type\": \"shell\",\n      \"environment_vars\": [\n        \"NOMAD_VERSION={{user `nomad_version`}}\",\n        \"CONSUL_VERSION={{user `consul_version`}}\",\n        \"CONSUL_MODULE_VERSION={{user `consul_module_version`}}\"\n      ],\n      \"script\": \"{{template_dir}}/setup_nomad_consul.sh\"\n    }\n  ]\n}\n\n"
  },
  {
    "path": "examples/nomad-consul-ami/setup_amazon-linux-2.sh",
    "content": "#!/bin/sh\nset -e\n\nSCRIPT=`basename \"$0\"`\n\necho \"[INFO] [${SCRIPT}] Setup git\"\nsudo yum install -y git\n\necho \"[INFO] [${SCRIPT}] Setup docker\"\nsudo yum install -y docker\nsudo systemctl enable docker\nsudo systemctl start docker\nsudo usermod -a -G docker ec2-user\n"
  },
  {
    "path": "examples/nomad-consul-ami/setup_nomad_consul.sh",
    "content": "#!/bin/sh\nset -e\n\n# Environment variables are set by packer\n/tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version \"${NOMAD_VERSION}\"\n\ngit clone --branch \"${CONSUL_MODULE_VERSION}\"  https://github.com/hashicorp/terraform-aws-consul.git /tmp/terraform-aws-consul\n/tmp/terraform-aws-consul/modules/install-consul/install-consul --version \"${CONSUL_VERSION}\"\n"
  },
  {
    "path": "examples/nomad-consul-ami/setup_ubuntu.sh",
    "content": "#!/bin/sh\nset -e\n\nSCRIPT=`basename \"$0\"`\n\n# NOTE: git is required, but it should already be preinstalled on Ubuntu 16.0\n#echo \"[INFO] [${SCRIPT}] Setup git\"\n#sudo apt install -y git\n\n# Using Docker CE directly provided by Docker\necho \"[INFO] [${SCRIPT}] Setup docker\"\ncd /tmp/\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\nsudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\nsudo apt-get update\napt-cache policy docker-ce\n\nsudo apt-get install -y docker-ce\nsudo usermod -a -G docker ubuntu\n"
  },
  {
    "path": "examples/nomad-consul-separate-cluster/README.md",
    "content": "# Nomad and Consul Separate Clusters Example\n\nThis folder shows an example of Terraform code to deploy a [Nomad](https://www.nomadproject.io/) cluster that connects \nto a separate [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/) (if you want to run Nomad and \nConsul in the same clusters, see the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) \ninstead). The Nomad cluster consists of two Auto Scaling Groups (ASGs): one with a small number of Nomad server \nnodes, which are responsible for being part of the [consensus \nquorum](https://www.nomadproject.io/docs/internals/consensus.html), and one with a larger number of Nomad client nodes, \nwhich are used to run jobs:\n\n![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture-nomad-consul-separate.png)\n\nYou will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) \nthat has Nomad and Consul installed, which you can do using the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami)).  \n\nFor more info on how the Nomad cluster works, check out the [nomad-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) documentation.\n\n\n\n\n## Quick start\n\nTo deploy a Nomad Cluster:\n\n1. `git clone` this repo to your computer.\n1. Optional: build a Nomad and Consul AMI. See the [nomad-consul-ami\n   example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) documentation for\n   instructions. Make sure to note down the ID of the AMI.\n1. Install [Terraform](https://www.terraform.io/).\n1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that\n   don't have a default. 
If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our\n   public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT\n   recommended for production use.\n1. Run `terraform init`.\n1. Run `terraform apply`.\n1. Run the [nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh) to print out\n   the IP addresses of the Nomad servers and some example commands you can run to interact with the cluster:\n   `../nomad-examples-helper/nomad-examples-helper.sh`.\n"
  },
  {
    "path": "examples/nomad-consul-separate-cluster/main.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# DEPLOY A NOMAD CLUSTER AND A SEPARATE CONSUL CLUSTER IN AWS\n# These templates show an example of how to use the nomad-cluster module to deploy a Nomad cluster in AWS. This cluster\n# connects to Consul running in a separate cluster.\n#\n# We deploy two Auto Scaling Groups (ASGs) for Nomad: one with a small number of Nomad server nodes and one with a\n# larger number of Nomad client nodes. Note that these templates assume that the AMI you provide via the\n# nomad_ami_id input variable is built from the examples/nomad-consul-ami/nomad-consul.json Packer template.\n#\n# We also deploy one ASG for Consul which has a small number of Consul server nodes. Note that these templates assume\n# that the AMI you provide via the consul_ami_id input variable is built from the examples/consul-ami/consul.json\n# Packer template in the Consul AWS Module.\n# ---------------------------------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------------------------------\n# REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER\n# ----------------------------------------------------------------------------------------------------------------------\nterraform {\n  # This module is now only being tested with Terraform 1.0.x. 
However, to make upgrading easier, we are setting\n  # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it\n  # forwards compatible with 1.0.x code.\n  required_version = \">= 0.12.26\"\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI\n# This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer\n# template at /examples/nomad-consul-ami upon every new release. The Terraform data source below automatically looks up\n# the latest AMI so that a simple \"terraform apply\" will just work without the user needing to manually build an AMI and\n# fill in the right value.\n#\n# !! WARNING !! These example AMIs are meant only for convenience when initially testing this repo. Do NOT use these example\n# AMIs in a production setting because it is important that you consciously think through the configuration you want\n# in your own production AMI.\n#\n# NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. 
See\n# /_ci/publish-amis-in-new-account.md for more information.\n# ---------------------------------------------------------------------------------------------------------------------\ndata \"aws_ami\" \"nomad_consul\" {\n  most_recent = true\n\n  # If we change the AWS Account in which test are run, update this value.\n  owners = [\"562637147889\"]\n\n  filter {\n    name   = \"virtualization-type\"\n    values = [\"hvm\"]\n  }\n\n  filter {\n    name   = \"is-public\"\n    values = [\"true\"]\n  }\n\n  filter {\n    name   = \"name\"\n    values = [\"nomad-consul-ubuntu-*\"]\n  }\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE NOMAD SERVER NODES\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"nomad_servers\" {\n  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you\n  # to a specific version of the modules, such as the following example:\n  # source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.1.0\"\n  source = \"../../modules/nomad-cluster\"\n\n  cluster_name  = \"${var.nomad_cluster_name}-server\"\n  instance_type = \"t2.micro\"\n\n  # You should typically use a fixed size of 3 or 5 for your Nomad server cluster\n  min_size         = var.num_nomad_servers\n  max_size         = var.num_nomad_servers\n  desired_capacity = var.num_nomad_servers\n\n  ami_id    = var.ami_id == null ? 
data.aws_ami.nomad_consul.image_id : var.ami_id\n  user_data = data.template_file.user_data_nomad_server.rendered\n\n  vpc_id     = data.aws_vpc.default.id\n  subnet_ids = data.aws_subnet_ids.default.ids\n\n  # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly\n  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  allowed_ssh_cidr_blocks = [\"0.0.0.0/0\"]\n\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n  ssh_key_name                = var.ssh_key_name\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# ATTACH IAM POLICIES FOR CONSUL\n# To allow our server Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from\n# the Consul AWS Module's consul-iam-policies module.\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"consul_iam_policies_servers\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0\"\n\n  iam_role_id = module.nomad_servers.iam_role_id\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# THE USER DATA SCRIPT THAT WILL RUN ON EACH NOMAD SERVER NODE WHEN IT'S BOOTING\n# This script will configure and start Nomad\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"template_file\" \"user_data_nomad_server\" {\n  template = file(\"${path.module}/user-data-nomad-server.sh\")\n\n  vars = {\n    num_servers       = var.num_nomad_servers\n    cluster_tag_key   = var.cluster_tag_key\n    cluster_tag_value = var.consul_cluster_name\n  }\n}\n\n# 
---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE CONSUL SERVER NODES\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"consul_servers\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0\"\n\n  cluster_name  = \"${var.consul_cluster_name}-server\"\n  cluster_size  = var.num_consul_servers\n  instance_type = \"t2.micro\"\n\n  # The EC2 Instances will use these tags to automatically discover each other and form a cluster\n  cluster_tag_key   = var.cluster_tag_key\n  cluster_tag_value = var.consul_cluster_name\n\n  ami_id    = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id\n  user_data = data.template_file.user_data_consul_server.rendered\n\n  vpc_id     = data.aws_vpc.default.id\n  subnet_ids = data.aws_subnet_ids.default.ids\n\n  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production\n  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  allowed_ssh_cidr_blocks = [\"0.0.0.0/0\"]\n\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n  ssh_key_name                = var.ssh_key_name\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER EC2 INSTANCE WHEN IT'S BOOTING\n# This script will configure and start Consul\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"template_file\" \"user_data_consul_server\" {\n  template = file(\"${path.module}/user-data-consul-server.sh\")\n\n  vars = {\n    cluster_tag_key   = var.cluster_tag_key\n    cluster_tag_value = var.consul_cluster_name\n  }\n}\n\n# 
---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE NOMAD CLIENT NODES\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"nomad_clients\" {\n  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you\n  # to a specific version of the modules, such as the following example:\n  # source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1\"\n  source = \"../../modules/nomad-cluster\"\n\n  cluster_name  = \"${var.nomad_cluster_name}-client\"\n  instance_type = \"t2.micro\"\n\n  # Give the clients a different tag so they don't try to join the server cluster\n  cluster_tag_key   = \"nomad-clients\"\n  cluster_tag_value = var.nomad_cluster_name\n\n  # To keep the example simple, we are using a fixed-size cluster. In real-world usage, you could use auto scaling\n  # policies to dynamically resize the cluster in response to load.\n\n  min_size         = var.num_nomad_clients\n  max_size         = var.num_nomad_clients\n  desired_capacity = var.num_nomad_clients\n  ami_id           = var.ami_id == null ? 
data.aws_ami.nomad_consul.image_id : var.ami_id\n  user_data        = data.template_file.user_data_nomad_client.rendered\n  vpc_id           = data.aws_vpc.default.id\n  subnet_ids       = data.aws_subnet_ids.default.ids\n\n  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production\n  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  allowed_ssh_cidr_blocks     = [\"0.0.0.0/0\"]\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n  ssh_key_name                = var.ssh_key_name\n  ebs_block_devices = [\n    {\n      \"device_name\" = \"/dev/xvde\"\n      \"volume_size\" = \"10\"\n    },\n  ]\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# ATTACH IAM POLICIES FOR CONSUL\n# To allow our client Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from\n# the Consul AWS Module's consul-iam-policies module.\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"consul_iam_policies_clients\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0\"\n\n  iam_role_id = module.nomad_clients.iam_role_id\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# THE USER DATA SCRIPT THAT WILL RUN ON EACH CLIENT NODE WHEN IT'S BOOTING\n# This script will configure and start Consul and Nomad\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"template_file\" \"user_data_nomad_client\" {\n  template = file(\"${path.module}/user-data-nomad-client.sh\")\n\n  vars = {\n    cluster_tag_key   = var.cluster_tag_key\n    cluster_tag_value = var.consul_cluster_name\n  }\n}\n\n# 
---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE CLUSTER IN THE DEFAULT VPC AND SUBNETS\n# Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Nomad are\n# accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC\n# and private subnets.\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"aws_vpc\" \"default\" {\n  default = true\n}\n\ndata \"aws_subnet_ids\" \"default\" {\n  vpc_id = data.aws_vpc.default.id\n}\n\ndata \"aws_region\" \"current\" {\n}\n"
  },
  {
    "path": "examples/nomad-consul-separate-cluster/outputs.tf",
    "content": "output \"num_nomad_servers\" {\n  value = module.nomad_servers.cluster_size\n}\n\noutput \"asg_name_nomad_servers\" {\n  value = module.nomad_servers.asg_name\n}\n\noutput \"launch_config_name_nomad_servers\" {\n  value = module.nomad_servers.launch_config_name\n}\n\noutput \"iam_role_arn_nomad_servers\" {\n  value = module.nomad_servers.iam_role_arn\n}\n\noutput \"iam_role_id_nomad_servers\" {\n  value = module.nomad_servers.iam_role_id\n}\n\noutput \"security_group_id_nomad_servers\" {\n  value = module.nomad_servers.security_group_id\n}\n\noutput \"num_consul_servers\" {\n  value = module.consul_servers.cluster_size\n}\n\noutput \"asg_name_consul_servers\" {\n  value = module.consul_servers.asg_name\n}\n\noutput \"launch_config_name_consul_servers\" {\n  value = module.consul_servers.launch_config_name\n}\n\noutput \"iam_role_arn_consul_servers\" {\n  value = module.consul_servers.iam_role_arn\n}\n\noutput \"iam_role_id_consul_servers\" {\n  value = module.consul_servers.iam_role_id\n}\n\noutput \"security_group_id_consul_servers\" {\n  value = module.consul_servers.security_group_id\n}\n\noutput \"num_nomad_clients\" {\n  value = module.nomad_clients.cluster_size\n}\n\noutput \"asg_name_nomad_clients\" {\n  value = module.nomad_clients.asg_name\n}\n\noutput \"launch_config_name_nomad_clients\" {\n  value = module.nomad_clients.launch_config_name\n}\n\noutput \"iam_role_arn_nomad_clients\" {\n  value = module.nomad_clients.iam_role_arn\n}\n\noutput \"iam_role_id_nomad_clients\" {\n  value = module.nomad_clients.iam_role_id\n}\n\noutput \"security_group_id_nomad_clients\" {\n  value = module.nomad_clients.security_group_id\n}\n\noutput \"aws_region\" {\n  value = data.aws_region.current.name\n}\n\noutput \"nomad_servers_cluster_tag_key\" {\n  value = module.nomad_servers.cluster_tag_key\n}\n\noutput \"nomad_servers_cluster_tag_value\" {\n  value = module.nomad_servers.cluster_tag_value\n}\n\n"
  },
  {
    "path": "examples/nomad-consul-separate-cluster/user-data-consul-server.sh",
    "content": "#!/bin/bash\n# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the\n# run-consul script to configure and start Consul in server mode. Note that this script assumes it's running in an AMI\n# built from the Packer template in examples/consul-ami/consul.json in the Consul AWS Module.\n\nset -e\n\n# Send the log output from this script to user-data.log, syslog, and the console\n# From: https://alestic.com/2010/12/ec2-user-data-output/\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n\n# These variables are passed in via Terraform template interplation\n/opt/consul/bin/run-consul --server --cluster-tag-key \"${cluster_tag_key}\" --cluster-tag-value \"${cluster_tag_value}\""
  },
  {
    "path": "examples/nomad-consul-separate-cluster/user-data-nomad-client.sh",
    "content": "#!/bin/bash\n# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the\n# run-consul script to configure and start Consul in client mode and the run-nomad script to configure and start Nomad\n# in client mode. Note that this script assumes it's running in an AMI built from the Packer template in\n# examples/nomad-consul-ami/nomad-consul.json.\n\nset -e\n\n# Send the log output from this script to user-data.log, syslog, and the console\n# From: https://alestic.com/2010/12/ec2-user-data-output/\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n\n# These variables are passed in via Terraform template interplation\n/opt/consul/bin/run-consul --client --cluster-tag-key \"${cluster_tag_key}\" --cluster-tag-value \"${cluster_tag_value}\"\n/opt/nomad/bin/run-nomad --client\n\n"
  },
  {
    "path": "examples/nomad-consul-separate-cluster/user-data-nomad-server.sh",
    "content": "#!/bin/bash\n# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the\n# run-consul script to configure and start Consul in client mode and then the run-nomad script to configure and start\n# Nomad in server mode. Note that this script assumes it's running in an AMI built from the Packer template in\n# examples/nomad-consul-ami/nomad-consul.json.\n\nset -e\n\n# Send the log output from this script to user-data.log, syslog, and the console\n# From: https://alestic.com/2010/12/ec2-user-data-output/\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n\n/opt/consul/bin/run-consul --client --cluster-tag-key \"${cluster_tag_key}\" --cluster-tag-value \"${cluster_tag_value}\"\n/opt/nomad/bin/run-nomad --server --num-servers \"${num_servers}\""
  },
  {
    "path": "examples/nomad-consul-separate-cluster/variables.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# ENVIRONMENT VARIABLES\n# Define these secrets as environment variables\n# ---------------------------------------------------------------------------------------------------------------------\n\n# AWS_ACCESS_KEY_ID\n# AWS_SECRET_ACCESS_KEY\n# AWS_DEFAULT_REGION\n\n# ---------------------------------------------------------------------------------------------------------------------\n# REQUIRED PARAMETERS\n# You must provide a value for each of these parameters.\n# ---------------------------------------------------------------------------------------------------------------------\n\n# None\n\n# ---------------------------------------------------------------------------------------------------------------------\n# OPTIONAL PARAMETERS\n# These parameters have reasonable defaults.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"ami_id\" {\n  description = \"The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/nomad-consul-ami/nomad-consul.json. If no AMI is specified, the template will 'just work' by using the example public AMIs. WARNING! Do not use the example AMIs in a production setting!\"\n  type        = string\n  default     = null\n}\n\nvariable \"nomad_cluster_name\" {\n  description = \"What to name the Nomad cluster and all of its associated resources\"\n  type        = string\n  default     = \"nomad-example\"\n}\n\nvariable \"consul_cluster_name\" {\n  description = \"What to name the Consul cluster and all of its associated resources\"\n  type        = string\n  default     = \"consul-example\"\n}\n\nvariable \"num_nomad_servers\" {\n  description = \"The number of Nomad server nodes to deploy. 
We strongly recommend using 3 or 5.\"\n  type        = number\n  default     = 3\n}\n\nvariable \"num_nomad_clients\" {\n  description = \"The number of Nomad client nodes to deploy. You can deploy as many as you need to run your jobs.\"\n  type        = number\n  default     = 6\n}\n\nvariable \"num_consul_servers\" {\n  description = \"The number of Consul server nodes to deploy. We strongly recommend using 3 or 5.\"\n  type        = number\n  default     = 3\n}\n\nvariable \"cluster_tag_key\" {\n  description = \"The tag the Consul EC2 Instances will look for to automatically discover each other and form a cluster.\"\n  type        = string\n  default     = \"consul-servers\"\n}\n\nvariable \"ssh_key_name\" {\n  description = \"The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to null to not associate a Key Pair.\"\n  type        = string\n  default     = null\n}\n\n"
  },
  {
    "path": "examples/nomad-examples-helper/README.md",
    "content": "# Nomad Examples Helper\n\nThis folder contains a helper script called `nomad-examples-helper.sh` for working with the \n[nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) and\n[nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) examples. After running `terraform apply` on\nthe examples, if you run `nomad-examples-helper.sh`, it will automatically:\n\n1. Wait for the Nomad server cluster to come up.\n1. Print out the IP addresses of the Nomad servers.\n1. Print out some example commands you can run against your Nomad servers.\n\nThis folder also contains an example Nomad job called `example.nomad` that you can run in your Nomad cluster. This job \nsimply echoes \"Hello, World!\"\n\n"
  },
  {
    "path": "examples/nomad-examples-helper/example.nomad",
    "content": "# There can only be a single job definition per file. This job is named\n# \"example\" so it will create a job with the ID and Name \"example\".\n\n# The \"job\" stanza is the top-most configuration option in the job\n# specification. A job is a declarative specification of tasks that Nomad\n# should run. Jobs have a globally unique name, one or many task groups, which\n# are themselves collections of one or many tasks.\n#\n# For more information and examples on the \"job\" stanza, please see\n# the online documentation at:\n#\n#     https://www.nomadproject.io/docs/job-specification/job.html\n#\njob \"example\" {\n  # The \"region\" parameter specifies the region in which to execute the job. If\n  # omitted, this inherits the default region name of \"global\". Note that this example job\n  # is hard-coded to us-east-1, so if you are running your example elsewhere, make\n  # sure to update this setting, as well as the datacenters setting.\n  region = \"us-east-1\"\n\n  # The \"datacenters\" parameter specifies the list of datacenters which should\n  # be considered when placing this task. This must be provided. Note that this example job\n  # is hard-coded to us-east-1, so if you are running your example elsewhere, make\n  # sure to update this setting, as well as the region setting.\n  datacenters = [\"us-east-1a\", \"us-east-1b\", \"us-east-1c\", \"us-east-1d\", \"us-east-1e\"]\n\n  # The \"type\" parameter controls the type of job, which impacts the scheduler's\n  # decision on placement. This configuration is optional and defaults to\n  # \"service\". 
For a full list of job types and their differences, please see\n  # the online documentation.\n  #\n  # For more information, please see the online documentation at:\n  #\n  #     https://www.nomadproject.io/docs/jobspec/schedulers.html\n  #\n  type = \"batch\"\n\n  # The \"constraint\" stanza defines additional constraints for placing this job,\n  # in addition to any resource or driver constraints. This stanza may be placed\n  # at the \"job\", \"group\", or \"task\" level, and supports variable interpolation.\n  #\n  # For more information and examples on the \"constraint\" stanza, please see\n  # the online documentation at:\n  #\n  #     https://www.nomadproject.io/docs/job-specification/constraint.html\n  #\n  # constraint {\n  #   attribute = \"${attr.kernel.name}\"\n  #   value     = \"linux\"\n  # }\n\n  # The \"update\" stanza specifies the job update strategy. The update strategy\n  # is used to control things like rolling upgrades. If omitted, rolling\n  # updates are disabled.\n  #\n  # For more information and examples on the \"update\" stanza, please see\n  # the online documentation at:\n  #\n  #     https://www.nomadproject.io/docs/job-specification/update.html\n  #\n  # update {\n  #  # The \"stagger\" parameter specifies to do rolling updates of this job every\n  #  # 10 seconds.\n  #  stagger = \"10s\"\n\n  #  # The \"max_parallel\" parameter specifies the maximum number of updates to\n  #  # perform in parallel. In this case, this specifies to update a single task\n  #  # at a time.\n  #  max_parallel = 1\n  # }\n\n  # The \"group\" stanza defines a series of tasks that should be co-located on\n  # the same Nomad client. 
Any task within a group will be placed on the same\n  # client.\n  #\n  # For more information and examples on the \"group\" stanza, please see\n  # the online documentation at:\n  #\n  #     https://www.nomadproject.io/docs/job-specification/group.html\n  #\n  group \"cache\" {\n    # The \"count\" parameter specifies the number of the task groups that should\n    # be running under this group. This value must be non-negative and defaults\n    # to 1.\n    count = 1\n\n    # The \"restart\" stanza configures a group's behavior on task failure. If\n    # left unspecified, a default restart policy is used based on the job type.\n    #\n    # For more information and examples on the \"restart\" stanza, please see\n    # the online documentation at:\n    #\n    #     https://www.nomadproject.io/docs/job-specification/restart.html\n    #\n    restart {\n      # The number of attempts to run the job within the specified interval.\n      attempts = 10\n      interval = \"5m\"\n\n      # The \"delay\" parameter specifies the duration to wait before restarting\n      # a task after it has failed.\n      delay = \"25s\"\n\n     # The \"mode\" parameter controls what happens when a task has restarted\n     # \"attempts\" times within the interval. \"delay\" mode delays the next\n     # restart until the next interval. \"fail\" mode does not restart the task\n     # if \"attempts\" has been hit within the interval.\n      mode = \"delay\"\n    }\n\n    # The \"ephemeral_disk\" stanza instructs Nomad to utilize an ephemeral disk\n    # instead of a hard disk requirement. Clients using this stanza should\n    # not specify disk requirements in the resources stanza of the task. 
All\n    # tasks in this group will share the same ephemeral disk.\n    #\n    # For more information and examples on the \"ephemeral_disk\" stanza, please\n    # see the online documentation at:\n    #\n    #     https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html\n    #\n    ephemeral_disk {\n      # When sticky is true and the task group is updated, the scheduler\n      # will prefer to place the updated allocation on the same node and\n      # will migrate the data. This is useful for tasks that store data\n      # that should persist across allocation updates.\n      # sticky = true\n      # \n      # Setting migrate to true results in the allocation directory of a\n      # sticky allocation directory to be migrated.\n      # migrate = true\n\n      # The \"size\" parameter specifies the size in MB of shared ephemeral disk\n      # between tasks in the group.\n      size = 300\n    }\n\n    # The \"task\" stanza creates an individual unit of work, such as a Docker\n    # container, web application, or batch processing.\n    #\n    # For more information and examples on the \"task\" stanza, please see\n    # the online documentation at:\n    #\n    #     https://www.nomadproject.io/docs/job-specification/task.html\n    #\n    task \"hello_world\" {\n      # The \"driver\" parameter specifies the task driver that should be used to\n      # run the task.\n      driver = \"exec\"\n\n      # The \"config\" stanza specifies the driver configuration, which is passed\n      # directly to the driver to start the task. The details of configurations\n      # are specific to each driver, so please see specific driver\n      # documentation for more information.\n      config {\n        command = \"/bin/echo\"\n        args    = [\"Hello, World!\"]\n      }\n\n      # The \"artifact\" stanza instructs Nomad to download an artifact from a\n      # remote source prior to starting the task. 
This provides a convenient\n      # mechanism for downloading configuration files or data needed to run the\n      # task. It is possible to specify the \"artifact\" stanza multiple times to\n      # download multiple artifacts.\n      #\n      # For more information and examples on the \"artifact\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/artifact.html\n      #\n      # artifact {\n      #   source = \"http://foo.com/artifact.tar.gz\"\n      #   options {\n      #     checksum = \"md5:c4aa853ad2215426eb7d70a21922e794\"\n      #   }\n      # }\n\n      # The \"logs\" stanza instructs the Nomad client on how many log files and\n      # the maximum size of those log files to retain. Logging is enabled by\n      # default, but the \"logs\" stanza allows for finer-grained control over\n      # the log rotation and storage configuration.\n      #\n      # For more information and examples on the \"logs\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/logs.html\n      #\n      # logs {\n      #   max_files     = 10\n      #   max_file_size = 15\n      # }\n\n      # The \"resources\" stanza describes the requirements a task needs to\n      # execute. 
Resource requirements include memory, network, cpu, and more.\n      # This ensures the task will execute on a machine that contains enough\n      # resource capacity.\n      #\n      # For more information and examples on the \"resources\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/resources.html\n      #\n      resources {\n        cpu    = 500 # 500 MHz\n        memory = 256 # 256MB\n        network {\n          mbits = 10\n          port \"db\" {}\n        }\n      }\n\n      # The \"service\" stanza instructs Nomad to register this task as a service\n      # in the service discovery engine, which is currently Consul. This will\n      # make the service addressable after Nomad has placed it on a host and\n      # port.\n      #\n      # For more information and examples on the \"service\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/service.html\n      #\n      # service {\n      #   name = \"global-redis-check\"\n      #   tags = [\"global\", \"cache\"]\n      #  port = \"db\"\n      #   check {\n      #     name     = \"alive\"\n      #     type     = \"tcp\"\n      #     interval = \"10s\"\n      #     timeout  = \"2s\"\n      #   }\n      #  }\n\n      # The \"template\" stanza instructs Nomad to manage a template, such as\n      # a configuration file or script. 
This template can optionally pull data\n      # from Consul or Vault to populate runtime configuration data.\n      #\n      # For more information and examples on the \"template\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/template.html\n      #\n      # template {\n      #   data          = \"---\\nkey: {{ key \\\"service/my-key\\\" }}\"\n      #   destination   = \"local/file.yml\"\n      #   change_mode   = \"signal\"\n      #   change_signal = \"SIGHUP\"\n      # }\n\n      # The \"vault\" stanza instructs the Nomad client to acquire a token from\n      # a HashiCorp Vault server. The Nomad servers must be configured and\n      # authorized to communicate with Vault. By default, Nomad will inject\n      # The token into the job via an environment variable and make the token\n      # available to the \"template\" stanza. The Nomad client handles the renewal\n      # and revocation of the Vault token.\n      #\n      # For more information and examples on the \"vault\" stanza, please see\n      # the online documentation at:\n      #\n      #     https://www.nomadproject.io/docs/job-specification/vault.html\n      #\n      # vault {\n      #   policies      = [\"cdn\", \"frontend\"]\n      #   change_mode   = \"signal\"\n      #   change_signal = \"SIGHUP\"\n      # }\n\n      # Controls the timeout between signalling a task it will be killed\n      # and killing the task. If not set a default is used.\n      # kill_timeout = \"20s\"\n    }\n  }\n}"
  },
  {
    "path": "examples/nomad-examples-helper/nomad-examples-helper.sh",
    "content": "#!/bin/bash\n# A script that is meant to be used with the Nomad cluster examples to:\n#\n# 1. Wait for the Nomad server cluster to come up.\n# 2. Print out the IP addresses of the Nomad servers.\n# 3. Print out some example commands you can run against your Nomad servers.\n\nset -e\n\nreadonly SCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nreadonly SCRIPT_NAME=\"$(basename \"$0\")\"\n\nreadonly MAX_RETRIES=30\nreadonly SLEEP_BETWEEN_RETRIES_SEC=10\n\nfunction log {\n  local readonly level=\"$1\"\n  local readonly message=\"$2\"\n  local readonly timestamp=$(date +\"%Y-%m-%d %H:%M:%S\")\n  >&2 echo -e \"${timestamp} [${level}] [$SCRIPT_NAME] ${message}\"\n}\n\nfunction log_info {\n  local readonly message=\"$1\"\n  log \"INFO\" \"$message\"\n}\n\nfunction log_warn {\n  local readonly message=\"$1\"\n  log \"WARN\" \"$message\"\n}\n\nfunction log_error {\n  local readonly message=\"$1\"\n  log \"ERROR\" \"$message\"\n}\n\nfunction assert_is_installed {\n  local readonly name=\"$1\"\n\n  if [[ ! 
$(command -v ${name}) ]]; then\n    log_error \"The binary '$name' is required by this script but is not installed or in the system's PATH.\"\n    exit 1\n  fi\n}\n\nfunction get_required_terraform_output {\n  local readonly output_name=\"$1\"\n  local output_value\n\n  output_value=$(terraform output -raw -no-color \"$output_name\")\n\n  if [[ -z \"$output_value\" ]]; then\n    log_error \"Unable to find a value for Terraform output $output_name\"\n    exit 1\n  fi\n\n  echo \"$output_value\"\n}\n\n#\n# Usage: join SEPARATOR ARRAY\n#\n# Joins the elements of ARRAY with the SEPARATOR character between them.\n#\n# Examples:\n#\n# join \", \" (\"A\" \"B\" \"C\")\n#   Returns: \"A, B, C\"\n#\nfunction join {\n  local readonly separator=\"$1\"\n  shift\n  local readonly values=(\"$@\")\n\n  printf \"%s$separator\" \"${values[@]}\" | sed \"s/$separator$//\"\n}\n\nfunction get_all_nomad_server_ips {\n  local expected_num_nomad_servers\n  expected_num_nomad_servers=$(get_required_terraform_output \"num_nomad_servers\")\n\n  log_info \"Looking up public IP addresses for $expected_num_nomad_servers Nomad server EC2 Instances.\"\n\n  local ips\n  local i\n\n  for (( i=1; i<=\"$MAX_RETRIES\"; i++ )); do\n    ips=($(get_nomad_server_ips))\n    if [[ \"${#ips[@]}\" -eq \"$expected_num_nomad_servers\" ]]; then\n      log_info \"Found all $expected_num_nomad_servers public IP addresses!\"\n      echo \"${ips[@]}\"\n      return\n    else\n      log_warn \"Found ${#ips[@]} of $expected_num_nomad_servers public IP addresses. 
Will sleep for $SLEEP_BETWEEN_RETRIES_SEC seconds and try again.\"\n      sleep \"$SLEEP_BETWEEN_RETRIES_SEC\"\n    fi\n  done\n\n  log_error \"Failed to find the IP addresses for $expected_num_nomad_servers Nomad server EC2 Instances after $MAX_RETRIES retries.\"\n  exit 1\n}\n\nfunction wait_for_all_nomad_servers_to_register {\n  local readonly server_ips=($@)\n  local readonly server_ip=\"${server_ips[0]}\"\n\n  local expected_num_nomad_servers\n  expected_num_nomad_servers=$(get_required_terraform_output \"num_nomad_servers\")\n\n  log_info \"Waiting for $expected_num_nomad_servers Nomad servers to register in the cluster\"\n\n  for (( i=1; i<=\"$MAX_RETRIES\"; i++ )); do\n    log_info \"Running 'nomad server members' command against server at IP address $server_ip\"\n    # Intentionally use local and readonly here so that this script doesn't exit if the nomad server members or grep\n    # commands exit with an error.\n    local readonly members=$(nomad server members -address=\"http://$server_ip:4646\")\n    local readonly alive_members=$(echo \"$members\" | grep \"alive\")\n    local readonly num_nomad_servers=$(echo \"$alive_members\" | wc -l | tr -d ' ')\n\n    if [[ \"$num_nomad_servers\" -eq \"$expected_num_nomad_servers\" ]]; then\n      log_info \"All $expected_num_nomad_servers Nomad servers have registered in the cluster!\"\n      return\n    else\n      log_info \"$num_nomad_servers out of $expected_num_nomad_servers Nomad servers have registered in the cluster.\"\n      log_info \"Sleeping for $SLEEP_BETWEEN_RETRIES_SEC seconds and will check again.\"\n      sleep \"$SLEEP_BETWEEN_RETRIES_SEC\"\n    fi\n  done\n\n  log_error \"Did not find $expected_num_nomad_servers Nomad servers registered after $MAX_RETRIES retries.\"\n  exit 1\n}\n\nfunction get_nomad_server_ips {\n  local aws_region\n  local cluster_tag_key\n  local cluster_tag_value\n  local instances\n\n  aws_region=$(get_required_terraform_output \"aws_region\")\n  
cluster_tag_key=$(get_required_terraform_output \"nomad_servers_cluster_tag_key\")\n  cluster_tag_value=$(get_required_terraform_output \"nomad_servers_cluster_tag_value\")\n\n  log_info \"Fetching public IP addresses for EC2 Instances in $aws_region with tag $cluster_tag_key=$cluster_tag_value\"\n\n  instances=$(aws ec2 describe-instances \\\n    --region \"$aws_region\" \\\n    --filter \"Name=tag:$cluster_tag_key,Values=$cluster_tag_value\" \"Name=instance-state-name,Values=running\")\n\n  echo \"$instances\" | jq -r '.Reservations[].Instances[].PublicIpAddress'\n}\n\nfunction print_instructions {\n  local readonly server_ips=($@)\n  local readonly server_ip=\"${server_ips[0]}\"\n\n  local instructions=()\n  instructions+=(\"\\nYour Nomad servers are running at the following IP addresses:\\n\\n${server_ips[@]/#/    }\\n\")\n  instructions+=(\"Some commands for you to try:\\n\")\n  instructions+=(\"    nomad server members -address=http://$server_ip:4646\")\n  instructions+=(\"    nomad node status -address=http://$server_ip:4646\")\n  instructions+=(\"    nomad run -address=http://$server_ip:4646 $SCRIPT_DIR/example.nomad\")\n  instructions+=(\"    nomad status -address=http://$server_ip:4646 example\\n\")\n\n  local instructions_str\n  instructions_str=$(join \"\\n\" \"${instructions[@]}\")\n\n  echo -e \"$instructions_str\"\n}\n\nfunction run {\n  assert_is_installed \"aws\"\n  assert_is_installed \"jq\"\n  assert_is_installed \"terraform\"\n  assert_is_installed \"nomad\"\n\n  local server_ips\n  server_ips=$(get_all_nomad_server_ips)\n\n  wait_for_all_nomad_servers_to_register \"$server_ips\"\n  print_instructions \"$server_ips\"\n}\n\nrun\n"
  },
  {
    "path": "examples/root-example/README.md",
    "content": "# Nomad and Consul Co-located Cluster Example\n\nThis folder shows an example of Terraform code to deploy a [Nomad](https://www.nomadproject.io/) cluster co-located \nwith a [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/) (if you want to run Nomad and Consul \non separate clusters, see the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) \ninstead). The cluster consists of two Auto Scaling Groups (ASGs): one with a small number of Nomad and Consul server \nnodes, which are responsible for being part of the [consensus \nprotocol](https://www.nomadproject.io/docs/internals/consensus.html), and one with a larger number of Nomad and Consul \nclient nodes, which are used to run jobs:\n\n![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture-nomad-consul-colocated.png)\n\nYou will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) \nthat has Nomad and Consul installed, which you can do using the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami)).  \n\nFor more info on how the Nomad cluster works, check out the [nomad-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) documentation.\n\n\n\n\n## Quick start\n\nTo deploy a Nomad Cluster:\n\n1. `git clone` this repo to your computer.\n1. Optional: build a Nomad and Consul AMI. See the [nomad-consul-ami\n   example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) documentation for\n   instructions. Make sure to note down the ID of the AMI.\n1. Install [Terraform](https://www.terraform.io/).\n1. 
Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that\n   don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our\n   public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT\n   recommended for production use.\n1. Run `terraform init`.\n1. Run `terraform apply`.\n1. Run the [nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh) to print out\n   the IP addresses of the Nomad servers and some example commands you can run to interact with the cluster:\n   `../nomad-examples-helper/nomad-examples-helper.sh`.\n   \n"
  },
  {
    "path": "examples/root-example/user-data-client.sh",
    "content": "#!/bin/bash\n# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the\n# run-nomad and run-consul scripts to configure and start Nomad and Consul in client mode. Note that this script\n# assumes it's running in an AMI built from the Packer template in examples/nomad-consul-ami/nomad-consul.json.\n\nset -e\n\n# Send the log output from this script to user-data.log, syslog, and the console\n# From: https://alestic.com/2010/12/ec2-user-data-output/\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n\n# These variables are passed in via Terraform template interplation\n/opt/consul/bin/run-consul --client --cluster-tag-key \"${cluster_tag_key}\" --cluster-tag-value \"${cluster_tag_value}\"\n/opt/nomad/bin/run-nomad --client\n\n"
  },
  {
    "path": "examples/root-example/user-data-server.sh",
    "content": "#!/bin/bash\n# This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the\n# run-nomad and run-consul scripts to configure and start Consul and Nomad in server mode. Note that this script\n# assumes it's running in an AMI built from the Packer template in examples/nomad-consul-ami/nomad-consul.json.\n\nset -e\n\n# Send the log output from this script to user-data.log, syslog, and the console\n# From: https://alestic.com/2010/12/ec2-user-data-output/\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\n\n# These variables are passed in via Terraform template interplation\n/opt/consul/bin/run-consul --server --cluster-tag-key \"${cluster_tag_key}\" --cluster-tag-value \"${cluster_tag_value}\"\n/opt/nomad/bin/run-nomad --server --num-servers \"${num_servers}\""
  },
  {
    "path": "main.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# DEPLOY A NOMAD CLUSTER CO-LOCATED WITH A CONSUL CLUSTER IN AWS\n# These templates show an example of how to use the nomad-cluster module to deploy a Nomad cluster in AWS. This cluster\n# has Consul colocated on the same nodes.\n#\n# We deploy two Auto Scaling Groups (ASGs): one with a small number of Nomad and Consul server nodes and one with a\n# larger number of Nomad and Consul client nodes. Note that these templates assume that the AMI you provide via the\n# ami_id input variable is built from the examples/nomad-consul-ami/nomad-consul.json Packer template.\n# ---------------------------------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------------------------------\n# REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER\n# ----------------------------------------------------------------------------------------------------------------------\nterraform {\n  # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting\n  # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it\n  # forwards compatible with 1.0.x code.\n  required_version = \">= 0.12.26\"\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI\n# This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer\n# template at /examples/nomad-consul-ami upon every new release. 
The Terraform data source below automatically looks up\n# the latest AMI so that a simple \"terraform apply\" will just work without the user needing to manually build an AMI and\n# fill in the right value.\n#\n# !! WARNING !! These example AMIs are meant only for convenience when initially testing this repo. Do NOT use these example\n# AMIs in a production setting because it is important that you consciously think through the configuration you want\n# in your own production AMI.\n#\n# NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. See\n# /_ci/publish-amis-in-new-account.md for more information.\n# ---------------------------------------------------------------------------------------------------------------------\ndata \"aws_ami\" \"nomad_consul\" {\n  most_recent = true\n\n  # If we change the AWS Account in which tests are run, update this value.\n  owners = [\"562637147889\"]\n\n  filter {\n    name   = \"virtualization-type\"\n    values = [\"hvm\"]\n  }\n\n  filter {\n    name   = \"is-public\"\n    values = [\"true\"]\n  }\n\n  filter {\n    name   = \"name\"\n    values = [\"nomad-consul-ubuntu-*\"]\n  }\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE SERVER NODES\n# Note that we use the consul-cluster module to deploy both the Nomad and Consul nodes on the same servers\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"servers\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0\"\n\n  cluster_name  = \"${var.cluster_name}-server\"\n  cluster_size  = var.num_servers\n  instance_type = var.server_instance_type\n\n  # The EC2 Instances will use these tags to automatically discover each other and form a cluster\n  cluster_tag_key   = var.cluster_tag_key\n  cluster_tag_value = 
var.cluster_tag_value\n\n  ami_id    = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id\n  user_data = data.template_file.user_data_server.rendered\n\n  vpc_id     = data.aws_vpc.default.id\n  subnet_ids = data.aws_subnet_ids.default.ids\n\n  # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly\n  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  allowed_ssh_cidr_blocks = [\"0.0.0.0/0\"]\n\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n  ssh_key_name                = var.ssh_key_name\n\n  tags = [\n    {\n      key                 = \"Environment\"\n      value               = \"development\"\n      propagate_at_launch = true\n    },\n  ]\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# ATTACH SECURITY GROUP RULES FOR NOMAD\n# Our Nomad servers are running on top of the consul-cluster module, so we need to configure that cluster to allow\n# the inbound/outbound connections used by Nomad.\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"nomad_security_group_rules\" {\n  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you\n  # to a specific version of the modules, such as the following example:\n  # source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-security-group-rules?ref=v0.0.1\"\n  source = \"./modules/nomad-security-group-rules\"\n\n  # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly\n  # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  security_group_id = module.servers.security_group_id\n\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n}\n\n# 
---------------------------------------------------------------------------------------------------------------------\n# THE USER DATA SCRIPT THAT WILL RUN ON EACH SERVER NODE WHEN IT'S BOOTING\n# This script will configure and start Consul and Nomad\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"template_file\" \"user_data_server\" {\n  template = file(\"${path.module}/examples/root-example/user-data-server.sh\")\n\n  vars = {\n    cluster_tag_key   = var.cluster_tag_key\n    cluster_tag_value = var.cluster_tag_value\n    num_servers       = var.num_servers\n  }\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE CLIENT NODES\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"clients\" {\n  # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you\n  # to a specific version of the modules, such as the following example:\n  # source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1\"\n  source = \"./modules/nomad-cluster\"\n\n  cluster_name  = \"${var.cluster_name}-client\"\n  instance_type = var.instance_type\n\n  # Give the clients a different tag so they don't try to join the server cluster\n  cluster_tag_key   = \"nomad-clients\"\n  cluster_tag_value = var.cluster_name\n\n  # To keep the example simple, we are using a fixed-size cluster. In real-world usage, you could use auto scaling\n  # policies to dynamically resize the cluster in response to load.\n  min_size = var.num_clients\n\n  max_size         = var.num_clients\n  desired_capacity = var.num_clients\n\n  ami_id    = var.ami_id == null ? 
data.aws_ami.nomad_consul.image_id : var.ami_id\n  user_data = data.template_file.user_data_client.rendered\n\n  vpc_id     = data.aws_vpc.default.id\n  subnet_ids = data.aws_subnet_ids.default.ids\n\n  # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production\n  # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC.\n  allowed_ssh_cidr_blocks = [\"0.0.0.0/0\"]\n\n  allowed_inbound_cidr_blocks = [\"0.0.0.0/0\"]\n  ssh_key_name                = var.ssh_key_name\n\n  tags = [\n    {\n      key                 = \"Environment\"\n      value               = \"development\"\n      propagate_at_launch = true\n    }\n  ]\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# ATTACH IAM POLICIES FOR CONSUL\n# To allow our client Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from\n# the Consul AWS Module's consul-iam-policies module.\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"consul_iam_policies\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0\"\n\n  iam_role_id = module.clients.iam_role_id\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# THE USER DATA SCRIPT THAT WILL RUN ON EACH CLIENT NODE WHEN IT'S BOOTING\n# This script will configure and start Consul and Nomad\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"template_file\" \"user_data_client\" {\n  template = file(\"${path.module}/examples/root-example/user-data-client.sh\")\n\n  vars = {\n    cluster_tag_key   = var.cluster_tag_key\n    cluster_tag_value = var.cluster_tag_value\n  }\n}\n\n# 
---------------------------------------------------------------------------------------------------------------------\n# DEPLOY THE CLUSTER IN THE DEFAULT VPC AND SUBNETS\n# Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Nomad are\n# accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC\n# and private subnets.\n# ---------------------------------------------------------------------------------------------------------------------\n\ndata \"aws_vpc\" \"default\" {\n  default = var.vpc_id == \"\" ? true : false\n  id      = var.vpc_id\n}\n\ndata \"aws_subnet_ids\" \"default\" {\n  vpc_id = data.aws_vpc.default.id\n}\n\ndata \"aws_region\" \"current\" {\n}\n"
  },
  {
    "path": "modules/install-nomad/README.md",
    "content": "# Nomad Install Script\n\nThis folder contains a script for installing Nomad and its dependencies. You can use this script, along with the\n[run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) it installs to create a Nomad [Amazon Machine Image\n(AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that can be deployed in\n[AWS](https://aws.amazon.com/) across an Auto Scaling Group using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster).\n\nThis script has been tested on the following operating systems:\n\n* Ubuntu 16.04\n* Ubuntu 18.04\n* Amazon Linux 2\n\nThere is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well.\n\n\n\n## Quick start\n\n<!-- TODO: update the clone URL to the final URL when this Module is released -->\n\nTo install Nomad, use `git` to clone this repository at a specific tag (see the [releases page](../../../../releases)\nfor all available tags) and run the `install-nomad` script:\n\n```\ngit clone --branch <VERSION> https://github.com/hashicorp/terraform-aws-nomad.git\nterraform-aws-nomad/modules/install-nomad/install-nomad --version 0.5.4\n```\n\nThe `install-nomad` script will install Nomad, its dependencies, and the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad).\nYou can then run the `run-nomad` script when the server is booting to start Nomad and configure it to automatically\njoin other nodes to form a cluster.\n\nWe recommend running the `install-nomad` script as part of a [Packer](https://www.packer.io/) template to create a\nNomad [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (see the\n[nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) for sample code). 
You can then deploy the AMI across an Auto\nScaling Group using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) (see the\n[nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) and\n[nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) examples for fully-working sample code).\n\n\n\n\n## Command line Arguments\n\nThe `install-nomad` script accepts the following arguments:\n\n* `version VERSION`: Install Nomad version VERSION. Required.\n* `path DIR`: Install Nomad into folder DIR. Optional.\n* `user USER`: The install dirs will be owned by user USER. Optional.\n\nExample:\n\n```\ninstall-nomad --version 0.5.4\n```\n\n\n\n## How it works\n\nThe `install-nomad` script does the following:\n\n1. [Create a user and folders for Nomad](#create-a-user-and-folders-for-nomad)\n1. [Install Nomad binaries and scripts](#install-nomad-binaries-and-scripts)\n1. [Follow-up tasks](#follow-up-tasks)\n\n\n### Create a user and folders for Nomad\n\nCreate an OS user named `nomad`. Create the following folders, all owned by user `nomad`:\n\n* `/opt/nomad`: base directory for Nomad data (configurable via the `--path` argument).\n* `/opt/nomad/bin`: directory for Nomad binaries.\n* `/opt/nomad/data`: directory where the Nomad agent can store state.\n* `/opt/nomad/config`: directory where the Nomad agent looks up configuration.\n* `/opt/nomad/log`: directory where the Nomad agent will store log files.\n\n\n### Install Nomad binaries and scripts\n\nInstall the following:\n\n* `nomad`: Download the Nomad zip file from the [downloads page](https://www.nomadproject.io/downloads.html) (the\n  version number is configurable via the `--version` argument), and extract the `nomad` binary into\n  `/opt/nomad/bin`. 
Add a symlink to the `nomad` binary in `/usr/local/bin`.\n* `run-nomad`: Copy the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) into `/opt/nomad/bin`.\n\n\n### Follow-up tasks\n\nAfter the `install-nomad` script finishes running, you may wish to do the following:\n\n1. If you have custom Nomad config (`.hcl`) files, you may want to copy them into the config directory (default:\n   `/opt/nomad/config`).\n1. If `/usr/local/bin` isn't already part of `PATH`, you should add it so you can run the `nomad` command without\n   specifying the full path.\n\n\n\n## Dependencies\n\nThe install script assumes that `systemd` is already installed.  We use it as a cross-platform supervisor to ensure Nomad is started\nwhenever the system boots and restarted if the Nomad process crashes.  Additionally, it is used to store all logs which can be accessed\nusing `journalctl`.\n\n\n## Why use Git to install this code?\n\n<!-- TODO: update the package managers URL to the final URL when this Module is released -->\n\nWe needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety\nof operating systems and supported versioning. Our current solution is to use `git`, but this may change in the future.\nSee [Package Managers](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/package-managers.md) for\na full discussion of the requirements, trade-offs, and why we picked `git`.\n"
  },
  {
    "path": "modules/install-nomad/install-nomad",
    "content": "#!/bin/bash\n# This script can be used to install Nomad and its dependencies. This script has been tested with the following\n# operating systems:\n#\n# 1. Ubuntu 16.04\n# 2. Ubuntu 18.04\n# 3. Amazon Linux 2\n\nset -e\n\nreadonly DEFAULT_INSTALL_PATH=\"/opt/nomad\"\nreadonly DEFAULT_NOMAD_USER=\"nomad\"\n\nreadonly SCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nreadonly SYSTEM_BIN_DIR=\"/usr/local/bin\"\n\nreadonly SUPERVISOR_DIR=\"/etc/supervisor\"\nreadonly SUPERVISOR_CONF_DIR=\"$SUPERVISOR_DIR/conf.d\"\n\nreadonly SCRIPT_NAME=\"$(basename \"$0\")\"\n\nfunction print_usage {\n  echo\n  echo \"Usage: install-nomad [OPTIONS]\"\n  echo\n  echo \"This script can be used to install Nomad and its dependencies. This script has been tested with Ubuntu 16.04, Ubuntu 18.04 and Amazon Linux 2.\"\n  echo\n  echo \"Options:\"\n  echo\n  echo -e \"  --version\\t\\tThe version of Nomad to install. Required.\"\n  echo -e \"  --path\\t\\tThe path where Nomad should be installed. Optional. Default: $DEFAULT_INSTALL_PATH.\"\n  echo -e \"  --user\\t\\tThe user who will own the Nomad install directories. Optional. 
Default: $DEFAULT_NOMAD_USER.\"\n  echo\n  echo \"Example:\"\n  echo\n  echo \"  install-nomad --version 0.5.4\"\n}\n\nfunction log {\n  local readonly level=\"$1\"\n  local readonly message=\"$2\"\n  local readonly timestamp=$(date +\"%Y-%m-%d %H:%M:%S\")\n  >&2 echo -e \"${timestamp} [${level}] [$SCRIPT_NAME] ${message}\"\n}\n\nfunction log_info {\n  local readonly message=\"$1\"\n  log \"INFO\" \"$message\"\n}\n\nfunction log_warn {\n  local readonly message=\"$1\"\n  log \"WARN\" \"$message\"\n}\n\nfunction log_error {\n  local readonly message=\"$1\"\n  log \"ERROR\" \"$message\"\n}\n\nfunction assert_not_empty {\n  local readonly arg_name=\"$1\"\n  local readonly arg_value=\"$2\"\n\n  if [[ -z \"$arg_value\" ]]; then\n    log_error \"The value for '$arg_name' cannot be empty\"\n    print_usage\n    exit 1\n  fi\n}\n\nfunction has_yum {\n  [ -n \"$(command -v yum)\" ]\n}\n\nfunction has_apt_get {\n  [ -n \"$(command -v apt-get)\" ]\n}\n\nfunction install_dependencies {\n  log_info \"Installing dependencies\"\n\n  if $(has_apt_get); then\n    sudo apt-get update -y\n    sudo apt-get install -y awscli curl unzip jq\n  elif $(has_yum); then\n    sudo yum update -y\n    sudo yum install -y aws curl unzip jq\n  else\n    log_error \"Could not find apt-get or yum. Cannot install dependencies on this OS.\"\n    exit 1\n  fi\n}\n\nfunction user_exists {\n  local readonly username=\"$1\"\n  id \"$username\" >/dev/null 2>&1\n}\n\nfunction create_nomad_user {\n  local readonly username=\"$1\"\n\n  if $(user_exists \"$username\"); then\n    echo \"User $username already exists. 
Will not create again.\"\n  else\n    log_info \"Creating user named $username\"\n    sudo useradd \"$username\"\n  fi\n}\n\nfunction create_nomad_install_paths {\n  local readonly path=\"$1\"\n  local readonly username=\"$2\"\n\n  log_info \"Creating install dirs for Nomad at $path\"\n  sudo mkdir -p \"$path\"\n  sudo mkdir -p \"$path/bin\"\n  sudo mkdir -p \"$path/config\"\n  sudo mkdir -p \"$path/data\"\n\n  log_info \"Changing ownership of $path to $username\"\n  sudo chown -R \"$username:$username\" \"$path\"\n}\n\nfunction install_binaries {\n  local readonly version=\"$1\"\n  local readonly path=\"$2\"\n  local readonly username=\"$3\"\n\n  local cpu_arch\n  cpu_arch=\"$(uname -m)\"\n  local binary_arch=\"\"\n  case \"$cpu_arch\" in\n    x86_64)\n      binary_arch=\"amd64\"\n      ;;\n    x86)\n      binary_arch=\"386\"\n      ;;\n    arm64|aarch64)\n      binary_arch=\"arm64\"\n      ;;\n    arm*)\n      binary_arch=\"arm\"\n      ;;\n    *)\n      log_error \"CPU architecture $cpu_arch is not a supported by Consul.\"\n      exit 1\n      ;;\n    esac\n\n  local readonly url=\"https://releases.hashicorp.com/nomad/${version}/nomad_${version}_linux_${binary_arch}.zip\"\n  local readonly download_path=\"/tmp/nomad_${version}_linux_${binary_arch}.zip\"\n  local readonly bin_dir=\"$path/bin\"\n  local readonly nomad_dest_path=\"$bin_dir/nomad\"\n  local readonly run_nomad_dest_path=\"$bin_dir/run-nomad\"\n\n  log_info \"Downloading Nomad $version from $url to $download_path\"\n  curl -o \"$download_path\" \"$url\"\n  unzip -d /tmp \"$download_path\"\n\n  log_info \"Moving Nomad binary to $nomad_dest_path\"\n  sudo mv \"/tmp/nomad\" \"$nomad_dest_path\"\n  sudo chown \"$username:$username\" \"$nomad_dest_path\"\n  sudo chmod a+x \"$nomad_dest_path\"\n\n  local readonly symlink_path=\"$SYSTEM_BIN_DIR/nomad\"\n  if [[ -f \"$symlink_path\" ]]; then\n    log_info \"Symlink $symlink_path already exists. 
Will not add again.\"\n  else\n    log_info \"Adding symlink to $nomad_dest_path in $symlink_path\"\n    sudo ln -s \"$nomad_dest_path\" \"$symlink_path\"\n  fi\n\n  log_info \"Copying Nomad run script to $run_nomad_dest_path\"\n  sudo cp \"$SCRIPT_DIR/../run-nomad/run-nomad\" \"$run_nomad_dest_path\"\n  sudo chown \"$username:$username\" \"$run_nomad_dest_path\"\n  sudo chmod a+x \"$run_nomad_dest_path\"\n}\n\nfunction install {\n  local version=\"\"\n  local path=\"$DEFAULT_INSTALL_PATH\"\n  local user=\"$DEFAULT_NOMAD_USER\"\n\n  while [[ $# > 0 ]]; do\n    local key=\"$1\"\n\n    case \"$key\" in\n      --version)\n        version=\"$2\"\n        shift\n        ;;\n      --path)\n        path=\"$2\"\n        shift\n        ;;\n      --user)\n        user=\"$2\"\n        shift\n        ;;\n      --help)\n        print_usage\n        exit\n        ;;\n      *)\n        log_error \"Unrecognized argument: $key\"\n        print_usage\n        exit 1\n        ;;\n    esac\n\n    shift\n  done\n\n  assert_not_empty \"--version\" \"$version\"\n  assert_not_empty \"--path\" \"$path\"\n  assert_not_empty \"--user\" \"$user\"\n\n  log_info \"Starting Nomad install\"\n\n  install_dependencies\n  create_nomad_user \"$user\"\n  create_nomad_install_paths \"$path\" \"$user\"\n  install_binaries \"$version\" \"$path\" \"$user\"\n\n  log_info \"Nomad install complete!\"\n}\n\ninstall \"$@\"\n"
  },
  {
    "path": "modules/nomad-cluster/README.md",
    "content": "# Nomad Cluster\n\nThis folder contains a [Terraform](https://www.terraform.io/) module that can be used to deploy a\n[Nomad](https://www.nomadproject.io/) cluster in [AWS](https://aws.amazon.com/) on top of an Auto Scaling Group. This\nmodule is designed to deploy an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html)\nthat has Nomad installed via the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module in this Module.\n\nNote that this module assumes you have a separate [Consul](https://www.consul.io/) cluster already running. If you want\nto run Consul and Nomad in the same cluster, instead of using this module, see the [Deploy Nomad and Consul in the same\ncluster documentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/README.md#deploy-nomad-and-consul-in-the-same-cluster).\n\n## How do you use this module?\n\nThis folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your\ncode by adding a `module` configuration and setting its `source` parameter to the URL of this folder:\n\n```hcl\nmodule \"nomad_cluster\" {\n  # TODO: update this to the final URL\n  # Use version v0.0.1 of the nomad-cluster module\n  source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1\"\n\n  # Specify the ID of the Nomad AMI. You should build this using the scripts in the install-nomad module.\n  ami_id = \"ami-abcd1234\"\n\n  # Configure and start Nomad during boot. It will automatically connect to the Consul cluster specified in its\n  # configuration and form a cluster with other Nomad nodes connected to that Consul cluster.\n  user_data = <<-EOF\n              #!/bin/bash\n              /opt/nomad/bin/run-nomad --server --num-servers 3\n              EOF\n\n  # ... 
See variables.tf for the other parameters you must define for the nomad-cluster module\n}\n```\n\nNote the following parameters:\n\n- `source`: Use this parameter to specify the URL of the nomad-cluster module. The double slash (`//`) is intentional\n  and required. Terraform uses it to specify subfolders within a Git repo (see [module\n  sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in\n  this repo. That way, instead of using the latest version of this module from the `master` branch, which\n  will change every time you run Terraform, you're using a fixed version of the repo.\n\n- `ami_id`: Use this parameter to specify the ID of a Nomad [Amazon Machine Image\n  (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) to deploy on each server in the cluster. You\n  should install Nomad in this AMI using the scripts in the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module.\n\n- `user_data`: Use this parameter to specify a [User\n  Data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts) script that each\n  server will run during boot. This is where you can use the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) to configure and\n  run Nomad. The `run-nomad` script is one of the scripts installed by the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad)\n  module.\n\nYou can find the other parameters in [variables.tf](variables.tf).\n\nCheck out the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) for working\nsample code. 
Note that if you want to run Nomad and Consul on the same cluster, see the [nomad-consul-colocated-cluster\nexample](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) instead.\n\n## How do you connect to the Nomad cluster?\n\n### Using the Nomad agent from your own computer\n\nIf you want to connect to the cluster from your own computer, [install\nNomad](https://www.nomadproject.io/docs/install/index.html) and execute commands with the `-address` parameter set to\nthe IP address of one of the servers in your Nomad cluster. Note that this only works if the Nomad cluster is running\nin public subnets and/or your default VPC (as in both [examples](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples)), which is OK for testing and\nexperimentation, but NOT recommended for production usage.\n\nTo use the HTTP API, you first need to get the public IP address of one of the Nomad Instances. If you deployed the\n[nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) or\n[nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) example, the\n[nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh) will do the tag lookup for\nyou automatically (note, you must have the [AWS CLI](https://aws.amazon.com/cli/),\n[jq](https://stedolan.github.io/jq/), and the [Nomad agent](https://www.nomadproject.io/) installed locally):\n\n```\n> ../nomad-examples-helper/nomad-examples-helper.sh\n\nYour Nomad servers are running at the following IP addresses:\n\n34.204.85.139\n52.23.167.204\n54.236.16.38\n```\n\nCopy and paste one of these IPs and use it with the `-address` argument for any [Nomad\ncommand](https://www.nomadproject.io/docs/commands/index.html). 
For example, to see the status of all the Nomad\nservers:\n\n```\n> nomad server members -address=http://<INSTANCE_IP_ADDR>:4646\n\nip-172-31-23-140.global  172.31.23.140  4648  alive   true    2         0.5.4  dc1         global\nip-172-31-23-141.global  172.31.23.141  4648  alive   true    2         0.5.4  dc1         global\nip-172-31-23-142.global  172.31.23.142  4648  alive   true    2         0.5.4  dc1         global\n```\n\nTo see the status of all the Nomad agents:\n\n```\n> nomad node status -address=http://<INSTANCE_IP_ADDR>:4646\n\nID        DC          Name                 Class   Drain  Status\nec2796cd  us-east-1e  i-0059e5cafb8103834  <none>  false  ready\nec2f799e  us-east-1d  i-0a5552c3c375e9ea0  <none>  false  ready\nec226624  us-east-1b  i-0d647981f5407ae32  <none>  false  ready\nec2d4635  us-east-1a  i-0c43dcc509e3d8bdf  <none>  false  ready\nec232ea5  us-east-1d  i-0eff2e6e5989f51c1  <none>  false  ready\nec2d4bd6  us-east-1c  i-01523bf946d98003e  <none>  false  ready\n```\n\nAnd to submit a job called `example.nomad`:\n\n```\n> nomad run -address=http://<INSTANCE_IP_ADDR>:4646 example.nomad\n\n==> Monitoring evaluation \"0d159869\"\n    Evaluation triggered by job \"example\"\n    Allocation \"5cbf23a1\" created: node \"1e1aa1e0\", group \"example\"\n    Evaluation status changed: \"pending\" -> \"complete\"\n==> Evaluation \"0d159869\" finished with status \"complete\"\n```\n\n### Using the Nomad agent on another EC2 Instance\n\nFor production usage, your EC2 Instances should be running the [Nomad\nagent](https://www.nomadproject.io/docs/agent/index.html). The agent nodes should discover the Nomad server nodes\nautomatically using Consul. 
Check out the [Service Discovery\ndocumentation](https://www.nomadproject.io/docs/service-discovery/index.html) for details.\n\n## What's included in this module?\n\nThis module creates the following architecture:\n\n![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture.png)\n\nThis architecture consists of the following resources:\n\n- [Auto Scaling Group](#auto-scaling-group)\n- [Security Group](#security-group)\n- [IAM Role and Permissions](#iam-role-and-permissions)\n\n### Auto Scaling Group\n\nThis module runs Nomad on top of an [Auto Scaling Group (ASG)](https://aws.amazon.com/autoscaling/). Typically, you\nshould run the ASG with 3 or 5 EC2 Instances spread across multiple [Availability\nZones](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html). Each of the EC2\nInstances should be running an AMI that has had Nomad installed via the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad)\nmodule. 
You pass in the ID of the AMI to run using the `ami_id` input parameter.\n\n### Security Group\n\nEach EC2 Instance in the ASG has a Security Group that allows:\n\n- All outbound requests\n- All the inbound ports specified in the [Nomad\n  documentation](https://www.nomadproject.io/docs/agent/configuration/index.html#ports)\n\nThe Security Group ID is exported as an output variable if you need to add additional rules.\n\nCheck out the [Security section](#security) for more details.\n\n### IAM Role and Permissions\n\nEach EC2 Instance in the ASG has an [IAM Role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) attached.\nWe give this IAM role a small set of IAM permissions that each EC2 Instance can use to automatically discover the other\nInstances in its ASG and form a cluster with them.\n\nThe IAM Role ARN is exported as an output variable if you need to add additional permissions.\n\n## How do you roll out updates?\n\nIf you want to deploy a new version of Nomad across the cluster, the best way to do that is to:\n\n1. Build a new AMI.\n1. Set the `ami_id` parameter to the ID of the new AMI.\n1. Run `terraform apply`.\n\nThis updates the Launch Configuration of the ASG, so any new Instances in the ASG will have your new AMI, but it does\nNOT actually deploy those new instances. To make that happen, you should do the following:\n\n1. Issue an API call to one of the old Instances in the ASG to have it leave gracefully. E.g.:\n\n   ```\n   nomad server force-leave -address=<OLD_INSTANCE_IP>:4646\n   ```\n\n1. Once the instance has left the cluster, terminate it:\n\n   ```\n   aws ec2 terminate-instances --instance-ids <OLD_INSTANCE_ID>\n   ```\n\n1. After a minute or two, the ASG should automatically launch a new Instance, with the new AMI, to replace the old one.\n\n1. Wait for the new Instance to boot and join the cluster.\n\n1. 
Repeat these steps for each of the other old Instances in the ASG.\n\nWe will add a script in the future to automate this process (PRs are welcome!).\n\n## What happens if a node crashes?\n\nThere are two ways a Nomad node may go down:\n\n1. The Nomad process may crash. In that case, `systemd` should restart it automatically.\n1. The EC2 Instance running Nomad dies. In that case, the Auto Scaling Group should launch a replacement automatically.\n   Note that in this case, since the Nomad agent did not exit gracefully, and the replacement will have a different ID,\n   you may have to manually clean out the old nodes using the [server force-leave\n   command](https://www.nomadproject.io/docs/commands/server-force-leave.html). We may add a script to do this\n   automatically in the future. For more info, see the [Nomad Outage\n   documentation](https://www.nomadproject.io/guides/outage.html).\n\n## How do you connect load balancers to the Auto Scaling Group (ASG)?\n\nYou can use the [`aws_autoscaling_attachment`](https://www.terraform.io/docs/providers/aws/r/autoscaling_attachment.html) resource.\n\nFor example, if you are using the new application or network load balancers:\n\n```hcl\nresource \"aws_lb_target_group\" \"test\" {\n  // ...\n}\n\n# Create a new Nomad Cluster\nmodule \"nomad\" {\n  source =\"...\"\n  // ...\n}\n\n# Create a new load balancer attachment\nresource \"aws_autoscaling_attachment\" \"asg_attachment_bar\" {\n  autoscaling_group_name = module.nomad.asg_name\n  alb_target_group_arn   = aws_alb_target_group.test.arn\n}\n```\n\nIf you are using a \"classic\" load balancer:\n\n```hcl\n# Create a new load balancer\nresource \"aws_elb\" \"bar\" {\n  // ...\n}\n\n# Create a new Nomad Cluster\nmodule \"nomad\" {\n  source =\"...\"\n  // ...\n}\n\n# Create a new load balancer attachment\nresource \"aws_autoscaling_attachment\" \"asg_attachment_bar\" {\n  autoscaling_group_name = module.nomad.asg_name\n  elb                    = 
aws_elb.bar.id\n}\n```\n\n## Security\n\nHere are some of the main security considerations to keep in mind when using this module:\n\n1. [Encryption in transit](#encryption-in-transit)\n1. [Encryption at rest](#encryption-at-rest)\n1. [Dedicated instances](#dedicated-instances)\n1. [Security groups](#security-groups)\n1. [SSH access](#ssh-access)\n\n### Encryption in transit\n\nNomad can encrypt all of its network traffic. For instructions on enabling network encryption, have a look at the\n[How do you handle encryption documentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad#how-do-you-handle-encryption).\n\n### Encryption at rest\n\nThe EC2 Instances in the cluster store all their data on the root EBS Volume. To enable encryption for the data at\nrest, you must enable encryption in your Nomad AMI. If you're creating the AMI using Packer (e.g. as shown in\nthe [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami)), you need to set the [encrypt_boot\nparameter](https://www.packer.io/docs/builders/amazon-ebs.html#encrypt_boot) to `true`.\n\n### Dedicated instances\n\nIf you wish to use dedicated instances, you can set the `tenancy` parameter to `\"dedicated\"` in this module.\n\n### Security groups\n\nThis module attaches a security group to each EC2 Instance that allows inbound requests as follows:\n\n- **Nomad**: For all the [ports used by Nomad](https://www.nomadproject.io/docs/agent/configuration/index.html#ports),\n  you can use the `allowed_inbound_cidr_blocks` parameter to control the list of\n  [CIDR blocks](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that will be allowed access.\n\n- **SSH**: For the SSH port (default: 22), you can use the `allowed_ssh_cidr_blocks` parameter to control the list of\n  [CIDR blocks](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that will be allowed access.\n\nNote that all the ports mentioned 
above are configurable via the `xxx_port` variables (e.g. `http_port`). See\n[variables.tf](variables.tf) for the full list.\n\n### SSH access\n\nYou can associate an [EC2 Key Pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) with each\nof the EC2 Instances in this cluster by specifying the Key Pair's name in the `ssh_key_name` variable. If you don't\nwant to associate a Key Pair with these servers, set `ssh_key_name` to an empty string.\n\n## What's NOT included in this module?\n\nThis module does NOT handle the following items, which you may want to provide on your own:\n\n- [Consul](#consul)\n- [Monitoring, alerting, log aggregation](#monitoring-alerting-log-aggregation)\n- [VPCs, subnets, route tables](#vpcs-subnets-route-tables)\n- [DNS entries](#dns-entries)\n\n### Consul\n\nThis module assumes you already have Consul deployed in a separate cluster. If you want to run Nomad and Consul on the\nsame cluster, instead of using this module, see the [Deploy Nomad and Consul in the same cluster\ndocumentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/README.md#deploy-nomad-and-consul-in-the-same-cluster).\n\n### Monitoring, alerting, log aggregation\n\nThis module does not include anything for monitoring, alerting, or log aggregation. All ASGs and EC2 Instances come\nwith limited [CloudWatch](https://aws.amazon.com/cloudwatch/) metrics built-in, but beyond that, you will have to\nprovide your own solutions.\n\n### VPCs, subnets, route tables\n\nThis module assumes you've already created your network topology (VPC, subnets, route tables, etc). You will need to\npass in the relevant info about your network topology (e.g. `vpc_id`, `subnet_ids`) as input variables to this\nmodule.\n\n### DNS entries\n\nThis module does not create any DNS entries for Nomad (e.g. in Route 53).\n"
  },
  {
    "path": "modules/nomad-cluster/main.tf",
    "content": "# ----------------------------------------------------------------------------------------------------------------------\n# REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER\n# ----------------------------------------------------------------------------------------------------------------------\nterraform {\n  # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting\n  # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it\n  # forwards compatible with 1.0.x code.\n  required_version = \">= 0.12.26\"\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# CREATE AN AUTO SCALING GROUP (ASG) TO RUN NOMAD\n# ---------------------------------------------------------------------------------------------------------------------\n\nresource \"aws_autoscaling_group\" \"autoscaling_group\" {\n  launch_configuration = aws_launch_configuration.launch_configuration.name\n\n  name                = var.asg_name\n  availability_zones  = var.availability_zones\n  vpc_zone_identifier = var.subnet_ids\n\n  min_size             = var.min_size\n  max_size             = var.max_size\n  desired_capacity     = var.desired_capacity\n  termination_policies = [var.termination_policies]\n\n  health_check_type         = var.health_check_type\n  health_check_grace_period = var.health_check_grace_period\n  wait_for_capacity_timeout = var.wait_for_capacity_timeout\n\n  protect_from_scale_in = var.protect_from_scale_in\n\n  tag {\n    key                 = \"Name\"\n    value               = var.cluster_name\n    propagate_at_launch = true\n  }\n\n  tag {\n    key                 = var.cluster_tag_key\n    value               = var.cluster_tag_value\n    propagate_at_launch = true\n  }\n\n  dynamic \"tag\" {\n    for_each = var.tags\n\n    content {\n      key                 = tag.value[\"key\"]\n    
  value               = tag.value[\"value\"]\n      propagate_at_launch = tag.value[\"propagate_at_launch\"]\n    }\n  }\n\n  lifecycle {\n    # As of AWS Provider 3.x, inline load_balancers and target_group_arns\n    # in an aws_autoscaling_group take precedence over attachment resources.\n    # Since the consul-cluster module does not define any Load Balancers,\n    # it's safe to assume that we will always want to favor an attachment\n    # over these inline properties.\n    #\n    # For further discussion and links to relevant documentation, see\n    # https://github.com/hashicorp/terraform-aws-vault/issues/210\n    ignore_changes = [load_balancers, target_group_arns]\n  }\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# CREATE LAUNCH CONFIGURATION TO DEFINE WHAT RUNS ON EACH INSTANCE IN THE ASG\n# ---------------------------------------------------------------------------------------------------------------------\n\nresource \"aws_launch_configuration\" \"launch_configuration\" {\n  name_prefix   = \"${var.cluster_name}-\"\n  image_id      = var.ami_id\n  instance_type = var.instance_type\n  user_data     = var.user_data\n\n  iam_instance_profile = aws_iam_instance_profile.instance_profile.name\n  key_name             = var.ssh_key_name\n\n  security_groups = concat(\n    [aws_security_group.lc_security_group.id],\n    var.security_groups,\n  )\n  placement_tenancy           = var.tenancy\n  associate_public_ip_address = var.associate_public_ip_address\n\n  ebs_optimized = var.root_volume_ebs_optimized\n\n  root_block_device {\n    volume_type           = var.root_volume_type\n    volume_size           = var.root_volume_size\n    delete_on_termination = var.root_volume_delete_on_termination\n  }\n\n  dynamic \"ebs_block_device\" {\n    for_each = var.ebs_block_devices\n\n    content {\n      device_name           = ebs_block_device.value[\"device_name\"]\n      volume_size          
 = ebs_block_device.value[\"volume_size\"]\n      snapshot_id           = lookup(ebs_block_device.value, \"snapshot_id\", null)\n      iops                  = lookup(ebs_block_device.value, \"iops\", null)\n      encrypted             = lookup(ebs_block_device.value, \"encrypted\", null)\n      delete_on_termination = lookup(ebs_block_device.value, \"delete_on_termination\", null)\n    }\n  }\n\n  # Important note: whenever using a launch configuration with an auto scaling group, you must set\n  # create_before_destroy = true. However, as soon as you set create_before_destroy = true in one resource, you must\n  # also set it in every resource that it depends on, or you'll get an error about cyclic dependencies (especially when\n  # removing resources). For more info, see:\n  #\n  # https://www.terraform.io/docs/providers/aws/r/launch_configuration.html\n  # https://terraform.io/docs/configuration/resources.html\n  lifecycle {\n    create_before_destroy = true\n  }\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# CREATE A SECURITY GROUP TO CONTROL WHAT REQUESTS CAN GO IN AND OUT OF EACH EC2 INSTANCE\n# ---------------------------------------------------------------------------------------------------------------------\n\nresource \"aws_security_group\" \"lc_security_group\" {\n  name_prefix = var.cluster_name\n  description = \"Security group for the ${var.cluster_name} launch configuration\"\n  vpc_id      = var.vpc_id\n\n  # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means\n  # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors\n  # when you try to do a terraform destroy.\n  lifecycle {\n    create_before_destroy = true\n  }\n}\n\nresource \"aws_security_group_rule\" \"allow_ssh_inbound\" {\n  count       = length(var.allowed_ssh_cidr_blocks) > 0 ? 
1 : 0\n  type        = \"ingress\"\n  from_port   = var.ssh_port\n  to_port     = var.ssh_port\n  protocol    = \"tcp\"\n  cidr_blocks = var.allowed_ssh_cidr_blocks\n\n  security_group_id = aws_security_group.lc_security_group.id\n}\n\nresource \"aws_security_group_rule\" \"allow_all_outbound\" {\n  type        = \"egress\"\n  from_port   = 0\n  to_port     = 0\n  protocol    = \"-1\"\n  cidr_blocks = var.allow_outbound_cidr_blocks\n\n  security_group_id = aws_security_group.lc_security_group.id\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# THE INBOUND/OUTBOUND RULES FOR THE SECURITY GROUP COME FROM THE NOMAD-SECURITY-GROUP-RULES MODULE\n# ---------------------------------------------------------------------------------------------------------------------\n\nmodule \"security_group_rules\" {\n  source = \"../nomad-security-group-rules\"\n\n  security_group_id           = aws_security_group.lc_security_group.id\n  allowed_inbound_cidr_blocks = var.allowed_inbound_cidr_blocks\n\n  http_port = var.http_port\n  rpc_port  = var.rpc_port\n  serf_port = var.serf_port\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# ATTACH AN IAM ROLE TO EACH EC2 INSTANCE\n# We can use the IAM role to grant the instance IAM permissions so we can use the AWS CLI without having to figure out\n# how to get our secret AWS access keys onto the box.\n# ---------------------------------------------------------------------------------------------------------------------\n\nresource \"aws_iam_instance_profile\" \"instance_profile\" {\n  name_prefix = var.cluster_name\n  path        = var.instance_profile_path\n  role        = aws_iam_role.instance_role.name\n\n  # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means\n  # everything it depends on, including this resource, must set it 
as well, or you'll get cyclic dependency errors\n  # when you try to do a terraform destroy.\n  lifecycle {\n    create_before_destroy = true\n  }\n}\n\nresource \"aws_iam_role\" \"instance_role\" {\n  name_prefix        = var.cluster_name\n  assume_role_policy = data.aws_iam_policy_document.instance_role.json\n\n  permissions_boundary = var.iam_permissions_boundary\n\n  # aws_iam_instance_profile.instance_profile in this module sets create_before_destroy to true, which means\n  # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors\n  # when you try to do a terraform destroy.\n  lifecycle {\n    create_before_destroy = true\n  }\n}\n\ndata \"aws_iam_policy_document\" \"instance_role\" {\n  statement {\n    effect  = \"Allow\"\n    actions = [\"sts:AssumeRole\"]\n\n    principals {\n      type        = \"Service\"\n      identifiers = [\"ec2.amazonaws.com\"]\n    }\n  }\n}\n"
  },
  {
    "path": "modules/nomad-cluster/outputs.tf",
    "content": "output \"asg_name\" {\n  value = aws_autoscaling_group.autoscaling_group.name\n}\n\noutput \"cluster_tag_key\" {\n  value = var.cluster_tag_key\n}\n\noutput \"cluster_tag_value\" {\n  value = var.cluster_tag_value\n}\n\noutput \"cluster_size\" {\n  value = aws_autoscaling_group.autoscaling_group.desired_capacity\n}\n\noutput \"launch_config_name\" {\n  value = aws_launch_configuration.launch_configuration.name\n}\n\noutput \"iam_instance_profile_arn\" {\n  value = aws_iam_instance_profile.instance_profile.arn\n}\n\noutput \"iam_instance_profile_id\" {\n  value = aws_iam_instance_profile.instance_profile.id\n}\n\noutput \"iam_instance_profile_name\" {\n  value = aws_iam_instance_profile.instance_profile.name\n}\n\noutput \"iam_role_arn\" {\n  value = aws_iam_role.instance_role.arn\n}\n\noutput \"iam_role_id\" {\n  value = aws_iam_role.instance_role.id\n}\n\noutput \"security_group_id\" {\n  value = aws_security_group.lc_security_group.id\n}\n\n"
  },
  {
    "path": "modules/nomad-cluster/variables.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# REQUIRED PARAMETERS\n# You must provide a value for each of these parameters.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"cluster_name\" {\n  description = \"The name of the Nomad cluster (e.g. nomad-servers-stage). This variable is used to namespace all resources created by this module.\"\n  type        = string\n}\n\nvariable \"ami_id\" {\n  description = \"The ID of the AMI to run in this cluster. Should be an AMI that had Nomad installed and configured by the install-nomad module.\"\n  type        = string\n}\n\nvariable \"instance_type\" {\n  description = \"The type of EC2 Instances to run for each node in the cluster (e.g. t2.micro).\"\n  type        = string\n}\n\nvariable \"vpc_id\" {\n  description = \"The ID of the VPC in which to deploy the cluster\"\n  type        = string\n}\n\nvariable \"allowed_inbound_cidr_blocks\" {\n  description = \"A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Nomad\"\n  type        = list(string)\n}\n\nvariable \"user_data\" {\n  description = \"A User Data script to execute while the server is booting. We remmend passing in a bash script that executes the run-nomad script, which should have been installed in the AMI by the install-nomad module.\"\n  type        = string\n}\n\nvariable \"min_size\" {\n  description = \"The minimum number of nodes to have in the cluster. If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5.\"\n  type        = number\n}\n\nvariable \"max_size\" {\n  description = \"The maximum number of nodes to have in the cluster. 
If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5.\"\n  type        = number\n}\n\nvariable \"desired_capacity\" {\n  description = \"The desired number of nodes to have in the cluster. If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5.\"\n  type        = number\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# OPTIONAL PARAMETERS\n# These parameters have reasonable defaults.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"asg_name\" {\n  description = \"The name to use for the Auto Scaling Group\"\n  type        = string\n  default     = \"\"\n}\n\nvariable \"subnet_ids\" {\n  description = \"The subnet IDs into which the EC2 Instances should be deployed. We recommend one subnet ID per node in the cluster_size variable. At least one of var.subnet_ids or var.availability_zones must be non-empty.\"\n  type        = list(string)\n  default     = null\n}\n\nvariable \"availability_zones\" {\n  description = \"The availability zones into which the EC2 Instances should be deployed. We recommend one availability zone per node in the cluster_size variable. At least one of var.subnet_ids or var.availability_zones must be non-empty.\"\n  type        = list(string)\n  default     = null\n}\n\nvariable \"ssh_key_name\" {\n  description = \"The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. 
Set to an empty string to not associate a Key Pair.\"\n  type        = string\n  default     = \"\"\n}\n\nvariable \"allowed_ssh_cidr_blocks\" {\n  description = \"A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow SSH connections\"\n  type        = list(string)\n  default     = []\n}\n\nvariable \"cluster_tag_key\" {\n  description = \"Add a tag with this key and the value var.cluster_tag_value to each Instance in the ASG.\"\n  type        = string\n  default     = \"nomad-servers\"\n}\n\nvariable \"cluster_tag_value\" {\n  description = \"Add a tag with key var.cluster_tag_key and this value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster.\"\n  type        = string\n  default     = \"auto-join\"\n}\n\nvariable \"termination_policies\" {\n  description = \"A list of policies to decide how the instances in the auto scale group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, Default.\"\n  type        = string\n  default     = \"Default\"\n}\n\nvariable \"associate_public_ip_address\" {\n  description = \"If set to true, associate a public IP address with each EC2 Instance in the cluster.\"\n  type        = bool\n  default     = false\n}\n\nvariable \"tenancy\" {\n  description = \"The tenancy of the instance. Must be one of: default or dedicated.\"\n  type        = string\n  default     = \"default\"\n}\n\nvariable \"root_volume_ebs_optimized\" {\n  description = \"If true, the launched EC2 instance will be EBS-optimized.\"\n  type        = bool\n  default     = false\n}\n\nvariable \"root_volume_type\" {\n  description = \"The type of volume. 
Must be one of: standard, gp2, or io1.\"\n  type        = string\n  default     = \"standard\"\n}\n\nvariable \"root_volume_size\" {\n  description = \"The size, in GB, of the root EBS volume.\"\n  type        = number\n  default     = 50\n}\n\nvariable \"root_volume_delete_on_termination\" {\n  description = \"Whether the volume should be destroyed on instance termination.\"\n  default     = true\n  type        = bool\n}\n\nvariable \"wait_for_capacity_timeout\" {\n  description = \"A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. Setting this to '0' causes Terraform to skip all Capacity Waiting behavior.\"\n  type        = string\n  default     = \"10m\"\n}\n\nvariable \"health_check_type\" {\n  description = \"Controls how health checking is done. Must be one of EC2 or ELB.\"\n  type        = string\n  default     = \"EC2\"\n}\n\nvariable \"health_check_grace_period\" {\n  description = \"Time, in seconds, after instance comes into service before checking health.\"\n  type        = number\n  default     = 300\n}\n\nvariable \"instance_profile_path\" {\n  description = \"Path in which to create the IAM instance profile.\"\n  type        = string\n  default     = \"/\"\n}\n\nvariable \"http_port\" {\n  description = \"The port to use for HTTP\"\n  type        = number\n  default     = 4646\n}\n\nvariable \"rpc_port\" {\n  description = \"The port to use for RPC\"\n  type        = number\n  default     = 4647\n}\n\nvariable \"serf_port\" {\n  description = \"The port to use for Serf\"\n  type        = number\n  default     = 4648\n}\n\nvariable \"ssh_port\" {\n  description = \"The port used for SSH connections\"\n  type        = number\n  default     = 22\n}\n\nvariable \"security_groups\" {\n  description = \"Additional security groups to attach to the EC2 instances\"\n  type        = list(string)\n  default     = []\n}\n\nvariable \"tags\" {\n  description = \"List of extra tag blocks added to the autoscaling group 
configuration. Each element in the list is a map containing keys 'key', 'value', and 'propagate_at_launch' mapped to the respective values.\"\n  type = list(object({\n    key                 = string\n    value               = string\n    propagate_at_launch = bool\n  }))\n  default = []\n\n}\n\nvariable \"ebs_block_devices\" {\n  description = \"List of ebs volume definitions for those ebs_volumes that should be added to the instances created with the EC2 launch-configuration. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device.\"\n  # We can't narrow the type down more than \"any\" because if we use list(object(...)), then all the fields in the\n  # object will be required (whereas some, such as encrypted, should be optional), and if we use list(map(...)), all\n  # the values in the map must be of the same type, whereas we need some to be strings, some to be bools, and some to\n  # be ints. So, we have to fall back to just any ugly \"any.\"\n  type    = any\n  default = []\n  # Example:\n  #\n  # default = [\n  #   {\n  #     device_name = \"/dev/xvdh\"\n  #     volume_type = \"gp2\"\n  #     volume_size = 300\n  #     encrypted   = true\n  #   }\n  # ]\n}\n\nvariable \"protect_from_scale_in\" {\n  description = \"(Optional) Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events.\"\n  type        = bool\n  default     = false\n}\n\nvariable \"allow_outbound_cidr_blocks\" {\n  description = \"Allow outbound traffic to these CIDR blocks.\"\n  type        = list(string)\n  default     = [\"0.0.0.0/0\"]\n}\n\nvariable \"iam_permissions_boundary\" {\n  description = \"If set, restricts the created IAM role to the given permissions boundary\"\n  type        = string\n  default     = null\n}\n"
  },
  {
    "path": "modules/nomad-security-group-rules/README.md",
    "content": "# Nomad Security Group Rules Module\n\nThis folder contains a [Terraform](https://www.terraform.io/) module that defines the security group rules used by a\n[Nomad](https://www.nomadproject.io/) cluster to control the traffic that is allowed to go in and out of the cluster.\n\nNormally, you'd get these rules by default if you're using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster), but if\nyou're running Nomad on top of a different cluster, then you can use this module to add the necessary security group\nrules that that cluster. For example, imagine you were using the [consul-cluster\nmodule](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) to run a cluster of\nservers that have both Nomad and Consul on each node:\n\n```hcl\nmodule \"consul_servers\" {\n  source = \"github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0\"\n\n  # This AMI has both Nomad and Consul installed\n  ami_id = \"ami-1234abcd\"\n}\n```\n\nThe `consul-cluster` module will provide the security group rules for Consul, but not for Nomad. To ensure those\nservers have the necessary ports open for using Nomad, you can use this module as follows:\n\n```hcl\nmodule \"security_group_rules\" {\n  source = \"github.com/hashicorp/terraform-aws-nomad//modules/nomad-security-group-rules?ref=v0.0.1\"\n\n  security_group_id = module.consul_servers.security_group_id\n\n  # ... (other params omitted) ...\n}\n```\n\nNote the following parameters:\n\n- `source`: Use this parameter to specify the URL of this module. The double slash (`//`) is intentional\n  and required. Terraform uses it to specify subfolders within a Git repo (see [module\n  sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in\n  this repo. 
That way, instead of using the latest version of this module from the `master` branch, which\n  will change every time you run Terraform, you're using a fixed version of the repo.\n\n- `security_group_id`: Use this parameter to specify the ID of the security group to which the rules in this module\n  should be added.\n\nYou can find the other parameters in [variables.tf](variables.tf).\n\nCheck out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example) for working\t\nsample code.\n"
  },
  {
    "path": "modules/nomad-security-group-rules/main.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A NOMAD CLUSTER\n# ---------------------------------------------------------------------------------------------------------------------\n\n# ----------------------------------------------------------------------------------------------------------------------\n# REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER\n# ----------------------------------------------------------------------------------------------------------------------\nterraform {\n  # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting\n  # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it\n  # forwards compatible with 1.0.x code.\n  required_version = \">= 0.12.26\"\n}\n\nresource \"aws_security_group_rule\" \"allow_http_inbound\" {\n  count       = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0\n  type        = \"ingress\"\n  from_port   = var.http_port\n  to_port     = var.http_port\n  protocol    = \"tcp\"\n  cidr_blocks = var.allowed_inbound_cidr_blocks\n\n  security_group_id = var.security_group_id\n}\n\nresource \"aws_security_group_rule\" \"allow_rpc_inbound\" {\n  count       = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0\n  type        = \"ingress\"\n  from_port   = var.rpc_port\n  to_port     = var.rpc_port\n  protocol    = \"tcp\"\n  cidr_blocks = var.allowed_inbound_cidr_blocks\n\n  security_group_id = var.security_group_id\n}\n\nresource \"aws_security_group_rule\" \"allow_serf_tcp_inbound\" {\n  count       = length(var.allowed_inbound_cidr_blocks) >= 1 ? 
1 : 0\n  type        = \"ingress\"\n  from_port   = var.serf_port\n  to_port     = var.serf_port\n  protocol    = \"tcp\"\n  cidr_blocks = var.allowed_inbound_cidr_blocks\n\n  security_group_id = var.security_group_id\n}\n\nresource \"aws_security_group_rule\" \"allow_serf_udp_inbound\" {\n  count       = length(var.allowed_inbound_cidr_blocks) >= 1 ? 1 : 0\n  type        = \"ingress\"\n  from_port   = var.serf_port\n  to_port     = var.serf_port\n  protocol    = \"udp\"\n  cidr_blocks = var.allowed_inbound_cidr_blocks\n\n  security_group_id = var.security_group_id\n}\n\n"
  },
  {
    "path": "modules/nomad-security-group-rules/variables.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# REQUIRED PARAMETERS\n# You must provide a value for each of these parameters.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"security_group_id\" {\n  description = \"The ID of the security group to which we should add the Nomad security group rules\"\n  type        = string\n}\n\nvariable \"allowed_inbound_cidr_blocks\" {\n  description = \"A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Nomad\"\n  type        = list(string)\n}\n\n# ---------------------------------------------------------------------------------------------------------------------\n# OPTIONAL PARAMETERS\n# These parameters have reasonable defaults.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"http_port\" {\n  description = \"The port to use for HTTP\"\n  type        = number\n  default     = 4646\n}\n\nvariable \"rpc_port\" {\n  description = \"The port to use for RPC\"\n  type        = number\n  default     = 4647\n}\n\nvariable \"serf_port\" {\n  description = \"The port to use for Serf\"\n  type        = number\n  default     = 4648\n}\n\n"
  },
  {
    "path": "modules/run-nomad/README.md",
    "content": "# Nomad Run Script\n\nThis folder contains a script for configuring and running Nomad on an [AWS](https://aws.amazon.com/) server. This\nscript has been tested on the following operating systems:\n\n* Ubuntu 16.04\n* Ubuntu 18.04\n* Amazon Linux 2\n\nThere is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well.\n\n\n\n\n## Quick start\n\nThis script assumes you installed it, plus all of its dependencies (including Nomad itself), using the [install-nomad\nmodule](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad). The default install path is `/opt/nomad/bin`, so to start Nomad in server mode, you\nrun:\n\n```\n/opt/nomad/bin/run-nomad --server --num-servers 3\n```\n\nTo start Nomad in client mode, you run:\n\n```\n/opt/nomad/bin/run-nomad --client\n```\n\nThis will:\n\n1. Generate a Nomad configuration file called `default.hcl` in the Nomad config dir (default: `/opt/nomad/config`).\n   See [Nomad configuration](#nomad-configuration) for details on what this configuration file will contain and how\n   to override it with your own configuration.\n\n1. Generate a [systemd](https://www.freedesktop.org/wiki/Software/systemd/) configuration file called `nomad.service` in the systemd\n   config dir (default: `/etc/supervisor/conf.d`) with a command that will run Nomad:  \n   `nomad agent -config=/opt/nomad/config -data-dir=/opt/nomad/data`.\n\n1. Tell systemd to load the new configuration file, thereby starting Nomad.\n\nWe recommend using the `run-nomad` command as part of [User\nData](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts), so that it executes\nwhen the EC2 Instance is first booting. If you are running Consul on the same server, make sure to use this script\n*after* Consul has booted. 
After running `run-nomad` on that initial boot, the `systemd` configuration\nwill automatically restart Nomad if it crashes or the EC2 instance reboots.\n\nNote that `systemd` logs to its own journal by default.  To view the Nomad logs, run `journalctl -u nomad.service`.  To change\nthe log output location, you can specify the `StandardOutput` and `StandardError` options by using the `--systemd-stdout` and `--systemd-stderr`\noptions.  See the [`systemd.exec` man pages](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=) for available\noptions, but note that the `file:path` option requires [systemd version >= 236](https://stackoverflow.com/a/48052152), which is not provided \nin the base Ubuntu 16.04 and Amazon Linux 2 images.\n\nSee the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) and\n[nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster example) for fully-working sample code.\n\n\n\n\n## Command line Arguments\n\nThe `run-nomad` script accepts the following arguments:\n\n* `server` (optional): If set, run in server mode. At least one of `--server` or `--client` must be set.\n* `client` (optional): If set, run in client mode. At least one of `--server` or `--client` must be set.\n* `num-servers` (optional): The number of servers to expect in the Nomad cluster. Required if `--server` is set.\n* `config-dir` (optional): The path to the Nomad config folder. Default is to take the absolute path of `../config`,\n  relative to the `run-nomad` script itself.\n* `data-dir` (optional): The path to the Nomad config folder. Default is to take the absolute path of `../data`,\n  relative to the `run-nomad` script itself.\n* `systemd-stdout` (optional): The StandardOutput option of the systemd unit. 
If not specified, it will use systemd's default (journal).\n* `systemd-stderr` (optional): The StandardError option of the systemd unit. If not specified, it will use systemd's default (inherit).\n* `user` (optional): The user to run Nomad as. Default is to use the owner of `config-dir`.\n* `use-sudo` (optional): Nomad clients make use of operating system primitives for resource isolation that require\n  elevated (root) permissions (see [the\n  docs](https://www.nomadproject.io/intro/getting-started/running.html) for more info). If you set this flag, Nomad\n  will run with root-level privileges. If you don't, it'll still work, but certain task drivers will not be available.\n  By default, this flag is enabled if `--client` is set and disabled if `--server` is set (server nodes don't need\n  root-level privileges).\n* `skip-nomad-config`: If this flag is set, don't generate a Nomad configuration file. This is useful if you have\n  a custom configuration file and don't want to use any of of the default settings from `run-nomad`.\n\nExample:\n\n```\n/opt/nomad/bin/run-nomad --server --num-servers 3\n```\n\n\n\n\n## Nomad configuration\n\n`run-nomad` generates a configuration file for Nomad called `default.hcl` that tries to figure out reasonable\ndefaults for a Nomad cluster in AWS. 
Check out the [Nomad Configuration Files\ndocumentation](https://www.nomadproject.io/docs/agent/configuration/index.html) for what configuration settings are\navailable.\n\n\n### Default configuration\n\n`run-nomad` sets the following configuration values by default:\n\n* [advertise](https://www.nomadproject.io/docs/agent/configuration/index.html#advertise): All the advertise addresses\n  are set to the Instance's private IP address, as fetched from  \n  [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).\n\n* [bind_addr](https://www.nomadproject.io/docs/agent/configuration/index.html#bind_addr): Set to 0.0.0.0.\n\n* [client](https://www.nomadproject.io/docs/agent/configuration/client.html): This config is only set of `--client` is\n  set.\n\n    * [enabled](https://www.nomadproject.io/docs/agent/configuration/client.html#enabled): `true`.\n\n* [consul](https://www.nomadproject.io/docs/agent/configuration/consul.html): By default, set the Consul address to\n  `127.0.0.1:8500`, with the assumption that the Consul agent is running on the same server.\n\n* [datacenter](https://www.nomadproject.io/docs/agent/configuration/index.html#datacenter): Set to the current\n  availability zone, as fetched from\n  [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).\n\n* [name](https://www.nomadproject.io/docs/agent/configuration/index.html#name): Set to the instance id, as fetched from\n  [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).     
\n\n* [region](https://www.nomadproject.io/docs/agent/configuration/index.html#region): Set to the current AWS region, as\n  fetched from [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).\n\n* [server](https://www.nomadproject.io/docs/agent/configuration/server.html): This config is only set if `--server` is\n  set.\n\n    * [enabled](https://www.nomadproject.io/docs/agent/configuration/server.html#enabled): `true`.\n    * [bootstrap_expect](https://www.nomadproject.io/docs/agent/configuration/server.html#bootstrap_expect): Set to the\n      `--num-servers` parameter.\n\n\n### Overriding the configuration\n\nTo override the default configuration, simply put your own configuration file in the Nomad config folder (default:\n`/opt/nomad/config`), but with a name that comes later in the alphabet than `default.hcl` (e.g.\n`my-custom-config.hcl`). Nomad will load all the `.hcl` configuration files in the config dir and\n[merge them together in alphabetical\norder](https://www.nomadproject.io/docs/agent/configuration/index.html#load-order-and-merging), so that settings in\nfiles that come later in the alphabet will override the earlier ones.\n\nFor example, to override the default `name` setting, you could create a file called `tags.hcl` with the\ncontents:\n\n```hcl\nname = \"my-custom-name\"\n```\n\nIf you want to override *all* the default settings, you can tell `run-nomad` not to generate a default config file\nat all using the `--skip-nomad-config` flag:\n\n```\n/opt/nomad/bin/run-nomad --server --num-servers 3 --skip-nomad-config\n```\n\n\n\n\n## How do you handle encryption?\n\nNomad can encrypt all of its network traffic (see the [encryption docs for\ndetails](https://www.nomadproject.io/docs/agent/encryption.html)), but by default, encryption is not enabled in this\nModule. To enable encryption, you need to do the following:\n\n1. 
[Gossip encryption: provide an encryption key](#gossip-encryption-provide-an-encryption-key)\n1. [RPC encryption: provide TLS certificates](#rpc-encryption-provide-tls-certificates)\n1. [Consul encryption](#consul-encryption)\n\n\n### Gossip encryption: provide an encryption key\n\nTo enable Gossip encryption, you need to provide a 16-byte, Base64-encoded encryption key, which you can generate using\nthe [nomad keygen command](https://www.nomadproject.io/docs/commands/keygen.html). You can put the key in a Nomad\nconfiguration file (e.g. `encryption.hcl`) in the Nomad config dir (default location: `/opt/nomad/config`):\n\n```hcl\nserver {\n  encrypt = \"cg8StVXbQJ0gPvMd9o7yrg==\"\n}\n```\n\n\n### RPC encryption: provide TLS certificates\n\nTo enable RPC encryption, you need to provide the paths to the CA and signing keys ([here is a tutorial on generating\nthese keys](http://russellsimpkins.blogspot.com/2015/10/consul-adding-tls-using-self-signed.html)). You can specify\nthese paths in a Nomad configuration file (e.g. `encryption.hcl`) in the Nomad config dir (default location:\n`/opt/nomad/config`):\n\n```hcl\ntls {\n  # Enable encryption on incoming HTTP and RPC endpoints\n  http = true\n  rpc  = true\n\n  # Verify server hostname for outgoing TLS connections\n  verify_server_hostname = true\n\n  # Specify the CA and signing key paths\n  ca_file   = \"/opt/nomad/tls/certs/ca-bundle.crt\",\n  cert_file = \"/opt/nomad/tls/certs/my.crt\",\n  key_file  = \"/opt/nomad/tls/private/my.key\"\n}\n```\n\n\n### Consul encryption\n\nNote that Nomad relies on Consul, and enabling encryption for Consul requires a separate process. Check out the\n[official Consul encryption docs](https://www.consul.io/docs/agent/encryption.html) and the Consul AWS Module\n[How do you handle encryption\ndocs](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul#how-do-you-handle-encryption)\nfor more info.\n"
  },
  {
    "path": "modules/run-nomad/run-nomad",
    "content": "#!/bin/bash\n# This script is used to configure and run Nomad on an AWS server.\n\nset -e\n\nreadonly NOMAD_CONFIG_FILE=\"default.hcl\"\nreadonly SYSTEMD_CONFIG_PATH=\"/etc/systemd/system/nomad.service\"\n\nreadonly EC2_INSTANCE_METADATA_URL=\"http://169.254.169.254/latest/meta-data\"\nreadonly EC2_INSTANCE_DYNAMIC_DATA_URL=\"http://169.254.169.254/latest/dynamic\"\n\nreadonly SCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nreadonly SCRIPT_NAME=\"$(basename \"$0\")\"\n\nfunction print_usage {\n  echo\n  echo \"Usage: run-nomad [OPTIONS]\"\n  echo\n  echo \"This script is used to configure and run Nomad on an AWS server.\"\n  echo\n  echo \"Options:\"\n  echo\n  echo -e \"  --server\\t\\tIf set, run in server mode. Optional. At least one of --server or --client must be set.\"\n  echo -e \"  --client\\t\\tIf set, run in client mode. Optional. At least one of --server or --client must be set.\"\n  echo -e \"  --num-servers\\t\\tThe number of servers to expect in the Nomad cluster. Required if --server is true.\"\n  echo -e \"  --config-dir\\t\\tThe path to the Nomad config folder. Optional. Default is the absolute path of '../config', relative to this script.\"\n  echo -e \"  --data-dir\\t\\tThe path to the Nomad data folder. Optional. Default is the absolute path of '../data', relative to this script.\"\n  echo -e \"  --bin-dir\\t\\tThe path to the folder with Nomad binary. Optional. Default is the absolute path of the parent folder of this script.\"\n  echo -e \"  --systemd-stdout\\t\\tThe StandardOutput option of the systemd unit.  Optional.  If not configured, uses systemd's default (journal).\"\n  echo -e \"  --systemd-stderr\\t\\tThe StandardError option of the systemd unit.  Optional.  If not configured, uses systemd's default (inherit).\"\n  echo -e \"  --user\\t\\tThe user to run Nomad as. Optional. Default is to use the owner of --config-dir.\"\n  echo -e \"  --use-sudo\\t\\tIf set, run the Nomad agent with sudo. 
By default, sudo is only used if --client is set.\"\n  echo -e \"  --environment\\t\\tA single environment variable in the key/value pair form 'KEY=\\\"val\\\"' to pass to Nomad as environment variable when starting it up. Repeat this option for additional variables. Optional.\"\n  echo -e \"  --skip-nomad-config\\tIf this flag is set, don't generate a Nomad configuration file. Optional. Default is false.\"\n  echo\n  echo \"Example:\"\n  echo\n  echo \"  run-nomad --server --config-dir /custom/path/to/nomad/config\"\n}\n\nfunction log {\n  local readonly level=\"$1\"\n  local readonly message=\"$2\"\n  local readonly timestamp=$(date +\"%Y-%m-%d %H:%M:%S\")\n  >&2 echo -e \"${timestamp} [${level}] [$SCRIPT_NAME] ${message}\"\n}\n\nfunction log_info {\n  local readonly message=\"$1\"\n  log \"INFO\" \"$message\"\n}\n\nfunction log_warn {\n  local readonly message=\"$1\"\n  log \"WARN\" \"$message\"\n}\n\nfunction log_error {\n  local readonly message=\"$1\"\n  log \"ERROR\" \"$message\"\n}\n\n# Based on code from: http://stackoverflow.com/a/16623897/483528\nfunction strip_prefix {\n  local readonly str=\"$1\"\n  local readonly prefix=\"$2\"\n  echo \"${str#$prefix}\"\n}\n\nfunction assert_not_empty {\n  local readonly arg_name=\"$1\"\n  local readonly arg_value=\"$2\"\n\n  if [[ -z \"$arg_value\" ]]; then\n    log_error \"The value for '$arg_name' cannot be empty\"\n    print_usage\n    exit 1\n  fi\n}\n\nfunction split_by_lines {\n  local prefix=\"$1\"\n  shift\n\n  for var in \"$@\"; do\n    echo \"${prefix}${var}\"\n  done\n}\n\nfunction lookup_path_in_instance_metadata {\n  local readonly path=\"$1\"\n  curl --silent --location \"$EC2_INSTANCE_METADATA_URL/$path/\"\n}\n\nfunction lookup_path_in_instance_dynamic_data {\n  local readonly path=\"$1\"\n  curl --silent --location \"$EC2_INSTANCE_DYNAMIC_DATA_URL/$path/\"\n}\n\nfunction get_instance_ip_address {\n  lookup_path_in_instance_metadata \"local-ipv4\"\n}\n\nfunction get_instance_id {\n  
lookup_path_in_instance_metadata \"instance-id\"\n}\n\nfunction get_instance_availability_zone {\n  lookup_path_in_instance_metadata \"placement/availability-zone\"\n}\n\nfunction get_instance_region {\n  lookup_path_in_instance_dynamic_data \"instance-identity/document\" | jq -r \".region\"\n}\n\nfunction assert_is_installed {\n  local readonly name=\"$1\"\n\n  if [[ ! $(command -v ${name}) ]]; then\n    log_error \"The binary '$name' is required by this script but is not installed or in the system's PATH.\"\n    exit 1\n  fi\n}\n\nfunction generate_nomad_config {\n  local readonly server=\"$1\"\n  local readonly client=\"$2\"\n  local readonly num_servers=\"$3\"\n  local readonly config_dir=\"$4\"\n  local readonly user=\"$5\"\n  local readonly config_path=\"$config_dir/$NOMAD_CONFIG_FILE\"\n\n  local instance_id=\"\"\n  local instance_ip_address=\"\"\n  local instance_region=\"\"\n  local instance_availability_zone=\"\"\n\n  instance_id=$(get_instance_id)\n  instance_ip_address=$(get_instance_ip_address)\n  instance_region=$(get_instance_region)\n  availability_zone=$(get_instance_availability_zone)\n\n  local server_config=\"\"\n  if [[ \"$server\" == \"true\" ]]; then\n    server_config=$(cat <<EOF\nserver {\n  enabled = true\n  bootstrap_expect = $num_servers\n}\nEOF\n)\n  fi\n\n  local client_config=\"\"\n  if [[ \"$client\" == \"true\" ]]; then\n    client_config=$(cat <<EOF\nclient {\n  enabled = true\n}\nEOF\n)\n  fi\n\n  log_info \"Creating default Nomad config file in $config_path\"\n  cat > \"$config_path\" <<EOF\ndatacenter = \"$availability_zone\"\nname       = \"$instance_id\"\nregion     = \"$instance_region\"\nbind_addr  = \"0.0.0.0\"\n\nadvertise {\n  http = \"$instance_ip_address\"\n  rpc  = \"$instance_ip_address\"\n  serf = \"$instance_ip_address\"\n}\n\n$client_config\n\n$server_config\n\nconsul {\n  address = \"127.0.0.1:8500\"\n}\nEOF\n  chown \"$user:$user\" \"$config_path\"\n}\n\nfunction generate_systemd_config {\n  local readonly 
systemd_config_path=\"$1\"\n  local readonly nomad_config_dir=\"$2\"\n  local readonly nomad_data_dir=\"$3\"\n  local readonly nomad_bin_dir=\"$4\"\n  local readonly nomad_sytemd_stdout=\"$5\"\n  local readonly nomad_sytemd_stderr=\"$6\"\n  local readonly nomad_user=\"$7\"\n  local readonly use_sudo=\"$8\"\n  shift 8\n  local readonly environment=(\"$@\")\n  local readonly config_path=\"$nomad_config_dir/$NOMAD_CONFIG_FILE\"\n\n  if [[ \"$use_sudo\" == \"true\" ]]; then\n    log_info \"The --use-sudo flag is set, so running Nomad as the root user\"\n    nomad_user=\"root\"\n  fi\n\n  log_info \"Creating systemd config file to run Nomad in $systemd_config_path\"\n\n  local readonly unit_config=$(cat <<EOF\n[Unit]\nDescription=\"HashiCorp Nomad\"\nDocumentation=https://www.nomadproject.io/\nRequires=network-online.target\nAfter=network-online.target\nConditionFileNotEmpty=$config_path\nEOF\n)\n\n  local readonly service_config=$(cat <<EOF\n[Service]\nUser=$nomad_user\nGroup=$nomad_user\nExecStart=$nomad_bin_dir/nomad agent -config $nomad_config_dir -data-dir $nomad_data_dir\nExecReload=/bin/kill --signal HUP \\$MAINPID\nKillMode=process\nRestart=on-failure\nLimitNOFILE=65536\n$(split_by_lines \"Environment=\" \"${environment[@]}\")\n\nEOF\n)\n\n  local log_config=\"\"\n  if [[ ! -z $nomad_sytemd_stdout ]]; then\n    log_config+=\"StandardOutput=$nomad_sytemd_stdout\\n\"\n  fi\n  if [[ ! 
-z $nomad_sytemd_stderr ]]; then\n    log_config+=\"StandardError=$nomad_sytemd_stderr\\n\"\n  fi\n\n  local readonly install_config=$(cat <<EOF\n[Install]\nWantedBy=multi-user.target\nEOF\n)\n\n  echo -e \"$unit_config\" > \"$systemd_config_path\"\n  echo -e \"$service_config\" >> \"$systemd_config_path\"\n  echo -e \"$log_config\" >> \"$systemd_config_path\"\n  echo -e \"$install_config\" >> \"$systemd_config_path\"\n}\n\nfunction start_nomad {\n  log_info \"Reloading systemd config and starting Nomad\"\n\n  sudo systemctl daemon-reload\n  sudo systemctl enable nomad.service\n  sudo systemctl restart nomad.service\n}\n\n# Based on: http://unix.stackexchange.com/a/7732/215969\nfunction get_owner_of_path {\n  local readonly path=\"$1\"\n  ls -ld \"$path\" | awk '{print $3}'\n}\n\nfunction run {\n  local server=\"false\"\n  local client=\"false\"\n  local num_servers=\"\"\n  local config_dir=\"\"\n  local data_dir=\"\"\n  local bin_dir=\"\"\n  local systemd_stdout=\"\"\n  local systemd_stderr=\"\"\n  local user=\"\"\n  local skip_nomad_config=\"false\"\n  local use_sudo=\"\"\n  local environment=()\n  local all_args=()\n\n  while [[ $# > 0 ]]; do\n    local key=\"$1\"\n\n    case \"$key\" in\n      --server)\n        server=\"true\"\n        ;;\n      --client)\n        client=\"true\"\n        ;;\n      --num-servers)\n        num_servers=\"$2\"\n        shift\n        ;;\n      --config-dir)\n        assert_not_empty \"$key\" \"$2\"\n        config_dir=\"$2\"\n        shift\n        ;;\n      --data-dir)\n        assert_not_empty \"$key\" \"$2\"\n        data_dir=\"$2\"\n        shift\n        ;;\n      --bin-dir)\n        assert_not_empty \"$key\" \"$2\"\n        bin_dir=\"$2\"\n        shift\n        ;;\n      --systemd-stdout)\n        assert_not_empty \"$key\" \"$2\"\n        systemd_stdout=\"$2\"\n        shift\n        ;;\n      --systemd-stderr)\n        assert_not_empty \"$key\" \"$2\"\n        systemd_stderr=\"$2\"\n        shift\n        ;;\n      
--user)\n        assert_not_empty \"$key\" \"$2\"\n        user=\"$2\"\n        shift\n        ;;\n      --cluster-tag-key)\n        assert_not_empty \"$key\" \"$2\"\n        cluster_tag_key=\"$2\"\n        shift\n        ;;\n      --cluster-tag-value)\n        assert_not_empty \"$key\" \"$2\"\n        cluster_tag_value=\"$2\"\n        shift\n        ;;\n      --skip-nomad-config)\n        skip_nomad_config=\"true\"\n        ;;\n      --use-sudo)\n        use_sudo=\"true\"\n        ;;\n      --environment)\n        assert_not_empty \"$key\" \"$2\"\n        environment+=(\"$2\")\n        shift\n        ;;\n      --help)\n        print_usage\n        exit\n        ;;\n      *)\n        log_error \"Unrecognized argument: $key\"\n        print_usage\n        exit 1\n        ;;\n    esac\n\n    shift\n  done\n\n  if [[ \"$server\" == \"true\" ]]; then\n    assert_not_empty \"--num-servers\" \"$num_servers\"\n  fi\n\n  if [[ \"$server\" == \"false\" && \"$client\" == \"false\" ]]; then\n    log_error \"At least one of --server or --client must be set\"\n    exit 1\n  fi\n\n  if [[ -z \"$use_sudo\" ]]; then\n    if [[ \"$client\" == \"true\" ]]; then\n      use_sudo=\"true\"\n    else\n      use_sudo=\"false\"\n    fi\n  fi\n\n  assert_is_installed \"systemctl\"\n  assert_is_installed \"aws\"\n  assert_is_installed \"curl\"\n  assert_is_installed \"jq\"\n\n  if [[ -z \"$config_dir\" ]]; then\n    config_dir=$(cd \"$SCRIPT_DIR/../config\" && pwd)\n  fi\n\n  if [[ -z \"$data_dir\" ]]; then\n    data_dir=$(cd \"$SCRIPT_DIR/../data\" && pwd)\n  fi\n\n  if [[ -z \"$bin_dir\" ]]; then\n    bin_dir=$(cd \"$SCRIPT_DIR/../bin\" && pwd)\n  fi\n\n  # If $systemd_stdout and/or $systemd_stderr are empty, we leave them empty so that generate_systemd_config will use systemd's defaults (journal and inherit, respectively)\n\n  if [[ -z \"$user\" ]]; then\n    user=$(get_owner_of_path \"$config_dir\")\n  fi\n\n  if [[ \"$skip_nomad_config\" == \"true\" ]]; then\n    log_info \"The 
--skip-nomad-config flag is set, so will not generate a default Nomad config file.\"\n  else\n    generate_nomad_config \"$server\" \"$client\" \"$num_servers\" \"$config_dir\" \"$user\"\n  fi\n\n  generate_systemd_config \"$SYSTEMD_CONFIG_PATH\" \"$config_dir\" \"$data_dir\" \"$bin_dir\" \"$systemd_stdout\" \"$systemd_stderr\" \"$user\" \"$use_sudo\" \"${environment[@]}\"\n  start_nomad\n}\n\nrun \"$@\"\n"
  },
  {
    "path": "outputs.tf",
    "content": "output \"num_nomad_servers\" {\n  value = module.servers.cluster_size\n}\n\noutput \"asg_name_servers\" {\n  value = module.servers.asg_name\n}\n\noutput \"launch_config_name_servers\" {\n  value = module.servers.launch_config_name\n}\n\noutput \"iam_role_arn_servers\" {\n  value = module.servers.iam_role_arn\n}\n\noutput \"iam_role_id_servers\" {\n  value = module.servers.iam_role_id\n}\n\noutput \"security_group_id_servers\" {\n  value = module.servers.security_group_id\n}\n\noutput \"num_clients\" {\n  value = module.clients.cluster_size\n}\n\noutput \"asg_name_clients\" {\n  value = module.clients.asg_name\n}\n\noutput \"launch_config_name_clients\" {\n  value = module.clients.launch_config_name\n}\n\noutput \"iam_role_arn_clients\" {\n  value = module.clients.iam_role_arn\n}\n\noutput \"iam_role_id_clients\" {\n  value = module.clients.iam_role_id\n}\n\noutput \"security_group_id_clients\" {\n  value = module.clients.security_group_id\n}\n\noutput \"aws_region\" {\n  value = data.aws_region.current.name\n}\n\noutput \"nomad_servers_cluster_tag_key\" {\n  value = module.servers.cluster_tag_key\n}\n\noutput \"nomad_servers_cluster_tag_value\" {\n  value = module.servers.cluster_tag_value\n}\n\n"
  },
  {
    "path": "test/README.md",
    "content": "# Tests\n\nThis folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). \nMost of these are \"integration tests\" that deploy real infrastructure using Terraform and verify that infrastructure \nworks as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest).  \n\n\n\n## WARNING WARNING WARNING\n\n**Note #1**: Many of these tests create real resources in an AWS account and then try to clean those resources up at \nthe end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of \nthe resources you create and take extra care to clean everything up when you're done!\n\n**Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run!\n\n**Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a\ndefault test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup\ntasks from running. Therefore, we set an overlying long timeout to make sure all tests have enough time to finish and \nclean up.\n\n\n\n## Running the tests\n\n### Prerequisites\n\n- Install the latest version of [Go](https://golang.org/).\n- Install [Terraform](https://www.terraform.io/downloads.html).\n- Configure your AWS credentials using one of the [options supported by the AWS \n  SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to\n  set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.\n\n\n### Run all the tests\n\n```bash\ncd test\ngo test -v -timeout 60m\n```\n\n\n### Run a specific test\n\nTo run a specific test called `TestFoo`:\n\n```bash\ncd test\ngo test -v -timeout 60m -run TestFoo\n```\n\n\n  \n"
  },
  {
    "path": "test/aws_helpers.go",
    "content": "package test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/gruntwork-io/terratest/modules/aws\"\n)\n\n// Get the IP address from a randomly chosen EC2 Instance in an Auto Scaling Group of the given name in the given\n// region\nfunc getIpAddressOfAsgInstance(t *testing.T, asgName string, awsRegion string) string {\n\tinstanceIds := aws.GetInstanceIdsForAsg(t, asgName, awsRegion)\n\n\tif len(instanceIds) == 0 {\n\t\tt.Fatalf(\"Could not find any instances in ASG %s in %s\", asgName, awsRegion)\n\t}\n\n\treturn aws.GetPublicIpOfEc2Instance(t, instanceIds[0], awsRegion)\n}\n\nfunc getRandomRegion(t *testing.T) string {\n\treturn aws.GetRandomRegion(t, nil, []string{\"eu-north-1\", \"ap-northeast-3\"})\n}\n"
  },
  {
    "path": "test/go.mod",
    "content": "module github.com/gruntwork-io/terraform-aws-nomad/test\n\ngo 1.13\n\nrequire github.com/gruntwork-io/terratest v0.37.6\n"
  },
  {
    "path": "test/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=\ncloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/azure-sdk-for-go v46.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=\ngithub.com/Azure/go-autorest/autorest v0.9.3/go.mod 
h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=\ngithub.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=\ngithub.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=\ngithub.com/Azure/go-autorest/autorest v0.11.5/go.mod h1:foo3aIXRQ90zFve3r0QiDsrjGDUwWhKl0ZOQy1CT14k=\ngithub.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=\ngithub.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=\ngithub.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=\ngithub.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=\ngithub.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=\ngithub.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=\ngithub.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=\ngithub.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=\ngithub.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=\ngithub.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=\ngithub.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=\ngithub.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=\ngithub.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=\ngithub.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=\ngithub.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=\ngithub.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=\ngithub.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=\ngithub.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=\ngithub.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=\ngithub.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=\ngithub.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=\ngithub.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0=\ngithub.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=\ngithub.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=\ngithub.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=\ngithub.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=\ngithub.com/aws/aws-sdk-go v1.27.1 h1:MXnqY6SlWySaZAqNnXThOvjRFdiiOuKtC6i7baFdNdU=\ngithub.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=\ngithub.com/aws/aws-sdk-go v1.38.28 h1:2ZzgEupSluR18ClxUnHwXKyuADheZpMblXRAsHqF0tI=\ngithub.com/aws/aws-sdk-go v1.38.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=\ngithub.com/blang/semver v3.5.0+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=\ngithub.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=\ngithub.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=\ngithub.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=\ngithub.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=\ngithub.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=\ngithub.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=\ngithub.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=\ngithub.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=\ngithub.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=\ngithub.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=\ngithub.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=\ngithub.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=\ngithub.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=\ngithub.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/cli v0.0.0-20200109221225-a4f60165b7a3/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=\ngithub.com/docker/go-connections 
v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=\ngithub.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=\ngithub.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s=\ngithub.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=\ngithub.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=\ngithub.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=\ngithub.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=\ngithub.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=\ngithub.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=\ngithub.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik=\ngithub.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=\ngithub.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=\ngithub.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=\ngithub.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=\ngithub.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=\ngithub.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=\ngithub.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=\ngithub.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=\ngithub.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=\ngithub.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=\ngithub.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=\ngithub.com/go-openapi/jsonpointer v0.19.3/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=\ngithub.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=\ngithub.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=\ngithub.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=\ngithub.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=\ngithub.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=\ngithub.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=\ngithub.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=\ngithub.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=\ngithub.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=\ngithub.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=\ngithub.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=\ngithub.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-containerregistry v0.0.0-20200110202235-f4fb41bf00a3/go.mod h1:2wIuQute9+hhWqvL3vEI7YB0EKluF4WcPzI1eAliazk=\ngithub.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=\ngithub.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 
v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=\ngithub.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=\ngithub.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=\ngithub.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=\ngithub.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=\ngithub.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=\ngithub.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=\ngithub.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=\ngithub.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro=\ngithub.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=\ngithub.com/gruntwork-io/terratest v0.37.6 h1:wrmqMImrrIvjGs6CBmmByqvwA6t0Wc3Zo2ohEIptPXM=\ngithub.com/gruntwork-io/terratest v0.37.6/go.mod h1:CSHpZNJdqYQ+TUrigM100jcahRUV5X6w7K2kZJ8iylY=\ngithub.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-multierror v1.1.0 
h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=\ngithub.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=\ngithub.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw=\ngithub.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=\ngithub.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=\ngithub.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=\ngithub.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=\ngithub.com/hashicorp/terraform-json v0.12.0 h1:8czPgEEWWPROStjkWPUnTQDXmpmZPlkQAwYYLETaTvw=\ngithub.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=\ngithub.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=\ngithub.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o=\ngithub.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=\ngithub.com/jmespath/go-jmespath 
v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=\ngithub.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=\ngithub.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=\ngithub.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=\ngithub.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=\ngithub.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=\ngithub.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=\ngithub.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=\ngithub.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=\ngithub.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=\ngithub.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=\ngithub.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=\ngithub.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=\ngithub.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=\ngithub.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=\ngithub.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=\ngithub.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=\ngithub.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=\ngithub.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=\ngithub.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=\ngithub.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=\ngithub.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=\ngithub.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=\ngithub.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=\ngithub.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=\ngithub.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=\ngithub.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=\ngithub.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/opencontainers/go-digest v1.0.0-rc1/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=\ngithub.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=\ngithub.com/oracle/oci-go-sdk v7.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=\ngithub.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=\ngithub.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=\ngithub.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=\ngithub.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=\ngithub.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=\ngithub.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=\ngithub.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=\ngithub.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=\ngithub.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=\ngithub.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=\ngithub.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=\ngithub.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=\ngithub.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=\ngithub.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=\ngithub.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=\ngithub.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=\ngithub.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=\ngithub.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=\ngithub.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=\ngithub.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=\ngithub.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/tmc/grpc-websocket-proxy 
v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=\ngithub.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=\ngithub.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=\ngithub.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=\ngithub.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=\ngithub.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=\ngithub.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44238/go.mod h1:JwQJCMWpUDqjZrB5jpw0f5VbN7U95zxFy1ZDpoEarGo=\ngithub.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=\ngithub.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=\ngithub.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=\ngithub.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=\ngithub.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8=\ngithub.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=\ngo.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=\ngo.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.uber.org/atomic v1.3.2/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp 
v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191205215504-7b8c8591a921/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20201110201400-7099162a900a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=\ngonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=\ngonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine 
v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=\ngoogle.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=\ngoogle.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/gcfg.v1 v1.2.0/go.mod 
h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=\ngopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=\ngopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=\ngopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nk8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=\nk8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU=\nk8s.io/api v0.19.3/go.mod 
h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=\nk8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=\nk8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=\nk8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=\nk8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=\nk8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=\nk8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg=\nk8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM=\nk8s.io/cloud-provider v0.17.0/go.mod h1:Ze4c3w2C0bRsjkBUoHpFi+qWe3ob1wI2/7cUn+YQIDE=\nk8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=\nk8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=\nk8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls=\nk8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=\nk8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=\nk8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=\nk8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=\nk8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=\nk8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=\nk8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=\nk8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=\nk8s.io/kube-openapi 
v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=\nk8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8=\nk8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=\nk8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=\nk8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=\nmodernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=\nmodernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=\nmodernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=\nmodernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=\nmodernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nsigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=\nsigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=\nsigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=\nsigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=\nsigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=\nsigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=\nsigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=\nsigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=\n"
  },
  {
    "path": "test/nomad_cluster_ssh_test.go",
    "content": "package test\n\nimport \"testing\"\n\nfunc TestNomadClusterSSHAccess(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterSSHTest(t, \"amazon-linux-2-amd64-ami\", \"ec2-user\")\n}\n"
  },
  {
    "path": "test/nomad_consul_cluster_colocated_test.go",
    "content": "package test\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNomadConsulClusterColocatedWithUbuntu18Ami(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterColocatedTest(t, \"ubuntu18-ami\")\n}\n\nfunc TestNomadConsulClusterColocatedWithUbuntu16Ami(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterColocatedTest(t, \"ubuntu16-ami\")\n}\n\nfunc TestNomadConsulClusterColocatedAmazonLinux2Amd64Ami(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterColocatedTest(t, \"amazon-linux-2-amd64-ami\")\n}\n"
  },
  {
    "path": "test/nomad_consul_cluster_separate_test.go",
    "content": "package test\n\nimport \"testing\"\n\nfunc TestNomadConsulClusterSeparateWith18UbuntuAmi(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterSeparateTest(t, \"ubuntu18-ami\")\n}\n\nfunc TestNomadConsulClusterSeparateWithUbuntu16Ami(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterSeparateTest(t, \"ubuntu16-ami\")\n}\n\nfunc TestNomadConsulClusterSeparateAmazonLinux2Ami(t *testing.T) {\n\tt.Parallel()\n\trunNomadClusterSeparateTest(t, \"amazon-linux-2-amd64-ami\")\n}\n"
  },
  {
    "path": "test/nomad_helpers.go",
    "content": "package test\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gruntwork-io/terratest/modules/aws\"\n\t\"github.com/gruntwork-io/terratest/modules/logger\"\n\t\"github.com/gruntwork-io/terratest/modules/random\"\n\t\"github.com/gruntwork-io/terratest/modules/retry\"\n\t\"github.com/gruntwork-io/terratest/modules/ssh\"\n\t\"github.com/gruntwork-io/terratest/modules/terraform\"\n\t\"github.com/gruntwork-io/terratest/modules/test-structure\"\n)\n\nconst REPO_ROOT = \"../\"\n\nconst ENV_VAR_AWS_REGION = \"AWS_DEFAULT_REGION\"\n\nconst VAR_AMI_ID = \"ami_id\"\nconst VAR_SSH_CIDR = \"allowed_ssh_cidr_blocks\"\n\nconst CLUSTER_COLOCATED_EXAMPLE_PATH = \"/\"\nconst CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_NAME = \"cluster_name\"\nconst CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_TAG_VALUE = \"cluster_tag_value\"\nconst CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_SERVERS = \"num_servers\"\nconst CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_CLIENTS = \"num_clients\"\nconst CLUSTER_COLOCATED_EXAMPLE_OUTPUT_SERVER_ASG_NAME = \"asg_name_servers\"\n\nconst CLUSTER_SEPARATE_EXAMPLE_PATH = \"examples/nomad-consul-separate-cluster\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME = \"nomad_cluster_name\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME = \"consul_cluster_name\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS = \"num_nomad_servers\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS = \"num_consul_servers\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS = \"num_nomad_clients\"\nconst CLUSTER_SEPARATE_EXAMPLE_VAR_SSH_KEY_NAME = \"ssh_key_name\"\nconst CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME = \"asg_name_nomad_servers\"\n\nconst DEFAULT_NUM_SERVERS = 3\nconst DEFAULT_NUM_CLIENTS = 6\n\nconst SAVED_AWS_REGION = \"AwsRegion\"\nconst SAVED_UNIQUE_ID = \"UniqueId\"\n\n// Test the Nomad/Consul colocated cluster example by:\n//\n// 1. 
Copying the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the\n//    state files overwriting each other.\n// 2. Building the AMI in the nomad-consul-ami example with the given build name\n// 3. Deploying that AMI using the example Terraform code\n// 4. Checking that the Nomad cluster comes up within a reasonable time period and can respond to requests\nfunc runNomadClusterColocatedTest(t *testing.T, packerBuildName string) {\n\texamplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, CLUSTER_COLOCATED_EXAMPLE_PATH)\n\n\tdefer test_structure.RunTestStage(t, \"teardown\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tterraform.Destroy(t, terraformOptions)\n\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\taws.DeleteAmi(t, awsRegion, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"setup_ami\", func() {\n\t\tawsRegion := getRandomRegion(t)\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)\n\n\t\tuniqueId := random.UniqueId()\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)\n\n\t\tamiId := buildAmi(t, filepath.Join(examplesDir, \"examples\", \"nomad-consul-ami\", \"nomad-consul.json\"), packerBuildName, awsRegion, uniqueId)\n\t\ttest_structure.SaveAmiId(t, examplesDir, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"deploy\", func() {\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\tuniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)\n\n\t\tterraformOptions := &terraform.Options{\n\t\t\tTerraformDir: examplesDir,\n\t\t\tVars: map[string]interface{}{\n\t\t\t\tCLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_NAME:      fmt.Sprintf(\"test-%s\", uniqueId),\n\t\t\t\tCLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_TAG_VALUE: 
fmt.Sprintf(\"auto-join-%s\", uniqueId),\n\t\t\t\tCLUSTER_COLOCATED_EXAMPLE_VAR_NUM_SERVERS:       DEFAULT_NUM_SERVERS,\n\t\t\t\tCLUSTER_COLOCATED_EXAMPLE_VAR_NUM_CLIENTS:       DEFAULT_NUM_CLIENTS,\n\t\t\t\tVAR_AMI_ID: amiId,\n\t\t\t},\n\t\t\tEnvVars: map[string]string{\n\t\t\t\tENV_VAR_AWS_REGION: awsRegion,\n\t\t\t},\n\t\t}\n\t\ttest_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)\n\n\t\tterraform.InitAndApply(t, terraformOptions)\n\t})\n\n\ttest_structure.RunTestStage(t, \"validate\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\n\t\tcheckNomadClusterIsWorking(t, CLUSTER_COLOCATED_EXAMPLE_OUTPUT_SERVER_ASG_NAME, terraformOptions, awsRegion)\n\t})\n}\n\n// Test the Nomad/Consul separate clusters example by:\n//\n// 1. Copying the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the\n//    state files overwriting each other.\n// 2. Building the AMI in the nomad-consul-ami example with the given build name\n// 3. Deploying that AMI using the example Terraform code\n// 4. 
Checking that the Nomad cluster comes up within a reasonable time period and can respond to requests\nfunc runNomadClusterSeparateTest(t *testing.T, packerBuildName string) {\n\texamplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, \"/\")\n\n\tdefer test_structure.RunTestStage(t, \"teardown\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tterraform.Destroy(t, terraformOptions)\n\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\taws.DeleteAmi(t, awsRegion, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"setup_ami\", func() {\n\t\tawsRegion := getRandomRegion(t)\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)\n\n\t\tuniqueId := random.UniqueId()\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)\n\n\t\tamiId := buildAmi(t, filepath.Join(examplesDir, \"examples\", \"nomad-consul-ami\", \"nomad-consul.json\"), packerBuildName, awsRegion, uniqueId)\n\t\ttest_structure.SaveAmiId(t, examplesDir, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"deploy\", func() {\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\tuniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)\n\n\t\tterraformOptions := &terraform.Options{\n\t\t\tTerraformDir: filepath.Join(examplesDir, \"examples\", \"nomad-consul-separate-cluster\"),\n\t\t\tVars: map[string]interface{}{\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME:  fmt.Sprintf(\"test-%s\", uniqueId),\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf(\"test-%s\", uniqueId),\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS:   DEFAULT_NUM_SERVERS,\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS:  DEFAULT_NUM_SERVERS,\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS:   
DEFAULT_NUM_CLIENTS,\n\t\t\t\tVAR_AMI_ID: amiId,\n\t\t\t},\n\t\t\tEnvVars: map[string]string{\n\t\t\t\tENV_VAR_AWS_REGION: awsRegion,\n\t\t\t},\n\t\t}\n\t\ttest_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)\n\n\t\tterraform.InitAndApply(t, terraformOptions)\n\t})\n\n\ttest_structure.RunTestStage(t, \"validate\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\n\t\tcheckNomadClusterIsWorking(t, CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME, terraformOptions, awsRegion)\n\t})\n}\n\n// Check that the Nomad cluster comes up within a reasonable time period and can respond to requests\nfunc checkNomadClusterIsWorking(t *testing.T, asgNameOutputVar string, terraformOptions *terraform.Options, awsRegion string) {\n\tasgName := rawTerraformOutput(t, terraformOptions, asgNameOutputVar)\n\tnodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion)\n\ttestNomadCluster(t, nodeIpAddress)\n}\n\nfunc checkNomadClusterSshAccess(t *testing.T, asgNameOutputVar string, terraformOptions *terraform.Options, awsRegion string, keyPair *ssh.KeyPair, sshUsername string) {\n\tasgName := rawTerraformOutput(t, terraformOptions, asgNameOutputVar)\n\tnodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion)\n\n\tpublicHost := ssh.Host{\n\t\tHostname:    nodeIpAddress,\n\t\tSshKeyPair:  keyPair,\n\t\tSshUserName: sshUsername,\n\t}\n\n\ttestSshAccess(t, publicHost, true)\n}\n\nfunc testSshAccess(t *testing.T, publicHost ssh.Host, ssh_access bool) {\n\t// Check basic SSH to the instance\n\t// SSH access might fail, if none is configured - this is expected.\n\tresponse, err := retry.DoWithRetryE(t, \"SSH to public host\", 30, 5*time.Second, func() (string, error) {\n\t\texpectedText := fmt.Sprintf(\"Hello, %s\", publicHost.Hostname)\n\t\tcommand := fmt.Sprintf(\"echo -n '%s'\", expectedText)\n\t\tactualText, err := ssh.CheckSshCommandE(t, 
publicHost, command)\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif strings.TrimSpace(actualText) != expectedText {\n\t\t\treturn \"\", fmt.Errorf(\"Expected SSH command to return '%s' but got '%s'\", expectedText, actualText)\n\t\t}\n\n\t\treturn \"SSH access was successful\", nil\n\t})\n\n\t// No SSH access results in an error.\n\tif err != nil && !ssh_access {\n\t\tlogger.Logf(t, \"Nomad cluster is properly deployed without SSH access: %s\", response)\n\t\treturn\n\t}\n\tif err == nil && !ssh_access {\n\t\tlogger.Logf(t, \"Nomad cluster is NOT properly deployed without SSH access: %s\", response)\n\t\tt.Fatal(\"No SSH access configured, but nevertheless SSH access was successful.\")\n\t}\n\n\t// SSH access should result in no error.\n\tif err == nil && ssh_access {\n\t\tlogger.Logf(t, \"Nomad cluster is properly deployed with SSH access: %s\", response)\n\t\treturn\n\t}\n\tif err != nil && ssh_access {\n\t\tlogger.Logf(t, \"Nomad cluster is NOT properly deployed with SSH access: %s\", response)\n\t\tt.Fatal(\"SSH access configured, but SSH test was unsuccessful.\")\n\t}\n\n\tt.Fatal(\"Something went wrong. This part should never be reached.\")\n}\n\n// Use a Nomad client to connect to the given node and use it to verify that:\n//\n// 1. The Nomad cluster has deployed\n// 2. The cluster has the expected number of server nodes\n// 2. 
The cluster has the expected number of client nodes\nfunc testNomadCluster(t *testing.T, nodeIpAddress string) {\n\tmaxRetries := 90\n\tsleepBetweenRetries := 10 * time.Second\n\n\tresponse := retry.DoWithRetry(t, \"Check Nomad cluster has expected number of servers and clients\", maxRetries, sleepBetweenRetries, func() (string, error) {\n\t\tclients, err := callNomadApi(t, nodeIpAddress, \"v1/nodes\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(clients) != DEFAULT_NUM_CLIENTS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d clients, but found %d\", DEFAULT_NUM_CLIENTS, len(clients))\n\t\t}\n\n\t\tservers, err := callNomadApi(t, nodeIpAddress, \"v1/status/peers\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif len(servers) != DEFAULT_NUM_SERVERS {\n\t\t\treturn \"\", fmt.Errorf(\"Expected the cluster to have %d servers, but found %d\", DEFAULT_NUM_SERVERS, len(servers))\n\t\t}\n\n\t\treturn fmt.Sprintf(\"Got back expected number of clients (%d) and servers (%d)\", len(clients), len(servers)), nil\n\t})\n\n\tlogger.Logf(t, \"Nomad cluster is properly deployed: %s\", response)\n}\n\n// A quick, hacky way to call the Nomad HTTP API: https://www.nomadproject.io/docs/http/index.html\nfunc callNomadApi(t *testing.T, nodeIpAddress string, path string) ([]interface{}, error) {\n\turl := fmt.Sprintf(\"http://%s:4646/%s\", nodeIpAddress, path)\n\tlogger.Logf(t, \"Making an HTTP GET to URL %s\", url)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Logf(t, \"Response from Nomad for URL %s: %s\", url, string(body))\n\n\tresult := []interface{}{}\n\tif err := json.Unmarshal(body, &result); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}\n\nfunc runNomadClusterSSHTest(t *testing.T, packerBuildName string, ssh_username string) {\n\texamplesDir := 
test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, \"/\")\n\n\tdefer test_structure.RunTestStage(t, \"teardown\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tterraform.Destroy(t, terraformOptions)\n\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\taws.DeleteAmi(t, awsRegion, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"setup_ami\", func() {\n\t\tawsRegion := getRandomRegion(t)\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)\n\n\t\tuniqueId := random.UniqueId()\n\t\ttest_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)\n\n\t\tamiId := buildAmi(t, filepath.Join(examplesDir, \"examples\", \"nomad-consul-ami\", \"nomad-consul.json\"), packerBuildName, awsRegion, uniqueId)\n\t\ttest_structure.SaveAmiId(t, examplesDir, amiId)\n\t})\n\n\ttest_structure.RunTestStage(t, \"deploy\", func() {\n\t\tamiId := test_structure.LoadAmiId(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\tuniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)\n\n\t\tterraformOptions := &terraform.Options{\n\t\t\tTerraformDir: filepath.Join(examplesDir, \"examples\", \"nomad-consul-separate-cluster\"),\n\t\t\tVars: map[string]interface{}{\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME:  fmt.Sprintf(\"test-%s\", uniqueId),\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf(\"test-%s\", uniqueId),\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS:   DEFAULT_NUM_SERVERS,\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS:  DEFAULT_NUM_SERVERS,\n\t\t\t\tCLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS:   DEFAULT_NUM_CLIENTS,\n\t\t\t\tVAR_AMI_ID: amiId,\n\t\t\t},\n\t\t\tEnvVars: map[string]string{\n\t\t\t\tENV_VAR_AWS_REGION: awsRegion,\n\t\t\t},\n\t\t}\n\n\t\tkeyPairName := fmt.Sprintf(\"terratest-onetime-key-%s\", 
uniqueId)\n\t\tkeyPair := aws.CreateAndImportEC2KeyPair(t, awsRegion, keyPairName)\n\t\tterraformOptions.Vars[CLUSTER_SEPARATE_EXAMPLE_VAR_SSH_KEY_NAME] = keyPairName\n\t\ttest_structure.SaveEc2KeyPair(t, examplesDir, keyPair)\n\n\t\ttest_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)\n\n\t\tterraform.InitAndApply(t, terraformOptions)\n\t})\n\n\ttest_structure.RunTestStage(t, \"validate\", func() {\n\t\tterraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)\n\t\tawsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)\n\t\tkeyPair := test_structure.LoadEc2KeyPair(t, examplesDir)\n\t\tcheckNomadClusterSshAccess(t, CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME, terraformOptions, awsRegion, keyPair.KeyPair, ssh_username)\n\t})\n}\n"
  },
  {
    "path": "test/terratest_helpers.go",
    "content": "package test\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/gruntwork-io/terratest/modules/packer\"\n\t\"github.com/gruntwork-io/terratest/modules/terraform\"\n)\n\nconst CONSUL_AMI_TEMPLATE_VAR_REGION = \"aws_region\"\nconst CONSUL_AMI_TEMPLATE_VAR_AMI_PREFIX = \"ami_name_prefix\"\n\n// Use Packer to build the AMI in the given packer template, with the given build name, and return the AMI's ID\nfunc buildAmi(t *testing.T, packerTemplatePath string, packerBuildName string, awsRegion string, uniqueId string) string {\n\toptions := &packer.Options{\n\t\tTemplate: packerTemplatePath,\n\t\tOnly:     packerBuildName,\n\t\tVars: map[string]string{\n\t\t\tCONSUL_AMI_TEMPLATE_VAR_REGION:     awsRegion,\n\t\t\tCONSUL_AMI_TEMPLATE_VAR_AMI_PREFIX: fmt.Sprintf(\"nomad-consul-%s\", uniqueId),\n\t\t},\n\t}\n\n\treturn packer.BuildAmi(t, options)\n}\n\n// Recent terraform version changed the behavior on terraform output.\n// Values now contain quotations marks, if terraform output is called with `-raw` option.\n// - https://github.com/gruntwork-io/terratest/issues/766\nfunc rawTerraformOutput(t *testing.T, terraformOptions *terraform.Options, outputVariableName string) string {\n\treturn strings.Trim(terraform.Output(t, terraformOptions, outputVariableName), \"\\\"\")\n}\n"
  },
  {
    "path": "variables.tf",
    "content": "# ---------------------------------------------------------------------------------------------------------------------\n# ENVIRONMENT VARIABLES\n# Define these secrets as environment variables\n# ---------------------------------------------------------------------------------------------------------------------\n\n# AWS_ACCESS_KEY_ID\n# AWS_SECRET_ACCESS_KEY\n# AWS_DEFAULT_REGION\n\n# ---------------------------------------------------------------------------------------------------------------------\n# REQUIRED PARAMETERS\n# You must provide a value for each of these parameters.\n# ---------------------------------------------------------------------------------------------------------------------\n\n# None\n\n# ---------------------------------------------------------------------------------------------------------------------\n# OPTIONAL PARAMETERS\n# These parameters have reasonable defaults.\n# ---------------------------------------------------------------------------------------------------------------------\n\nvariable \"ami_id\" {\n  description = \"The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/nomad-consul-ami/nomad-consul.json. If no AMI is specified, the template will 'just work' by using the example public AMIs. WARNING! 
Do not use the example AMIs in a production setting!\"\n  type        = string\n  default     = null\n}\n\nvariable \"cluster_name\" {\n  description = \"What to name the cluster and all of its associated resources\"\n  type        = string\n  default     = \"nomad-example\"\n}\n\nvariable \"server_instance_type\" {\n  description = \"What kind of instance type to use for the nomad servers\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\nvariable \"instance_type\" {\n  description = \"What kind of instance type to use for the nomad clients\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\nvariable \"num_servers\" {\n  description = \"The number of server nodes to deploy. We strongly recommend using 3 or 5.\"\n  type        = number\n  default     = 3\n}\n\nvariable \"num_clients\" {\n  description = \"The number of client nodes to deploy. You can deploy as many as you need to run your jobs.\"\n  type        = number\n  default     = 6\n}\n\nvariable \"cluster_tag_key\" {\n  description = \"The tag the EC2 Instances will look for to automatically discover each other and form a cluster.\"\n  type        = string\n  default     = \"nomad-servers\"\n}\n\nvariable \"cluster_tag_value\" {\n  description = \"Add a tag with key var.cluster_tag_key and this value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster.\"\n  type        = string\n  default     = \"auto-join\"\n}\n\nvariable \"ssh_key_name\" {\n  description = \"The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair.\"\n  type        = string\n  default     = \"\"\n}\n\nvariable \"vpc_id\" {\n  description = \"The ID of the VPC in which the nodes will be deployed.  Uses default VPC if not supplied.\"\n  type        = string\n  default     = \"\"\n}\n\n"
  }
]