Repository: hashicorp/terraform-aws-nomad Branch: master Commit: 938e206a921e Files: 55 Total size: 253.2 KB Directory structure: gitextract_d1drx6lm/ ├── .circleci/ │ └── config.yml ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ └── feature_request.md │ └── pull_request_template.md ├── .gitignore ├── .pre-commit-config.yaml ├── CODEOWNERS ├── LICENSE ├── NOTICE ├── README.md ├── _ci/ │ ├── publish-amis-in-new-account.md │ └── publish-amis.sh ├── core-concepts.md ├── examples/ │ ├── nomad-consul-ami/ │ │ ├── README.md │ │ ├── nomad-consul-docker.json │ │ ├── nomad-consul.json │ │ ├── setup_amazon-linux-2.sh │ │ ├── setup_nomad_consul.sh │ │ └── setup_ubuntu.sh │ ├── nomad-consul-separate-cluster/ │ │ ├── README.md │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── user-data-consul-server.sh │ │ ├── user-data-nomad-client.sh │ │ ├── user-data-nomad-server.sh │ │ └── variables.tf │ ├── nomad-examples-helper/ │ │ ├── README.md │ │ ├── example.nomad │ │ └── nomad-examples-helper.sh │ └── root-example/ │ ├── README.md │ ├── user-data-client.sh │ └── user-data-server.sh ├── main.tf ├── modules/ │ ├── install-nomad/ │ │ ├── README.md │ │ └── install-nomad │ ├── nomad-cluster/ │ │ ├── README.md │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── nomad-security-group-rules/ │ │ ├── README.md │ │ ├── main.tf │ │ └── variables.tf │ └── run-nomad/ │ ├── README.md │ └── run-nomad ├── outputs.tf ├── test/ │ ├── README.md │ ├── aws_helpers.go │ ├── go.mod │ ├── go.sum │ ├── nomad_cluster_ssh_test.go │ ├── nomad_consul_cluster_colocated_test.go │ ├── nomad_consul_cluster_separate_test.go │ ├── nomad_helpers.go │ └── terratest_helpers.go └── variables.tf ================================================ FILE CONTENTS ================================================ ================================================ FILE: .circleci/config.yml ================================================ defaults: &defaults docker: - image: 
087285199408.dkr.ecr.us-east-1.amazonaws.com/circle-ci-test-image-base:go1.16-tf1.0-tg31.1-pck1.7 version: 2 jobs: test: <<: *defaults steps: - checkout - run: # Fail the build if the pre-commit hooks don't pass. Note: if you run $ pre-commit install locally within this repo, these hooks will # execute automatically every time before you commit, ensuring the build never fails at this step! name: run pre-commit hooks command: | pip install pre-commit==1.21.0 cfgv==2.0.1 pre-commit install pre-commit run --all-files - run: name: create log directory command: mkdir -p /tmp/logs - run: name: run tests command: run-go-tests --path test --timeout 2h | tee /tmp/logs/all.log no_output_timeout: 3600s - store_artifacts: path: /tmp/logs - store_test_results: path: /tmp/logs deploy: <<: *defaults steps: - checkout - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV - run: sudo -E gruntwork-install --module-name "aws-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.29.0" - run: sudo -E gruntwork-install --module-name "git-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.29.0" - run: sudo -E gruntwork-install --module-name "build-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "v0.29.0" # We generally only want to build AMIs on new releases, but when we are setting up AMIs in a new account for the # first time, we want to build the AMIs but NOT run automated tests, since those tests will fail without an existing # AMI already in the AWS Account. 
- run: _ci/publish-amis.sh "ubuntu16-ami" - run: _ci/publish-amis.sh "ubuntu18-ami" - run: _ci/publish-amis.sh "amazon-linux-2-amd64-ami" - run: _ci/publish-amis.sh "amazon-linux-2-arm64-ami" workflows: version: 2 build-and-test: jobs: - test: filters: branches: ignore: publish-amis - deploy: requires: - test filters: branches: only: publish-amis tags: only: /^v.*/ nightly-test: triggers: - schedule: cron: "0 0 * * *" filters: branches: only: - master jobs: - test ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: Bug report about: Create a bug report to help us improve. title: '' labels: bug assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used. ```hcl // paste code snippets here ``` **Expected behavior** A clear and concise description of what you expected to happen. **Nice to have** - [ ] Terminal output - [ ] Screenshots **Additional context** Add any other context about the problem here. ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Submit a feature request for this repo. title: '' labels: enhancement assignees: '' --- **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. 
================================================ FILE: .github/pull_request_template.md ================================================ ## Description ### Documentation ## TODOs Please ensure all of these TODOs are completed before asking for a review. - [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`. - [ ] Update the docs. - [ ] Keep the changes backward compatible where possible. - [ ] Run the pre-commit checks successfully. - [ ] Run the relevant tests successfully. - [ ] Ensure any 3rd party code adheres with our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if its not applicable. ## Related Issues ================================================ FILE: .gitignore ================================================ # Terraform files .terraform terraform.tfstate terraform.tfvars *.tfstate* # OS X files .history .DS_Store # IntelliJ files .idea_modules *.iml *.iws *.ipr .idea/ build/ */build/ out/ # Go best practices dictate that libraries should not include the vendor directory vendor # Folder used to store temporary test data by Terratest .test-data # Ignore Terraform lock files, as we want to test the Terraform code in these repos with the latest provider # versions. .terraform.lock.hcl ================================================ FILE: .pre-commit-config.yaml ================================================ repos: - repo: https://github.com/gruntwork-io/pre-commit rev: v0.1.10 hooks: - id: terraform-fmt - id: gofmt ================================================ FILE: CODEOWNERS ================================================ * @robmorgan @Etiene @anouarchattouna ================================================ FILE: LICENSE ================================================ Copyright (c) 2017 HashiCorp, Inc. 
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: NOTICE ================================================ terraform-aws-nomad Copyright 2017 Gruntwork, Inc. This product includes software developed at Gruntwork (http://www.gruntwork.io/). ================================================ FILE: README.md ================================================ # DISCLAIMER: This is no longer supported. Moving forward in the future this repository will be no longer supported and eventually lead to deprecation. Please use our latest versions of our products moving forward or alternatively you may fork the repository to continue use and development for your personal/business use. --- # Nomad AWS Module ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D1.0.0-blue.svg) This repo contains a set of modules for deploying a [Nomad](https://www.nomadproject.io/) cluster on [AWS](https://aws.amazon.com/) using [Terraform](https://www.terraform.io/). 
Nomad is a distributed, highly-available data-center aware scheduler. A Nomad cluster typically includes a small number of server nodes, which are responsible for being part of the [consensus protocol](https://www.nomadproject.io/docs/internals/consensus.html), and a larger number of client nodes, which are used for running jobs. ![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture.png) ## Features * Deploy server nodes for managing jobs and client nodes running jobs * Supports colocated clusters and separate clusters * Least privilege security group rules for servers * Auto scaling and Auto healing ## Learn This repo was created by [Gruntwork](https://www.gruntwork.io?ref=repo_aws_nomad), and follows the same patterns as [the Gruntwork Infrastructure as Code Library](https://gruntwork.io/infrastructure-as-code-library/), a collection of reusable, battle-tested, production ready infrastructure code. You can read [How to use the Gruntwork Infrastructure as Code Library](https://gruntwork.io/guides/foundations/how-to-use-gruntwork-infrastructure-as-code-library/) for an overview of how to use modules maintained by Gruntwork! ### Core concepts * [Nomad Use Cases](https://www.nomadproject.io/intro/use-cases.html): overview of various use cases that Nomad is optimized for. * [Nomad Guides](https://www.nomadproject.io/guides/index.html): official guide on how to configure and setup Nomad clusters as well as how to use Nomad to schedule services on to the workers. * [Nomad Security](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster#security): overview of how to secure your Nomad clusters. ### Repo organization * [modules](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules. 
* [examples](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples): This folder contains working examples of how to use the submodules. * [test](https://github.com/hashicorp/terraform-aws-nomad/tree/master/test): Automated tests for the modules and examples. * [root](https://github.com/hashicorp/terraform-aws-nomad/tree/master): The root folder is *an example* of how to use the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) module to deploy a [Nomad](https://www.nomadproject.io/) cluster in [AWS](https://aws.amazon.com/). The Terraform Registry requires the root of every repo to contain Terraform code, so we've put one of the examples there. This example is great for learning and experimenting, but for production use, please use the underlying modules in the [modules folder](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules) directly. ## Deploy ### Non-production deployment (quick start for learning) If you just want to try this repo out for experimenting and learning, check out the following resources: * [examples folder](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage). ### Production deployment If you want to deploy this repo in production, check out the following resources: * [Nomad Production Setup Guide](https://www.nomadproject.io/guides/install/production/index.html): detailed guide covering how to setup a production deployment of Nomad. 
## Manage ### Day-to-day operations * [How to deploy Nomad and Consul in the same cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/core-concepts.md#deploy-nomad-and-consul-in-the-same-cluster) * [How to deploy Nomad and Consul in separate clusters](https://github.com/hashicorp/terraform-aws-nomad/tree/master/core-concepts.md#deploy-nomad-and-consul-in-separate-clusters) * [How to connect to the Nomad cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-connect-to-the-nomad-cluster) * [What happens if a node crashes](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#what-happens-if-a-node-crashes) * [How to connect load balancers to the ASG](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-connect-load-balancers-to-the-auto-scaling-group-asg) ### Major changes * [How to upgrade a Nomad cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster/README.md#how-do-you-roll-out-updates) ## Who created this Module? These modules were created by [Gruntwork](http://www.gruntwork.io/?ref=repo_aws_nomad), in partnership with HashiCorp, in 2017 and maintained through 2021. They were deprecated in 2022, see the top of the README for details. ## License Please see [LICENSE](https://github.com/hashicorp/terraform-aws-nomad/tree/master/LICENSE) for details on how the code in this repo is licensed. Copyright © 2019 [Gruntwork](https://www.gruntwork.io?ref=repo_aws_nomad), Inc. ================================================ FILE: _ci/publish-amis-in-new-account.md ================================================ # How to Publish AMIs in a New Account See the [canonical page](https://github.com/hashicorp/terraform-aws-consul/blob/master/_ci/publish-amis-in-new-account.md) in the [Consul AWS Module](https://github.com/hashicorp/terraform-aws-consul) repo. 
================================================ FILE: _ci/publish-amis.sh ================================================ #!/bin/bash # # Build the example AMI, copy it to all AWS regions, and make all AMIs public. # # This script is meant to be run in a CircleCI job. # set -e readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly PACKER_TEMPLATE_PATH="$SCRIPT_DIR/../examples/nomad-consul-ami/nomad-consul.json" readonly PACKER_TEMPLATE_DEFAULT_REGION="us-east-1" readonly AMI_PROPERTIES_FILE="/tmp/ami.properties" # In CircleCI, every build populates the branch name in CIRCLE_BRANCH except builds triggered by a new tag, for which # the CIRCLE_BRANCH env var is empty. We assume tags are only issued against the master branch. readonly BRANCH_NAME="${CIRCLE_BRANCH:-master}" readonly PACKER_BUILD_NAME="$1" if [[ -z "$PACKER_BUILD_NAME" ]]; then echo "ERROR: You must pass in the Packer build name as the first argument to this function." exit 1 fi if [[ -z "$PUBLISH_AMI_AWS_ACCESS_KEY_ID" || -z "$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY" ]]; then echo "The PUBLISH_AMI_AWS_ACCESS_KEY_ID and PUBLISH_AMI_AWS_SECRET_ACCESS_KEY environment variables must be set to the AWS credentials to use to publish the AMIs." exit 1 fi echo "Checking out branch $BRANCH_NAME to make sure we do all work in a branch and not in detached HEAD state" git checkout "$BRANCH_NAME" # We publish the AMIs to a different AWS account, so set those credentials export AWS_ACCESS_KEY_ID="$PUBLISH_AMI_AWS_ACCESS_KEY_ID" export AWS_SECRET_ACCESS_KEY="$PUBLISH_AMI_AWS_SECRET_ACCESS_KEY" # Build the example AMI. WARNING! In a production setting, you should build your own AMI to ensure it has exactly the # configuration you want. We build this example AMI solely to make initial use of this Module as easy as possible. 
build-packer-artifact \ --packer-template-path "$PACKER_TEMPLATE_PATH" \ --build-name "$PACKER_BUILD_NAME" \ --output-properties-file "$AMI_PROPERTIES_FILE" # Copy the AMI to all regions and make it public in each source "$AMI_PROPERTIES_FILE" publish-ami \ --all-regions \ --source-ami-id "$ARTIFACT_ID" \ --source-ami-region "$PACKER_TEMPLATE_DEFAULT_REGION" ================================================ FILE: core-concepts.md ================================================ # Background To run a production Nomad cluster, you need to deploy a small number of server nodes (typically 3), which are responsible for being part of the [consensus protocol](https://www.nomadproject.io/docs/internals/consensus.html), and a larger number of client nodes, which are used for running jobs. You must also have a [Consul](https://www.consul.io/) cluster deployed (see the [Consul AWS Module](https://github.com/hashicorp/terraform-aws-consul)) in one of the following configurations: 1. [Deploy Nomad and Consul in the same cluster](#deploy-nomad-and-consul-in-the-same-cluster) 1. [Deploy Nomad and Consul in separate clusters](#deploy-nomad-and-consul-in-separate-clusters) ## Deploy Nomad and Consul in the same cluster 1. Use the [install-consul module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) from the Consul AWS Module and the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) from this Module in a Packer template to create an AMI with Consul and Nomad. If you are just experimenting with this Module, you may find it more convenient to use one of our official public AMIs: - [Latest Ubuntu 16 AMIs](https://github.com/hashicorp/terraform-aws-nomad/tree/master/_docs/ubuntu16-ami-list.md). - [Latest Amazon Linux AMIs](https://github.com/hashicorp/terraform-aws-nomad/tree/master/_docs/amazon-linux-ami-list.md). **WARNING! Do NOT use these AMIs in your production setup. 
In production, you should build your own AMIs in your own AWS account.** 1. Deploy a small number of server nodes (typically, 3) using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster). Execute the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) and the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--server` flag in both scripts. 1. Deploy as many client nodes as you need using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). Execute the [run-consul script](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul) and the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--client` flag in both scripts. Check out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example) for working sample code. ## Deploy Nomad and Consul in separate clusters 1. Deploy a standalone Consul cluster by following the instructions in the [Consul AWS Module](https://github.com/hashicorp/terraform-aws-consul). 1. Use the scripts from the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) in a Packer template to create a Nomad AMI. 1. Deploy a small number of server nodes (typically, 3) using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). Execute the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--server` flag. You will need to configure each node with the connection details for your standalone Consul cluster. 1. 
Deploy as many client nodes as you need using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster). Execute the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) on each node during boot, setting the `--client` flag. Check out the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) for working sample code. ================================================ FILE: examples/nomad-consul-ami/README.md ================================================ # Nomad and Consul AMI This folder shows an example of how to use the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) from this Module and the [install-consul module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/install-consul) from the Consul AWS Module with [Packer](https://www.packer.io/) to create [Amazon Machine Images (AMIs)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that have Nomad and Consul installed on top of: 1. Ubuntu 16.04 1. Ubuntu 18.04 1. Amazon Linux 2 These AMIs will have [Consul](https://www.consul.io/) and [Nomad](https://www.nomadproject.io/) installed and configured to automatically join a cluster during boot-up. To see how to deploy this AMI, check out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example). For more info on Nomad installation and configuration, check out the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) documentation. ## Quick start To build the Nomad and Consul AMI: 1. `git clone` this repo to your computer. 1. Install [Packer](https://www.packer.io/). 1. 
Configure your AWS credentials using one of the [options supported by the AWS SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. 1. Update the `variables` section of the `nomad-consul.json` Packer template to configure the AWS region and Nomad version you wish to use. 1. Run `packer build nomad-consul.json`. When the build finishes, it will output the IDs of the new AMIs. To see how to deploy one of these AMIs, check out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example). ## Creating your own Packer template for production usage When creating your own Packer template for production usage, you can copy the example in this folder more or less exactly, except for one change: we recommend replacing the `file` provisioner with a call to `git clone` in the `shell` provisioner. Instead of: ```json { "provisioners": [{ "type": "file", "source": "{{template_dir}}/../../../terraform-aws-nomad", "destination": "/tmp" },{ "type": "shell", "inline": [ "/tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version {{user `nomad_version`}}" ], "pause_before": "30s" }] } ``` Your code should look more like this: ```json { "provisioners": [{ "type": "shell", "inline": [ "git clone --branch <VERSION> https://github.com/hashicorp/terraform-aws-nomad.git /tmp/terraform-aws-nomad", "/tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version {{user `nomad_version`}}" ], "pause_before": "30s" }] } ``` You should replace `<VERSION>` in the code above with the version of this module that you want to use (see the [Releases Page](../../releases) for all available versions). That's because for production usage, you should always use a fixed, known version of this Module, downloaded from the official Git repo.
On the other hand, when you're just experimenting with the Module, it's OK to use a local checkout of the Module, uploaded from your own computer. ================================================ FILE: examples/nomad-consul-ami/nomad-consul-docker.json ================================================ { "min_packer_version": "0.12.0", "variables": { "aws_region": "us-east-1", "nomad_version": "1.1.1", "consul_module_version": "v0.10.1", "consul_version": "1.9.6", "ami_name_prefix": "nomad-consul" }, "builders": [ { "name": "ubuntu18-ami", "ami_name": "{{user `ami_name_prefix`}}-docker-ubuntu18-{{isotime | clean_resource_name}}", "ami_description": "An example of how to build an Ubuntu 18.04 AMI that has Nomad, Consul and Docker", "instance_type": "t2.micro", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "099720109477" ], "most_recent": true }, "ssh_username": "ubuntu" }, { "name": "ubuntu16-ami", "ami_name": "{{user `ami_name_prefix`}}-docker-ubuntu16-{{isotime | clean_resource_name}}", "ami_description": "An Ubuntu 16.04 AMI that has Nomad, Consul and Docker installed.", "instance_type": "t2.micro", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "*ubuntu-xenial-16.04-amd64-server-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "099720109477" ], "most_recent": true }, "ssh_username": "ubuntu" }, { "ami_name": "{{user `ami_name_prefix`}}-docker-amazon-linux-2-amd64-{{isotime | clean_resource_name}}", "ami_description": "An Amazon Linux 2 x86_64 AMI that has Nomad, Consul and Docker installed.", "instance_type": "t2.micro", "name": "amazon-linux-2-amd64-ami", "region": 
"{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "*amzn2-ami-hvm-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "amazon" ], "most_recent": true }, "ssh_username": "ec2-user" }, { "ami_name": "{{user `ami_name_prefix`}}-docker-amazon-linux-2-arm64-{{isotime | clean_resource_name}}", "ami_description": "An Amazon Linux 2 ARM64 AMI that has Nomad, Consul and Docker installed.", "instance_type": "t4g.micro", "name": "amazon-linux-2-arm64-ami", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "arm64", "name": "*amzn2-ami-hvm-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "amazon" ], "most_recent": true }, "ssh_username": "ec2-user" } ], "provisioners": [ { "type": "shell", "inline": ["mkdir -p /tmp/terraform-aws-nomad/modules"] }, { "type": "shell", "script": "{{template_dir}}/setup_ubuntu.sh", "only": [ "ubuntu16-ami", "ubuntu18-ami" ] }, { "type": "shell", "script": "{{template_dir}}/setup_amazon-linux-2.sh", "only": [ "amazon-linux-2-amd64-ami", "amazon-linux-2-arm64-ami" ] }, { "type": "file", "source": "{{template_dir}}/../../modules/", "destination": "/tmp/terraform-aws-nomad/modules", "pause_before": "30s" }, { "type": "shell", "environment_vars": [ "NOMAD_VERSION={{user `nomad_version`}}", "CONSUL_VERSION={{user `consul_version`}}", "CONSUL_MODULE_VERSION={{user `consul_module_version`}}" ], "script": "{{template_dir}}/setup_nomad_consul.sh" } ] } ================================================ FILE: examples/nomad-consul-ami/nomad-consul.json ================================================ { "min_packer_version": "0.12.0", "variables": { "aws_region": "us-east-1", "nomad_version": "1.1.1", "consul_module_version": "v0.10.1", "consul_version": "1.9.6", "ami_name_prefix": "nomad-consul" }, 
"builders": [ { "name": "ubuntu18-ami", "ami_name": "{{user `ami_name_prefix`}}-ubuntu18-{{isotime | clean_resource_name}}", "ami_description": "An example of how to build an Ubuntu 18.04 AMI that has Nomad and Consul installed", "instance_type": "t2.micro", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "099720109477" ], "most_recent": true }, "ssh_username": "ubuntu" }, { "ami_name": "{{user `ami_name_prefix`}}-ubuntu-{{isotime | clean_resource_name}}", "ami_description": "An Ubuntu 16.04 AMI that has Nomad and Consul installed.", "instance_type": "t2.micro", "name": "ubuntu16-ami", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "*ubuntu-xenial-16.04-amd64-server-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "099720109477" ], "most_recent": true }, "ssh_username": "ubuntu" }, { "ami_name": "{{user `ami_name_prefix`}}-amazon-linux-2-amd64-{{isotime | clean_resource_name}}", "ami_description": "An Amazon Linux 2 x86_64 AMI that has Nomad and Consul installed.", "instance_type": "t2.micro", "name": "amazon-linux-2-amd64-ami", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "x86_64", "name": "*amzn2-ami-hvm-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "amazon" ], "most_recent": true }, "ssh_username": "ec2-user" }, { "ami_name": "{{user `ami_name_prefix`}}-amazon-linux-2-arm64-{{isotime | clean_resource_name}}", "ami_description": "An Amazon Linux 2 ARM64 AMI that has Nomad and Consul installed.", "instance_type": "t4g.micro", "name": 
"amazon-linux-2-arm64-ami", "region": "{{user `aws_region`}}", "type": "amazon-ebs", "source_ami_filter": { "filters": { "virtualization-type": "hvm", "architecture": "arm64", "name": "*amzn2-ami-hvm-*", "block-device-mapping.volume-type": "gp2", "root-device-type": "ebs" }, "owners": [ "amazon" ], "most_recent": true }, "ssh_username": "ec2-user" } ], "provisioners": [ { "type": "shell", "inline": [ "sudo apt-get install -y git" ], "only": [ "ubuntu16-ami", "ubuntu18-ami" ] }, { "type": "shell", "inline": [ "sudo yum install -y git" ], "only": [ "amazon-linux-2-amd64-ami", "amazon-linux-2-arm64-ami" ] }, { "type": "shell", "inline": ["mkdir -p /tmp/terraform-aws-nomad"], "pause_before": "30s" }, { "type": "file", "source": "{{template_dir}}/../../", "destination": "/tmp/terraform-aws-nomad" }, { "type": "shell", "environment_vars": [ "NOMAD_VERSION={{user `nomad_version`}}", "CONSUL_VERSION={{user `consul_version`}}", "CONSUL_MODULE_VERSION={{user `consul_module_version`}}" ], "script": "{{template_dir}}/setup_nomad_consul.sh" } ] } ================================================ FILE: examples/nomad-consul-ami/setup_amazon-linux-2.sh ================================================ #!/bin/sh set -e SCRIPT=`basename "$0"` echo "[INFO] [${SCRIPT}] Setup git" sudo yum install -y git echo "[INFO] [${SCRIPT}] Setup docker" sudo yum install -y docker sudo systemctl enable docker sudo systemctl start docker sudo usermod -a -G docker ec2-user ================================================ FILE: examples/nomad-consul-ami/setup_nomad_consul.sh ================================================ #!/bin/sh set -e # Environment variables are set by packer /tmp/terraform-aws-nomad/modules/install-nomad/install-nomad --version "${NOMAD_VERSION}" git clone --branch "${CONSUL_MODULE_VERSION}" https://github.com/hashicorp/terraform-aws-consul.git /tmp/terraform-aws-consul /tmp/terraform-aws-consul/modules/install-consul/install-consul --version "${CONSUL_VERSION}" 
================================================ FILE: examples/nomad-consul-ami/setup_ubuntu.sh ================================================ #!/bin/sh set -e SCRIPT=`basename "$0"` # NOTE: git is required, but it should already be preinstalled on Ubuntu 16.0 #echo "[INFO] [${SCRIPT}] Setup git" #sudo apt install -y git # Using Docker CE directly provided by Docker echo "[INFO] [${SCRIPT}] Setup docker" cd /tmp/ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" sudo apt-get update apt-cache policy docker-ce sudo apt-get install -y docker-ce sudo usermod -a -G docker ubuntu ================================================ FILE: examples/nomad-consul-separate-cluster/README.md ================================================ # Nomad and Consul Separate Clusters Example This folder shows an example of Terraform code to deploy a [Nomad](https://www.nomadproject.io/) cluster that connects to a separate [Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/) (if you want to run Nomad and Consul in the same clusters, see the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) instead). 
The Nomad cluster consists of two Auto Scaling Groups (ASGs): one with a small number of Nomad server nodes, which are responsible for being part of the [consensus quorum](https://www.nomadproject.io/docs/internals/consensus.html), and one with a larger number of Nomad client nodes, which are used to run jobs: ![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture-nomad-consul-separate.png) You will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that has Nomad and Consul installed, which you can do using the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami)). For more info on how the Nomad cluster works, check out the [nomad-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) documentation. ## Quick start To deploy a Nomad Cluster: 1. `git clone` this repo to your computer. 1. Optional: build a Nomad and Consul AMI. See the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) documentation for instructions. Make sure to note down the ID of the AMI. 1. Install [Terraform](https://www.terraform.io/). 1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT recommended for production use. 1. Run `terraform init`. 1. Run `terraform apply`. 1. 
Run the [nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh) to print out the IP addresses of the Nomad servers and some example commands you can run to interact with the cluster: `../nomad-examples-helper/nomad-examples-helper.sh`. ================================================ FILE: examples/nomad-consul-separate-cluster/main.tf ================================================ # --------------------------------------------------------------------------------------------------------------------- # DEPLOY A NOMAD CLUSTER AND A SEPARATE CONSUL CLUSTER IN AWS # These templates show an example of how to use the nomad-cluster module to deploy a Nomad cluster in AWS. This cluster # connects to Consul running in a separate cluster. # # We deploy two Auto Scaling Groups (ASGs) for Nomad: one with a small number of Nomad server nodes and one with a # larger number of Nomad client nodes. Note that these templates assume that the AMI you provide via the # nomad_ami_id input variable is built from the examples/nomad-consul-ami/nomad-consul.json Packer template. # # We also deploy one ASG for Consul which has a small number of Consul server nodes. Note that these templates assume # that the AMI you provide via the consul_ami_id input variable is built from the examples/consul-ami/consul.json # Packer template in the Consul AWS Module. # --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER # ---------------------------------------------------------------------------------------------------------------------- terraform { # This module is now only being tested with Terraform 1.0.x. 
However, to make upgrading easier, we are setting # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it # forwards compatible with 1.0.x code. required_version = ">= 0.12.26" } # --------------------------------------------------------------------------------------------------------------------- # AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI # This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer # template at /examples/nomad-consul-ami upon every new release. The Terraform data source below automatically looks up # the latest AMI so that a simple "terraform apply" will just work without the user needing to manually build an AMI and # fill in the right value. # # !! WARNING !! These exmaple AMIs are meant only convenience when initially testing this repo. Do NOT use these example # AMIs in a production setting because it is important that you consciously think through the configuration you want # in your own production AMI. # # NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. See # /_ci/publish-amis-in-new-account.md for more information. # --------------------------------------------------------------------------------------------------------------------- data "aws_ami" "nomad_consul" { most_recent = true # If we change the AWS Account in which test are run, update this value. 
owners = ["562637147889"] filter { name = "virtualization-type" values = ["hvm"] } filter { name = "is-public" values = ["true"] } filter { name = "name" values = ["nomad-consul-ubuntu-*"] } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE NOMAD SERVER NODES # --------------------------------------------------------------------------------------------------------------------- module "nomad_servers" { # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you # to a specific version of the modules, such as the following example: # source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.1.0" source = "../../modules/nomad-cluster" cluster_name = "${var.nomad_cluster_name}-server" instance_type = "t2.micro" # You should typically use a fixed size of 3 or 5 for your Nomad server cluster min_size = var.num_nomad_servers max_size = var.num_nomad_servers desired_capacity = var.num_nomad_servers ami_id = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id user_data = data.template_file.user_data_nomad_server.rendered vpc_id = data.aws_vpc.default.id subnet_ids = data.aws_subnet_ids.default.ids # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. allowed_ssh_cidr_blocks = ["0.0.0.0/0"] allowed_inbound_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = var.ssh_key_name } # --------------------------------------------------------------------------------------------------------------------- # ATTACH IAM POLICIES FOR CONSUL # To allow our server Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from # the Consul AWS Module's consul-iam-policies module. 
# --------------------------------------------------------------------------------------------------------------------- module "consul_iam_policies_servers" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0" iam_role_id = module.nomad_servers.iam_role_id } # --------------------------------------------------------------------------------------------------------------------- # THE USER DATA SCRIPT THAT WILL RUN ON EACH NOMAD SERVER NODE WHEN IT'S BOOTING # This script will configure and start Nomad # --------------------------------------------------------------------------------------------------------------------- data "template_file" "user_data_nomad_server" { template = file("${path.module}/user-data-nomad-server.sh") vars = { num_servers = var.num_nomad_servers cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.consul_cluster_name } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE CONSUL SERVER NODES # --------------------------------------------------------------------------------------------------------------------- module "consul_servers" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0" cluster_name = "${var.consul_cluster_name}-server" cluster_size = var.num_consul_servers instance_type = "t2.micro" # The EC2 Instances will use these tags to automatically discover each other and form a cluster cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.consul_cluster_name ami_id = var.ami_id == null ? 
data.aws_ami.nomad_consul.image_id : var.ami_id user_data = data.template_file.user_data_consul_server.rendered vpc_id = data.aws_vpc.default.id subnet_ids = data.aws_subnet_ids.default.ids # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. allowed_ssh_cidr_blocks = ["0.0.0.0/0"] allowed_inbound_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = var.ssh_key_name } # --------------------------------------------------------------------------------------------------------------------- # THE USER DATA SCRIPT THAT WILL RUN ON EACH CONSUL SERVER EC2 INSTANCE WHEN IT'S BOOTING # This script will configure and start Consul # --------------------------------------------------------------------------------------------------------------------- data "template_file" "user_data_consul_server" { template = file("${path.module}/user-data-consul-server.sh") vars = { cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.consul_cluster_name } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE NOMAD CLIENT NODES # --------------------------------------------------------------------------------------------------------------------- module "nomad_clients" { # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you # to a specific version of the modules, such as the following example: # source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1" source = "../../modules/nomad-cluster" cluster_name = "${var.nomad_cluster_name}-client" instance_type = "t2.micro" # Give the clients a different tag so they don't try to join the server cluster cluster_tag_key = "nomad-clients" cluster_tag_value = var.nomad_cluster_name # To keep the example simple, we are using 
a fixed-size cluster. In real-world usage, you could use auto scaling # policies to dynamically resize the cluster in response to load. min_size = var.num_nomad_clients max_size = var.num_nomad_clients desired_capacity = var.num_nomad_clients ami_id = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id user_data = data.template_file.user_data_nomad_client.rendered vpc_id = data.aws_vpc.default.id subnet_ids = data.aws_subnet_ids.default.ids # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. allowed_ssh_cidr_blocks = ["0.0.0.0/0"] allowed_inbound_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = var.ssh_key_name ebs_block_devices = [ { "device_name" = "/dev/xvde" "volume_size" = "10" }, ] } # --------------------------------------------------------------------------------------------------------------------- # ATTACH IAM POLICIES FOR CONSUL # To allow our client Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from # the Consul AWS Module's consul-iam-policies module. 
# --------------------------------------------------------------------------------------------------------------------- module "consul_iam_policies_clients" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0" iam_role_id = module.nomad_clients.iam_role_id } # --------------------------------------------------------------------------------------------------------------------- # THE USER DATA SCRIPT THAT WILL RUN ON EACH CLIENT NODE WHEN IT'S BOOTING # This script will configure and start Consul and Nomad # --------------------------------------------------------------------------------------------------------------------- data "template_file" "user_data_nomad_client" { template = file("${path.module}/user-data-nomad-client.sh") vars = { cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.consul_cluster_name } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE CLUSTER IN THE DEFAULT VPC AND SUBNETS # Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Nomad are # accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC # and private subnets. 
# --------------------------------------------------------------------------------------------------------------------- data "aws_vpc" "default" { default = true } data "aws_subnet_ids" "default" { vpc_id = data.aws_vpc.default.id } data "aws_region" "current" { } ================================================ FILE: examples/nomad-consul-separate-cluster/outputs.tf ================================================ output "num_nomad_servers" { value = module.nomad_servers.cluster_size } output "asg_name_nomad_servers" { value = module.nomad_servers.asg_name } output "launch_config_name_nomad_servers" { value = module.nomad_servers.launch_config_name } output "iam_role_arn_nomad_servers" { value = module.nomad_servers.iam_role_arn } output "iam_role_id_nomad_servers" { value = module.nomad_servers.iam_role_id } output "security_group_id_nomad_servers" { value = module.nomad_servers.security_group_id } output "num_consul_servers" { value = module.consul_servers.cluster_size } output "asg_name_consul_servers" { value = module.consul_servers.asg_name } output "launch_config_name_consul_servers" { value = module.consul_servers.launch_config_name } output "iam_role_arn_consul_servers" { value = module.consul_servers.iam_role_arn } output "iam_role_id_consul_servers" { value = module.consul_servers.iam_role_id } output "security_group_id_consul_servers" { value = module.consul_servers.security_group_id } output "num_nomad_clients" { value = module.nomad_clients.cluster_size } output "asg_name_nomad_clients" { value = module.nomad_clients.asg_name } output "launch_config_name_nomad_clients" { value = module.nomad_clients.launch_config_name } output "iam_role_arn_nomad_clients" { value = module.nomad_clients.iam_role_arn } output "iam_role_id_nomad_clients" { value = module.nomad_clients.iam_role_id } output "security_group_id_nomad_clients" { value = module.nomad_clients.security_group_id } output "aws_region" { value = data.aws_region.current.name } output 
"nomad_servers_cluster_tag_key" { value = module.nomad_servers.cluster_tag_key } output "nomad_servers_cluster_tag_value" { value = module.nomad_servers.cluster_tag_value } ================================================ FILE: examples/nomad-consul-separate-cluster/user-data-consul-server.sh ================================================ #!/bin/bash # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the # run-consul script to configure and start Consul in server mode. Note that this script assumes it's running in an AMI # built from the Packer template in examples/consul-ami/consul.json in the Consul AWS Module. set -e # Send the log output from this script to user-data.log, syslog, and the console # From: https://alestic.com/2010/12/ec2-user-data-output/ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 # These variables are passed in via Terraform template interplation /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" ================================================ FILE: examples/nomad-consul-separate-cluster/user-data-nomad-client.sh ================================================ #!/bin/bash # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the # run-consul script to configure and start Consul in client mode and the run-nomad script to configure and start Nomad # in client mode. Note that this script assumes it's running in an AMI built from the Packer template in # examples/nomad-consul-ami/nomad-consul.json. 
set -e # Send the log output from this script to user-data.log, syslog, and the console # From: https://alestic.com/2010/12/ec2-user-data-output/ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 # These variables are passed in via Terraform template interplation /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/nomad/bin/run-nomad --client ================================================ FILE: examples/nomad-consul-separate-cluster/user-data-nomad-server.sh ================================================ #!/bin/bash # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the # run-consul script to configure and start Consul in client mode and then the run-nomad script to configure and start # Nomad in server mode. Note that this script assumes it's running in an AMI built from the Packer template in # examples/nomad-consul-ami/nomad-consul.json. 
set -e # Send the log output from this script to user-data.log, syslog, and the console # From: https://alestic.com/2010/12/ec2-user-data-output/ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/nomad/bin/run-nomad --server --num-servers "${num_servers}" ================================================ FILE: examples/nomad-consul-separate-cluster/variables.tf ================================================ # --------------------------------------------------------------------------------------------------------------------- # ENVIRONMENT VARIABLES # Define these secrets as environment variables # --------------------------------------------------------------------------------------------------------------------- # AWS_ACCESS_KEY_ID # AWS_SECRET_ACCESS_KEY # AWS_DEFAULT_REGION # --------------------------------------------------------------------------------------------------------------------- # REQUIRED PARAMETERS # You must provide a value for each of these parameters. # --------------------------------------------------------------------------------------------------------------------- # None # --------------------------------------------------------------------------------------------------------------------- # OPTIONAL PARAMETERS # These parameters have reasonable defaults. # --------------------------------------------------------------------------------------------------------------------- variable "ami_id" { description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/nomad-consul-ami/nomad-consul.json. If no AMI is specified, the template will 'just work' by using the example public AMIs. WARNING! Do not use the example AMIs in a production setting!" 
type = string default = null } variable "nomad_cluster_name" { description = "What to name the Nomad cluster and all of its associated resources" type = string default = "nomad-example" } variable "consul_cluster_name" { description = "What to name the Consul cluster and all of its associated resources" type = string default = "consul-example" } variable "num_nomad_servers" { description = "The number of Nomad server nodes to deploy. We strongly recommend using 3 or 5." type = number default = 3 } variable "num_nomad_clients" { description = "The number of Nomad client nodes to deploy. You can deploy as many as you need to run your jobs." type = number default = 6 } variable "num_consul_servers" { description = "The number of Consul server nodes to deploy. We strongly recommend using 3 or 5." type = number default = 3 } variable "cluster_tag_key" { description = "The tag the Consul EC2 Instances will look for to automatically discover each other and form a cluster." type = string default = "consul-servers" } variable "ssh_key_name" { description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to null to not associate a Key Pair." type = string default = null } ================================================ FILE: examples/nomad-examples-helper/README.md ================================================ # Nomad Examples Helper This folder contains a helper script called `nomad-examples-helper.sh` for working with the [nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) and [nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) examples. After running `terraform apply` on the examples, if you run `nomad-examples-helper.sh`, it will automatically: 1. Wait for the Nomad server cluster to come up. 1. Print out the IP addresses of the Nomad servers. 1. 
Print out some example commands you can run against your Nomad servers. This folder also contains an example Nomad job called `example.nomad` that you can run in your Nomad cluster. This job simply echoes "Hello, World!" ================================================ FILE: examples/nomad-examples-helper/example.nomad ================================================ # There can only be a single job definition per file. This job is named # "example" so it will create a job with the ID and Name "example". # The "job" stanza is the top-most configuration option in the job # specification. A job is a declarative specification of tasks that Nomad # should run. Jobs have a globally unique name, one or many task groups, which # are themselves collections of one or many tasks. # # For more information and examples on the "job" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/job.html # job "example" { # The "region" parameter specifies the region in which to execute the job. If # omitted, this inherits the default region name of "global". Note that this example job # is hard-coded to us-east-1, so if you are running your example elsewhere, make # sure to update this setting, as well as the datacenters setting. region = "us-east-1" # The "datacenters" parameter specifies the list of datacenters which should # be considered when placing this task. This must be provided. Note that this example job # is hard-coded to us-east-1, so if you are running your example elsewhere, make # sure to update this setting, as well as the region setting. datacenters = ["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d", "us-east-1e"] # The "type" parameter controls the type of job, which impacts the scheduler's # decision on placement. This configuration is optional and defaults to # "service". For a full list of job types and their differences, please see # the online documentation. 
# # For more information, please see the online documentation at: # # https://www.nomadproject.io/docs/jobspec/schedulers.html # type = "batch" # The "constraint" stanza defines additional constraints for placing this job, # in addition to any resource or driver constraints. This stanza may be placed # at the "job", "group", or "task" level, and supports variable interpolation. # # For more information and examples on the "constraint" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/constraint.html # # constraint { # attribute = "${attr.kernel.name}" # value = "linux" # } # The "update" stanza specifies the job update strategy. The update strategy # is used to control things like rolling upgrades. If omitted, rolling # updates are disabled. # # For more information and examples on the "update" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/update.html # # update { # # The "stagger" parameter specifies to do rolling updates of this job every # # 10 seconds. # stagger = "10s" # # The "max_parallel" parameter specifies the maximum number of updates to # # perform in parallel. In this case, this specifies to update a single task # # at a time. # max_parallel = 1 # } # The "group" stanza defines a series of tasks that should be co-located on # the same Nomad client. Any task within a group will be placed on the same # client. # # For more information and examples on the "group" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/group.html # group "cache" { # The "count" parameter specifies the number of the task groups that should # be running under this group. This value must be non-negative and defaults # to 1. count = 1 # The "restart" stanza configures a group's behavior on task failure. If # left unspecified, a default restart policy is used based on the job type. 
# # For more information and examples on the "restart" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/restart.html # restart { # The number of attempts to run the job within the specified interval. attempts = 10 interval = "5m" # The "delay" parameter specifies the duration to wait before restarting # a task after it has failed. delay = "25s" # The "mode" parameter controls what happens when a task has restarted # "attempts" times within the interval. "delay" mode delays the next # restart until the next interval. "fail" mode does not restart the task # if "attempts" has been hit within the interval. mode = "delay" } # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk # instead of a hard disk requirement. Clients using this stanza should # not specify disk requirements in the resources stanza of the task. All # tasks in this group will share the same ephemeral disk. # # For more information and examples on the "ephemeral_disk" stanza, please # see the online documentation at: # # https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html # ephemeral_disk { # When sticky is true and the task group is updated, the scheduler # will prefer to place the updated allocation on the same node and # will migrate the data. This is useful for tasks that store data # that should persist across allocation updates. # sticky = true # # Setting migrate to true results in the allocation directory of a # sticky allocation directory to be migrated. # migrate = true # The "size" parameter specifies the size in MB of shared ephemeral disk # between tasks in the group. size = 300 } # The "task" stanza creates an individual unit of work, such as a Docker # container, web application, or batch processing. 
# # For more information and examples on the "task" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/task.html # task "hello_world" { # The "driver" parameter specifies the task driver that should be used to # run the task. driver = "exec" # The "config" stanza specifies the driver configuration, which is passed # directly to the driver to start the task. The details of configurations # are specific to each driver, so please see specific driver # documentation for more information. config { command = "/bin/echo" args = ["Hello, World!"] } # The "artifact" stanza instructs Nomad to download an artifact from a # remote source prior to starting the task. This provides a convenient # mechanism for downloading configuration files or data needed to run the # task. It is possible to specify the "artifact" stanza multiple times to # download multiple artifacts. # # For more information and examples on the "artifact" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/artifact.html # # artifact { # source = "http://foo.com/artifact.tar.gz" # options { # checksum = "md5:c4aa853ad2215426eb7d70a21922e794" # } # } # The "logs" stanza instructs the Nomad client on how many log files and # the maximum size of those log files to retain. Logging is enabled by # default, but the "logs" stanza allows for finer-grained control over # the log rotation and storage configuration. # # For more information and examples on the "logs" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/logs.html # # logs { # max_files = 10 # max_file_size = 15 # } # The "resources" stanza describes the requirements a task needs to # execute. Resource requirements include memory, network, cpu, and more. # This ensures the task will execute on a machine that contains enough # resource capacity. 
# # For more information and examples on the "resources" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/resources.html # resources { cpu = 500 # 500 MHz memory = 256 # 256MB network { mbits = 10 port "db" {} } } # The "service" stanza instructs Nomad to register this task as a service # in the service discovery engine, which is currently Consul. This will # make the service addressable after Nomad has placed it on a host and # port. # # For more information and examples on the "service" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/service.html # # service { # name = "global-redis-check" # tags = ["global", "cache"] # port = "db" # check { # name = "alive" # type = "tcp" # interval = "10s" # timeout = "2s" # } # } # The "template" stanza instructs Nomad to manage a template, such as # a configuration file or script. This template can optionally pull data # from Consul or Vault to populate runtime configuration data. # # For more information and examples on the "template" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/template.html # # template { # data = "---\nkey: {{ key \"service/my-key\" }}" # destination = "local/file.yml" # change_mode = "signal" # change_signal = "SIGHUP" # } # The "vault" stanza instructs the Nomad client to acquire a token from # a HashiCorp Vault server. The Nomad servers must be configured and # authorized to communicate with Vault. By default, Nomad will inject # The token into the job via an environment variable and make the token # available to the "template" stanza. The Nomad client handles the renewal # and revocation of the Vault token. 
# # For more information and examples on the "vault" stanza, please see # the online documentation at: # # https://www.nomadproject.io/docs/job-specification/vault.html # # vault { # policies = ["cdn", "frontend"] # change_mode = "signal" # change_signal = "SIGHUP" # } # Controls the timeout between signalling a task it will be killed # and killing the task. If not set a default is used. # kill_timeout = "20s" } } } ================================================ FILE: examples/nomad-examples-helper/nomad-examples-helper.sh ================================================ #!/bin/bash # A script that is meant to be used with the Nomad cluster examples to: # # 1. Wait for the Nomad server cluster to come up. # 2. Print out the IP addresses of the Nomad servers. # 3. Print out some example commands you can run against your Nomad servers. set -e readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly SCRIPT_NAME="$(basename "$0")" readonly MAX_RETRIES=30 readonly SLEEP_BETWEEN_RETRIES_SEC=10 function log { local readonly level="$1" local readonly message="$2" local readonly timestamp=$(date +"%Y-%m-%d %H:%M:%S") >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" } function log_info { local readonly message="$1" log "INFO" "$message" } function log_warn { local readonly message="$1" log "WARN" "$message" } function log_error { local readonly message="$1" log "ERROR" "$message" } function assert_is_installed { local readonly name="$1" if [[ ! $(command -v ${name}) ]]; then log_error "The binary '$name' is required by this script but is not installed or in the system's PATH." 
exit 1 fi } function get_required_terraform_output { local readonly output_name="$1" local output_value output_value=$(terraform output -raw -no-color "$output_name") if [[ -z "$output_value" ]]; then log_error "Unable to find a value for Terraform output $output_name" exit 1 fi echo "$output_value" } # # Usage: join SEPARATOR ARRAY # # Joins the elements of ARRAY with the SEPARATOR character between them. # # Examples: # # join ", " ("A" "B" "C") # Returns: "A, B, C" # function join { local readonly separator="$1" shift local readonly values=("$@") printf "%s$separator" "${values[@]}" | sed "s/$separator$//" } function get_all_nomad_server_ips { local expected_num_nomad_servers expected_num_nomad_servers=$(get_required_terraform_output "num_nomad_servers") log_info "Looking up public IP addresses for $expected_num_nomad_servers Nomad server EC2 Instances." local ips local i for (( i=1; i<="$MAX_RETRIES"; i++ )); do ips=($(get_nomad_server_ips)) if [[ "${#ips[@]}" -eq "$expected_num_nomad_servers" ]]; then log_info "Found all $expected_num_nomad_servers public IP addresses!" echo "${ips[@]}" return else log_warn "Found ${#ips[@]} of $expected_num_nomad_servers public IP addresses. Will sleep for $SLEEP_BETWEEN_RETRIES_SEC seconds and try again." sleep "$SLEEP_BETWEEN_RETRIES_SEC" fi done log_error "Failed to find the IP addresses for $expected_num_nomad_servers Nomad server EC2 Instances after $MAX_RETRIES retries." 
exit 1 } function wait_for_all_nomad_servers_to_register { local readonly server_ips=($@) local readonly server_ip="${server_ips[0]}" local expected_num_nomad_servers expected_num_nomad_servers=$(get_required_terraform_output "num_nomad_servers") log_info "Waiting for $expected_num_nomad_servers Nomad servers to register in the cluster" for (( i=1; i<="$MAX_RETRIES"; i++ )); do log_info "Running 'nomad server members' command against server at IP address $server_ip" # Intentionally use local and readonly here so that this script doesn't exit if the nomad server members or grep # commands exit with an error. local readonly members=$(nomad server members -address="http://$server_ip:4646") local readonly alive_members=$(echo "$members" | grep "alive") local readonly num_nomad_servers=$(echo "$alive_members" | wc -l | tr -d ' ') if [[ "$num_nomad_servers" -eq "$expected_num_nomad_servers" ]]; then log_info "All $expected_num_nomad_servers Nomad servers have registered in the cluster!" return else log_info "$num_nomad_servers out of $expected_num_nomad_servers Nomad servers have registered in the cluster." log_info "Sleeping for $SLEEP_BETWEEN_RETRIES_SEC seconds and will check again." sleep "$SLEEP_BETWEEN_RETRIES_SEC" fi done log_error "Did not find $expected_num_nomad_servers Nomad servers registered after $MAX_RETRIES retries." 
exit 1 } function get_nomad_server_ips { local aws_region local cluster_tag_key local cluster_tag_value local instances aws_region=$(get_required_terraform_output "aws_region") cluster_tag_key=$(get_required_terraform_output "nomad_servers_cluster_tag_key") cluster_tag_value=$(get_required_terraform_output "nomad_servers_cluster_tag_value") log_info "Fetching public IP addresses for EC2 Instances in $aws_region with tag $cluster_tag_key=$cluster_tag_value" instances=$(aws ec2 describe-instances \ --region "$aws_region" \ --filter "Name=tag:$cluster_tag_key,Values=$cluster_tag_value" "Name=instance-state-name,Values=running") echo "$instances" | jq -r '.Reservations[].Instances[].PublicIpAddress' } function print_instructions { local readonly server_ips=($@) local readonly server_ip="${server_ips[0]}" local instructions=() instructions+=("\nYour Nomad servers are running at the following IP addresses:\n\n${server_ips[@]/#/ }\n") instructions+=("Some commands for you to try:\n") instructions+=(" nomad server members -address=http://$server_ip:4646") instructions+=(" nomad node status -address=http://$server_ip:4646") instructions+=(" nomad run -address=http://$server_ip:4646 $SCRIPT_DIR/example.nomad") instructions+=(" nomad status -address=http://$server_ip:4646 example\n") local instructions_str instructions_str=$(join "\n" "${instructions[@]}") echo -e "$instructions_str" } function run { assert_is_installed "aws" assert_is_installed "jq" assert_is_installed "terraform" assert_is_installed "nomad" local server_ips server_ips=$(get_all_nomad_server_ips) wait_for_all_nomad_servers_to_register "$server_ips" print_instructions "$server_ips" } run ================================================ FILE: examples/root-example/README.md ================================================ # Nomad and Consul Co-located Cluster Example This folder shows an example of Terraform code to deploy a [Nomad](https://www.nomadproject.io/) cluster co-located with a 
[Consul](https://www.consul.io/) cluster in [AWS](https://aws.amazon.com/) (if you want to run Nomad and Consul on separate clusters, see the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) instead). The cluster consists of two Auto Scaling Groups (ASGs): one with a small number of Nomad and Consul server nodes, which are responsible for being part of the [consensus protocol](https://www.nomadproject.io/docs/internals/consensus.html), and one with a larger number of Nomad and Consul client nodes, which are used to run jobs: ![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture-nomad-consul-colocated.png) You will need to create an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that has Nomad and Consul installed, which you can do using the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami). For more info on how the Nomad cluster works, check out the [nomad-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) documentation. ## Quick start To deploy a Nomad Cluster: 1. `git clone` this repo to your computer. 1. Optional: build a Nomad and Consul AMI. See the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) documentation for instructions. Make sure to note down the ID of the AMI. 1. Install [Terraform](https://www.terraform.io/). 1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables that don't have a default. If you built a custom AMI, put the AMI ID into the `ami_id` variable. Otherwise, one of our public example AMIs will be used by default. These AMIs are great for learning/experimenting, but are NOT recommended for production use. 1. 
Run `terraform init`. 1. Run `terraform apply`. 1. Run the [nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh) to print out the IP addresses of the Nomad servers and some example commands you can run to interact with the cluster: `../nomad-examples-helper/nomad-examples-helper.sh`. ================================================ FILE: examples/root-example/user-data-client.sh ================================================ #!/bin/bash # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the # run-nomad and run-consul scripts to configure and start Nomad and Consul in client mode. Note that this script # assumes it's running in an AMI built from the Packer template in examples/nomad-consul-ami/nomad-consul.json. set -e # Send the log output from this script to user-data.log, syslog, and the console # From: https://alestic.com/2010/12/ec2-user-data-output/ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 # These variables are passed in via Terraform template interpolation /opt/consul/bin/run-consul --client --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/nomad/bin/run-nomad --client ================================================ FILE: examples/root-example/user-data-server.sh ================================================ #!/bin/bash # This script is meant to be run in the User Data of each EC2 Instance while it's booting. The script uses the # run-nomad and run-consul scripts to configure and start Consul and Nomad in server mode. Note that this script # assumes it's running in an AMI built from the Packer template in examples/nomad-consul-ami/nomad-consul.json. 
set -e # Send the log output from this script to user-data.log, syslog, and the console # From: https://alestic.com/2010/12/ec2-user-data-output/ exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 # These variables are passed in via Terraform template interpolation /opt/consul/bin/run-consul --server --cluster-tag-key "${cluster_tag_key}" --cluster-tag-value "${cluster_tag_value}" /opt/nomad/bin/run-nomad --server --num-servers "${num_servers}" ================================================ FILE: main.tf ================================================ # --------------------------------------------------------------------------------------------------------------------- # DEPLOY A NOMAD CLUSTER CO-LOCATED WITH A CONSUL CLUSTER IN AWS # These templates show an example of how to use the nomad-cluster module to deploy a Nomad cluster in AWS. This cluster # has Consul colocated on the same nodes. # # We deploy two Auto Scaling Groups (ASGs): one with a small number of Nomad and Consul server nodes and one with a # larger number of Nomad and Consul client nodes. Note that these templates assume that the AMI you provide via the # ami_id input variable is built from the examples/nomad-consul-ami/nomad-consul.json Packer template. # --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER # ---------------------------------------------------------------------------------------------------------------------- terraform { # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it # forwards compatible with 1.0.x code. 
required_version = ">= 0.12.26" } # --------------------------------------------------------------------------------------------------------------------- # AUTOMATICALLY LOOK UP THE LATEST PRE-BUILT AMI # This repo contains a CircleCI job that automatically builds and publishes the latest AMI by building the Packer # template at /examples/nomad-consul-ami upon every new release. The Terraform data source below automatically looks up # the latest AMI so that a simple "terraform apply" will just work without the user needing to manually build an AMI and # fill in the right value. # # !! WARNING !! These example AMIs are meant only for convenience when initially testing this repo. Do NOT use these example # AMIs in a production setting because it is important that you consciously think through the configuration you want # in your own production AMI. # # NOTE: This Terraform data source must return at least one AMI result or the entire template will fail. See # /_ci/publish-amis-in-new-account.md for more information. # --------------------------------------------------------------------------------------------------------------------- data "aws_ami" "nomad_consul" { most_recent = true # If we change the AWS Account in which tests are run, update this value. 
owners = ["562637147889"] filter { name = "virtualization-type" values = ["hvm"] } filter { name = "is-public" values = ["true"] } filter { name = "name" values = ["nomad-consul-ubuntu-*"] } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE SERVER NODES # Note that we use the consul-cluster module to deploy both the Nomad and Consul nodes on the same servers # --------------------------------------------------------------------------------------------------------------------- module "servers" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0" cluster_name = "${var.cluster_name}-server" cluster_size = var.num_servers instance_type = var.server_instance_type # The EC2 Instances will use these tags to automatically discover each other and form a cluster cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.cluster_tag_value ami_id = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id user_data = data.template_file.user_data_server.rendered vpc_id = data.aws_vpc.default.id subnet_ids = data.aws_subnet_ids.default.ids # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. allowed_ssh_cidr_blocks = ["0.0.0.0/0"] allowed_inbound_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = var.ssh_key_name tags = [ { key = "Environment" value = "development" propagate_at_launch = true }, ] } # --------------------------------------------------------------------------------------------------------------------- # ATTACH SECURITY GROUP RULES FOR NOMAD # Our Nomad servers are running on top of the consul-cluster module, so we need to configure that cluster to allow # the inbound/outbound connections used by Nomad. 
# --------------------------------------------------------------------------------------------------------------------- module "nomad_security_group_rules" { # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you # to a specific version of the modules, such as the following example: # source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-security-group-rules?ref=v0.0.1" source = "./modules/nomad-security-group-rules" # To make testing easier, we allow requests from any IP address here but in a production deployment, we strongly # recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. security_group_id = module.servers.security_group_id allowed_inbound_cidr_blocks = ["0.0.0.0/0"] } # --------------------------------------------------------------------------------------------------------------------- # THE USER DATA SCRIPT THAT WILL RUN ON EACH SERVER NODE WHEN IT'S BOOTING # This script will configure and start Consul and Nomad # --------------------------------------------------------------------------------------------------------------------- data "template_file" "user_data_server" { template = file("${path.module}/examples/root-example/user-data-server.sh") vars = { cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.cluster_tag_value num_servers = var.num_servers } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE CLIENT NODES # --------------------------------------------------------------------------------------------------------------------- module "clients" { # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you # to a specific version of the modules, such as the following example: # source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1" source = 
"./modules/nomad-cluster" cluster_name = "${var.cluster_name}-client" instance_type = var.instance_type # Give the clients a different tag so they don't try to join the server cluster cluster_tag_key = "nomad-clients" cluster_tag_value = var.cluster_name # To keep the example simple, we are using a fixed-size cluster. In real-world usage, you could use auto scaling # policies to dynamically resize the cluster in response to load. min_size = var.num_clients max_size = var.num_clients desired_capacity = var.num_clients ami_id = var.ami_id == null ? data.aws_ami.nomad_consul.image_id : var.ami_id user_data = data.template_file.user_data_client.rendered vpc_id = data.aws_vpc.default.id subnet_ids = data.aws_subnet_ids.default.ids # To make testing easier, we allow Consul and SSH requests from any IP address here but in a production # deployment, we strongly recommend you limit this to the IP address ranges of known, trusted servers inside your VPC. allowed_ssh_cidr_blocks = ["0.0.0.0/0"] allowed_inbound_cidr_blocks = ["0.0.0.0/0"] ssh_key_name = var.ssh_key_name tags = [ { key = "Environment" value = "development" propagate_at_launch = true } ] } # --------------------------------------------------------------------------------------------------------------------- # ATTACH IAM POLICIES FOR CONSUL # To allow our client Nodes to automatically discover the Consul servers, we need to give them the IAM permissions from # the Consul AWS Module's consul-iam-policies module. 
# --------------------------------------------------------------------------------------------------------------------- module "consul_iam_policies" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-iam-policies?ref=v0.8.0" iam_role_id = module.clients.iam_role_id } # --------------------------------------------------------------------------------------------------------------------- # THE USER DATA SCRIPT THAT WILL RUN ON EACH CLIENT NODE WHEN IT'S BOOTING # This script will configure and start Consul and Nomad # --------------------------------------------------------------------------------------------------------------------- data "template_file" "user_data_client" { template = file("${path.module}/examples/root-example/user-data-client.sh") vars = { cluster_tag_key = var.cluster_tag_key cluster_tag_value = var.cluster_tag_value } } # --------------------------------------------------------------------------------------------------------------------- # DEPLOY THE CLUSTER IN THE DEFAULT VPC AND SUBNETS # Using the default VPC and subnets makes this example easy to run and test, but it means Consul and Nomad are # accessible from the public Internet. In a production deployment, we strongly recommend deploying into a custom VPC # and private subnets. # --------------------------------------------------------------------------------------------------------------------- data "aws_vpc" "default" { default = var.vpc_id == "" ? true : false id = var.vpc_id } data "aws_subnet_ids" "default" { vpc_id = data.aws_vpc.default.id } data "aws_region" "current" { } ================================================ FILE: modules/install-nomad/README.md ================================================ # Nomad Install Script This folder contains a script for installing Nomad and its dependencies. 
You can use this script, along with the
[run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) it installs, to
create a Nomad [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that can be
deployed in [AWS](https://aws.amazon.com/) across an Auto Scaling Group using the
[nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster).

This script has been tested on the following operating systems:

* Ubuntu 16.04
* Ubuntu 18.04
* Amazon Linux 2

There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well.

## Quick start

To install Nomad, use `git` to clone this repository at a specific tag (see the
[releases page](../../../../releases) for all available tags) and run the `install-nomad` script:

```
git clone --branch <VERSION> https://github.com/hashicorp/terraform-aws-nomad.git
terraform-aws-nomad/modules/install-nomad/install-nomad --version 0.5.4
```

The `install-nomad` script will install Nomad, its dependencies, and the
[run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad). You can then run
the `run-nomad` script when the server is booting to start Nomad and configure it to automatically join other nodes to
form a cluster.

We recommend running the `install-nomad` script as part of a [Packer](https://www.packer.io/) template to create a
Nomad [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (see the
[nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami) for
sample code).
You can then deploy the AMI across an Auto Scaling Group using the
[nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster) (see the
[nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md) and
[nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster)
examples for fully-working sample code).

## Command line Arguments

The `install-nomad` script accepts the following arguments:

* `--version VERSION`: Install Nomad version VERSION. Required.
* `--path DIR`: Install Nomad into folder DIR. Optional.
* `--user USER`: The install dirs will be owned by user USER. Optional.

Example:

```
install-nomad --version 0.5.4
```

## How it works

The `install-nomad` script does the following:

1. [Create a user and folders for Nomad](#create-a-user-and-folders-for-nomad)
1. [Install Nomad binaries and scripts](#install-nomad-binaries-and-scripts)
1. [Follow-up tasks](#follow-up-tasks)

### Create a user and folders for Nomad

Create an OS user named `nomad`. Create the following folders, all owned by user `nomad`:

* `/opt/nomad`: base directory for Nomad data (configurable via the `--path` argument).
* `/opt/nomad/bin`: directory for Nomad binaries.
* `/opt/nomad/data`: directory where the Nomad agent can store state.
* `/opt/nomad/config`: directory where the Nomad agent looks up configuration.
* `/opt/nomad/log`: directory where the Nomad agent will store log files.

### Install Nomad binaries and scripts

Install the following:

* `nomad`: Download the Nomad zip file from the [downloads page](https://www.nomadproject.io/downloads.html) (the
  version number is configurable via the `--version` argument), and extract the `nomad` binary into `/opt/nomad/bin`.
  Add a symlink to the `nomad` binary in `/usr/local/bin`.
* `run-nomad`: Copy the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) into `/opt/nomad/bin`. ### Follow-up tasks After the `install-nomad` script finishes running, you may wish to do the following: 1. If you have custom Nomad config (`.hcl`) files, you may want to copy them into the config directory (default: `/opt/nomad/config`). 1. If `/usr/local/bin` isn't already part of `PATH`, you should add it so you can run the `nomad` command without specifying the full path. ## Dependencies The install script assumes that `systemd` is already installed. We use it as a cross-platform supervisor to ensure Nomad is started whenever the system boots and restarted if the Nomad process crashes. Additionally, it is used to store all logs which can be accessed using `journalctl`. ## Why use Git to install this code? We needed an easy way to install these scripts that satisfied a number of requirements, including working on a variety of operating systems and supported versioning. Our current solution is to use `git`, but this may change in the future. See [Package Managers](https://github.com/hashicorp/terraform-aws-consul/blob/master/_docs/package-managers.md) for a full discussion of the requirements, trade-offs, and why we picked `git`. ================================================ FILE: modules/install-nomad/install-nomad ================================================ #!/bin/bash # This script can be used to install Nomad and its dependencies. This script has been tested with the following # operating systems: # # 1. Ubuntu 16.04 # 2. Ubuntu 18.04 # 3. 
Amazon Linux 2 set -e readonly DEFAULT_INSTALL_PATH="/opt/nomad" readonly DEFAULT_NOMAD_USER="nomad" readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly SYSTEM_BIN_DIR="/usr/local/bin" readonly SUPERVISOR_DIR="/etc/supervisor" readonly SUPERVISOR_CONF_DIR="$SUPERVISOR_DIR/conf.d" readonly SCRIPT_NAME="$(basename "$0")" function print_usage { echo echo "Usage: install-nomad [OPTIONS]" echo echo "This script can be used to install Nomad and its dependencies. This script has been tested with Ubuntu 16.04, Ubuntu 18.04 and Amazon Linux 2." echo echo "Options:" echo echo -e " --version\t\tThe version of Nomad to install. Required." echo -e " --path\t\tThe path where Nomad should be installed. Optional. Default: $DEFAULT_INSTALL_PATH." echo -e " --user\t\tThe user who will own the Nomad install directories. Optional. Default: $DEFAULT_NOMAD_USER." echo echo "Example:" echo echo " install-nomad --version 0.5.4" } function log { local readonly level="$1" local readonly message="$2" local readonly timestamp=$(date +"%Y-%m-%d %H:%M:%S") >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" } function log_info { local readonly message="$1" log "INFO" "$message" } function log_warn { local readonly message="$1" log "WARN" "$message" } function log_error { local readonly message="$1" log "ERROR" "$message" } function assert_not_empty { local readonly arg_name="$1" local readonly arg_value="$2" if [[ -z "$arg_value" ]]; then log_error "The value for '$arg_name' cannot be empty" print_usage exit 1 fi } function has_yum { [ -n "$(command -v yum)" ] } function has_apt_get { [ -n "$(command -v apt-get)" ] } function install_dependencies { log_info "Installing dependencies" if $(has_apt_get); then sudo apt-get update -y sudo apt-get install -y awscli curl unzip jq elif $(has_yum); then sudo yum update -y sudo yum install -y aws curl unzip jq else log_error "Could not find apt-get or yum. Cannot install dependencies on this OS." 
exit 1 fi } function user_exists { local readonly username="$1" id "$username" >/dev/null 2>&1 } function create_nomad_user { local readonly username="$1" if $(user_exists "$username"); then echo "User $username already exists. Will not create again." else log_info "Creating user named $username" sudo useradd "$username" fi } function create_nomad_install_paths { local readonly path="$1" local readonly username="$2" log_info "Creating install dirs for Nomad at $path" sudo mkdir -p "$path" sudo mkdir -p "$path/bin" sudo mkdir -p "$path/config" sudo mkdir -p "$path/data" log_info "Changing ownership of $path to $username" sudo chown -R "$username:$username" "$path" } function install_binaries { local readonly version="$1" local readonly path="$2" local readonly username="$3" local cpu_arch cpu_arch="$(uname -m)" local binary_arch="" case "$cpu_arch" in x86_64) binary_arch="amd64" ;; x86) binary_arch="386" ;; arm64|aarch64) binary_arch="arm64" ;; arm*) binary_arch="arm" ;; *) log_error "CPU architecture $cpu_arch is not a supported by Consul." exit 1 ;; esac local readonly url="https://releases.hashicorp.com/nomad/${version}/nomad_${version}_linux_${binary_arch}.zip" local readonly download_path="/tmp/nomad_${version}_linux_${binary_arch}.zip" local readonly bin_dir="$path/bin" local readonly nomad_dest_path="$bin_dir/nomad" local readonly run_nomad_dest_path="$bin_dir/run-nomad" log_info "Downloading Nomad $version from $url to $download_path" curl -o "$download_path" "$url" unzip -d /tmp "$download_path" log_info "Moving Nomad binary to $nomad_dest_path" sudo mv "/tmp/nomad" "$nomad_dest_path" sudo chown "$username:$username" "$nomad_dest_path" sudo chmod a+x "$nomad_dest_path" local readonly symlink_path="$SYSTEM_BIN_DIR/nomad" if [[ -f "$symlink_path" ]]; then log_info "Symlink $symlink_path already exists. Will not add again." 
else log_info "Adding symlink to $nomad_dest_path in $symlink_path" sudo ln -s "$nomad_dest_path" "$symlink_path" fi log_info "Copying Nomad run script to $run_nomad_dest_path" sudo cp "$SCRIPT_DIR/../run-nomad/run-nomad" "$run_nomad_dest_path" sudo chown "$username:$username" "$run_nomad_dest_path" sudo chmod a+x "$run_nomad_dest_path" } function install { local version="" local path="$DEFAULT_INSTALL_PATH" local user="$DEFAULT_NOMAD_USER" while [[ $# > 0 ]]; do local key="$1" case "$key" in --version) version="$2" shift ;; --path) path="$2" shift ;; --user) user="$2" shift ;; --help) print_usage exit ;; *) log_error "Unrecognized argument: $key" print_usage exit 1 ;; esac shift done assert_not_empty "--version" "$version" assert_not_empty "--path" "$path" assert_not_empty "--user" "$user" log_info "Starting Nomad install" install_dependencies create_nomad_user "$user" create_nomad_install_paths "$path" "$user" install_binaries "$version" "$path" "$user" log_info "Nomad install complete!" } install "$@" ================================================ FILE: modules/nomad-cluster/README.md ================================================ # Nomad Cluster This folder contains a [Terraform](https://www.terraform.io/) module that can be used to deploy a [Nomad](https://www.nomadproject.io/) cluster in [AWS](https://aws.amazon.com/) on top of an Auto Scaling Group. This module is designed to deploy an [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) that had Nomad installed via the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module in this Module. Note that this module assumes you have a separate [Consul](https://www.consul.io/) cluster already running. 
If you want to run Consul and Nomad in the same cluster, instead of using this module, see the [Deploy Nomad and Consul in the same cluster documentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/README.md#deploy-nomad-and-consul-in-the-same-cluster). ## How do you use this module? This folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your code by adding a `module` configuration and setting its `source` parameter to URL of this folder: ```hcl module "nomad_cluster" { # TODO: update this to the final URL # Use version v0.0.1 of the nomad-cluster module source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-cluster?ref=v0.0.1" # Specify the ID of the Nomad AMI. You should build this using the scripts in the install-nomad module. ami_id = "ami-abcd1234" # Configure and start Nomad during boot. It will automatically connect to the Consul cluster specified in its # configuration and form a cluster with other Nomad nodes connected to that Consul cluster. user_data = <<-EOF #!/bin/bash /opt/nomad/bin/run-nomad --server --num-servers 3 EOF # ... See variables.tf for the other parameters you must define for the nomad-cluster module } ``` Note the following parameters: - `source`: Use this parameter to specify the URL of the nomad-cluster module. The double slash (`//`) is intentional and required. Terraform uses it to specify subfolders within a Git repo (see [module sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in this repo. That way, instead of using the latest version of this module from the `master` branch, which will change every time you run Terraform, you're using a fixed version of the repo. - `ami_id`: Use this parameter to specify the ID of a Nomad [Amazon Machine Image (AMI)](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) to deploy on each server in the cluster. 
You should install Nomad in this AMI using the scripts in the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module. - `user_data`: Use this parameter to specify a [User Data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts) script that each server will run during boot. This is where you can use the [run-nomad script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad) to configure and run Nomad. The `run-nomad` script is one of the scripts installed by the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module. You can find the other parameters in [variables.tf](variables.tf). Check out the [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) example for working sample code. Note that if you want to run Nomad and Consul on the same cluster, see the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md example) instead. ## How do you connect to the Nomad cluster? ### Using the Node agent from your own computer If you want to connect to the cluster from your own computer, [install Nomad](https://www.nomadproject.io/docs/install/index.html) and execute commands with the `-address` parameter set to the IP address of one of the servers in your Nomad cluster. Note that this only works if the Nomad cluster is running in public subnets and/or your default VPC (as in both [examples](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples)), which is OK for testing and experimentation, but NOT recommended for production usage. To use the HTTP API, you first need to get the public IP address of one of the Nomad Instances. 
If you deployed the [nomad-consul-colocated-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/MAIN.md)
or [nomad-consul-separate-cluster](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster)
example, the
[nomad-examples-helper.sh script](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-examples-helper/nomad-examples-helper.sh)
will do the tag lookup for you automatically (note, you must have the [AWS CLI](https://aws.amazon.com/cli/),
[jq](https://stedolan.github.io/jq/), and the [Nomad agent](https://www.nomadproject.io/) installed locally):

```
> ../nomad-examples-helper/nomad-examples-helper.sh

Your Nomad servers are running at the following IP addresses:

34.204.85.139
52.23.167.204
54.236.16.38
```

Copy and paste one of these IPs and use it with the `-address` argument for any
[Nomad command](https://www.nomadproject.io/docs/commands/index.html). For example, to see the status of all the
Nomad servers:

```
> nomad server members -address=http://<SERVER_IP>:4646

ip-172-31-23-140.global  172.31.23.140  4648  alive  true  2  0.5.4  dc1  global
ip-172-31-23-141.global  172.31.23.141  4648  alive  true  2  0.5.4  dc1  global
ip-172-31-23-142.global  172.31.23.142  4648  alive  true  2  0.5.4  dc1  global
```

To see the status of all the Nomad agents:

```
> nomad node status -address=http://<SERVER_IP>:4646

ID        DC          Name                 Class   Drain  Status
ec2796cd  us-east-1e  i-0059e5cafb8103834  <none>  false  ready
ec2f799e  us-east-1d  i-0a5552c3c375e9ea0  <none>  false  ready
ec226624  us-east-1b  i-0d647981f5407ae32  <none>  false  ready
ec2d4635  us-east-1a  i-0c43dcc509e3d8bdf  <none>  false  ready
ec232ea5  us-east-1d  i-0eff2e6e5989f51c1  <none>  false  ready
ec2d4bd6  us-east-1c  i-01523bf946d98003e  <none>  false  ready
```

And to submit a job called `example.nomad`:

```
> nomad run -address=http://<SERVER_IP>:4646 example.nomad

==> Monitoring evaluation "0d159869"
    Evaluation triggered by job "example"
    Allocation "5cbf23a1" created: node "1e1aa1e0", group "example"
    Evaluation status changed: "pending" ->
"complete" ==> Evaluation "0d159869" finished with status "complete" ``` ### Using the Nomad agent on another EC2 Instance For production usage, your EC2 Instances should be running the [Nomad agent](https://www.nomadproject.io/docs/agent/index.html). The agent nodes should discover the Nomad server nodes automatically using Consul. Check out the [Service Discovery documentation](https://www.nomadproject.io/docs/service-discovery/index.html) for details. ## What's included in this module? This module creates the following architecture: ![Nomad architecture](https://raw.githubusercontent.com/hashicorp/terraform-aws-nomad/master/_docs/architecture.png) This architecture consists of the following resources: - [Auto Scaling Group](#auto-scaling-group) - [Security Group](#security-group) - [IAM Role and Permissions](#iam-role-and-permissions) ### Auto Scaling Group This module runs Nomad on top of an [Auto Scaling Group (ASG)](https://aws.amazon.com/autoscaling/). Typically, you should run the ASG with 3 or 5 EC2 Instances spread across multiple [Availability Zones](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html). Each of the EC2 Instances should be running an AMI that has had Nomad installed via the [install-nomad](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad) module. You pass in the ID of the AMI to run using the `ami_id` input parameter. ### Security Group Each EC2 Instance in the ASG has a Security Group that allows: - All outbound requests - All the inbound ports specified in the [Nomad documentation](https://www.nomadproject.io/docs/agent/configuration/index.html#ports) The Security Group ID is exported as an output variable if you need to add additional rules. Check out the [Security section](#security) for more details. ### IAM Role and Permissions Each EC2 Instance in the ASG has an [IAM Role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) attached. 
We give this IAM role a small set of IAM permissions that each EC2 Instance can use to automatically discover the
other Instances in its ASG and form a cluster with them.

The IAM Role ARN is exported as an output variable if you need to add additional permissions.

## How do you roll out updates?

If you want to deploy a new version of Nomad across the cluster, the best way to do that is to:

1. Build a new AMI.
1. Set the `ami_id` parameter to the ID of the new AMI.
1. Run `terraform apply`.

This updates the Launch Configuration of the ASG, so any new Instances in the ASG will have your new AMI, but it does
NOT actually deploy those new instances. To make that happen, you should do the following:

1. Issue an API call to one of the old Instances in the ASG to have it leave gracefully. E.g.:

    ```
    nomad server force-leave -address=<SERVER_IP>:4646
    ```

1. Once the instance has left the cluster, terminate it:

    ```
    aws ec2 terminate-instances --instance-ids <INSTANCE_ID>
    ```

1. After a minute or two, the ASG should automatically launch a new Instance, with the new AMI, to replace the old one.

1. Wait for the new Instance to boot and join the cluster.

1. Repeat these steps for each of the other old Instances in the ASG.

We will add a script in the future to automate this process (PRs are welcome!).

## What happens if a node crashes?

There are two ways a Nomad node may go down:

1. The Nomad process may crash. In that case, `systemd` should restart it automatically.

1. The EC2 Instance running Nomad dies. In that case, the Auto Scaling Group should launch a replacement automatically.
   Note that in this case, since the Nomad agent did not exit gracefully, and the replacement will have a different ID,
   you may have to manually clean out the old nodes using the
   [server force-leave command](https://www.nomadproject.io/docs/commands/server-force-leave.html). We may add a script
   to do this automatically in the future.
For more info, see the [Nomad Outage documentation](https://www.nomadproject.io/guides/outage.html). ## How do you connect load balancers to the Auto Scaling Group (ASG)? You can use the [`aws_autoscaling_attachment`](https://www.terraform.io/docs/providers/aws/r/autoscaling_attachment.html) resource. For example, if you are using the new application or network load balancers: ```hcl resource "aws_lb_target_group" "test" { // ... } # Create a new Nomad Cluster module "nomad" { source ="..." // ... } # Create a new load balancer attachment resource "aws_autoscaling_attachment" "asg_attachment_bar" { autoscaling_group_name = module.nomad.asg_name alb_target_group_arn = aws_alb_target_group.test.arn } ``` If you are using a "classic" load balancer: ```hcl # Create a new load balancer resource "aws_elb" "bar" { // ... } # Create a new Nomad Cluster module "nomad" { source ="..." // ... } # Create a new load balancer attachment resource "aws_autoscaling_attachment" "asg_attachment_bar" { autoscaling_group_name = module.nomad.asg_name elb = aws_elb.bar.id } ``` ## Security Here are some of the main security considerations to keep in mind when using this module: 1. [Encryption in transit](#encryption-in-transit) 1. [Encryption at rest](#encryption-at-rest) 1. [Dedicated instances](#dedicated-instances) 1. [Security groups](#security-groups) 1. [SSH access](#ssh-access) ### Encryption in transit Nomad can encrypt all of its network traffic. For instructions on enabling network encryption, have a look at the [How do you handle encryption documentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/run-nomad#how-do-you-handle-encryption). ### Encryption at rest The EC2 Instances in the cluster store all their data on the root EBS Volume. To enable encryption for the data at rest, you must enable encryption in your Nomad AMI. If you're creating the AMI using Packer (e.g. 
as shown in the [nomad-consul-ami example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-ami)), you need to set the [encrypt_boot parameter](https://www.packer.io/docs/builders/amazon-ebs.html#encrypt_boot) to `true`. ### Dedicated instances If you wish to use dedicated instances, you can set the `tenancy` parameter to `"dedicated"` in this module. ### Security groups This module attaches a security group to each EC2 Instance that allows inbound requests as follows: - **Nomad**: For all the [ports used by Nomad](https://www.nomadproject.io/docs/agent/configuration/index.html#ports), you can use the `allowed_inbound_cidr_blocks` parameter to control the list of [CIDR blocks](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that will be allowed access. - **SSH**: For the SSH port (default: 22), you can use the `allowed_ssh_cidr_blocks` parameter to control the list of [CIDR blocks](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that will be allowed access. Note that all the ports mentioned above are configurable via the `xxx_port` variables (e.g. `http_port`). See [variables.tf](variables.tf) for the full list. ### SSH access You can associate an [EC2 Key Pair](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) with each of the EC2 Instances in this cluster by specifying the Key Pair's name in the `ssh_key_name` variable. If you don't want to associate a Key Pair with these servers, set `ssh_key_name` to an empty string. ## What's NOT included in this module? This module does NOT handle the following items, which you may want to provide on your own: - [Consul](#consul) - [Monitoring, alerting, log aggregation](#monitoring-alerting-log-aggregation) - [VPCs, subnets, route tables](#vpcs-subnets-route-tables) - [DNS entries](#dns-entries) ### Consul This module assumes you already have Consul deployed in a separate cluster. 
If you want to run Nomad and Consul on the same cluster, instead of using this module, see the [Deploy Nomad and Consul in the same cluster documentation](https://github.com/hashicorp/terraform-aws-nomad/tree/master/README.md#deploy-nomad-and-consul-in-the-same-cluster). ### Monitoring, alerting, log aggregation This module does not include anything for monitoring, alerting, or log aggregation. All ASGs and EC2 Instances come with limited [CloudWatch](https://aws.amazon.com/cloudwatch/) metrics built-in, but beyond that, you will have to provide your own solutions. ### VPCs, subnets, route tables This module assumes you've already created your network topology (VPC, subnets, route tables, etc). You will need to pass in the the relevant info about your network topology (e.g. `vpc_id`, `subnet_ids`) as input variables to this module. ### DNS entries This module does not create any DNS entries for Nomad (e.g. in Route 53). ================================================ FILE: modules/nomad-cluster/main.tf ================================================ # ---------------------------------------------------------------------------------------------------------------------- # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER # ---------------------------------------------------------------------------------------------------------------------- terraform { # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it # forwards compatible with 1.0.x code. 
required_version = ">= 0.12.26" } # --------------------------------------------------------------------------------------------------------------------- # CREATE AN AUTO SCALING GROUP (ASG) TO RUN NOMAD # --------------------------------------------------------------------------------------------------------------------- resource "aws_autoscaling_group" "autoscaling_group" { launch_configuration = aws_launch_configuration.launch_configuration.name name = var.asg_name availability_zones = var.availability_zones vpc_zone_identifier = var.subnet_ids min_size = var.min_size max_size = var.max_size desired_capacity = var.desired_capacity termination_policies = [var.termination_policies] health_check_type = var.health_check_type health_check_grace_period = var.health_check_grace_period wait_for_capacity_timeout = var.wait_for_capacity_timeout protect_from_scale_in = var.protect_from_scale_in tag { key = "Name" value = var.cluster_name propagate_at_launch = true } tag { key = var.cluster_tag_key value = var.cluster_tag_value propagate_at_launch = true } dynamic "tag" { for_each = var.tags content { key = tag.value["key"] value = tag.value["value"] propagate_at_launch = tag.value["propagate_at_launch"] } } lifecycle { # As of AWS Provider 3.x, inline load_balancers and target_group_arns # in an aws_autoscaling_group take precedence over attachment resources. # Since the consul-cluster module does not define any Load Balancers, # it's safe to assume that we will always want to favor an attachment # over these inline properties. 
# # For further discussion and links to relevant documentation, see # https://github.com/hashicorp/terraform-aws-vault/issues/210 ignore_changes = [load_balancers, target_group_arns] } } # --------------------------------------------------------------------------------------------------------------------- # CREATE LAUNCH CONFIGURATION TO DEFINE WHAT RUNS ON EACH INSTANCE IN THE ASG # --------------------------------------------------------------------------------------------------------------------- resource "aws_launch_configuration" "launch_configuration" { name_prefix = "${var.cluster_name}-" image_id = var.ami_id instance_type = var.instance_type user_data = var.user_data iam_instance_profile = aws_iam_instance_profile.instance_profile.name key_name = var.ssh_key_name security_groups = concat( [aws_security_group.lc_security_group.id], var.security_groups, ) placement_tenancy = var.tenancy associate_public_ip_address = var.associate_public_ip_address ebs_optimized = var.root_volume_ebs_optimized root_block_device { volume_type = var.root_volume_type volume_size = var.root_volume_size delete_on_termination = var.root_volume_delete_on_termination } dynamic "ebs_block_device" { for_each = var.ebs_block_devices content { device_name = ebs_block_device.value["device_name"] volume_size = ebs_block_device.value["volume_size"] snapshot_id = lookup(ebs_block_device.value, "snapshot_id", null) iops = lookup(ebs_block_device.value, "iops", null) encrypted = lookup(ebs_block_device.value, "encrypted", null) delete_on_termination = lookup(ebs_block_device.value, "delete_on_termination", null) } } # Important note: whenever using a launch configuration with an auto scaling group, you must set # create_before_destroy = true. However, as soon as you set create_before_destroy = true in one resource, you must # also set it in every resource that it depends on, or you'll get an error about cyclic dependencies (especially when # removing resources). 
For more info, see: # # https://www.terraform.io/docs/providers/aws/r/launch_configuration.html # https://terraform.io/docs/configuration/resources.html lifecycle { create_before_destroy = true } } # --------------------------------------------------------------------------------------------------------------------- # CREATE A SECURITY GROUP TO CONTROL WHAT REQUESTS CAN GO IN AND OUT OF EACH EC2 INSTANCE # --------------------------------------------------------------------------------------------------------------------- resource "aws_security_group" "lc_security_group" { name_prefix = var.cluster_name description = "Security group for the ${var.cluster_name} launch configuration" vpc_id = var.vpc_id # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors # when you try to do a terraform destroy. lifecycle { create_before_destroy = true } } resource "aws_security_group_rule" "allow_ssh_inbound" { count = length(var.allowed_ssh_cidr_blocks) > 0 ? 
1 : 0 type = "ingress" from_port = var.ssh_port to_port = var.ssh_port protocol = "tcp" cidr_blocks = var.allowed_ssh_cidr_blocks security_group_id = aws_security_group.lc_security_group.id } resource "aws_security_group_rule" "allow_all_outbound" { type = "egress" from_port = 0 to_port = 0 protocol = "-1" cidr_blocks = var.allow_outbound_cidr_blocks security_group_id = aws_security_group.lc_security_group.id } # --------------------------------------------------------------------------------------------------------------------- # THE INBOUND/OUTBOUND RULES FOR THE SECURITY GROUP COME FROM THE NOMAD-SECURITY-GROUP-RULES MODULE # --------------------------------------------------------------------------------------------------------------------- module "security_group_rules" { source = "../nomad-security-group-rules" security_group_id = aws_security_group.lc_security_group.id allowed_inbound_cidr_blocks = var.allowed_inbound_cidr_blocks http_port = var.http_port rpc_port = var.rpc_port serf_port = var.serf_port } # --------------------------------------------------------------------------------------------------------------------- # ATTACH AN IAM ROLE TO EACH EC2 INSTANCE # We can use the IAM role to grant the instance IAM permissions so we can use the AWS CLI without having to figure out # how to get our secret AWS access keys onto the box. # --------------------------------------------------------------------------------------------------------------------- resource "aws_iam_instance_profile" "instance_profile" { name_prefix = var.cluster_name path = var.instance_profile_path role = aws_iam_role.instance_role.name # aws_launch_configuration.launch_configuration in this module sets create_before_destroy to true, which means # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors # when you try to do a terraform destroy. 
lifecycle { create_before_destroy = true } } resource "aws_iam_role" "instance_role" { name_prefix = var.cluster_name assume_role_policy = data.aws_iam_policy_document.instance_role.json permissions_boundary = var.iam_permissions_boundary # aws_iam_instance_profile.instance_profile in this module sets create_before_destroy to true, which means # everything it depends on, including this resource, must set it as well, or you'll get cyclic dependency errors # when you try to do a terraform destroy. lifecycle { create_before_destroy = true } } data "aws_iam_policy_document" "instance_role" { statement { effect = "Allow" actions = ["sts:AssumeRole"] principals { type = "Service" identifiers = ["ec2.amazonaws.com"] } } } ================================================ FILE: modules/nomad-cluster/outputs.tf ================================================ output "asg_name" { value = aws_autoscaling_group.autoscaling_group.name } output "cluster_tag_key" { value = var.cluster_tag_key } output "cluster_tag_value" { value = var.cluster_tag_value } output "cluster_size" { value = aws_autoscaling_group.autoscaling_group.desired_capacity } output "launch_config_name" { value = aws_launch_configuration.launch_configuration.name } output "iam_instance_profile_arn" { value = aws_iam_instance_profile.instance_profile.arn } output "iam_instance_profile_id" { value = aws_iam_instance_profile.instance_profile.id } output "iam_instance_profile_name" { value = aws_iam_instance_profile.instance_profile.name } output "iam_role_arn" { value = aws_iam_role.instance_role.arn } output "iam_role_id" { value = aws_iam_role.instance_role.id } output "security_group_id" { value = aws_security_group.lc_security_group.id } ================================================ FILE: modules/nomad-cluster/variables.tf ================================================ # --------------------------------------------------------------------------------------------------------------------- # REQUIRED 
PARAMETERS # You must provide a value for each of these parameters. # --------------------------------------------------------------------------------------------------------------------- variable "cluster_name" { description = "The name of the Nomad cluster (e.g. nomad-servers-stage). This variable is used to namespace all resources created by this module." type = string } variable "ami_id" { description = "The ID of the AMI to run in this cluster. Should be an AMI that had Nomad installed and configured by the install-nomad module." type = string } variable "instance_type" { description = "The type of EC2 Instances to run for each node in the cluster (e.g. t2.micro)." type = string } variable "vpc_id" { description = "The ID of the VPC in which to deploy the cluster" type = string } variable "allowed_inbound_cidr_blocks" { description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Nomad" type = list(string) } variable "user_data" { description = "A User Data script to execute while the server is booting. We remmend passing in a bash script that executes the run-nomad script, which should have been installed in the AMI by the install-nomad module." type = string } variable "min_size" { description = "The minimum number of nodes to have in the cluster. If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5." type = number } variable "max_size" { description = "The maximum number of nodes to have in the cluster. If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5." type = number } variable "desired_capacity" { description = "The desired number of nodes to have in the cluster. If you're using this to run Nomad servers, we strongly recommend setting this to 3 or 5." 
type = number } # --------------------------------------------------------------------------------------------------------------------- # OPTIONAL PARAMETERS # These parameters have reasonable defaults. # --------------------------------------------------------------------------------------------------------------------- variable "asg_name" { description = "The name to use for the Auto Scaling Group" type = string default = "" } variable "subnet_ids" { description = "The subnet IDs into which the EC2 Instances should be deployed. We recommend one subnet ID per node in the cluster_size variable. At least one of var.subnet_ids or var.availability_zones must be non-empty." type = list(string) default = null } variable "availability_zones" { description = "The availability zones into which the EC2 Instances should be deployed. We recommend one availability zone per node in the cluster_size variable. At least one of var.subnet_ids or var.availability_zones must be non-empty." type = list(string) default = null } variable "ssh_key_name" { description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair." type = string default = "" } variable "allowed_ssh_cidr_blocks" { description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow SSH connections" type = list(string) default = [] } variable "cluster_tag_key" { description = "Add a tag with this key and the value var.cluster_tag_value to each Instance in the ASG." type = string default = "nomad-servers" } variable "cluster_tag_value" { description = "Add a tag with key var.cluster_tag_key and this value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster." type = string default = "auto-join" } variable "termination_policies" { description = "A list of policies to decide how the instances in the auto scale group should be terminated. 
The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, Default." type = string default = "Default" } variable "associate_public_ip_address" { description = "If set to true, associate a public IP address with each EC2 Instance in the cluster." type = bool default = false } variable "tenancy" { description = "The tenancy of the instance. Must be one of: default or dedicated." type = string default = "default" } variable "root_volume_ebs_optimized" { description = "If true, the launched EC2 instance will be EBS-optimized." type = bool default = false } variable "root_volume_type" { description = "The type of volume. Must be one of: standard, gp2, or io1." type = string default = "standard" } variable "root_volume_size" { description = "The size, in GB, of the root EBS volume." type = number default = 50 } variable "root_volume_delete_on_termination" { description = "Whether the volume should be destroyed on instance termination." default = true type = bool } variable "wait_for_capacity_timeout" { description = "A maximum duration that Terraform should wait for ASG instances to be healthy before timing out. Setting this to '0' causes Terraform to skip all Capacity Waiting behavior." type = string default = "10m" } variable "health_check_type" { description = "Controls how health checking is done. Must be one of EC2 or ELB." type = string default = "EC2" } variable "health_check_grace_period" { description = "Time, in seconds, after instance comes into service before checking health." type = number default = 300 } variable "instance_profile_path" { description = "Path in which to create the IAM instance profile." 
type = string default = "/" } variable "http_port" { description = "The port to use for HTTP" type = number default = 4646 } variable "rpc_port" { description = "The port to use for RPC" type = number default = 4647 } variable "serf_port" { description = "The port to use for Serf" type = number default = 4648 } variable "ssh_port" { description = "The port used for SSH connections" type = number default = 22 } variable "security_groups" { description = "Additional security groups to attach to the EC2 instances" type = list(string) default = [] } variable "tags" { description = "List of extra tag blocks added to the autoscaling group configuration. Each element in the list is a map containing keys 'key', 'value', and 'propagate_at_launch' mapped to the respective values." type = list(object({ key = string value = string propagate_at_launch = bool })) default = [] } variable "ebs_block_devices" { description = "List of ebs volume definitions for those ebs_volumes that should be added to the instances created with the EC2 launch-configuration. Each element in the list is a map containing keys defined for ebs_block_device (see: https://www.terraform.io/docs/providers/aws/r/launch_configuration.html#ebs_block_device." # We can't narrow the type down more than "any" because if we use list(object(...)), then all the fields in the # object will be required (whereas some, such as encrypted, should be optional), and if we use list(map(...)), all # the values in the map must be of the same type, whereas we need some to be strings, some to be bools, and some to # be ints. So, we have to fall back to just any ugly "any." type = any default = [] # Example: # # default = [ # { # device_name = "/dev/xvdh" # volume_type = "gp2" # volume_size = 300 # encrypted = true # } # ] } variable "protect_from_scale_in" { description = "(Optional) Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events." 
type = bool default = false } variable "allow_outbound_cidr_blocks" { description = "Allow outbound traffic to these CIDR blocks." type = list(string) default = ["0.0.0.0/0"] } variable "iam_permissions_boundary" { description = "If set, restricts the created IAM role to the given permissions boundary" type = string default = null } ================================================ FILE: modules/nomad-security-group-rules/README.md ================================================ # Nomad Security Group Rules Module This folder contains a [Terraform](https://www.terraform.io/) module that defines the security group rules used by a [Nomad](https://www.nomadproject.io/) cluster to control the traffic that is allowed to go in and out of the cluster. Normally, you'd get these rules by default if you're using the [nomad-cluster module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/nomad-cluster), but if you're running Nomad on top of a different cluster, then you can use this module to add the necessary security group rules that that cluster. For example, imagine you were using the [consul-cluster module](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/consul-cluster) to run a cluster of servers that have both Nomad and Consul on each node: ```hcl module "consul_servers" { source = "github.com/hashicorp/terraform-aws-consul//modules/consul-cluster?ref=v0.8.0" # This AMI has both Nomad and Consul installed ami_id = "ami-1234abcd" } ``` The `consul-cluster` module will provide the security group rules for Consul, but not for Nomad. To ensure those servers have the necessary ports open for using Nomad, you can use this module as follows: ```hcl module "security_group_rules" { source = "github.com/hashicorp/terraform-aws-nomad//modules/nomad-security-group-rules?ref=v0.0.1" security_group_id = module.consul_servers.security_group_id # ... (other params omitted) ... 
} ``` Note the following parameters: - `source`: Use this parameter to specify the URL of this module. The double slash (`//`) is intentional and required. Terraform uses it to specify subfolders within a Git repo (see [module sources](https://www.terraform.io/docs/modules/sources.html)). The `ref` parameter specifies a specific Git tag in this repo. That way, instead of using the latest version of this module from the `master` branch, which will change every time you run Terraform, you're using a fixed version of the repo. - `security_group_id`: Use this parameter to specify the ID of the security group to which the rules in this module should be added. You can find the other parameters in [variables.tf](variables.tf). Check out the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example) for working sample code. ================================================ FILE: modules/nomad-security-group-rules/main.tf ================================================ # --------------------------------------------------------------------------------------------------------------------- # CREATE THE SECURITY GROUP RULES THAT CONTROL WHAT TRAFFIC CAN GO IN AND OUT OF A NOMAD CLUSTER # --------------------------------------------------------------------------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------------- # REQUIRE A SPECIFIC TERRAFORM VERSION OR HIGHER # ---------------------------------------------------------------------------------------------------------------------- terraform { # This module is now only being tested with Terraform 1.0.x. However, to make upgrading easier, we are setting # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it # forwards compatible with 1.0.x code. 
required_version = ">= 0.12.26"
}

resource "aws_security_group_rule" "allow_http_inbound" {
  # Skip the rule entirely when no inbound CIDR blocks were provided.
  # Uses the `length(...) > 0` form for consistency with the SSH rule in the
  # nomad-cluster module (previously written as `>= 1`, which is equivalent).
  count = length(var.allowed_inbound_cidr_blocks) > 0 ? 1 : 0

  type        = "ingress"
  from_port   = var.http_port
  to_port     = var.http_port
  protocol    = "tcp"
  cidr_blocks = var.allowed_inbound_cidr_blocks

  security_group_id = var.security_group_id
}

resource "aws_security_group_rule" "allow_rpc_inbound" {
  count = length(var.allowed_inbound_cidr_blocks) > 0 ? 1 : 0

  type        = "ingress"
  from_port   = var.rpc_port
  to_port     = var.rpc_port
  protocol    = "tcp"
  cidr_blocks = var.allowed_inbound_cidr_blocks

  security_group_id = var.security_group_id
}

resource "aws_security_group_rule" "allow_serf_tcp_inbound" {
  count = length(var.allowed_inbound_cidr_blocks) > 0 ? 1 : 0

  type        = "ingress"
  from_port   = var.serf_port
  to_port     = var.serf_port
  protocol    = "tcp"
  cidr_blocks = var.allowed_inbound_cidr_blocks

  security_group_id = var.security_group_id
}

resource "aws_security_group_rule" "allow_serf_udp_inbound" {
  # Serf gossip uses both TCP and UDP on the same port, hence a second rule.
  count = length(var.allowed_inbound_cidr_blocks) > 0 ? 1 : 0

  type        = "ingress"
  from_port   = var.serf_port
  to_port     = var.serf_port
  protocol    = "udp"
  cidr_blocks = var.allowed_inbound_cidr_blocks

  security_group_id = var.security_group_id
}

================================================
FILE: modules/nomad-security-group-rules/variables.tf
================================================

# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# --------------------------------------------------------------------------------------------------------------------- variable "security_group_id" { description = "The ID of the security group to which we should add the Nomad security group rules" type = string } variable "allowed_inbound_cidr_blocks" { description = "A list of CIDR-formatted IP address ranges from which the EC2 Instances will allow connections to Nomad" type = list(string) } # --------------------------------------------------------------------------------------------------------------------- # OPTIONAL PARAMETERS # These parameters have reasonable defaults. # --------------------------------------------------------------------------------------------------------------------- variable "http_port" { description = "The port to use for HTTP" type = number default = 4646 } variable "rpc_port" { description = "The port to use for RPC" type = number default = 4647 } variable "serf_port" { description = "The port to use for Serf" type = number default = 4648 } ================================================ FILE: modules/run-nomad/README.md ================================================ # Nomad Run Script This folder contains a script for configuring and running Nomad on an [AWS](https://aws.amazon.com/) server. This script has been tested on the following operating systems: * Ubuntu 16.04 * Ubuntu 18.04 * Amazon Linux 2 There is a good chance it will work on other flavors of Debian, CentOS, and RHEL as well. ## Quick start This script assumes you installed it, plus all of its dependencies (including Nomad itself), using the [install-nomad module](https://github.com/hashicorp/terraform-aws-nomad/tree/master/modules/install-nomad). The default install path is `/opt/nomad/bin`, so to start Nomad in server mode, you run: ``` /opt/nomad/bin/run-nomad --server --num-servers 3 ``` To start Nomad in client mode, you run: ``` /opt/nomad/bin/run-nomad --client ``` This will: 1. 
Generate a Nomad configuration file called `default.hcl` in the Nomad config dir (default: `/opt/nomad/config`). See [Nomad configuration](#nomad-configuration) for details on what this configuration file will contain and how to override it with your own configuration. 1. Generate a [systemd](https://www.freedesktop.org/wiki/Software/systemd/) configuration file called `nomad.service` in the systemd config dir (default: `/etc/supervisor/conf.d`) with a command that will run Nomad: `nomad agent -config=/opt/nomad/config -data-dir=/opt/nomad/data`. 1. Tell systemd to load the new configuration file, thereby starting Nomad. We recommend using the `run-nomad` command as part of [User Data](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-shell-scripts), so that it executes when the EC2 Instance is first booting. If you are running Consul on the same server, make sure to use this script *after* Consul has booted. After running `run-nomad` on that initial boot, the `systemd` configuration will automatically restart Nomad if it crashes or the EC2 instance reboots. Note that `systemd` logs to its own journal by default. To view the Nomad logs, run `journalctl -u nomad.service`. To change the log output location, you can specify the `StandardOutput` and `StandardError` options by using the `--systemd-stdout` and `--systemd-stderr` options. See the [`systemd.exec` man pages](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#StandardOutput=) for available options, but note that the `file:path` option requires [systemd version >= 236](https://stackoverflow.com/a/48052152), which is not provided in the base Ubuntu 16.04 and Amazon Linux 2 images. 
See the [nomad-consul-colocated-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/root-example) and [nomad-consul-separate-cluster example](https://github.com/hashicorp/terraform-aws-nomad/tree/master/examples/nomad-consul-separate-cluster) for fully-working sample code.

## Command line Arguments

The `run-nomad` script accepts the following arguments:

* `server` (optional): If set, run in server mode. At least one of `--server` or `--client` must be set.
* `client` (optional): If set, run in client mode. At least one of `--server` or `--client` must be set.
* `num-servers` (optional): The number of servers to expect in the Nomad cluster. Required if `--server` is set.
* `config-dir` (optional): The path to the Nomad config folder. Default is to take the absolute path of `../config`, relative to the `run-nomad` script itself.
* `data-dir` (optional): The path to the Nomad data folder. Default is to take the absolute path of `../data`, relative to the `run-nomad` script itself.
This is useful if you have a custom configuration file and don't want to use any of of the default settings from `run-nomad`. Example: ``` /opt/nomad/bin/run-nomad --server --num-servers 3 ``` ## Nomad configuration `run-nomad` generates a configuration file for Nomad called `default.hcl` that tries to figure out reasonable defaults for a Nomad cluster in AWS. Check out the [Nomad Configuration Files documentation](https://www.nomadproject.io/docs/agent/configuration/index.html) for what configuration settings are available. ### Default configuration `run-nomad` sets the following configuration values by default: * [advertise](https://www.nomadproject.io/docs/agent/configuration/index.html#advertise): All the advertise addresses are set to the Instance's private IP address, as fetched from [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). * [bind_addr](https://www.nomadproject.io/docs/agent/configuration/index.html#bind_addr): Set to 0.0.0.0. * [client](https://www.nomadproject.io/docs/agent/configuration/client.html): This config is only set of `--client` is set. * [enabled](https://www.nomadproject.io/docs/agent/configuration/client.html#enabled): `true`. * [consul](https://www.nomadproject.io/docs/agent/configuration/consul.html): By default, set the Consul address to `127.0.0.1:8500`, with the assumption that the Consul agent is running on the same server. * [datacenter](https://www.nomadproject.io/docs/agent/configuration/index.html#datacenter): Set to the current availability zone, as fetched from [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). * [name](https://www.nomadproject.io/docs/agent/configuration/index.html#name): Set to the instance id, as fetched from [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). 
* [region](https://www.nomadproject.io/docs/agent/configuration/index.html#region): Set to the current AWS region, as fetched from [Metadata](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). * [server](https://www.nomadproject.io/docs/agent/configuration/server.html): This config is only set if `--server` is set. * [enabled](https://www.nomadproject.io/docs/agent/configuration/server.html#enabled): `true`. * [bootstrap_expect](https://www.nomadproject.io/docs/agent/configuration/server.html#bootstrap_expect): Set to the `--num-servers` parameter. ### Overriding the configuration To override the default configuration, simply put your own configuration file in the Nomad config folder (default: `/opt/nomad/config`), but with a name that comes later in the alphabet than `default.hcl` (e.g. `my-custom-config.hcl`). Nomad will load all the `.hcl` configuration files in the config dir and [merge them together in alphabetical order](https://www.nomadproject.io/docs/agent/configuration/index.html#load-order-and-merging), so that settings in files that come later in the alphabet will override the earlier ones. For example, to override the default `name` setting, you could create a file called `tags.hcl` with the contents: ```hcl name = "my-custom-name" ``` If you want to override *all* the default settings, you can tell `run-nomad` not to generate a default config file at all using the `--skip-nomad-config` flag: ``` /opt/nomad/bin/run-nomad --server --num-servers 3 --skip-nomad-config ``` ## How do you handle encryption? Nomad can encrypt all of its network traffic (see the [encryption docs for details](https://www.nomadproject.io/docs/agent/encryption.html)), but by default, encryption is not enabled in this Module. To enable encryption, you need to do the following: 1. [Gossip encryption: provide an encryption key](#gossip-encryption-provide-an-encryption-key) 1. 
[RPC encryption: provide TLS certificates](#rpc-encryption-provide-tls-certificates) 1. [Consul encryption](#consul-encryption) ### Gossip encryption: provide an encryption key To enable Gossip encryption, you need to provide a 16-byte, Base64-encoded encryption key, which you can generate using the [nomad keygen command](https://www.nomadproject.io/docs/commands/keygen.html). You can put the key in a Nomad configuration file (e.g. `encryption.hcl`) in the Nomad config dir (default location: `/opt/nomad/config`): ```hcl server { encrypt = "cg8StVXbQJ0gPvMd9o7yrg==" } ``` ### RPC encryption: provide TLS certificates To enable RPC encryption, you need to provide the paths to the CA and signing keys ([here is a tutorial on generating these keys](http://russellsimpkins.blogspot.com/2015/10/consul-adding-tls-using-self-signed.html)). You can specify these paths in a Nomad configuration file (e.g. `encryption.hcl`) in the Nomad config dir (default location: `/opt/nomad/config`): ```hcl tls { # Enable encryption on incoming HTTP and RPC endpoints http = true rpc = true # Verify server hostname for outgoing TLS connections verify_server_hostname = true # Specify the CA and signing key paths ca_file = "/opt/nomad/tls/certs/ca-bundle.crt", cert_file = "/opt/nomad/tls/certs/my.crt", key_file = "/opt/nomad/tls/private/my.key" } ``` ### Consul encryption Note that Nomad relies on Consul, and enabling encryption for Consul requires a separate process. Check out the [official Consul encryption docs](https://www.consul.io/docs/agent/encryption.html) and the Consul AWS Module [How do you handle encryption docs](https://github.com/hashicorp/terraform-aws-consul/tree/master/modules/run-consul#how-do-you-handle-encryption) for more info. ================================================ FILE: modules/run-nomad/run-nomad ================================================ #!/bin/bash # This script is used to configure and run Nomad on an AWS server. 
set -e readonly NOMAD_CONFIG_FILE="default.hcl" readonly SYSTEMD_CONFIG_PATH="/etc/systemd/system/nomad.service" readonly EC2_INSTANCE_METADATA_URL="http://169.254.169.254/latest/meta-data" readonly EC2_INSTANCE_DYNAMIC_DATA_URL="http://169.254.169.254/latest/dynamic" readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" readonly SCRIPT_NAME="$(basename "$0")" function print_usage { echo echo "Usage: run-nomad [OPTIONS]" echo echo "This script is used to configure and run Nomad on an AWS server." echo echo "Options:" echo echo -e " --server\t\tIf set, run in server mode. Optional. At least one of --server or --client must be set." echo -e " --client\t\tIf set, run in client mode. Optional. At least one of --server or --client must be set." echo -e " --num-servers\t\tThe number of servers to expect in the Nomad cluster. Required if --server is true." echo -e " --config-dir\t\tThe path to the Nomad config folder. Optional. Default is the absolute path of '../config', relative to this script." echo -e " --data-dir\t\tThe path to the Nomad data folder. Optional. Default is the absolute path of '../data', relative to this script." echo -e " --bin-dir\t\tThe path to the folder with Nomad binary. Optional. Default is the absolute path of the parent folder of this script." echo -e " --systemd-stdout\t\tThe StandardOutput option of the systemd unit. Optional. If not configured, uses systemd's default (journal)." echo -e " --systemd-stderr\t\tThe StandardError option of the systemd unit. Optional. If not configured, uses systemd's default (inherit)." echo -e " --user\t\tThe user to run Nomad as. Optional. Default is to use the owner of --config-dir." echo -e " --use-sudo\t\tIf set, run the Nomad agent with sudo. By default, sudo is only used if --client is set." echo -e " --environment\t\A single environment variable in the key/value pair form 'KEY=\"val\"' to pass to Nomad as environment variable when starting it up. 
Repeat this option for additional variables. Optional." echo -e " --skip-nomad-config\tIf this flag is set, don't generate a Nomad configuration file. Optional. Default is false." echo echo "Example:" echo echo " run-nomad --server --config-dir /custom/path/to/nomad/config" } function log { local readonly level="$1" local readonly message="$2" local readonly timestamp=$(date +"%Y-%m-%d %H:%M:%S") >&2 echo -e "${timestamp} [${level}] [$SCRIPT_NAME] ${message}" } function log_info { local readonly message="$1" log "INFO" "$message" } function log_warn { local readonly message="$1" log "WARN" "$message" } function log_error { local readonly message="$1" log "ERROR" "$message" } # Based on code from: http://stackoverflow.com/a/16623897/483528 function strip_prefix { local readonly str="$1" local readonly prefix="$2" echo "${str#$prefix}" } function assert_not_empty { local readonly arg_name="$1" local readonly arg_value="$2" if [[ -z "$arg_value" ]]; then log_error "The value for '$arg_name' cannot be empty" print_usage exit 1 fi } function split_by_lines { local prefix="$1" shift for var in "$@"; do echo "${prefix}${var}" done } function lookup_path_in_instance_metadata { local readonly path="$1" curl --silent --location "$EC2_INSTANCE_METADATA_URL/$path/" } function lookup_path_in_instance_dynamic_data { local readonly path="$1" curl --silent --location "$EC2_INSTANCE_DYNAMIC_DATA_URL/$path/" } function get_instance_ip_address { lookup_path_in_instance_metadata "local-ipv4" } function get_instance_id { lookup_path_in_instance_metadata "instance-id" } function get_instance_availability_zone { lookup_path_in_instance_metadata "placement/availability-zone" } function get_instance_region { lookup_path_in_instance_dynamic_data "instance-identity/document" | jq -r ".region" } function assert_is_installed { local readonly name="$1" if [[ ! $(command -v ${name}) ]]; then log_error "The binary '$name' is required by this script but is not installed or in the system's PATH." 
exit 1 fi } function generate_nomad_config { local readonly server="$1" local readonly client="$2" local readonly num_servers="$3" local readonly config_dir="$4" local readonly user="$5" local readonly config_path="$config_dir/$NOMAD_CONFIG_FILE" local instance_id="" local instance_ip_address="" local instance_region="" local instance_availability_zone="" instance_id=$(get_instance_id) instance_ip_address=$(get_instance_ip_address) instance_region=$(get_instance_region) availability_zone=$(get_instance_availability_zone) local server_config="" if [[ "$server" == "true" ]]; then server_config=$(cat < "$config_path" < "$systemd_config_path" echo -e "$service_config" >> "$systemd_config_path" echo -e "$log_config" >> "$systemd_config_path" echo -e "$install_config" >> "$systemd_config_path" } function start_nomad { log_info "Reloading systemd config and starting Nomad" sudo systemctl daemon-reload sudo systemctl enable nomad.service sudo systemctl restart nomad.service } # Based on: http://unix.stackexchange.com/a/7732/215969 function get_owner_of_path { local readonly path="$1" ls -ld "$path" | awk '{print $3}' } function run { local server="false" local client="false" local num_servers="" local config_dir="" local data_dir="" local bin_dir="" local systemd_stdout="" local systemd_stderr="" local user="" local skip_nomad_config="false" local use_sudo="" local environment=() local all_args=() while [[ $# > 0 ]]; do local key="$1" case "$key" in --server) server="true" ;; --client) client="true" ;; --num-servers) num_servers="$2" shift ;; --config-dir) assert_not_empty "$key" "$2" config_dir="$2" shift ;; --data-dir) assert_not_empty "$key" "$2" data_dir="$2" shift ;; --bin-dir) assert_not_empty "$key" "$2" bin_dir="$2" shift ;; --systemd-stdout) assert_not_empty "$key" "$2" systemd_stdout="$2" shift ;; --systemd-stderr) assert_not_empty "$key" "$2" systemd_stderr="$2" shift ;; --user) assert_not_empty "$key" "$2" user="$2" shift ;; --cluster-tag-key) assert_not_empty 
"$key" "$2" cluster_tag_key="$2" shift ;; --cluster-tag-value) assert_not_empty "$key" "$2" cluster_tag_value="$2" shift ;; --skip-nomad-config) skip_nomad_config="true" ;; --use-sudo) use_sudo="true" ;; --environment) assert_not_empty "$key" "$2" environment+=("$2") shift ;; --help) print_usage exit ;; *) log_error "Unrecognized argument: $key" print_usage exit 1 ;; esac shift done if [[ "$server" == "true" ]]; then assert_not_empty "--num-servers" "$num_servers" fi if [[ "$server" == "false" && "$client" == "false" ]]; then log_error "At least one of --server or --client must be set" exit 1 fi if [[ -z "$use_sudo" ]]; then if [[ "$client" == "true" ]]; then use_sudo="true" else use_sudo="false" fi fi assert_is_installed "systemctl" assert_is_installed "aws" assert_is_installed "curl" assert_is_installed "jq" if [[ -z "$config_dir" ]]; then config_dir=$(cd "$SCRIPT_DIR/../config" && pwd) fi if [[ -z "$data_dir" ]]; then data_dir=$(cd "$SCRIPT_DIR/../data" && pwd) fi if [[ -z "$bin_dir" ]]; then bin_dir=$(cd "$SCRIPT_DIR/../bin" && pwd) fi # If $systemd_stdout and/or $systemd_stderr are empty, we leave them empty so that generate_systemd_config will use systemd's defaults (journal and inherit, respectively) if [[ -z "$user" ]]; then user=$(get_owner_of_path "$config_dir") fi if [[ "$skip_nomad_config" == "true" ]]; then log_info "The --skip-nomad-config flag is set, so will not generate a default Nomad config file." 
else generate_nomad_config "$server" "$client" "$num_servers" "$config_dir" "$user" fi generate_systemd_config "$SYSTEMD_CONFIG_PATH" "$config_dir" "$data_dir" "$bin_dir" "$systemd_stdout" "$systemd_stderr" "$user" "$use_sudo" "${environment[@]}" start_nomad } run "$@" ================================================ FILE: outputs.tf ================================================ output "num_nomad_servers" { value = module.servers.cluster_size } output "asg_name_servers" { value = module.servers.asg_name } output "launch_config_name_servers" { value = module.servers.launch_config_name } output "iam_role_arn_servers" { value = module.servers.iam_role_arn } output "iam_role_id_servers" { value = module.servers.iam_role_id } output "security_group_id_servers" { value = module.servers.security_group_id } output "num_clients" { value = module.clients.cluster_size } output "asg_name_clients" { value = module.clients.asg_name } output "launch_config_name_clients" { value = module.clients.launch_config_name } output "iam_role_arn_clients" { value = module.clients.iam_role_arn } output "iam_role_id_clients" { value = module.clients.iam_role_id } output "security_group_id_clients" { value = module.clients.security_group_id } output "aws_region" { value = data.aws_region.current.name } output "nomad_servers_cluster_tag_key" { value = module.servers.cluster_tag_key } output "nomad_servers_cluster_tag_value" { value = module.servers.cluster_tag_value } ================================================ FILE: test/README.md ================================================ # Tests This folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 
## WARNING WARNING WARNING **Note #1**: Many of these tests create real resources in an AWS account and then try to clean those resources up at the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of the resources you create and take extra care to clean everything up when you're done! **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run! **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup tasks from running. Therefore, we set an overly long timeout to make sure all tests have enough time to finish and clean up. ## Running the tests ### Prerequisites - Install the latest version of [Go](https://golang.org/). - Install [Terraform](https://www.terraform.io/downloads.html). - Configure your AWS credentials using one of the [options supported by the AWS SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. 
### Run all the tests ```bash cd test go test -v -timeout 60m ``` ### Run a specific test To run a specific test called `TestFoo`: ```bash cd test go test -v -timeout 60m -run TestFoo ``` ================================================ FILE: test/aws_helpers.go ================================================ package test import ( "testing" "github.com/gruntwork-io/terratest/modules/aws" ) // Get the IP address from a randomly chosen EC2 Instance in an Auto Scaling Group of the given name in the given // region func getIpAddressOfAsgInstance(t *testing.T, asgName string, awsRegion string) string { instanceIds := aws.GetInstanceIdsForAsg(t, asgName, awsRegion) if len(instanceIds) == 0 { t.Fatalf("Could not find any instances in ASG %s in %s", asgName, awsRegion) } return aws.GetPublicIpOfEc2Instance(t, instanceIds[0], awsRegion) } func getRandomRegion(t *testing.T) string { return aws.GetRandomRegion(t, nil, []string{"eu-north-1", "ap-northeast-3"}) } ================================================ FILE: test/go.mod ================================================ module github.com/gruntwork-io/terraform-aws-nomad/test go 1.13 require github.com/gruntwork-io/terratest v0.37.6 ================================================ FILE: test/go.sum ================================================ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= cloud.google.com/go 
v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v46.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.5/go.mod h1:foo3aIXRQ90zFve3r0QiDsrjGDUwWhKl0ZOQy1CT14k= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod 
h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod 
h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0= github.com/apparentlymart/go-textseg/v12 
v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.1 h1:MXnqY6SlWySaZAqNnXThOvjRFdiiOuKtC6i7baFdNdU= github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.28 h1:2ZzgEupSluR18ClxUnHwXKyuADheZpMblXRAsHqF0tI= github.com/aws/aws-sdk-go v1.38.28/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= 
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20200109221225-a4f60165b7a3/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy 
v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU= 
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-containerregistry v0.0.0-20200110202235-f4fb41bf00a3/go.mod h1:2wIuQute9+hhWqvL3vEI7YB0EKluF4WcPzI1eAliazk= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus 
v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRaxEM6G0ro= github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78= github.com/gruntwork-io/terratest v0.37.6 h1:wrmqMImrrIvjGs6CBmmByqvwA6t0Wc3Zo2ohEIptPXM= github.com/gruntwork-io/terratest v0.37.6/go.mod h1:CSHpZNJdqYQ+TUrigM100jcahRUV5X6w7K2kZJ8iylY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY= github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/terraform-json v0.12.0 h1:8czPgEEWWPROStjkWPUnTQDXmpmZPlkQAwYYLETaTvw= github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o= github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 
h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson 
v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg= github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap 
v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/oracle/oci-go-sdk v7.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok= github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.0.0/go.mod 
h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vdemeester/k8s-pkg-credentialprovider v0.0.0-20200107171650-7c61ffa44238/go.mod h1:JwQJCMWpUDqjZrB5jpw0f5VbN7U95zxFy1ZDpoEarGo= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= github.com/zclconf/go-cty v1.2.1/go.mod 
h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 
h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= 
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d 
h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191205215504-7b8c8591a921/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20201110201400-7099162a900a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= 
k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU= k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc= k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg= k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= k8s.io/cloud-provider v0.17.0/go.mod h1:Ze4c3w2C0bRsjkBUoHpFi+qWe3ob1wI2/7cUn+YQIDE= k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod 
h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= ================================================ FILE: test/nomad_cluster_ssh_test.go ================================================ package test import "testing" func 
TestNomadClusterSSHAccess(t *testing.T) {
	t.Parallel()
	// Exercise SSH access against a cluster built from the Amazon Linux 2
	// AMI Packer build, logging in as the distro's default "ec2-user" account.
	runNomadClusterSSHTest(t, "amazon-linux-2-amd64-ami", "ec2-user")
}

================================================ FILE: test/nomad_consul_cluster_colocated_test.go ================================================

package test

import (
	"testing"
)

// Deploys the colocated Nomad/Consul cluster example using an AMI built from
// the Ubuntu 18.04 Packer build and verifies the cluster comes up.
func TestNomadConsulClusterColocatedWithUbuntu18Ami(t *testing.T) {
	t.Parallel()
	runNomadClusterColocatedTest(t, "ubuntu18-ami")
}

// Same as above, but with the Ubuntu 16.04 AMI build.
func TestNomadConsulClusterColocatedWithUbuntu16Ami(t *testing.T) {
	t.Parallel()
	runNomadClusterColocatedTest(t, "ubuntu16-ami")
}

// Same as above, but with the Amazon Linux 2 (amd64) AMI build.
func TestNomadConsulClusterColocatedAmazonLinux2Amd64Ami(t *testing.T) {
	t.Parallel()
	runNomadClusterColocatedTest(t, "amazon-linux-2-amd64-ami")
}

================================================ FILE: test/nomad_consul_cluster_separate_test.go ================================================

package test

import "testing"

// Deploys the separate Nomad and Consul clusters example using an AMI built
// from the Ubuntu 18.04 Packer build and verifies the Nomad cluster comes up.
func TestNomadConsulClusterSeparateWith18UbuntuAmi(t *testing.T) {
	t.Parallel()
	runNomadClusterSeparateTest(t, "ubuntu18-ami")
}

// Same as above, but with the Ubuntu 16.04 AMI build.
func TestNomadConsulClusterSeparateWithUbuntu16Ami(t *testing.T) {
	t.Parallel()
	runNomadClusterSeparateTest(t, "ubuntu16-ami")
}

// Same as above, but with the Amazon Linux 2 (amd64) AMI build.
func TestNomadConsulClusterSeparateAmazonLinux2Ami(t *testing.T) {
	t.Parallel()
	runNomadClusterSeparateTest(t, "amazon-linux-2-amd64-ami")
}

================================================ FILE: test/nomad_helpers.go ================================================

package test

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/gruntwork-io/terratest/modules/aws"
	"github.com/gruntwork-io/terratest/modules/logger"
	"github.com/gruntwork-io/terratest/modules/random"
	"github.com/gruntwork-io/terratest/modules/retry"
	"github.com/gruntwork-io/terratest/modules/ssh"
	"github.com/gruntwork-io/terratest/modules/terraform"
	"github.com/gruntwork-io/terratest/modules/test-structure"
)

// Path to the repository root, relative to this test package.
const REPO_ROOT = "../"

// Environment variable used to tell Terraform/AWS which region to deploy into.
const ENV_VAR_AWS_REGION = "AWS_DEFAULT_REGION"

// Terraform variable names shared by the example configurations.
const VAR_AMI_ID = "ami_id"
const VAR_SSH_CIDR = "allowed_ssh_cidr_blocks"

// Path, variable, and output names for the colocated Nomad/Consul cluster
// example (the Terraform configuration at the repository root).
const CLUSTER_COLOCATED_EXAMPLE_PATH = "/"
const CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_NAME = "cluster_name"
const CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_TAG_VALUE = "cluster_tag_value"
const CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_SERVERS = "num_servers"
const CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_CLIENTS = "num_clients"
const CLUSTER_COLOCATED_EXAMPLE_OUTPUT_SERVER_ASG_NAME = "asg_name_servers"

// Path, variable, and output names for the separate Nomad/Consul clusters example.
const CLUSTER_SEPARATE_EXAMPLE_PATH = "examples/nomad-consul-separate-cluster"
const CLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME = "nomad_cluster_name"
const CLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME = "consul_cluster_name"
const CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS = "num_nomad_servers"
const CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS = "num_consul_servers"
const CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS = "num_nomad_clients"
const CLUSTER_SEPARATE_EXAMPLE_VAR_SSH_KEY_NAME = "ssh_key_name"
const CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME = "asg_name_nomad_servers"

// Cluster sizes the tests deploy and then assert on in testNomadCluster.
const DEFAULT_NUM_SERVERS = 3
const DEFAULT_NUM_CLIENTS = 6

// Keys under which values are saved/loaded between terratest test stages.
const SAVED_AWS_REGION = "AwsRegion"
const SAVED_UNIQUE_ID = "UniqueId"

// Test the Nomad/Consul colocated cluster example by:
//
// 1. Copying the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the
//    state files overwriting each other.
// 2. Building the AMI in the nomad-consul-ami example with the given build name
// 3. Deploying that AMI using the example Terraform code
// 4.
// Checking that the Nomad cluster comes up within a reasonable time period and can respond to requests
func runNomadClusterColocatedTest(t *testing.T, packerBuildName string) {
	examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, CLUSTER_COLOCATED_EXAMPLE_PATH)

	// Teardown runs last: destroy the Terraform-managed infrastructure and
	// delete the AMI that was built for this test run.
	defer test_structure.RunTestStage(t, "teardown", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		terraform.Destroy(t, terraformOptions)

		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		aws.DeleteAmi(t, awsRegion, amiId)
	})

	// Build the AMI in a random region and persist region/unique-id/AMI-id so
	// later stages (which may run in separate invocations) can reload them.
	test_structure.RunTestStage(t, "setup_ami", func() {
		awsRegion := getRandomRegion(t)
		test_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)

		uniqueId := random.UniqueId()
		test_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)

		amiId := buildAmi(t, filepath.Join(examplesDir, "examples", "nomad-consul-ami", "nomad-consul.json"), packerBuildName, awsRegion, uniqueId)
		test_structure.SaveAmiId(t, examplesDir, amiId)
	})

	// Deploy the root example with the freshly built AMI.
	test_structure.RunTestStage(t, "deploy", func() {
		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		uniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)

		terraformOptions := &terraform.Options{
			TerraformDir: examplesDir,
			Vars: map[string]interface{}{
				CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_NAME:      fmt.Sprintf("test-%s", uniqueId),
				CLUSTER_COLOCATED_EXAMPLE_VAR_CLUSTER_TAG_VALUE: fmt.Sprintf("auto-join-%s", uniqueId),
				CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_SERVERS:       DEFAULT_NUM_SERVERS,
				CLUSTER_COLOCATED_EXAMPLE_VAR_NUM_CLIENTS:       DEFAULT_NUM_CLIENTS,
				VAR_AMI_ID:                                      amiId,
			},
			EnvVars: map[string]string{
				ENV_VAR_AWS_REGION: awsRegion,
			},
		}

		test_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)
		terraform.InitAndApply(t, terraformOptions)
	})

	// Validate the deployed cluster responds with the expected topology.
	test_structure.RunTestStage(t, "validate", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		checkNomadClusterIsWorking(t, CLUSTER_COLOCATED_EXAMPLE_OUTPUT_SERVER_ASG_NAME, terraformOptions, awsRegion)
	})
}

// Test the Nomad/Consul separate clusters example by:
//
// 1. Copying the code in this repo to a temp folder so tests on the Terraform code can run in parallel without the
//    state files overwriting each other.
// 2. Building the AMI in the nomad-consul-ami example with the given build name
// 3. Deploying that AMI using the example Terraform code
// 4. Checking that the Nomad cluster comes up within a reasonable time period and can respond to requests
func runNomadClusterSeparateTest(t *testing.T, packerBuildName string) {
	// NOTE(review): copies the whole repo root ("/") rather than
	// CLUSTER_SEPARATE_EXAMPLE_PATH, because the deploy stage below points
	// TerraformDir at the example subfolder inside the copy.
	examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, "/")

	// Teardown: destroy infrastructure, then delete the test-run AMI.
	defer test_structure.RunTestStage(t, "teardown", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		terraform.Destroy(t, terraformOptions)

		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		aws.DeleteAmi(t, awsRegion, amiId)
	})

	// Build the AMI in a random region; save region/unique-id/AMI-id for later stages.
	test_structure.RunTestStage(t, "setup_ami", func() {
		awsRegion := getRandomRegion(t)
		test_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)

		uniqueId := random.UniqueId()
		test_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)

		amiId := buildAmi(t, filepath.Join(examplesDir, "examples", "nomad-consul-ami", "nomad-consul.json"), packerBuildName, awsRegion, uniqueId)
		test_structure.SaveAmiId(t, examplesDir, amiId)
	})

	// Deploy the separate-cluster example with the freshly built AMI.
	test_structure.RunTestStage(t, "deploy", func() {
		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		uniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)

		terraformOptions := &terraform.Options{
			TerraformDir: filepath.Join(examplesDir, "examples", "nomad-consul-separate-cluster"),
			Vars: map[string]interface{}{
				CLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME:  fmt.Sprintf("test-%s", uniqueId),
				CLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("test-%s", uniqueId),
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS:   DEFAULT_NUM_SERVERS,
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS:  DEFAULT_NUM_SERVERS,
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS:   DEFAULT_NUM_CLIENTS,
				VAR_AMI_ID:                                       amiId,
			},
			EnvVars: map[string]string{
				ENV_VAR_AWS_REGION: awsRegion,
			},
		}

		test_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)
		terraform.InitAndApply(t, terraformOptions)
	})

	// Validate the deployed Nomad server cluster responds with the expected topology.
	test_structure.RunTestStage(t, "validate", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		checkNomadClusterIsWorking(t, CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME, terraformOptions, awsRegion)
	})
}

// Check that the Nomad cluster comes up within a reasonable time period and can respond to requests
func checkNomadClusterIsWorking(t *testing.T, asgNameOutputVar string, terraformOptions *terraform.Options, awsRegion string) {
	asgName := rawTerraformOutput(t, terraformOptions, asgNameOutputVar)
	nodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion)
	testNomadCluster(t, nodeIpAddress)
}

// Look up one instance of the ASG named by the given Terraform output and
// verify SSH access to it with the supplied key pair and username.
func checkNomadClusterSshAccess(t *testing.T, asgNameOutputVar string, terraformOptions *terraform.Options, awsRegion string, keyPair *ssh.KeyPair, sshUsername string) {
	asgName := rawTerraformOutput(t, terraformOptions, asgNameOutputVar)
	nodeIpAddress := getIpAddressOfAsgInstance(t, asgName, awsRegion)

	publicHost := ssh.Host{
		Hostname:    nodeIpAddress,
		SshKeyPair:  keyPair,
		SshUserName: sshUsername,
	}

	testSshAccess(t, publicHost, true)
}

// Attempt an SSH echo round-trip to publicHost and assert the outcome matches
// ssh_access: access expected (true) must succeed, access not expected (false)
// must fail. Retries up to 30 times, 5s apart.
func testSshAccess(t *testing.T, publicHost ssh.Host, ssh_access bool) {
	// Check basic SSH to the instance
	// SSH access might fail, if none is configured - this is expected.
	response, err := retry.DoWithRetryE(t, "SSH to public host", 30, 5*time.Second, func() (string, error) {
		// Echo a host-specific string and compare it to what comes back.
		expectedText := fmt.Sprintf("Hello, %s", publicHost.Hostname)
		command := fmt.Sprintf("echo -n '%s'", expectedText)

		actualText, err := ssh.CheckSshCommandE(t, publicHost, command)
		if err != nil {
			return "", err
		}
		if strings.TrimSpace(actualText) != expectedText {
			return "", fmt.Errorf("Expected SSH command to return '%s' but got '%s'", expectedText, actualText)
		}
		return "SSH access was successful", nil
	})

	// No SSH access results in an error.
	if err != nil && !ssh_access {
		logger.Logf(t, "Nomad cluster is properly deployed without SSH access: %s", response)
		return
	}
	if err == nil && !ssh_access {
		logger.Logf(t, "Nomad cluster is NOT properly deployed without SSH access: %s", response)
		t.Fatal("No SSH access configured, but nevertheless SSH access was successful.")
	}

	// SSH access should result in no error.
	if err == nil && ssh_access {
		logger.Logf(t, "Nomad cluster is properly deployed with SSH access: %s", response)
		return
	}
	if err != nil && ssh_access {
		logger.Logf(t, "Nomad cluster is NOT properly deployed with SSH access: %s", response)
		t.Fatal("SSH access configured, but SSH test was unsuccessful.")
	}

	t.Fatal("Something went wrong. This part should never be reached.")
}

// Use a Nomad client to connect to the given node and use it to verify that:
//
// 1. The Nomad cluster has deployed
// 2. The cluster has the expected number of server nodes
// 3.
//    The cluster has the expected number of client nodes
func testNomadCluster(t *testing.T, nodeIpAddress string) {
	// Poll for up to 90 * 10s = 15 minutes; cluster formation is slow.
	maxRetries := 90
	sleepBetweenRetries := 10 * time.Second

	response := retry.DoWithRetry(t, "Check Nomad cluster has expected number of servers and clients", maxRetries, sleepBetweenRetries, func() (string, error) {
		// v1/nodes lists client nodes registered with the cluster.
		clients, err := callNomadApi(t, nodeIpAddress, "v1/nodes")
		if err != nil {
			return "", err
		}
		if len(clients) != DEFAULT_NUM_CLIENTS {
			return "", fmt.Errorf("Expected the cluster to have %d clients, but found %d", DEFAULT_NUM_CLIENTS, len(clients))
		}

		// v1/status/peers lists the raft peers, i.e. the server nodes.
		servers, err := callNomadApi(t, nodeIpAddress, "v1/status/peers")
		if err != nil {
			return "", err
		}
		if len(servers) != DEFAULT_NUM_SERVERS {
			return "", fmt.Errorf("Expected the cluster to have %d servers, but found %d", DEFAULT_NUM_SERVERS, len(servers))
		}

		return fmt.Sprintf("Got back expected number of clients (%d) and servers (%d)", len(clients), len(servers)), nil
	})

	logger.Logf(t, "Nomad cluster is properly deployed: %s", response)
}

// A quick, hacky way to call the Nomad HTTP API: https://www.nomadproject.io/docs/http/index.html
// Issues an HTTP GET against port 4646 of the given node and unmarshals the
// JSON response body, which is expected to be a JSON array.
func callNomadApi(t *testing.T, nodeIpAddress string, path string) ([]interface{}, error) {
	url := fmt.Sprintf("http://%s:4646/%s", nodeIpAddress, path)
	logger.Logf(t, "Making an HTTP GET to URL %s", url)

	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	logger.Logf(t, "Response from Nomad for URL %s: %s", url, string(body))

	result := []interface{}{}
	if err := json.Unmarshal(body, &result); err != nil {
		return nil, err
	}
	return result, nil
}

// Deploy the separate-cluster example with a one-time EC2 key pair attached,
// then verify SSH access to a Nomad server node as the given username.
func runNomadClusterSSHTest(t *testing.T, packerBuildName string, ssh_username string) {
	examplesDir := test_structure.CopyTerraformFolderToTemp(t, REPO_ROOT, "/")

	// Teardown: destroy infrastructure, then delete the test-run AMI.
	defer test_structure.RunTestStage(t, "teardown", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		terraform.Destroy(t, terraformOptions)

		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		aws.DeleteAmi(t, awsRegion, amiId)
	})

	// Build the AMI in a random region; save region/unique-id/AMI-id for later stages.
	test_structure.RunTestStage(t, "setup_ami", func() {
		awsRegion := getRandomRegion(t)
		test_structure.SaveString(t, examplesDir, SAVED_AWS_REGION, awsRegion)

		uniqueId := random.UniqueId()
		test_structure.SaveString(t, examplesDir, SAVED_UNIQUE_ID, uniqueId)

		amiId := buildAmi(t, filepath.Join(examplesDir, "examples", "nomad-consul-ami", "nomad-consul.json"), packerBuildName, awsRegion, uniqueId)
		test_structure.SaveAmiId(t, examplesDir, amiId)
	})

	// Deploy the separate-cluster example with a generated SSH key pair.
	test_structure.RunTestStage(t, "deploy", func() {
		amiId := test_structure.LoadAmiId(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		uniqueId := test_structure.LoadString(t, examplesDir, SAVED_UNIQUE_ID)

		terraformOptions := &terraform.Options{
			TerraformDir: filepath.Join(examplesDir, "examples", "nomad-consul-separate-cluster"),
			Vars: map[string]interface{}{
				CLUSTER_SEPARATE_EXAMPLE_VAR_NOMAD_CLUSTER_NAME:  fmt.Sprintf("test-%s", uniqueId),
				CLUSTER_SEPARATE_EXAMPLE_VAR_CONSUL_CLUSTER_NAME: fmt.Sprintf("test-%s", uniqueId),
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_SERVERS:   DEFAULT_NUM_SERVERS,
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_CONSUL_SERVERS:  DEFAULT_NUM_SERVERS,
				CLUSTER_SEPARATE_EXAMPLE_VAR_NUM_NOMAD_CLIENTS:   DEFAULT_NUM_CLIENTS,
				VAR_AMI_ID:                                       amiId,
			},
			EnvVars: map[string]string{
				ENV_VAR_AWS_REGION: awsRegion,
			},
		}

		// Generate and import a one-time EC2 key pair so the validate stage
		// can SSH into the cluster; saved so teardown/validate can reload it.
		keyPairName := fmt.Sprintf("terratest-onetime-key-%s", uniqueId)
		keyPair := aws.CreateAndImportEC2KeyPair(t, awsRegion, keyPairName)
		terraformOptions.Vars[CLUSTER_SEPARATE_EXAMPLE_VAR_SSH_KEY_NAME] = keyPairName
		test_structure.SaveEc2KeyPair(t, examplesDir, keyPair)

		test_structure.SaveTerraformOptions(t, examplesDir, terraformOptions)
		terraform.InitAndApply(t, terraformOptions)
	})

	// Validate SSH access to a Nomad server instance with the generated key.
	test_structure.RunTestStage(t, "validate", func() {
		terraformOptions := test_structure.LoadTerraformOptions(t, examplesDir)
		awsRegion := test_structure.LoadString(t, examplesDir, SAVED_AWS_REGION)
		keyPair := test_structure.LoadEc2KeyPair(t, examplesDir)
		checkNomadClusterSshAccess(t, CLUSTER_SEPARATE_EXAMPLE_OUTPUT_NOMAD_SERVER_ASG_NAME, terraformOptions, awsRegion, keyPair.KeyPair, ssh_username)
	})
}

================================================ FILE: test/terratest_helpers.go ================================================

package test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/gruntwork-io/terratest/modules/packer"
	"github.com/gruntwork-io/terratest/modules/terraform"
)

// Packer template variable names used when building the AMI.
const CONSUL_AMI_TEMPLATE_VAR_REGION = "aws_region"
const CONSUL_AMI_TEMPLATE_VAR_AMI_PREFIX = "ami_name_prefix"

// Use Packer to build the AMI in the given packer template, with the given build name, and return the AMI's ID
func buildAmi(t *testing.T, packerTemplatePath string, packerBuildName string, awsRegion string, uniqueId string) string {
	options := &packer.Options{
		Template: packerTemplatePath,
		// Restrict the build to the single builder matching packerBuildName.
		Only: packerBuildName,
		Vars: map[string]string{
			CONSUL_AMI_TEMPLATE_VAR_REGION:     awsRegion,
			CONSUL_AMI_TEMPLATE_VAR_AMI_PREFIX: fmt.Sprintf("nomad-consul-%s", uniqueId),
		},
	}

	return packer.BuildAmi(t, options)
}

// Recent terraform version changed the behavior on terraform output.
// Values now contain quotations marks, if terraform output is called with `-raw` option.
// - https://github.com/gruntwork-io/terratest/issues/766
func rawTerraformOutput(t *testing.T, terraformOptions *terraform.Options, outputVariableName string) string {
	// Strip any surrounding double quotes from the output value before use.
	return strings.Trim(terraform.Output(t, terraformOptions, outputVariableName), "\"")
}

================================================ FILE: variables.tf ================================================

# ---------------------------------------------------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# Define these secrets as environment variables
# ---------------------------------------------------------------------------------------------------------------------

# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# AWS_DEFAULT_REGION

# ---------------------------------------------------------------------------------------------------------------------
# REQUIRED PARAMETERS
# You must provide a value for each of these parameters.
# ---------------------------------------------------------------------------------------------------------------------

# None

# ---------------------------------------------------------------------------------------------------------------------
# OPTIONAL PARAMETERS
# These parameters have reasonable defaults.
# ---------------------------------------------------------------------------------------------------------------------

variable "ami_id" {
  description = "The ID of the AMI to run in the cluster. This should be an AMI built from the Packer template under examples/nomad-consul-ami/nomad-consul.json. If no AMI is specified, the template will 'just work' by using the example public AMIs. WARNING! Do not use the example AMIs in a production setting!"
type        = string
  default     = null
}

variable "cluster_name" {
  description = "What to name the cluster and all of its associated resources"
  type        = string
  default     = "nomad-example"
}

variable "server_instance_type" {
  description = "What kind of instance type to use for the nomad servers"
  type        = string
  default     = "t2.micro"
}

variable "instance_type" {
  description = "What kind of instance type to use for the nomad clients"
  type        = string
  default     = "t2.micro"
}

variable "num_servers" {
  description = "The number of server nodes to deploy. We strongly recommend using 3 or 5."
  type        = number
  default     = 3
}

variable "num_clients" {
  description = "The number of client nodes to deploy. You can deploy as many as you need to run your jobs."
  type        = number
  default     = 6
}

variable "cluster_tag_key" {
  description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster."
  type        = string
  default     = "nomad-servers"
}

variable "cluster_tag_value" {
  description = "Add a tag with key var.cluster_tag_key and this value to each Instance in the ASG. This can be used to automatically find other Consul nodes and form a cluster."
  type        = string
  default     = "auto-join"
}

variable "ssh_key_name" {
  description = "The name of an EC2 Key Pair that can be used to SSH to the EC2 Instances in this cluster. Set to an empty string to not associate a Key Pair."
  type        = string
  default     = ""
}

variable "vpc_id" {
  description = "The ID of the VPC in which the nodes will be deployed. Uses default VPC if not supplied."
  type        = string
  default     = ""
}