[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2\n\nworkflows:\n  version: 2\n  test-all:\n    jobs:\n      - lint\n      - unit-test-37:\n          requires:\n            - lint\n      - functional-test-37:\n          requires:\n            - unit-test-37\n      - unit-test-38:\n          requires:\n            - lint\n      - functional-test-38:\n          requires:\n            - unit-test-38\n            - functional-test-37\n      - unit-test-39:\n          requires:\n            - lint\n      - functional-test-39:\n          requires:\n            - unit-test-39\n            - functional-test-38\n      - unit-test-310:\n          requires:\n            - lint\n      - functional-test-310:\n          requires:\n            - unit-test-310\n            - functional-test-39\n      - cleanup-functional-buckets:\n          requires:\n            - functional-test-37\n            - functional-test-38\n            - functional-test-39\n            - functional-test-310\n\njobs:\n  lint:\n    docker:\n      - image: circleci/python:3.7\n    steps:\n      - checkout\n      - run: sudo pip install flake8 codecov pep8-naming\n      - run: sudo python setup.py install\n      - run: flake8 --version\n      - run: sudo make lint\n\n  unit-test-37:\n    docker:\n      - image: circleci/python:3.7\n    steps: &unit_test_steps\n      - checkout\n      - run: sudo python setup.py install\n      - run: sudo make test-unit\n\n  unit-test-38:\n    docker:\n      - image: circleci/python:3.8\n    steps: *unit_test_steps\n\n  unit-test-39:\n    docker:\n      - image: circleci/python:3.9\n    steps: *unit_test_steps\n\n  unit-test-310:\n    docker:\n      - image: circleci/python:3.10\n    steps: *unit_test_steps\n\n  functional-test-37:\n    docker:\n      - image: circleci/python:3.7\n    steps: &functional_test_steps\n      - checkout\n      - run:\n          command: |\n            git clone https://github.com/bats-core/bats-core.git\n            cd bats-core\n            git checkout v1.0.2\n       
     sudo ./install.sh /usr/local\n            bats --version\n      - run: sudo python setup.py install\n      - run:\n          command: |\n            export TERM=xterm\n            export AWS_DEFAULT_REGION=us-east-1\n            export STACKER_NAMESPACE=cloudtools-functional-tests-$CIRCLE_BUILD_NUM\n            export STACKER_ROLE=arn:aws:iam::459170252436:role/cloudtools-functional-tests-sta-FunctionalTestRole-1M9HFJ9VQVMFX\n            sudo -E make test-functional\n\n  functional-test-38:\n    docker:\n      - image: circleci/python:3.8\n    steps: *functional_test_steps\n\n  functional-test-39:\n    docker:\n      - image: circleci/python:3.9\n    steps: *functional_test_steps\n\n  functional-test-310:\n    docker:\n      - image: circleci/python:3.10\n    steps: *functional_test_steps\n\n  cleanup-functional-buckets:\n    docker:\n      - image: circleci/python:3.7\n    steps:\n      - checkout\n      - run:\n          command: |\n            tests/cleanup_functional_test_buckets.sh\n"
  },
  {
    "path": ".dockerignore",
    "content": "Dockerfile\n"
  },
  {
    "path": ".gitignore",
    "content": "# Compiled source #\n###################\n*.com\n*.class\n*.dll\n*.exe\n*.o\n*.so\n\n# Packages #\n############\n# it's better to unpack these files and commit the raw source\n# git has its own built in compression methods\n*.7z\n*.dmg\n*.gz\n*.iso\n*.jar\n*.rar\n*.tar\n*.zip\n\n# Logs and databases #\n######################\n*.log\n*.sql\n*.sqlite\n\n# OS generated files #\n######################\n.DS_Store*\nehthumbs.db\nIcon?\nThumbs.db\n\n# Vagrant\n.vagrant\nVagrantfile\n\n# Editor crap\n*.sw*\n*~\n.idea\n*.iml\n\n# Byte-compiled python\n*.pyc\n\n# Package directory\nbuild/\n\n# Build object file directory\nobjdir/\ndist/\n*.egg-info\n.eggs/\n*.egg\n\n# Coverage artifacts\n.coverage\nhtmlcov\n\n# Ignore development conf/env files\ndev.yaml\ndev.env\ntests/fixtures/blueprints/*-result\nFakeKey.pem\nvm_setup.sh\n"
  },
  {
    "path": "AUTHORS.rst",
    "content": "Authors\n=======\n\nStacker was designed and developed by the OpsEng team at `Remind, Inc.`_\n\nCurrent Maintainers\n-------------------\n\n- `Michael Barrett`_\n- `Eric Holmes`_\n- `Ignacio Nin`_\n- `Russell Ballestrini`_\n\nAlumni\n------\n\n- `Michael Hahn`_\n- `Tom Taubkin`_\n\nThanks\n------\n\nStacker wouldn't be where it is today without the open source community that\nhas formed around it. Thank you to everyone who has contributed, and special\nthanks to the following folks who have contributed great features and bug\nrequests, as well as given guidance in stacker's development:\n\n- `Adam McElwee`_\n- `Daniel Miranda`_\n- `Troy Ready`_\n- `Garison Draper`_\n- `Mariusz`_\n- `Tolga Tarhan`_\n\n.. _`Remind, Inc.`: https://www.remind.com/\n\n.. _`Michael Barrett`: https://github.com/phobologic\n.. _`Eric Holmes`: https://github.com/ejholmes\n.. _`Ignacio Nin`: https://github.com/Lowercases\n.. _`Russell Ballestrini`: https://github.com/russellballestrini\n\n.. _`Michael Hahn`: https://github.com/mhahn\n.. _`Tom Taubkin`: https://github.com/ttaub\n\n.. _`Adam McElwee`: https://github.com/acmcelwee\n.. _`Daniel Miranda`: https://github.com/danielkza\n.. _`Troy Ready`: https://github.com/troyready\n.. _`Garison Draper`: https://github.com/GarisonLotus\n.. _`Mariusz`: https://github.com/discobean\n.. _`Tolga Tarhan`: https://github.com/ttarhan\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "## Upcoming release\n\n## 1.7.2 (2020-11-09)\n- address breaking moto change to awslambda [GH-763]\n- Added Python version validation before update kms decrypt output [GH-765]\n\n## 1.7.1 (2020-08-17)\n- Fixing AMI lookup Key error on 'Name'\n- hooks: lambda: allow uploading pre-built payloads [GH-564]\n- Ensure that base64 lookup codec encodes the bytes object as a string [GH-742]\n- Use CloudFormation Change Sets for `stacker diff`\n- Locked stacks still have requirements [GH-746]\n- change diff to use CFN change sets instead of comparing template dicts [GH-744]\n- Add YAML environment file support [GH-740]\n- fix `stack.set_outputs` not being called by diff if stack did not change [GH-754]\n- Fix python 2.7/3.5 dependency issue\n- add cf notification arns [GH-756]\n\n## 1.7.0 (2019-04-07)\n\n- Additional ECS unit tests [GH-696]\n- Keypair unit tests [GH-700]\n- Jinja2 templates in plain cloudformation templates [GH-701]\n- Custom log output formats [GH-705]\n- Python 3.7 unit tests in CircleCI [GH-711]\n- Upload blueprint templates with bucket-owner-full-control ACL [GH-713]\n- Change test runner from nose to py.test [GH-714]\n- support for importing a local public key file with the keypair hook [GH-715]\n- support for storing private keys in SSM parameter store with the keypair hook [GH-715]\n\n## 1.6.0 (2019-01-21)\n\n- New lookup format/syntax, making it more generic [GH-665]\n- Allow lowercase y/Y when prompted [GH-674]\n- Local package sources [GH-677]\n- Add `in_progress` option to stack config [GH-678]\n- Use default ACL for uploaded lambda code [GH-682]\n- Display rollback reason after error [GH-687]\n- ssm parameter types [GH-692]\n\n## 1.5.0 (2018-10-14)\n\nThe big feature in this release is the introduction of \"targets\" which act as\nsort of \"virtual nodes\" in the graph. 
It provides a nice way to logically group\nstacks.\n\n- Add support for \"targets\" [GH-572]\n- Fix non-interactive changeset updates w/ stack policies [GH-657]\n- Fix interactive_update_stack calls with empty string parameters [GH-658]\n- Fix KMS unicode lookup in python 2 [GH-659]\n- Locked stacks have no dependencies [GH-661]\n- Set default profile earlier [GH-662]\n- Get rid of recursion for tail retries and extend retry/timeout [GH-663]\n\n## 1.4.1 (2018-08-28)\n\nThis is a minor bugfix release for 1.4.0, no major feature updates.\n\nAs of this release python 3.5+ support is no longer considered experimental, and should be stable.\n\nSpecial thanks to @troyready for this release, I think most of these PRs were his :)\n\n- allow raw cfn templates to be loaded from remote package\\_sources [GH-638]\n- Add missing config keys to s3 package source model [GH-642]\n- Account for UsePreviousValue parameters in diff [GH-644]\n- fix file lookup documented and actual return types [GH-646]\n- Creates a memoized provider builder for AWS [GH-648]\n- update git ref to explicitly return string (fix py3 bytes error) [GH-649]\n- Lock botocore/boto to versions that work with moto [GH-651]\n\n## 1.4.0 (2018-08-05)\n\n- YAML & JSON codecs for `file` lookup [GH-537]\n- Arbitrary `command` hook [GH-565]\n- Fix datetime is not JSON serializable error [GH-591]\n- Run dump and outline actions offline [GH-594]\n- Helper Makefile for functional tests [GH-597]\n- Python3 support!!! 
[GH-600]\n- YAML blueprint testing framework [GH-606]\n- new `add_output` helper on Blueprint [GH-611]\n- Include lookup contents when lookups fail [GH-614]\n- Fix issue with using previous value for parameters [GH-615]\n- Stricter config parsing - only allow unrecognized config variables at the top-level [GH-623]\n- Documentation for the `default` lookup [GH-636]\n- Allow configs without stacks [GH-640]\n\n## 1.3.0 (2018-05-03)\n\n- Support for provisioning stacks in multiple accounts and regions has been added [GH-553], [GH-551]\n- Added a `--profile` flag, which can be used to set the global default profile that stacker will use (similar to `AWS_PROFILE`) [GH-563]\n- `class_path`/`template_path` are no longer required when a stack is `locked` [GH-557]\n- Support for setting stack policies on stacks has been added [GH-570]\n\n## 1.2.0 (2018-03-01)\n\nThe biggest change in this release has to do with how we build the graph\nof dependencies between stacks. This is now a true DAG.  As well, to\nspeed up performance we now walk the graph in a threaded mode, allowing\ntrue parallelism and speeding up \"wide\" stack graphs considerably.\n\n- assertRenderedBlueprint always dumps current results [GH-528]\n- The `--stacks` flag now automatically builds dependencies of the given stack [GH-523]\n- an unnecessary DescribeStacks network call was removed [GH-529]\n- support stack json/yaml templates [GH-530]\n- `stacker {build,destroy}` now executes stacks in parallel. Parallelism can be controlled with a `-j` flag. 
[GH-531]\n- logging output has been simplified and no longer uses ANSI escape sequences to clear the screen [GH-532]\n- logging output is now colorized in `--interactive` mode if the terminal has a TTY [GH-532]\n- removed the upper bound on the boto3 dependency [GH-542]\n\n## 1.2.0rc2 (2018-02-27)\n\n- Fix parameter handling for diffs [GH-540]\n- Fix an issue where SIGTERM/SIGINT weren't handled immediately [GH-543]\n- Log a line when SIGINT/SIGTERM are handled [GH-543]\n- Log failed steps at the end of plan execution [GH-543]\n- Remove upper bound on boto3 dependency [GH-542]\n\n## 1.2.0rc1 (2018-02-15)\n\nThe biggest change in this release has to do with how we build the graph\nof dependencies between stacks. This is now a true DAG.  As well, to\nspeed up performance we now walk the graph in a threaded mode, allowing\ntrue parallelism and speeding up \"wide\" stack graphs considerably.\n\n- assertRenderedBlueprint always dumps current results [GH-528]\n- stacker now builds a DAG internally [GH-523]\n- The `--stacks` flag now automatically builds dependencies of the given stack [GH-523]\n- an unnecessary DescribeStacks network call was removed [GH-529]\n- support stack json/yaml templates [GH-530]\n- `stacker {build,destroy}` now executes stacks in parallel. Parallelism can be controlled with a `-j` flag. [GH-531]\n- logging output has been simplified and no longer uses ANSI escape sequences to clear the screen [GH-532]\n- logging output is now colorized in `--interactive` mode if the terminal has a TTY [GH-532]\n\n\n## 1.1.4 (2018-01-26)\n\n- Add `blueprint.to_json` for standalone rendering [GH-459]\n- Add global config for troposphere template indent [GH-505]\n- Add serverless transform/CREATE changeset types [GH-517]\n\n## 1.1.3 (2017-12-23)\n\nBugfix release- primarily to deal with a bug that's been around since the\nintroduction of interactive mode/changesets. The bug primarily deals with the\nfact that we weren't deleting Changesets that were not submitted. 
This didn't\naffect anyone for the longest time, but recently people have started to hit\nlimits on the # of changesets in an account. The current thinking is that the\nlimits weren't enforced before, and only recently has been enforced.\n\n- Add S3 remote package sources [GH-487]\n- Make blueprint dump always create intermediate directories [GH-499]\n- Allow duplicate keys for most config mappings except `stacks` [GH-507]\n- Remove un-submitted changesets [GH-513]\n\n## 1.1.2 (2017-11-01)\n\nThis is a minor update to help deal with some of the issues between `stacker`\nand `stacker_blueprints` both having dependencies on `troposphere`. It loosens\nthe dependencies, allowing stacker to work with any reasonably new version\nof troposphere (anything greater than `1.9.0`). `stacker_blueprints` will\nlikely require newer versions of troposphere, as new types are introduced to\nthe blueprints, but it's unlikely we'll change the `troposphere` version string\nfor stacker, since it relies on only the most basic parts of the `troposphere`\nAPI.\n\n## 1.1.1 (2017-10-11)\n\nThis release is mostly about updating the dependencies for stacker to newer\nversions, since that was missed in the last release.\n\n## 1.1.0 (2017-10-08)\n\n- `--max-zones` removed from CLI [GH-427]\n- Ami lookup: add region specification [GH-433]\n- DynamoDB Lookup [GH-434]\n- Environment file is optional now [GH-436]\n- New functional test suite [GH-439]\n- Structure config object using Schematics [GH-443]\n- S3 endpoint fallback [GH-445]\n- Stack specific tags [GH-450]\n- Allow disabling of stacker bucket (direct CF updates) [GH-451]\n- Uniform deprecation warnings [GH-452]\n- Remote configuration support [GH-458]\n- TroposphereType updates [GH-462]\n- Fix replacements-only issue [GH-464]\n- testutil enhancements to blueprint testing [GH-467]\n- Removal of Interactive Provider (now combined w/ default provider) [GH-469]\n- protected stacks [GH-472]\n- MUCH Better handling of stack rollbacks & 
recreations [GH-473]\n- follow\_symlinks argument for aws lambda hook [GH-474]\n- Enable service\_role for cloudformation operations [GH-476]\n- Allow setting stack description from config [GH-477]\n- Move S3 templates into sub-directories [GH-478]\n\n## 1.0.4 (2017-07-07)\n\n- Fix issue w/ tail being required (but not existing) on diff/info/etc [GH-429]\n\n## 1.0.3 (2017-07-06)\n\nThere was some reworking on how regions are handled, specifically around\ns3 and where the buckets for both stacker and the awslambda lookup are created.\nNow the stacker bucket will default to being created in the region where the\nstacks are being created (ie: from the `--region` argument). If you want to\nhave the bucket be in a different region you now can set the\n`stacker_bucket_region` top level config value.\n\nFor the awslambda hook, you also have the option of using `bucket_region` as\nan argument, provided you are using a custom `bucket` for the hook. If you\nare not using a custom bucket, then it will use the logic used above.\n\n- add ami lookup [GH-360]\n- Add support for Property objects in TroposphereType variables [GH-379]\n- Add debugging statements to sys.path appending [GH-385]\n- Catch undefined variable value [GH-388]\n- Exponential backoff waiting for AWS changeset to stabilize [GH-389]\n- Add parameter changes to diff output [GH-394]\n- Add CODE\_OF\_CONDUCT.md [GH-399]\n- Add a hint for forbidden bucket access [GH-401]\n- Fix issues w/ \"none\" as variable values [GH-405]\n- Remove extra '/' in blueprint tests [GH-409]\n- Fix dump provider interaction with lookups [GH-410]\n- Add ssmstore lookup docs [GH-411]\n- Fix issue w/ s3 buckets in different regions [GH-413, GH-417]\n- Disable loop logger when --tail is provided [GH-414]\n- Add envvar lookup [GH-418]\n\n## 1.0.2 (2017-05-10)\n\n- fix lambda hook determinism [GH-372]\n- give lambda hook ability to upload to a prefix [GH-376]\n- fix bad argument for approval in interactive provider [GH-381]\n\n## 1.0.1 
(2017-04-24)\n\n- rxref lookup [GH-328]\n- Cleaned up raise statement in blueprints [GH-348]\n- Fix missing default provider for build\\_parameters [GH-353]\n- Setup codecov [GH-354]\n- Added blueprint testing harness [GH-362]\n- context hook\\_data lookup [GH-366]\n\n## 1.0.0 (2017-03-04)\n\nThis is a major release with the main change being the removal of the old\nParameters logic in favor of Blueprint Variables and Lookups.\n\n- Add support for resolving variables when calling `dump`[GH-231]\n- Remove old Parameters code [GH-232]\n- Pass Context & Provider to hooks [GH-233]\n- Fix Issue w/ Dump [GH-241]\n- Support `allowed_values` within variable definitions [GH-245]\n- Fix filehandler lookups with pseudo parameters [GH-247]\n- keypair hook update to match route53 update [GH-248]\n- Add support for `TroposphereType` [GH-249]\n- Allow = in lookup contents [GH-251]\n- Add troposphere types [GH-257]\n- change capabilities to CAPABILITY\\_NAMED\\_IAM [GH-262]\n- Disable transformation of variables [GH-266]\n- Support destroying a subset of stacks [GH-278]\n- Update all hooks to use advanced results [GH-285]\n- Use sys\\_path for hooks and lookups [GH-286]\n- Remove last of botocore connections [GH-287]\n- Remove --var flag [GH-289]\n- Avoid dictionary sharing pollution [GH-293]\n- Change aws\\_lambda hook handler to use proper parameters [GH-297]\n- New `split` lookup handler [GH-302]\n- add parse\\_user\\_data [GH-306]\n- Add credential caching [GH-307]\n- Require explicit call to `output` lookup [GH-310]\n- Convert booleans to strings for CFNTypes [GH-311]\n- Add ssmstore as a lookup type [GH-314]\n- Added region to the ssm store test client [GH-316]\n- Add default lookup [GH-317]\n- Clean up errors from variables [GH-319]\n\n## 0.8.6 (2017-01-26)\n\n- Support destroying subset of stacks [GH-278]\n- Update all hooks to use advanced results [GH-285]\n- Use sys\\_path for hooks and lookups [GH-286]\n- Remove last of botocore conns [GH-287]\n- Avoid dictionary 
sharing pollution [GH-293]\n\n## 0.8.5 (2016-11-28)\n\n- Allow `=` in lookup input [GH-251]\n- Add hook for uploading AWS Lambda functions [GH-252]\n- Upgrade hard coded capabilities to include named IAM [GH-262]\n- Allow hooks to return results that can be looked up later [GH-270]\n\n## 0.8.4 (2016-11-01)\n\n- Fix an issue w/ boto3 version string not working with older setuptools\n\n## 0.8.3 (2016-10-31)\n\n- pass context to hooks as a kwarg [GH-234]\n- Fix file handler lookups w/ pseudo parameters [GH-239]\n- Allow use of later boto3 [GH-253]\n\n## 0.8.1 (2016-09-22)\n\nMinor update to remove dependencies on stacker\\_blueprints for tests, since it\nresulted in a circular dependency.  This is just a fix to get tests running again,\nand results in no change in functionality.\n\n## 0.8.0 (2016-09-22)\n\nThis is a big release which introduces the new concepts of Blueprint Variables\nand Lookups. A lot of folks contributed to this release - in both code, and just\ntesting of the new features.  
Thanks to:\n\n@kylev, @oliviervg1, @datadotworld, @acmcelwee, @troyready, @danielkza, and @ttarhan\n\nSpecial thanks to @mhahn who did the bulk of the heavy lifting in this release, and\nthe work towards 1.0!\n\n- Add docs on config, environments & translators [GH-157]\n- locked output changed to debug [GH-159]\n- Multi-output parameter doc [GH-160]\n- Remove spaces from multi-item parameters [GH-161]\n- Remove blueprints & configs in favor of stacker\\_blueprints [GH-163]\n- Clean up plan/status split [GH-165]\n- Allow s3 server side encryption [GH-167]\n- Support configurable namespace delimiter [GH-169]\n- Support tags as a new top-level keyword [GH-171]\n- Update to boto3 [GH-174]\n- Interactive AWS Provider [GH-178]\n- Add config option for appending to sys.path [GH-179]\n- More condensed output [GH-182]\n- File loading lookup [GH-185]\n- Handle stacks without parameters [GH-193]\n- Implement blueprint variables & lookups [GH-194]\n- Fix traceback on interactive provider when adding resources [GH-198]\n- kms lookup [GH-200]\n- Compatible release version dependencies [GH-201]\n- add xref lookup [GH-202]\n- Update docstrings for consistency [GH-204]\n- Add support for CFN Parameter types in Blueprint Variables [GH-206]\n- Deal w/ multiprocessing library sharing ssl connections [GH-208]\n- Fix issues with slashes inside variable lookups [GH-213]\n- Custom validators for blueprint variables [GH-218]\n\n## 0.6.3 (2016-05-24)\n- add `stacker dump` subcommand for testing stack/blueprints [GH-156]\n\n## 0.6.2 (2016-05-17)\n- Allow users to override name of bucket to store templates [GH-145]\n- Add support for passing environment variables on the cli via --env [GH-148]\n- Cleanup output on non-verbose runs [GH-153]\n- Added `compare_env` command, for easier comparing of environment files [GH-155]\n\n## 0.6.1 (2016-02-11)\n- Add support for the 'stacker diff' command [GH-133]\n- Python boolean parameters automatically converted to strings for CloudFormation [GH-136]\n- 
No longer require mappings in config [GH-140]\n- Skipped steps now include a reason [GH-141]\n\n## 0.6.0 (2016-01-07)\n\n- Support tailing cloudformation event stream when building/destroying stacks [GH-90]\n- More customizable ASG userdata & options [GH-100]\n- Deprecate 'blueprints' in favor of 'stacker\_blueprints' package [GH-125]\n- Add KMS based encryption translator [GH-126]\n- Fix typo in ASG customization [GH-127]\n- Allow file:// prefix with KMS encryption translator [GH-128]\n- No longer require a confirmation if the user passes the `--force` flag when destroying [GH-131]\n\n## 0.5.4 (2015-12-03)\n\n- Fix memory leak issue (GH-111) [GH-114]\n- Add enabled flag to stacks [GH-115]\n- Add support for List<AWS::EC2::*> parameters [GH-117]\n- Add eu-west-1 support for empire [GH-116]\n- Move get\_fqn to a function, add tests [GH-119]\n- Add new postgres versions (9.4.4, 9.4.5) [GH-121]\n- Handle blank parameter values [GH-120]\n\n## 0.5.3 (2015-11-03)\n\n- Add --version [GH-91]\n- Simplify environment file to key: value, rather than YAML [GH-94]\n- Ensure certificate exists hook [GH-94]\n- Ensure keypair exists hook [GH-99]\n- Custom field constructors & vault encryption [GH-95]\n- DBSnapshotIdentifier to RDS blueprints [GH-105]\n- Empire ECS Agent telemetry support fixes, use new Empire AMI [GH-107]\n- Remove stack tags [GH-110]\n\n## 0.5.2 (2015-09-10)\n\n- Add Dockerfile/image [GH-87]\n- Clean up environment docs [GH-88]\n- Make StorageType configurable in RDS v2 [GH-92]\n\n## 0.5.1 (2015-09-08)\n\n- Add info subcommand [GH-73]\n- Move namespace into environment [GH-72]\n- Simplified basecommand [GH-74]\n- Documentation updates [GH-75, GH-77, GH-78]\n- aws\_helper removal [GH-79]\n- Move VPC to use LOCAL\_PARAMETERS [GH-81]\n- Lower default AZ count to 2 [GH-82]\n- Allow use of all parameter properties [GH-83]\n- Parameter gathering in method [GH-84]\n- NoEcho on sensitive parameters in blueprints [GH-85]\n- Version 2 RDS Blueprints [GH-86]\n\n## 0.5.0 
(2015-08-13)\n\n- stacker subcommands [GH-35]\n- Added Empire production stacks [GH-43]\n  - Major change in internal code layout & added testing\n- added destroy subcommand [GH-59]\n- Local Blueprint Parameters [GH-61]\n- Lockable stacks [GH-62]\n- Deal with Cloudformation API throttling [GH-64]\n- Clarify Remind's usage of stacker in README [GH-70]\n\n## 0.4.1 (2015-07-23)\n\n- Stack Specific Parameters [GH-32]\n- Random fixes & cleanup [GH-34]\n- Handle skipped rollbacks [GH-36]\n- Internal zone detection [GH-39]\n- Internal hostname conditional [GH-40]\n- Empire production stacks [GH-43]\n\n## 0.4.0 (2015-05-13)\n\n- Optional internal DNS Zone on vpc blueprint [GH-29]\n- Add environment concept [GH-27]\n- Optional internal zone cname for rds databases [GH-30]\n\n## 0.3.0 (2015-05-05)\n\n- remove auto-subnet splitting in vpc stack (GH-25)\n- create bucket in correct region (GH-17, GH-23)\n- asg sets optionally sets up ELB w/ (optional) SSL\n- Remove DNS core requirement, add plugin/hook system (GH-26)\n\n## 0.2.2 (2015-03-31)\n\n- Allow AWS to generate the DBInstanceIdentifier\n\n## 0.2.1 (2015-03-31)\n- Bah, typo in version string, fixing\n\n## 0.2.0 (2015-03-31)\n\n- New taxonomy (GH-18)\n- better setup.py (GH-16) - thanks mhahn\n- Use existing parameters (GH-20)\n- Able to work on subset of stacks (GH-14)\n- Config cleanup (GH-9)\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment include:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or advances\n* Trolling, insulting/derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces when an individual is representing 
the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at cloudtools-maintainers@groups.google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]\n\n[homepage]: http://contributor-covenant.org\n[version]: http://contributor-covenant.org/version/1/4/\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nContributions are welcome, and they are greatly appreciated!\n\nYou can contribute in many ways:\n\n## Types of Contributions\n\n### Report Bugs\n\nReport bugs at https://github.com/cloudtools/stacker/issues.\n\nIf you are reporting a bug, please include:\n\n* Your operating system name and version.\n* Any details about your local setup that might be helpful in troubleshooting.\n* Detailed steps to reproduce the bug.\n\n### Fix Bugs\n\nLook through the GitHub issues for bugs. Anything tagged with \"bug\"\nis open to whoever wants to implement it.\n\n### Implement Features\n\nLook through the GitHub issues for features. Anything tagged with \"feature\"\nis open to whoever wants to implement it.\n\n### Write Documentation\n\nstacker could always use more documentation, whether as part of the\nofficial stacker docs, in docstrings, or even on the web in blog posts,\narticles, and such.\n\nNote: We use Google style docstrings (http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example\\_google.html)\n\n### Submit Feedback\n\nThe best way to send feedback is to file an issue at https://github.com/cloudtools/stacker/issues.\n\nIf you are proposing a feature:\n\n* Explain in detail how it would work.\n* Keep the scope as narrow as possible, to make it easier to implement.\n* Remember that this is a volunteer-driven project, and that contributions\n  are welcome :)\n\n\n## Get Started!\n\nReady to contribute? Here's how to set up `stacker` for local development.\n\n1. Fork the `stacker` repo on GitHub.\n2. Clone your fork locally:\n\n    ```console\n    $ git clone git@github.com:your_name_here/stacker.git\n    ```\n\n3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:\n\n    ```console\n    $ mkvirtualenv stacker\n    $ cd stacker/\n    $ python setup.py develop\n    ```\n\n4. 
Create a branch for local development:\n\n    ```console\n    $ git checkout -b name-of-your-bugfix-or-feature\n    ```\n\n   Now you can make your changes locally.\n\n5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:\n\n    ```console\n    $ make test\n    ```\n\n   To get flake8 just pip install it into your virtualenv.\n\n6. Commit your changes and push your branch to GitHub:\n\n    ```console\n    $ git add .\n    $ git commit -m \"Your detailed description of your changes.\"\n    $ git push origin name-of-your-bugfix-or-feature\n    ```\n\n7. Submit a pull request through the GitHub website.\n\nFor information about the functional testing suite, see [tests/README.md](./tests).\n\n## Pull Request Guidelines\n\nBefore you submit a pull request, check that it meets these guidelines:\n\n1. The pull request should include tests.\n2. If the pull request adds functionality, the docs should be updated. (See `Write Documentation` above for guidelines)\n3. The pull request should work for Python 3.7 through 3.10. Check\n   https://circleci.com/gh/cloudtools/stacker and make sure that the tests pass for all supported Python versions.\n4. Please update the `Upcoming/Master` section of the [CHANGELOG](./CHANGELOG.md) with a small bullet point about the change.\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM python:2.7.10\nMAINTAINER Mike Barrett\n\nCOPY scripts/docker-stacker /bin/docker-stacker\nRUN mkdir -p /stacks && pip install --upgrade pip setuptools\nWORKDIR /stacks\nCOPY . /tmp/stacker\nRUN pip install --upgrade pip\nRUN pip install --upgrade setuptools\nRUN cd /tmp/stacker && python setup.py install && rm -rf /tmp/stacker\n\nENTRYPOINT [\"docker-stacker\"]\nCMD [\"-h\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2015, Remind101, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "Makefile",
    "content": ".PHONY: build lint test-unit test-functional test\n\nbuild:\n\tdocker build -t remind101/stacker .\n\nlint:\n\tflake8 --ignore E402,W503,W504,W605,N818 --exclude stacker/tests/ stacker\n\tflake8 --ignore E402,N802,W605,N818 stacker/tests # ignore setUp naming\n\ntest-unit: clean\n\tpython setup.py test\n\ntest-unit3: clean\n\tpython3 setup.py test\n\nclean:\n\trm -rf .egg stacker.egg-info\n\ntest-functional:\n\tcd tests && bats test_suite\n\n# General testing target for most development.\ntest: lint test-unit test-unit3\n\napidocs:\n\tsphinx-apidoc --force -o docs/api stacker\n"
  },
  {
    "path": "README.rst",
    "content": "=======\nstacker\n=======\n\n.. image:: https://readthedocs.org/projects/stacker/badge/?version=latest\n   :target: http://stacker.readthedocs.org/en/latest/\n\n.. image:: https://circleci.com/gh/cloudtools/stacker.svg?style=shield\n   :target: https://circleci.com/gh/cloudtools/stacker\n\n.. image:: https://empire-slack.herokuapp.com/badge.svg\n   :target: https://empire-slack.herokuapp.com\n\n.. image:: https://badge.fury.io/py/stacker.svg\n   :target: https://badge.fury.io/py/stacker\n\n.. image:: https://landscape.io/github/cloudtools/stacker/master/landscape.svg?style=flat\n   :target: https://landscape.io/github/cloudtools/stacker/master\n   :alt: Code Health\n\n.. image:: https://codecov.io/gh/cloudtools/stacker/branch/master/graph/badge.svg\n   :target: https://codecov.io/gh/cloudtools/stacker\n   :alt: codecov\n\n\nFor full documentation, please see the readthedocs_ site.\n\n`Click here to join the Slack team`_ for stacker, and then join the #stacker\nchannel!\n\nAbout\n=====\n\nstacker is a tool and library used to create & update multiple CloudFormation\nstacks. 
It was originally written at Remind_ and\nreleased to the open source community.\n\nstacker Blueprints are written in troposphere_, though the purpose of\nmost templates is to keep them as generic as possible and then use\nconfiguration to modify them.\n\nAt Remind we use stacker to manage all of our Cloudformation stacks -\nin development, staging, and production - without any major issues.\n\nRequirements\n============\n\n* Python 3.7+\n\nStacker Command\n===============\n\nThe ``stacker`` command has sub-commands, similar to git.\n\nHere are some examples:\n\n  ``build``:\n    handles taking your stack config and then launching or updating stacks as necessary.\n\n  ``destroy``:\n    tears down your stacks\n\n  ``diff``:\n    compares your currently deployed stack templates to your config files\n\n  ``info``:\n    prints information about your currently deployed stacks\n\nWe document these sub-commands in full along with others, in the documentation.\n\n\nGetting Started\n===============\n\n``stacker_cookiecutter``: https://github.com/cloudtools/stacker_cookiecutter\n\n  We recommend creating your base `stacker` project using ``stacker_cookiecutter``.\n  This tool will install all the needed dependencies and create the project\n  directory structure and files. The resulting files are well documented\n  with comments to explain their purpose and examples on how to extend.\n\n``stacker_blueprints``: https://github.com/cloudtools/stacker_blueprints\n\n  This repository holds working examples of ``stacker`` blueprints.\n  Each blueprint works in isolation and may be referenced, extended, or\n  copied into your project files. 
The blueprints are written in Python\n  and use the troposphere_ library.\n\n``stacker reference documentation``:\n\n  We document all functionality and features of stacker in our extensive\n  reference documentation located at readthedocs_.\n\n``AWS OSS Blog``: https://aws.amazon.com/blogs/opensource/using-aws-codepipeline-and-open-source-tools-for-at-scale-infrastructure-deployment/\n\n  The AWS OSS Blog has a getting started guide using stacker with AWS CodePipeline.\n\n\nDocker\n======\n\nStacker can also be executed from Docker. Use this method to run stacker if you\nwant to avoid setting up a python environment::\n\n  docker run -it -v `pwd`:/stacks remind101/stacker build ...\n\n.. _Remind: http://www.remind.com/\n.. _troposphere: https://github.com/cloudtools/troposphere\n.. _string.Template: https://docs.python.org/3/library/string.html#template-strings\n.. _readthedocs: http://stacker.readthedocs.io/en/latest/\n.. _`Click here to join the Slack team`: https://empire-slack.herokuapp.com\n"
  },
  {
    "path": "RELEASE.md",
    "content": "# Steps to release a new version\n\n## Preparing for the release\n\n- Check out a branch named for the version: `git checkout -b release-1.1.1`\n- Change version in setup.py and stacker/\\_\\_init\\_\\_.py\n- Update CHANGELOG.md with changes made since last release (see below for helpful\n  command)\n- add changed files: `git add setup.py stacker/__init__.py CHANGELOG.md`\n- Commit changes: `git commit -m \"Release 1.1.1\"`\n- Create a signed tag: `git tag --sign -m \"Release 1.1.1\" 1.1.1`\n- Push branch up to git: `git push -u origin release-1.1.1`\n- Open a PR for the release, ensure that tests pass\n\n## Releasing\n\n- Push tag: `git push --tags`\n- Merge PR into master, checkout master locally: `git checkout master; git pull`\n- Create PyPI release: `python setup.py sdist upload --sign`\n- Update github release page: https://github.com/cloudtools/stacker/releases \n  - use the contents of the latest CHANGELOG entry for the body.\n\n# Helper to create CHANGELOG entries\ngit log --reverse --pretty=format:\"%s\" | tail -100 | sed 's/^/- /'\n"
  },
  {
    "path": "codecov.yml",
    "content": "comment: false\n"
  },
  {
    "path": "conf/README.rst",
    "content": "Please check out the stacker_blueprints_ repo for example configs and \nblueprints.\n\n.. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints\n"
  },
  {
    "path": "docs/.gitignore",
    "content": "_build\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = python -m sphinx\nPAPER         =\nBUILDDIR      = _build\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext\n\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  serve      to run a webserver in the html dir (0.0.0.0:8000)\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  applehelp  to make an Apple Help Book\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text       to make text files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all 
changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage   to run coverage check of the documentation (if enabled)\"\n\nclean:\n\trm -rf $(BUILDDIR)/*\n\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\n\nserve:\n\tcd $(BUILDDIR)/html/ && python -m SimpleHTTPServer\n\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/stacker.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/stacker.qhc\"\n\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) 
$(BUILDDIR)/applehelp\n\t@echo\n\t@echo \"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t      \"~/Library/Documentation/Help or install it in your application\" \\\n\t      \"bundle.\"\n\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/stacker\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/stacker\"\n\t@echo \"# devhelp\"\n\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in $(BUILDDIR)/epub.\"\n\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. 
The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. The message catalogs are in $(BUILDDIR)/locale.\"\n\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n"
  },
  {
    "path": "docs/api/modules.rst",
    "content": "stacker\n=======\n\n.. toctree::\n   :maxdepth: 4\n\n   stacker\n"
  },
  {
    "path": "docs/api/stacker.actions.rst",
    "content": "stacker\\.actions package\n========================\n\nSubmodules\n----------\n\nstacker\\.actions\\.base module\n-----------------------------\n\n.. automodule:: stacker.actions.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.actions\\.build module\n------------------------------\n\n.. automodule:: stacker.actions.build\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.actions\\.destroy module\n--------------------------------\n\n.. automodule:: stacker.actions.destroy\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.actions\\.diff module\n-----------------------------\n\n.. automodule:: stacker.actions.diff\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.actions\\.info module\n-----------------------------\n\n.. automodule:: stacker.actions.info\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.actions\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.blueprints.rst",
    "content": "stacker\\.blueprints package\n===========================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.blueprints.variables\n\nSubmodules\n----------\n\nstacker\\.blueprints\\.base module\n--------------------------------\n\n.. automodule:: stacker.blueprints.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.blueprints\\.testutil module\n------------------------------------\n\n.. automodule:: stacker.blueprints.testutil\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.blueprints\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.blueprints.variables.rst",
    "content": "stacker\\.blueprints\\.variables package\n======================================\n\nSubmodules\n----------\n\nstacker\\.blueprints\\.variables\\.types module\n--------------------------------------------\n\n.. automodule:: stacker.blueprints.variables.types\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.blueprints.variables\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.commands.rst",
    "content": "stacker\\.commands package\n=========================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.commands.stacker\n\nModule contents\n---------------\n\n.. automodule:: stacker.commands\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.commands.stacker.rst",
    "content": "stacker\\.commands\\.stacker package\n==================================\n\nSubmodules\n----------\n\nstacker\\.commands\\.stacker\\.base module\n---------------------------------------\n\n.. automodule:: stacker.commands.stacker.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.commands\\.stacker\\.build module\n----------------------------------------\n\n.. automodule:: stacker.commands.stacker.build\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.commands\\.stacker\\.destroy module\n------------------------------------------\n\n.. automodule:: stacker.commands.stacker.destroy\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.commands\\.stacker\\.diff module\n---------------------------------------\n\n.. automodule:: stacker.commands.stacker.diff\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.commands\\.stacker\\.info module\n---------------------------------------\n\n.. automodule:: stacker.commands.stacker.info\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.commands.stacker\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.config.rst",
    "content": "stacker\\.config package\n=======================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.config.translators\n\nModule contents\n---------------\n\n.. automodule:: stacker.config\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.config.translators.rst",
    "content": "stacker\\.config\\.translators package\n====================================\n\nSubmodules\n----------\n\nstacker\\.config\\.translators\\.kms module\n----------------------------------------\n\n.. automodule:: stacker.config.translators.kms\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.config.translators\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.hooks.rst",
    "content": "stacker\\.hooks package\n======================\n\nSubmodules\n----------\n\nstacker\\.hooks\\.aws\\_lambda module\n----------------------------------\n\n.. automodule:: stacker.hooks.aws_lambda\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.hooks\\.ecs module\n--------------------------\n\n.. automodule:: stacker.hooks.ecs\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.hooks\\.iam module\n--------------------------\n\n.. automodule:: stacker.hooks.iam\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.hooks\\.keypair module\n------------------------------\n\n.. automodule:: stacker.hooks.keypair\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.hooks\\.route53 module\n------------------------------\n\n.. automodule:: stacker.hooks.route53\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.hooks\\.utils module\n----------------------------\n\n.. automodule:: stacker.hooks.utils\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.hooks\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.logger.rst",
    "content": "stacker\\.logger package\n=======================\n\nSubmodules\n----------\n\nstacker\\.logger\\.formatter module\n---------------------------------\n\n.. automodule:: stacker.logger.formatter\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.logger\\.handler module\n-------------------------------\n\n.. automodule:: stacker.logger.handler\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.logger\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.lookups.handlers.rst",
    "content": "stacker\\.lookups\\.handlers package\n==================================\n\nSubmodules\n----------\n\nstacker\\.lookups\\.handlers\\.ami module\n--------------------------------------\n\n.. automodule:: stacker.lookups.handlers.ami\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.default module\n------------------------------------------\n\n.. automodule:: stacker.lookups.handlers.default\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.dynamodb module\n-------------------------------------------\n\n.. automodule:: stacker.lookups.handlers.dynamodb\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.envvar module\n-----------------------------------------\n\n.. automodule:: stacker.lookups.handlers.envvar\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.file module\n---------------------------------------\n\n.. automodule:: stacker.lookups.handlers.file\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.hook\\_data module\n---------------------------------------------\n\n.. automodule:: stacker.lookups.handlers.hook_data\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.kms module\n--------------------------------------\n\n.. automodule:: stacker.lookups.handlers.kms\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.output module\n-----------------------------------------\n\n.. automodule:: stacker.lookups.handlers.output\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.rxref module\n----------------------------------------\n\n.. 
automodule:: stacker.lookups.handlers.rxref\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.split module\n----------------------------------------\n\n.. automodule:: stacker.lookups.handlers.split\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.ssmstore module\n-------------------------------------------\n\n.. automodule:: stacker.lookups.handlers.ssmstore\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.lookups\\.handlers\\.xref module\n---------------------------------------\n\n.. automodule:: stacker.lookups.handlers.xref\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.lookups.handlers\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.lookups.rst",
    "content": "stacker\\.lookups package\n========================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.lookups.handlers\n\nSubmodules\n----------\n\nstacker\\.lookups\\.registry module\n---------------------------------\n\n.. automodule:: stacker.lookups.registry\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.lookups\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.providers.aws.rst",
    "content": "stacker\\.providers\\.aws package\n===============================\n\nSubmodules\n----------\n\nstacker\\.providers\\.aws\\.default module\n---------------------------------------\n\n.. automodule:: stacker.providers.aws.default\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.providers.aws\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.providers.rst",
    "content": "stacker\\.providers package\n==========================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.providers.aws\n\nSubmodules\n----------\n\nstacker\\.providers\\.base module\n-------------------------------\n\n.. automodule:: stacker.providers.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker.providers\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/api/stacker.rst",
    "content": "stacker package\n===============\n\nSubpackages\n-----------\n\n.. toctree::\n\n    stacker.actions\n    stacker.blueprints\n    stacker.commands\n    stacker.config\n    stacker.hooks\n    stacker.logger\n    stacker.lookups\n    stacker.providers\n    stacker.tests\n\nSubmodules\n----------\n\nstacker\\.context module\n-----------------------\n\n.. automodule:: stacker.context\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.environment module\n---------------------------\n\n.. automodule:: stacker.environment\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.exceptions module\n--------------------------\n\n.. automodule:: stacker.exceptions\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.plan module\n--------------------\n\n.. automodule:: stacker.plan\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.session\\_cache module\n------------------------------\n\n.. automodule:: stacker.session_cache\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.stack module\n---------------------\n\n.. automodule:: stacker.stack\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.status module\n----------------------\n\n.. automodule:: stacker.status\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.tokenize\\_userdata module\n----------------------------------\n\n.. automodule:: stacker.tokenize_userdata\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.util module\n--------------------\n\n.. automodule:: stacker.util\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nstacker\\.variables module\n-------------------------\n\n.. automodule:: stacker.variables\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: stacker\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/blueprints.rst",
    "content": "==========\nBlueprints\n==========\n\nBlueprints are python classes that dynamically build CloudFormation templates. Where\nyou would specify a raw Cloudformation template in a stack using the ``template_path`` key,\nyou instead specify a blueprint python file using the ``class_path`` key.\n\nTraditionally blueprints are built using troposphere_, but that is not absolutely\nnecessary. You are encouraged to check out the library of publicly shared\nBlueprints in the stacker_blueprints_ package.\n\nMaking your own should be easy, and you can take a lot of examples from\nstacker_blueprints_. In the end, all that is required is that the Blueprint\nis a subclass of *stacker.blueprints.base* and it have the following methods:\n\n.. code-block:: python\n\n    # Initializes the blueprint\n    def __init__(self, name, context, mappings=None):\n\n    # Updates self.template to create the actual template\n    def create_template(self):\n\n    # Returns a tuple: (version, rendered_template)\n    def render_template(self):\n\nVariables\n=========\n\nA Blueprint can define a ``VARIABLES`` property that defines the variables\nit accepts from the `Config Variables <config.html#variables>`_.\n\n``VARIABLES`` should be a dictionary of ``<variable name>: <variable\ndefinition>``. The variable definition should be a dictionary which\nsupports the following optional keys:\n\n**type:**\n  The type for the variable value. This can either be a native python\n  type or one of the `Variable Types`_.\n\n**default:**\n  The default value that should be used for the variable if none is\n  provided in the config.\n\n**description:**\n  A string that describes the purpose of the variable.\n\n**validator:**\n  An optional function that can do custom validation of the variable. A\n  validator function should take a single argument, the value being validated,\n  and should return the value if validation is successful. 
If there is an\n  issue validating the value, an exception (``ValueError``, ``TypeError``, etc)\n  should be raised by the function.\n\n**no_echo:**\n  Only valid for variables whose type subclasses ``CFNType``. Whether to\n  mask the parameter value whenever anyone makes a call that describes the\n  stack. If you set the value to true, the parameter value is masked with\n  asterisks (*****).\n\n**allowed_values:**\n  Only valid for variables whose type subclasses ``CFNType``. The set of\n  values that should be allowed for the CloudFormation Parameter.\n\n**allowed_pattern:**\n  Only valid for variables whose type subclasses ``CFNType``. A regular\n  expression that represents the patterns you want to allow for the\n  CloudFormation Parameter.\n\n**max_length:**\n  Only valid for variables whose type subclasses ``CFNType``. The maximum\n  length of the value for the CloudFormation Parameter.\n\n**min_length:**\n  Only valid for variables whose type subclasses ``CFNType``. The minimum\n  length of the value for the CloudFormation Parameter.\n\n**max_value:**\n  Only valid for variables whose type subclasses ``CFNType``. The max\n  value for the CloudFormation Parameter.\n\n**min_value:**\n  Only valid for variables whose type subclasses ``CFNType``. The min\n  value for the CloudFormation Parameter.\n\n**constraint_description:**\n  Only valid for variables whose type subclasses ``CFNType``. A string\n  that explains the constraint when the constraint is violated for the\n  CloudFormation Parameter.\n\n\nVariable Types\n==============\n\nAny native python type can be specified as the ``type`` for a variable.\nYou can also use the following custom types:\n\nTroposphereType\n---------------\n\nThe ``TroposphereType`` can be used to generate resources for use in the\nblueprint directly from user-specified configuration. 
Which case applies depends\non what ``type`` was chosen, and how it would be normally used in the blueprint\n(and CloudFormation in general).\n\nResource Types\n^^^^^^^^^^^^^^\n\nWhen ``type`` is a `Resource Type`_, the value specified by the user in the\nconfiguration file must be a dictionary, but with two possible structures.\n\nWhen ``many`` is disabled, the top-level dictionary keys correspond to\nparameters of the ``type`` constructor. The key-value pairs will be used\ndirectly, and one object will be created and stored in the variable.\n\nWhen ``many`` is enabled, the top-level dictionary *keys* are resource titles,\nand the corresponding *values* are themselves dictionaries, to be used as\nparameters for creating each of multiple ``type`` objects. A list of those\nobjects will be stored in the variable.\n\nProperty Types\n^^^^^^^^^^^^^^\n\nWhen ``type`` is a `Property Type`_ the value specified by the user in the\nconfiguration file must be a dictionary or a list of dictionaries.\n\nWhen ``many`` is disabled, the top-level dictionary keys correspond to\nparameters of the ``type`` constructor. The key-value pairs will be used\ndirectly, and one object will be created and stored in the variable.\n\nWhen ``many`` is enabled, a list of dictionaries is expected. For each element,\none corresponding call will be made to the ``type`` constructor, and all the\nobjects produced will be stored (also as a list) in the variable.\n\nOptional variables\n^^^^^^^^^^^^^^^^^^\n\nIn either case, when ``optional`` is enabled, the variable may have no value\nassigned, or be explicitly assigned a null value. When that happens the\nvariable's final value will be ``None``.\n\nExample\n^^^^^^^\n\nBelow is an annotated example:\n\n.. 
code-block:: python\n\n    from stacker.blueprints.base import Blueprint\n    from stacker.blueprints.variables.types import TroposphereType\n    from troposphere import s3, sns\n\n    class Buckets(Blueprint):\n\n        VARIABLES = {\n            # Specify that Buckets will be a list of s3.Bucket types.\n            # This means the config should be a dictionary of dictionaries\n            # which will be converted into troposphere buckets.\n            \"Buckets\": {\n                \"type\": TroposphereType(s3.Bucket, many=True),\n                \"description\": \"S3 Buckets to create.\",\n            },\n            # Specify that only a single bucket can be passed.\n            \"SingleBucket\": {\n                \"type\": TroposphereType(s3.Bucket),\n                \"description\": \"A single S3 bucket\",\n            },\n            # Specify that Subscriptions will be a list of sns.Subscription types.\n            # Note: sns.Subscription is the property type, not the standalone\n            # sns.SubscriptionResource.\n            \"Subscriptions\": {\n                \"type\": TroposphereType(sns.Subscription, many=True),\n                \"description\": \"Multiple SNS subscription designations\"\n            },\n            # Specify that only a single subscription can be passed, and that it\n            # is made optional.\n            \"SingleOptionalSubscription\": {\n                \"type\": TroposphereType(sns.Subscription, optional=True),\n                \"description\": \"A single, optional SNS subscription designation\"\n            }\n        }\n\n        def create_template(self):\n            t = self.template\n            variables = self.get_variables()\n\n            # The Troposphere s3 buckets have already been created when we\n            # access variables[\"Buckets\"], we just need to add them as\n            # resources to the template.\n            [t.add_resource(bucket) for bucket in variables[\"Buckets\"]]\n\n            # Add 
the single bucket to the template. You can use\n            # `Ref(single_bucket)` to pass CloudFormation references to the\n            # bucket just as you would with any other Troposphere type.\n            single_bucket = variables[\"SingleBucket\"]\n            t.add_resource(single_bucket)\n\n            subscriptions = variables[\"Subscriptions\"]\n            optional_subscription = variables[\"SingleOptionalSubscription\"]\n            # Handle it in some special way...\n            if optional_subscription is not None:\n                subscriptions.append(optional_subscription)\n\n            t.add_resource(sns.Topic(\n                TopicName=\"one-test\",\n                Subscriptions=subscriptions))\n\n            t.add_resource(sns.Topic(\n                TopicName=\"another-test\",\n                Subscriptions=subscriptions))\n\n\n\nA sample config for the above:\n\n..  code-block:: yaml\n\n    stacks:\n      - name: buckets\n        class_path: path.to.above.Buckets\n        variables:\n          Buckets:\n            # resource name (title) that will be added to CloudFormation.\n            FirstBucket:\n              # name of the s3 bucket\n              BucketName: my-first-bucket\n            SecondBucket:\n              BucketName: my-second-bucket\n          SingleBucket:\n            # resource name (title) that will be added to CloudFormation.\n            MySingleBucket:\n              BucketName: my-single-bucket\n          Subscriptions:\n            - Endpoint: one-lambda\n              Protocol: lambda\n            - Endpoint: another-lambda\n              Protocol: lambda\n          # The following could be omitted entirely\n          SingleOptionalSubscription:\n            Endpoint: a-third-lambda\n            Protocol: lambda\n\n\nCFNType\n-------\n\nThe ``CFNType`` can be used to signal that a variable should be submitted\nto CloudFormation as a Parameter instead of only available to the\nBlueprint when rendering. 
This is useful if you want to leverage AWS-\nSpecific Parameter types (e.g. ``List<AWS::EC2::Image::Id>``) or Systems\nManager Parameter Store values (e.g. ``AWS::SSM::Parameter::Value<String>``).\nSee ``stacker.blueprints.variables.types`` for available subclasses of the\n``CFNType``.\n\nExample\n^^^^^^^\n\nBelow is an annotated example:\n\n.. code-block:: python\n\n    from stacker.blueprints.base import Blueprint\n    from stacker.blueprints.variables.types import (\n        CFNString,\n        EC2AvailabilityZoneNameList,\n    )\n\n\n    class SampleBlueprint(Blueprint):\n\n        VARIABLES = {\n            \"String\": {\n                \"type\": str,\n                \"description\": \"Simple string variable\",\n            },\n            \"List\": {\n                \"type\": list,\n                \"description\": \"Simple list variable\",\n            },\n            \"CloudFormationString\": {\n                \"type\": CFNString,\n                \"description\": \"A variable which will create a CloudFormation Parameter of type String\",\n            },\n            \"CloudFormationSpecificType\": {\n                \"type\": EC2AvailabilityZoneNameList,\n                \"description\": \"A variable which will create a CloudFormation Parameter of type List<AWS::EC2::AvailabilityZone::Name>\"\n            },\n        }\n\n        def create_template(self):\n            t = self.template\n\n            # `get_variables` returns a dictionary of <variable name>: <variable\n            value>. 
For the subclasses of `CFNType`, the values are\n            # instances of `CFNParameter` which have a `ref` helper property\n            # which will return a troposphere `Ref` to the parameter name.\n            variables = self.get_variables()\n\n            t.add_output(Output(\"StringOutput\", variables[\"String\"]))\n\n            # variables[\"List\"] is a native list\n            for index, value in enumerate(variables[\"List\"]):\n                t.add_output(Output(\"ListOutput:{}\".format(index), value))\n\n\n            # `CFNParameter` values (which wrap variables with a `type`\n            # that is a `CFNType` subclass) can be converted to troposphere\n            # `Ref` objects with the `ref` property\n            t.add_output(Output(\"CloudFormationStringOutput\",\n                                variables[\"CloudFormationString\"].ref))\n            t.add_output(Output(\"CloudFormationSpecificTypeOutput\",\n                                variables[\"CloudFormationSpecificType\"].ref))\n\n\nUtilizing Stack name within your Blueprint\n==========================================\n\nSometimes your blueprint might want to utilize the already existing stack name\nwithin your blueprint. Stacker provides access to both the fully qualified\nstack name matching what’s shown in the CloudFormation console, in addition to\nthe stack's short name you have set in your YAML config.\n\nReferencing Fully Qualified Stack name\n--------------------------------------\n\nThe fully qualified name is a combination of the Stacker namespace + the short\nname (what you set as `name` in your YAML config file). If your stacker\nnamespace is `StackerIsCool` and the stack's short name is\n`myAwesomeEC2Instance`, the fully qualified name would be:\n\n``StackerIsCool-myAwesomeEC2Instance``\n\nTo use this in your blueprint, you can get the name from context. 
Use\n``self.context.get_fqn(self.name)``.\n\nReferencing the Stack short name\n--------------------------------\n\nThe Stack short name is the name you specified for the stack within your YAML\nconfig. It does not include the namespace. If your stacker namespace is\n`StackerIsCool` and the stack's short name is `myAwesomeEC2Instance`, the\nshort name would be:\n\n``myAwesomeEC2Instance``\n\nTo use this in your blueprint, you can get the name from self.name: ``self.name``\n\nExample\n^^^^^^^\n\nBelow is an annotated example creating a security group:\n\n.. code-block:: python\n\n  # we are importing Ref to allow for CFN References in the EC2 resource.  Tags\n  # will be used to set the Name tag\n  from troposphere import Ref, ec2, Tags\n  from stacker.blueprints.base import Blueprint\n  # CFNString is imported to allow for stand alone stack use\n  from stacker.blueprints.variables.types import CFNString\n\n  class SampleBlueprint(Blueprint):\n\n    # VpcId set here to allow for blueprint to be reused\n    VARIABLES = {\n    \"VpcId\": {\n        \"type\": CFNString,\n        \"description\": \"The VPC to create the Security group in\",\n        }\n    }\n\n\n    def create_template(self):\n        template = self.template\n        # Assigning the variables to a variable\n        variables = self.get_variables()\n        # now adding a SecurityGroup resource named `SecurityGroup` to the CFN template\n        template.add_resource(\n          ec2.SecurityGroup(\n            \"SecurityGroup\",\n            # Referring to the VpcId set as the variable\n            VpcId=variables['VpcId'].ref,\n            # Setting the group description as the fully qualified name\n            GroupDescription=self.context.get_fqn(self.name),\n            # setting the Name tag to be the stack short name\n            Tags=Tags(\n              Name=self.name\n              )\n            )\n          )\n\n\nTesting Blueprints\n==================\n\nWhen writing your own blueprints it's useful 
to write tests for them in order\nto make sure they behave the way you expect they would, especially if there is\nany complex logic inside.\n\nTo this end, a sub-class of the `unittest.TestCase` class has been\nprovided: `stacker.blueprints.testutil.BlueprintTestCase`. You use it\nlike the regular TestCase class, but it comes with an additional assertion:\n`assertRenderedBlueprint`. This assertion takes a Blueprint object and renders\nit, then compares it to an expected output, usually in\n`tests/fixtures/blueprints`.\n\nExamples of using the `BlueprintTestCase` class can be found in the\nstacker_blueprints repo. For example, see the tests used to test the\n`Route53 DNSRecords Blueprint`_ and the accompanying `output results`_.\n\nYaml (stacker) format tests\n---------------------------\n\nIn order to wrap the `BlueprintTestCase` tests in a format similar to stacker's\nstack format, the `YamlDirTestGenerator` class is provided. When subclassed in\na directory, it will search for yaml files in that directory with certain\nstructure and execute a test case for it. As an example:\n\n.. code-block:: yaml\n\n  ---\n  namespace: test\n  stacks:\n    - name: test_stack\n      class_path: stacker_blueprints.s3.Buckets\n      variables:\n        var1: val1\n\nWhen run from tests, this will create a template fixture file called\ntest_stack.json containing the output from the `stacker_blueprints.s3.Buckets`\ntemplate.\n\nExamples of using the `YamlDirTestGenerator` class can be found in the\nstacker_blueprints repo. For example, see the tests used to test the\n`s3.Buckets`_ class and the accompanying `fixture`_. These are\ngenerated from a `subclass of YamlDirTestGenerator`_.\n\n.. _troposphere: https://github.com/cloudtools/troposphere\n.. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints\n.. _Route53 DNSRecords Blueprint: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/test_route53.py\n.. 
_output results: https://github.com/cloudtools/stacker_blueprints/tree/master/tests/fixtures/blueprints\n.. _Resource Type: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html\n.. _Property Type: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-product-property-reference.html\n.. _s3.Buckets: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/test_s3.yaml\n.. _fixture: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/fixtures/blueprints/s3_static_website.json\n.. _subclass of YamlDirTestGenerator: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/__init__.py\n"
  },
  {
    "path": "docs/commands.rst",
    "content": "========\nCommands\n========\n\nBuild\n-----\n\nBuild is used to create/update the stacks provided in the config file. It\nautomatically figures out any dependencies between stacks, and creates them\nin parallel safely (if a stack depends on another stack, it will wait for\nthat stack to be finished before updating/creating).\n\nIt also provides the *--dump* flag for testing out blueprints before\npushing them up into CloudFormation.\nEven then, some errors might only be noticed after first submitting a stack,\nat which point it can no longer be updated by Stacker.\nWhen that situation is detected in interactive mode, you will be prompted to\ndelete and re-create the stack, so that you don't need to do it manually in the\nAWS console.\nIf that behavior is also desired in non-interactive mode, enable the\n*--recreate-failed* flag.\n\n::\n\n  # stacker build -h\n  usage: stacker build [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i]\n                       [--replacements-only] [--recreate-failed] [-o]\n                       [--force STACKNAME] [--stacks STACKNAME] [-t] [-d DUMP]\n                       [environment] config\n\n  Launches or updates CloudFormation stacks based on the given config. Stacker\n  is smart enough to figure out if anything (the template or parameters) have\n  changed for a given stack. If nothing has changed, stacker will correctly skip\n  executing anything against the stack.\n\n  positional arguments:\n    environment           Path to a simple `key: value` pair environment file.\n                          The values in the environment file can be used in the\n                          stack config as if it were a string.Template type:\n                          https://docs.python.org/2/library/string.html\n                          #template-strings.\n    config                The config file where stack configuration is located.\n                          Must be in yaml format. 
If `-` is provided, then the\n                          config will be read from stdin.\n\n  optional arguments:\n    -h, --help            show this help message and exit\n    -e ENV=VALUE, --env ENV=VALUE\n                          Adds environment key/value pairs from the command\n                          line. Overrides your environment file settings. Can be\n                          specified more than once.\n    -r REGION, --region REGION\n                          The AWS region to launch in.\n    -v, --verbose         Increase output verbosity. May be specified up to\n                          twice.\n    -i, --interactive     Enable interactive mode. If specified, this will use\n                          the AWS interactive provider, which leverages\n                          Cloudformation Change Sets to display changes before\n                          running cloudformation templates. You'll be asked if\n                          you want to execute each change set. If you only want\n                          to authorize replacements, run with \"--replacements-\n                          only\" as well.\n    --replacements-only   If interactive mode is enabled, stacker will only\n                          prompt to authorize replacements.\n    --recreate-failed     Destroy and re-create stacks that are stuck in a\n                          failed state from an initial deployment when updating.\n    -o, --outline         Print an outline of what steps will be taken to build\n                          the stacks\n    --force STACKNAME     If a stackname is provided to --force, it will be\n                          updated, even if it is locked in the config.\n    --stacks STACKNAME    Only work on the stacks given. Can be specified more\n                          than once. 
If not specified then stacker will work on\n                          all stacks in the config file.\n    -t, --tail            Tail the CloudFormation logs while working with stacks\n    -d DUMP, --dump DUMP  Dump the rendered Cloudformation templates to a\n                          directory\n\nDestroy\n-------\n\nDestroy handles the tearing down of CloudFormation stacks defined in the\nconfig file. It figures out any dependencies that may exist, and destroys\nthe stacks in the correct order (in parallel if all dependent stacks have\nalready been destroyed).\n\n::\n\n  # stacker destroy -h\n  usage: stacker destroy [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i]\n                         [--replacements-only] [-f] [--stacks STACKNAME] [-t]\n                         environment config\n\n  Destroys CloudFormation stacks based on the given config. Stacker will\n  determine the order in which stacks should be destroyed based on any manual\n  requirements they specify or output values they rely on from other stacks.\n\n  positional arguments:\n    environment           Path to a simple `key: value` pair environment file.\n                          The values in the environment file can be used in the\n                          stack config as if it were a string.Template type:\n                          https://docs.python.org/2/library/string.html\n                          #template-strings. Must define at least a \"namespace\".\n    config                The config file where stack configuration is located.\n                          Must be in yaml format. If `-` is provided, then the\n                          config will be read from stdin.\n\n  optional arguments:\n    -h, --help            show this help message and exit\n    -e ENV=VALUE, --env ENV=VALUE\n                          Adds environment key/value pairs from the command\n                          line. Overrides your environment file settings. 
Can be\n                          specified more than once.\n    -r REGION, --region REGION\n                          The AWS region to launch in.\n    -v, --verbose         Increase output verbosity. May be specified up to\n                          twice.\n    -i, --interactive     Enable interactive mode. If specified, this will use\n                          the AWS interactive provider, which leverages\n                          Cloudformation Change Sets to display changes before\n                          running cloudformation templates. You'll be asked if\n                          you want to execute each change set. If you only want\n                          to authorize replacements, run with \"--replacements-\n                          only\" as well.\n    --replacements-only   If interactive mode is enabled, stacker will only\n                          prompt to authorize replacements.\n    -f, --force           Whether or not you want to go through with destroying\n                          the stacks\n    --stacks STACKNAME    Only work on the stacks given. Can be specified more\n                          than once. 
If not specified then stacker will work on\n                          all stacks in the config file.\n    -t, --tail            Tail the CloudFormation logs while working with stacks\n\nInfo\n----\n\n\nInfo displays information on the CloudFormation stacks based on the given\nconfig.\n\n::\n\n  # stacker info -h\n  usage: stacker info [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i]\n                      [--replacements-only] [--stacks STACKNAME]\n                      environment config\n\n  Gets information on the CloudFormation stacks based on the given config.\n\n  positional arguments:\n    environment           Path to a simple `key: value` pair environment file.\n                          The values in the environment file can be used in the\n                          stack config as if it were a string.Template type:\n                          https://docs.python.org/2/library/string.html\n                          #template-strings. Must define at least a \"namespace\".\n    config                The config file where stack configuration is located.\n                          Must be in yaml format. If `-` is provided, then the\n                          config will be read from stdin.\n\n  optional arguments:\n    -h, --help            show this help message and exit\n    -e ENV=VALUE, --env ENV=VALUE\n                          Adds environment key/value pairs from the command\n                          line. Overrides your environment file settings. Can be\n                          specified more than once.\n    -r REGION, --region REGION\n                          The AWS region to launch in.\n    -v, --verbose         Increase output verbosity. May be specified up to\n                          twice.\n    -i, --interactive     Enable interactive mode. 
If specified, this will use\n                          the AWS interactive provider, which leverages\n                          Cloudformation Change Sets to display changes before\n                          running cloudformation templates. You'll be asked if\n                          you want to execute each change set. If you only want\n                          to authorize replacements, run with \"--replacements-\n                          only\" as well.\n    --replacements-only   If interactive mode is enabled, stacker will only\n                          prompt to authorize replacements.\n    --stacks STACKNAME    Only work on the stacks given. Can be specified more\n                          than once. If not specified then stacker will work on\n                          all stacks in the config file.\n\nDiff\n----\n\nDiff creates a CloudFormation Change Set for each stack and displays the\nresulting changes. This works for stacks that already exist and new stacks.\n\nFor stacks that are dependent on outputs from other stacks in the same file,\nstacker will infer that an update was made to the \"parent\" stack and invalidate\noutputs from resources that were changed and replace their value with\n``<inferred-change: stackName.outputName=unresolvedValue>``. This is done to\nillustrate the potential blast radius of a change and assist in tracking down\nwhy subsequent stacks could change. This inference is not perfect but takes a\n\"best effort\" approach to showing potential change between stacks that rely on\neach others outputs.\n\n::\n\n  # stacker diff -h\n  usage: stacker diff [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i]\n                      [--replacements-only] [--force STACKNAME]\n                      [--stacks STACKNAME]\n                      environment config\n\n  Diffs the config against the currently running CloudFormation stacks Sometimes\n  small changes can have big impacts. 
Run \"stacker diff\" before \"stacker build\"\n  to detect bad things(tm) from happening in advance!\n\n  positional arguments:\n    environment           Path to a simple `key: value` pair environment file.\n                          The values in the environment file can be used in the\n                          stack config as if it were a string.Template type:\n                          https://docs.python.org/2/library/string.html\n                          #template-strings. Must define at least a \"namespace\".\n    config                The config file where stack configuration is located.\n                          Must be in yaml format. If `-` is provided, then the\n                          config will be read from stdin.\n\n  optional arguments:\n    -h, --help            show this help message and exit\n    -e ENV=VALUE, --env ENV=VALUE\n                          Adds environment key/value pairs from the command\n                          line. Overrides your environment file settings. Can be\n                          specified more than once.\n    -r REGION, --region REGION\n                          The AWS region to launch in.\n    -v, --verbose         Increase output verbosity. May be specified up to\n                          twice.\n    -i, --interactive     Enable interactive mode. If specified, this will use\n                          the AWS interactive provider, which leverages\n                          Cloudformation Change Sets to display changes before\n                          running cloudformation templates. You'll be asked if\n                          you want to execute each change set. 
If you only want\n                          to authorize replacements, run with \"--replacements-\n                          only\" as well.\n    --replacements-only   If interactive mode is enabled, stacker will only\n                          prompt to authorize replacements.\n    --force STACKNAME     If a stackname is provided to --force, it will be\n                          diffed, even if it is locked in the config.\n    --stacks STACKNAME    Only work on the stacks given. Can be specified more\n                          than once. If not specified then stacker will work on\n                          all stacks in the config file.\n"
  },
  {
    "path": "docs/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# stacker documentation build configuration file, created by\n# sphinx-quickstart on Fri Aug 14 09:59:29 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\n\nimport stacker\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    'sphinx.ext.autodoc',\n    'sphinx.ext.doctest',\n    'sphinx.ext.todo',\n    'sphinx.ext.coverage',\n    'sphinx.ext.viewcode',\n    'sphinx.ext.napoleon',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'stacker'\ncopyright = u'2015, Michael Barrett'\nauthor = u'Michael Barrett'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = stacker.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = stacker.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\n#html_theme = 'sphinx_rtd_theme'\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n    \"description\": \"A Cloudformation Stack Manager\",\n    \"github_button\": True,\n    \"github_user\": \"cloudtools\",\n    \"github_repo\": \"stacker\",\n    \"github_banner\": True,\n}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents.  If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'stackerdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n  (master_doc, 'stacker.tex', u'stacker Documentation',\n   u'Michael Barrett', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (master_doc, 'stacker', u'stacker Documentation',\n     [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n  (master_doc, 'stacker', u'stacker Documentation',\n   author, 'stacker', 'One line description of project.',\n   'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n"
  },
  {
    "path": "docs/config.rst",
    "content": "=============\nConfiguration\n=============\n\nstacker makes use of a YAML formatted config file to define the different\nCloudFormation stacks that make up a given environment.\n\nThe configuration file has a loose definition, with only a few top-level\nkeywords. Other than those keywords, you can define your own top-level keys\nto make use of other YAML features like `anchors & references`_ to avoid\nduplicating config. (See `YAML anchors & references`_ for details)\n\nTop Level Keywords\n==================\n\nNamespace\n---------\n\nYou can provide a **namespace** to create all stacks within. The namespace will\nbe used as a prefix for the name of any stack that stacker creates, and makes\nit unnecessary to specify the fully qualified name of the stack in output\nlookups.\n\nIn addition, this value will be used to create an S3 bucket that stacker will\nuse to upload and store all CloudFormation templates.\n\nIn general, this is paired with the concept of `Environments\n<environments.html>`_ to create a namespace per environment::\n\n  namespace: ${namespace}\n\nNamespace Delimiter\n-------------------\n\nBy default, stacker will use '-' as a delimiter between your namespace and the\ndeclared stack name to build the actual CloudFormation stack name that gets\ncreated. Since child resources of your stacks will, by default, use a portion\nof your stack name in the auto-generated resource names, the first characters\nof your fully-qualified stack name potentially convey valuable information to\nsomeone glancing at resource names. If you prefer to not use a delimiter, you\ncan pass the **namespace_delimiter** top level key word in the config as an empty string.\n\nSee the `CloudFormation API Reference`_ for allowed stack name characters\n\n.. 
_`CloudFormation API Reference`: http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html\n\nS3 Bucket\n---------\n\nStacker, by default, pushes your CloudFormation templates into an S3 bucket\nand points CloudFormation at the template in that bucket when launching or\nupdating your stacks. By default it uses a bucket named\n**stacker-${namespace}**, where the namespace is the namespace provided in the\nconfig.\n\nIf you want to change this, provide the **stacker_bucket** top level key word\nin the config.\n\nThe bucket will be created in the same region that the stacks will be launched\nin.  If you want to change this, or if you already have an existing bucket\nin a different region, you can set the **stacker_bucket_region** to\nthe region where you want to create the bucket.\n\n**S3 Bucket location prior to 1.0.4:**\n  There was a \"bug\" early on in stacker that created the s3 bucket in us-east-1,\n  no matter what you specified as your --region. An issue came up leading us to\n  believe this shouldn't be the expected behavior, so we fixed the behavior.\n  If you executed a stacker build prior to V 1.0.4, your bucket for templates\n  would already exist in us-east-1, requiring you to specify the\n  **stacker_bucket_region** top level keyword.\n\n.. note::\n  Deprecation of fallback to legacy template bucket. We will first try\n  the region you defined using the top level keyword under\n  **stacker_bucket_region**, or what was specified in the --region flag.\n  If that fails, we fallback to the us-east-1 region. The fallback to us-east-1\n  will be removed in a future release resulting in the following botocore\n  exception to be thrown:\n\n  ``TemplateURL must reference a valid S3 object to which you have access.``\n\n  To avoid this issue, specify the stacker_bucket_region top level keyword\n  as described above. 
You can specify this keyword now to remove the\n  deprecation warning.\n\nIf you want stacker to upload templates directly to CloudFormation, instead of\nfirst uploading to S3, you can set **stacker_bucket** to an empty string.\nHowever, note that template size is greatly limited when uploading directly.\nSee the `CloudFormation Limits Reference`_.\n\n.. _`CloudFormation Limits Reference`: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html\n\nModule Paths\n------------\nWhen setting the ``classpath`` for blueprints/hooks, it is sometimes desirable to\nload modules from outside the default ``sys.path`` (e.g., to include modules\ninside the same repo as config files).\n\nAdding a path (e.g. ``./``) to the **sys_path** top level key word will allow\nmodules from that path location to be used.\n\nService Role\n------------\n\nBy default stacker doesn't specify a service role when executing changes to\nCloudFormation stacks. If you would prefer that it do so, you can set\n**service_role** to be the ARN of the service that stacker should use when\nexecuting CloudFormation changes.\n\nThis is the equivalent of setting ``RoleARN`` on a call to the following\nCloudFormation api calls: ``CreateStack``, ``UpdateStack``,\n``CreateChangeSet``.\n\nSee the AWS documentation for `AWS CloudFormation Service Roles`_.\n\n.. 
_`AWS CloudFormation Service Roles`: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html?icmpid=docs_cfn_console\n\nRemote Packages\n---------------\nThe **package_sources** top level keyword can be used to define remote\nsources for blueprints (e.g., retrieving ``stacker_blueprints`` on github at\ntag ``v1.0.2``).\n\nThe only required key for a git repository config is ``uri``, but ``branch``,\n``tag``, & ``commit`` can also be specified::\n\n    package_sources:\n      git:\n        - uri: git@github.com:acmecorp/stacker_blueprints.git\n        - uri: git@github.com:remind101/stacker_blueprints.git\n          tag: 1.0.0\n          paths:\n            - stacker_blueprints\n        - uri: git@github.com:contoso/webapp.git\n          branch: staging\n        - uri: git@github.com:contoso/foo.git\n          commit: 12345678\n\nIf no specific commit or tag is specified for a repo, the remote repository\nwill be checked for newer commits on every execution of Stacker.\n\nFor ``.tar.gz`` & ``zip`` archives on s3, specify a ``bucket`` & ``key``::\n\n    package_sources:\n      s3:\n        - bucket: mystackers3bucket\n          key: archives/blueprints-v1.zip\n          paths:\n            - stacker_blueprints\n        - bucket: anothers3bucket\n          key: public/public-blueprints-v2.tar.gz\n          requester_pays: true\n        - bucket: yetanothers3bucket\n          key: sallys-blueprints-v1.tar.gz\n          # use_latest defaults to true - will update local copy if the\n          # last modified date on S3 changes\n          use_latest: false\n\nLocal directories can also be specified::\n\n    package_sources:\n      local:\n        - source: ../vpc\n\nUse the ``paths`` option when subdirectories of the repo/archive/directory\nshould be added to Stacker's ``sys.path``.\n\nCloned repos/archives will be cached between builds; the cache location defaults\nto ~/.stacker but can be manually specified via the **stacker_cache_dir** 
top\nlevel keyword.\n\nRemote Configs\n~~~~~~~~~~~~~~\nConfiguration yamls from remote configs can also be used by specifying a list\nof ``configs`` in the repo to use::\n\n    package_sources:\n      git:\n        - uri: git@github.com:acmecorp/stacker_blueprints.git\n          configs:\n            - vpc.yaml\n\nIn this example, the configuration in ``vpc.yaml`` will be merged into the\nrunning current configuration, with the current configuration's values taking\npriority over the values in ``vpc.yaml``.\n\nDictionary Stack Names & Hook Paths\n:::::::::::::::::::::::::::::::::::\nTo allow remote configs to be selectively overridden, stack names & hook\npaths can optionally be defined as dictionaries, e.g.::\n\n  pre_build:\n    my_route53_hook:\n      path: stacker.hooks.route53.create_domain\n      required: true\n      enabled: true\n      args:\n        domain: mydomain.com\n  stacks:\n    vpc-example:\n      class_path: stacker_blueprints.vpc.VPC\n      locked: false\n      enabled: true\n    bastion-example:\n      class_path: stacker_blueprints.bastion.Bastion\n      locked: false\n      enabled: true\n\nPre & Post Hooks\n----------------\n\nMany actions allow for pre & post hooks. These are python methods that are\nexecuted before, and after the action is taken for the entire config. Hooks \ncan be enabled or disabled, per hook. 
Only the following actions allow\npre/post hooks:\n\n* build (keywords: *pre_build*, *post_build*)\n* destroy (keywords: *pre_destroy*, *post_destroy*)\n\nThere are a few reasons to use these, though the most common is if you want\nbetter control over the naming of a resource than what CloudFormation allows.\n\nThe keyword is a list of dictionaries with the following keys:\n\n**path:**\n  the python import path to the hook\n**data_key:**\n  If set, and the hook returns data (a dictionary), the results will be stored\n  in the context.hook_data with the data_key as its key.\n**required:**\n  whether to stop execution if the hook fails\n**enabled:**\n  whether to execute the hook every stacker run. Default: True. This is a bool\n  that grants you the ability to execute a hook per environment when combined\n  with a variable pulled from an environment file.\n**args:**\n  a dictionary of arguments to pass to the hook\n\nAn example using the *create_domain* hook for creating a route53 domain before\nthe build action::\n\n  pre_build:\n    - path: stacker.hooks.route53.create_domain\n      required: true\n      enabled: true\n      args:\n        domain: mydomain.com\n\nAn example of a hook using the ``create_domain_bool`` variable from the environment\nfile to determine if the hook should run. Set ``create_domain_bool: true`` or\n``create_domain_bool: false`` in the environment file to determine if the hook\nshould run in the environment stacker is running against::\n\n  pre_build:\n    - path: stacker.hooks.route53.create_domain\n      required: true\n      enabled: ${create_domain_bool}\n      args:\n        domain: mydomain.com\n\nTags\n----\n\nCloudFormation supports arbitrary key-value pair tags. All stack-level tags, including automatically created tags, are\npropagated to resources that AWS CloudFormation supports. 
See `AWS CloudFormation Resource Tags Type`_ for more details.\nIf no tags are specified, the `stacker_namespace` tag is applied to your stack with the value of `namespace` as the\ntag value.\n\nIf you prefer to apply a custom set of tags, specify the top-level keyword `tags` as a map. Example::\n\n  tags:\n    \"hello\": world\n    \"my_tag:with_colons_in_key\": ${dynamic_tag_value_from_my_env}\n    simple_tag: simple value\n\nIf you prefer to have no tags applied to your stacks (versus the default tags that stacker applies), specify an empty\nmap for the top-level keyword::\n\n  tags: {}\n\n.. _`AWS CloudFormation Resource Tags Type`: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html\n\nMappings\n--------\n\nMappings are dictionaries that are provided as Mappings_ to each CloudFormation\nstack that stacker produces.\n\nThese can be useful for providing things like different AMIs for different\ninstance types in different regions::\n\n  mappings:\n    AmiMap:\n      us-east-1:\n        NAT: ami-ad227cc4\n        ubuntu1404: ami-74e27e1c\n        bastion: ami-74e27e1c\n      us-west-2:\n        NAT: ami-290f4119\n        ubuntu1404: ami-5189a661\n        bastion: ami-5189a661\n\nThese can be used in each blueprint/stack as usual.\n\nLookups\n-------\n\nLookups allow you to create custom methods which take a value and are\nresolved at build time. The resolved values are passed to the `Blueprints\n<blueprints.html>`_ before it is rendered. For more information, see the\n`Lookups <lookups.html>`_ documentation.\n\nstacker provides some common `lookups <lookups.html>`_, but it is\nsometimes useful to have your own custom lookup that doesn't get shipped\nwith stacker. You can register your own lookups by defining a `lookups`\nkey::\n\n  lookups:\n    custom: path.to.lookup.handler\n\nThe key name for the lookup will be used as the type name when registering\nthe lookup. 
The value should be the path to a valid lookup handler.\n\nYou can then use these within your config::\n\n  conf_value: ${custom some-input-here}\n\n\nStacks\n------\n\nThis is the core part of the config - this is where you define each of the\nstacks that will be deployed in the environment.  The top level keyword\n*stacks* is populated with a list of dictionaries, each representing a single\nstack to be built.\n\nA stack has the following keys:\n\n**name:**\n  The logical name for this stack, which can be used in conjunction with the\n  ``output`` lookup. The value here must be unique within the config. If no\n  ``stack_name`` is provided, the value here will be used for the name of the\n  CloudFormation stack.\n**class_path:**\n  The python class path to the Blueprint to be used. Specify this or\n  ``template_path`` for the stack.\n**template_path:**\n  Path to raw CloudFormation template (JSON or YAML). Specify this or\n  ``class_path`` for the stack. Path can be specified relative to the current\n  working directory (e.g. templates stored alongside the Config), or relative\n  to a directory in the python ``sys.path`` (i.e. for loading templates\n  retrieved via ``package_sources``).\n\n**description:**\n  A short description to apply to the stack. This overwrites any description\n  provided in the Blueprint. See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-description-structure.html\n**variables:**\n  A dictionary of Variables_ to pass into the Blueprint when rendering the\n  CloudFormation template. Variables_ can be any valid YAML data\n  structure.\n**locked:**\n  (optional) If set to true, the stack is locked and will not be\n  updated unless the stack is passed to stacker via the *--force* flag.\n  This is useful for *risky* stacks that you don't want to take the\n  risk of allowing CloudFormation to update, but still want to make\n  sure get launched when the environment is first created. 
When ``locked``,\n  it's not necessary to specify a ``class_path`` or ``template_path``.\n**enabled:**\n  (optional) If set to false, the stack is disabled, and will not be\n  built or updated. This can allow you to disable stacks in different\n  environments.\n**protected:**\n  (optional) When running an update in non-interactive mode, if a stack has\n  *protected* set to *true* and would get changed, stacker will switch to\n  interactive mode for that stack, allowing you to approve/skip the change.\n**requires:**\n  (optional) a list of other stacks this stack requires. This is for explicit\n  dependencies - you do not need to set this if you refer to another stack in\n  a Parameter, so this is rarely necessary.\n**required_by:**\n  (optional) a list of other stacks or targets that require this stack. It's an\n  inverse to ``requires``.\n**tags:**\n  (optional) a dictionary of CloudFormation tags to apply to this stack. This\n  will be combined with the global tags, but these tags will take precedence.\n**stack_name:**\n  (optional) If provided, this will be used as the name of the CloudFormation\n  stack. Unlike ``name``, the value doesn't need to be unique within the config,\n  since you could have multiple stacks with the same name, but in different\n  regions or accounts. (note: the namespace from the environment will be\n  prepended to this)\n**region**:\n  (optional): If provided, specifies the name of the region that the\n  CloudFormation stack should reside in. If not provided, the default region\n  will be used (``AWS_DEFAULT_REGION``, ``~/.aws/config`` or the ``--region``\n  flag). If both ``region`` and ``profile`` are specified, the value here takes\n  precedence over the value in the profile.\n**profile**:\n  (optional): If provided, specifies the name of an AWS profile to use when\n  performing AWS API calls for this stack. 
This can be used to provision stacks\n  in multiple accounts or regions.\n**stack_policy_path**:\n  (optional): If provided, specifies the path to a JSON formatted stack policy\n  that will be applied when the CloudFormation stack is created and updated.\n  You can use stack policies to prevent CloudFormation from making updates to\n  protected resources (e.g. databases). See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html\n**in_progress_behavior**:\n  (optional): If provided, specifies the behavior for when a stack is in\n  `CREATE_IN_PROGRESS` or `UPDATE_IN_PROGRESS`. By default, stacker will raise\n  an exception if the stack is in an `IN_PROGRESS` state. You can set this\n  option to `wait` and stacker will wait for the previous update to complete\n  before attempting to update the stack.\n**notification_arns**:\n  (optional): If provided, accepts a list of None or many AWS SNS Topic ARNs\n  which will be notified of this stack's CloudFormation state changes.\n\nStacks Example\n~~~~~~~~~~~~~~\n\nHere's an example from stacker_blueprints_, used to create a VPC::\n\n  stacks:\n    - name: vpc-example\n      class_path: stacker_blueprints.vpc.VPC\n      locked: false\n      enabled: true\n      variables:\n        InstanceType: t2.small\n        SshKeyName: default\n        ImageName: NAT\n        AZCount: 2\n        PublicSubnets:\n          - 10.128.0.0/24\n          - 10.128.1.0/24\n          - 10.128.2.0/24\n          - 10.128.3.0/24\n        PrivateSubnets:\n          - 10.128.8.0/22\n          - 10.128.12.0/22\n          - 10.128.16.0/22\n          - 10.128.20.0/22\n        CidrBlock: 10.128.0.0/16\n\nTargets\n-------\n\nIn stacker, **targets** can be used as a lightweight method to group a number\nof stacks together, as a named \"target\" in the graph. Internally, this adds a\nnode to the underlying DAG, which can then be used alongside the `--targets`\nflag. 
If you're familiar with the concept of \"targets\" in systemd, the concept\nis the same.\n\n**name:**\n  The logical name for this target.\n**requires:**\n  (optional) a list of stacks or other targets this target requires.\n**required_by:**\n  (optional) a list of stacks or other targets that require this target.\n\nHere's an example of a target that will execute all \"database\" stacks::\n\n  targets:\n    - name: databases\n\n  stacks:\n    - name: dbA\n      class_path: blueprints.DB\n      required_by:\n        - databases\n    - name: dbB\n      class_path: blueprints.DB\n      required_by:\n        - databases\n\nCustom Log Formats\n------------------\n\nBy default, stacker uses the following `log_formats`::\n\n  log_formats:\n    info: \"[%(asctime)s] %(message)s\"\n    color: \"[%(asctime)s] \\033[%(color)sm%(message)s\\033[39m\"\n    debug: \"[%(asctime)s] %(levelname)s %(threadName)s %(name)s:%(lineno)d(%(funcName)s): %(message)s\"\n\nYou may optionally provide custom `log_formats`. In this example, we add the environment name to each log line::\n\n  log_formats:\n    info: \"[%(asctime)s] ${environment} %(message)s\"\n    color: \"[%(asctime)s] ${environment} \\033[%(color)sm%(message)s\\033[39m\"\n    \nYou may use any of the standard Python\n`logging module format attributes <https://docs.python.org/2.7/library/logging.html#logrecord-attributes>`_\nwhen building your `log_formats`.\n\n\nVariables\n==========\n\nVariables are values that will be passed into a `Blueprint\n<blueprints.html>`_ before it is\nrendered. 
Variables can be any valid YAML data structure and can leverage\nLookups_ to expand values at build time.\n\nThe following concepts make working with variables within large templates\neasier:\n\nYAML anchors & references\n-------------------------\n\nIf you have a common set of variables that you need to pass around in many\nplaces, it can be annoying to have to copy and paste them in multiple places.\nInstead, using a feature of YAML known as `anchors & references`_, you can\ndefine common values in a single place and then refer to them with a simple\nsyntax.\n\nFor example, say you pass a common domain name to each of your stacks, each of\nthem taking it as a Variable. Rather than having to enter the domain into\neach stack (and hopefully not typo'ing any of them) you could do the\nfollowing::\n\n  domain_name: &domain mydomain.com\n\nNow you have an anchor called **domain** that you can use in place of any value\nin the config to provide the value **mydomain.com**. You use the anchor with\na reference::\n\n  stacks:\n    - name: vpc\n      class_path: stacker_blueprints.vpc.VPC\n      variables:\n        DomainName: *domain\n\nEven more powerful is the ability to anchor entire dictionaries, and then\nreference them in another dictionary, effectively providing it with default\nvalues. 
For example::\n\n  common_variables: &common_variables\n    DomainName: mydomain.com\n    InstanceType: m3.medium\n    AMI: ami-12345abc\n\nNow, rather than having to provide each of those variables to every stack that\ncould use them, you can just do this instead::\n\n  stacks:\n    - name: vpc\n      class_path: stacker_blueprints.vpc.VPC\n      variables:\n        << : *common_variables\n        InstanceType: c4.xlarge # override the InstanceType in this stack\n\nUsing Outputs as Variables\n---------------------------\n\nSince stacker encourages the breaking up of your CloudFormation stacks into\nentirely separate stacks, sometimes you'll need to pass values from one stack\nto another. The way this is handled in stacker is by having one stack\nprovide Outputs_ for all the values that another stack may need, and then\nusing those as the inputs for another stack's Variables_. stacker makes\nthis easier for you by providing a syntax for Variables_ that will cause\nstacker to automatically look up the values of Outputs_ from another stack\nin its config. To do so, use the following format for the Variable on the\ntarget stack::\n\n  MyParameter: ${output OtherStack::OutputName}\n\nSince referencing Outputs_ from stacks is the most common use case,\n`output` is the default lookup type. For more information see Lookups_.\n\nThis example is taken from stacker_blueprints_ example config - when building\nthings inside a VPC, you will need to pass the *VpcId* of the VPC that you\nwant the resources to be located in. 
If the *vpc* stack provides an Output\ncalled *VpcId*, you can reference it easily::\n\n  domain_name: &domain my_domain\n\n  stacks:\n    - name: vpc\n      class_path: stacker_blueprints.vpc.VPC\n      variables:\n        DomainName: *domain\n    - name: webservers\n      class_path: stacker_blueprints.asg.AutoscalingGroup\n      variables:\n        DomainName: *domain\n        VpcId: ${output vpc::VpcId} # gets the VpcId Output from the vpc stack\n\nNote: Doing this creates an implicit dependency from the *webservers* stack\nto the *vpc* stack, which will cause stacker to submit the *vpc* stack, and\nthen wait until it is complete before it submits the *webservers* stack.\n\nMulti Account/Region Provisioning\n---------------------------------\n\nYou can use stacker to manage CloudFormation stacks in multiple accounts and\nregions, and reference outputs across them.\n\nAs an example, let's say you had 3 accounts you wanted to manage:\n\n#) OpsAccount: An AWS account that has IAM users for employees.\n#) ProdAccount: An AWS account for a \"production\" environment.\n#) StageAccount: An AWS account for a \"staging\" environment.\n\nYou want employees with IAM user accounts in OpsAccount to be able to assume\nroles in both the ProdAccount and StageAccount. 
You can use stacker to easily\nmanage this::\n\n\n  stacks:\n    # Create some stacks in both the \"prod\" and \"stage\" accounts with IAM roles\n    # that employees can use.\n    - name: prod/roles\n      profile: prod\n      class_path: blueprints.Roles\n    - name: stage/roles\n      profile: stage\n      class_path: blueprints.Roles\n\n    # Create a stack in the \"ops\" account and grant each employee access to\n    # assume the roles we created above.\n    - name: users\n      profile: ops\n      class_path: blueprints.IAMUsers\n      variables:\n        Users:\n          john-smith:\n            Roles:\n              - ${output prod/roles::EmployeeRoleARN}\n              - ${output stage/roles::EmployeeRoleARN}\n\n\nNote how I was able to reference outputs from stacks in multiple accounts using the `output` plugin!\n\nEnvironments\n============\n\nA pretty common use case is to have separate environments that you want to\nlook mostly the same, though with some slight modifications. For example, you\nmight want a *production* and a *staging* environment. The production\nenvironment likely needs more instances, and often those instances will be\nof a larger instance type. Environments allow you to use your existing\nstacker config, but provide different values based on the environment file\nchosen on the command line. For more information, see the\n`Environments <environments.html>`_ documentation.\n\nTranslators\n===========\n\n.. note::\n  Translators have been deprecated in favor of Lookups_ and will be\n  removed in a future release.\n\nTranslators allow you to create custom methods which take a value, then modify\nit before passing it on to the stack. Currently this is used to allow you to\npass a KMS encrypted string as a Parameter, then have KMS decrypt it before\nsubmitting it to CloudFormation. For more information, see the\n`Translators <translators.html>`_ documentation.\n\n.. 
_`anchors & references`: https://en.wikipedia.org/wiki/YAML#Repeated_nodes\n.. _Mappings: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html\n.. _Outputs: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html\n.. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints\n.. _`AWS profiles`: https://docs.aws.amazon.com/cli/latest/userguide/cli-multiple-profiles.html\n"
  },
  {
    "path": "docs/environments.rst",
    "content": "============\nEnvironments\n============\n\nWhen running stacker, you can optionally provide an \"environment\" file. The\nenvironment file defines values, which can then be referred to by name from\nyour stack config file. The environment file is interpreted as YAML if it\nends in `.yaml` or `.yml`, otherwise it's interpreted as simple key/value\npairs.\n\nKey/Value environments\n----------------------\n\nThe stacker config file will be interpolated as a `string.Template\n<https://docs.python.org/2/library/string.html#template-strings>`_ using the\nkey/value pairs from the environment file. The format of the file is a single\nkey/value per line, separated by a colon (**:**), like this::\n\n  vpcID: vpc-12345678\n\nProvided the key/value vpcID above, you will now be able to use this in\nyour configs for the specific environment you are deploying into. They\nact as keys that can be used in your config file, providing a sort of\ntemplating ability. This allows you to change the values of your config\nbased on the environment you are in. For example, if you have a *webserver*\nstack, and you need to provide it a variable for the instance size it\nshould use, you would have something like this in your config file::\n\n  stacks:\n    - name: webservers\n      class_path: stacker_blueprints.asg.AutoscalingGroup\n      variables:\n        InstanceType: m3.medium\n\nBut what if you needed more CPU in your production environment, but not in your\nstaging? Without Environments, you'd need a separate config for each. With\nenvironments, you can simply define two different environment files with the\nappropriate *InstanceType* in each, and then use the key in the environment\nfiles in your config. 
For example::\n\n  # in the file: stage.env\n  web_instance_type: m3.medium\n\n  # in the file: prod.env\n  web_instance_type: c4.xlarge\n\n  # in your config file:\n  stacks:\n    - name: webservers\n      class_path: stacker_blueprints.asg.AutoscalingGroup\n      variables:\n        InstanceType: ${web_instance_type}\n\nYAML environments\n-----------------\n\nYAML environments allow for more complex environment configuration rather\nthan simple text substitution, and support YAML features like anchors and\nreferences. To build on the example above, let's define a stack that's\na little more complex::\n\n  stacks:\n    - name: webservers\n      class_path: stacker_blueprints.asg.AutoscalingGroup\n      variables:\n        InstanceType: ${web_instance_type}\n        IngressCIDRsByPort: ${ingress_cidrs_by_port}\n\nWe've defined a stack which expects a list of ingress CIDR's allowed access to\neach port. Our environment files would look like this::\n\n  # in the file: stage.yml\n  web_instance_type: m3.medium\n  ingress_cidrs_by_port:\n    80:\n      - 192.168.1.0/8\n    8080:\n      - 0.0.0.0/0\n\n  # in the file: prod.yml\n  web_instance_type: c4.xlarge\n  ingress_cidrs_by_port:\n    80:\n      - 192.168.1.0/8\n    443:\n      - 10.0.0.0/16\n      - 10.1.0.0/16\n\nThe YAML format allows for specifying lists, maps, and supports all `pyyaml`\nfunctionality allowed in the `safe_load()` function.\n\nVariable substitution in the YAML case is a bit more complex than in the\n`string.Template` case. Objects can only be substituted for variables in the\ncase where we perform a full substitution, such as this::\n\n  vpcID: ${vpc_variable}\n\nWe can not substitute an object in a sub-string, such as this::\n\n  vpcID: prefix-${vpc_variable}\n\nIt makes no sense to substitute a complex object in this case, and we will raise\nan error if that happens. You can still perform this substitution with\nprimitives; numbers, strings, but not dicts or lists.\n\n.. 
note::\n  Namespace defined in the environment file has been deprecated in favor of\n  defining the namespace in the config and will be removed in a future release.\n"
  },
  {
    "path": "docs/index.rst",
    "content": ".. stacker documentation master file, created by\n   sphinx-quickstart on Fri Aug 14 09:59:29 2015.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nWelcome to stacker's documentation!\n===================================\n\nstacker is a tool and library used to create & update multiple CloudFormation\nstacks. It was originally written at Remind_ and\nreleased to the open source community.\n\nstacker Blueprints are written in troposphere_, though the purpose of\nmost templates is to keep them as generic as possible and then use\nconfiguration to modify them.\n\nAt Remind we use stacker to manage all of our Cloudformation stacks -\nboth in development, staging and production without any major issues.\n\n\nMain Features\n-------------\n\n- Easily `Create/Update <commands.html#build>`_/`Destroy <commands.html#destroy>`_\n  many stacks in parallel (though with an understanding of cross-stack\n  dependencies)\n- Makes it easy to manage large environments in a single config, while still\n  allowing you to break each part of the environment up into its own\n  completely separate stack.\n- Manages dependencies between stacks, only launching one after all the stacks\n  it depends on are finished.\n- Only updates stacks that have changed and that have not been explicitly\n  locked or disabled.\n- Easily pass Outputs from one stack in as Variables on another (which also\n  automatically provides an implicit dependency)\n- Use `Environments <environments.html>`_ to manage slightly different\n  configuration in different environments.\n- Use `Lookups <lookups.html>`_ to allow dynamic fetching or altering of\n  data used in Variables.\n- A diff command for diffing your config against what is running in a live\n  CloudFormation environment.\n- A small library of pre-shared Blueprints can be found at the\n  stacker_blueprints_ repo, making things like setting up a VPC 
easy.\n\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   organizations_using_stacker\n   terminology\n   config\n   environments\n   translators\n   lookups\n   commands\n   blueprints\n   templates\n   API Docs <api/modules>\n\n\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n.. _Remind: http://www.remind.com/\n.. _troposphere: https://github.com/cloudtools/troposphere\n.. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints\n"
  },
  {
    "path": "docs/lookups.rst",
    "content": "=======\nLookups\n=======\n\nStacker provides the ability to dynamically replace values in the config via a\nconcept called lookups. A lookup is meant to take a value and convert\nit by calling out to another service or system.\n\nA lookup is denoted in the config with the ``${<lookup type> <lookup\ninput>}`` syntax. If ``<lookup type>`` isn't provided, stacker will\nfall back to use the ``output`` lookup .\n\nLookups are only resolved within `Variables\n<terminology.html#variables>`_. They can be nested in any part of a YAML\ndata structure and within another lookup itself.\n\n.. note::\n  If a lookup has a non-string return value, it can be the only lookup\n  within a value.\n\n  ie. if `custom` returns a list, this would raise an exception::\n\n    Variable: ${custom something}, ${output otherStack::Output}\n\n  This is valid::\n\n    Variable: ${custom something}\n\n\nFor example, given the following::\n\n  stacks:\n    - name: sg\n      class_path: some.stack.blueprint.Blueprint\n      variables:\n        Roles:\n          - ${output otherStack::IAMRole}\n        Values:\n          Env:\n            Custom: ${custom ${output otherStack::Output}}\n            DBUrl: postgres://${output dbStack::User}@${output dbStack::HostName}\n\nThe Blueprint would have access to the following resolved variables\ndictionary::\n\n  # variables\n  {\n    \"Roles\": [\"other-stack-iam-role\"],\n    \"Values\": {\n      \"Env\": {\n        \"Custom\": \"custom-output\",\n        \"DBUrl\": \"postgres://user@hostname\",\n      },\n    },\n  }\n\n\nstacker includes the following lookup types:\n\n  - `output lookup`_\n  - `ami lookup`_\n  - `custom lookup`_\n  - `default lookup`_\n  - `dynamodb lookup`_\n  - `envvar lookup`_\n  - `file lookup`_\n  - `hook_data lookup`_\n  - `kms lookup`_\n  - `rxref lookup`_\n  - `ssmstore lookup`_\n  - `xref lookup`_\n\n.. 
_`output lookup`:\n\nOutput Lookup\n-------------\n\nThe ``output`` lookup takes a value of the format:\n``<stack name>::<output name>`` and retrieves the output from the given stack\nname within the current namespace.\n\nstacker treats output lookups differently than other lookups by auto\nadding the referenced stack in the lookup as a requirement to the stack\nwhose variable the output value is being passed to.\n\nYou can specify an output lookup with the following syntax::\n\n  ConfVariable: ${output someStack::SomeOutput}\n\n\n.. _`default lookup`:\n\ndefault Lookup\n--------------\n\nThe ``default`` lookup type will check if a value exists for the variable\nin the environment file, then fall back to a default defined in the stacker\nconfig if the environment file doesn't contain the variable. This allows\ndefaults to be set at the config file level, while granting the user the\nability to override that value per environment.\n\nFormat of value::\n\n  <env_var>::<default value>\n\nFor example::\n\n  Groups: ${default app_security_groups::sg-12345,sg-67890}\n\nIf `app_security_groups` is defined in the environment file, its defined\nvalue will be returned. Otherwise, `sg-12345,sg-67890` will be the returned\nvalue.\n\n.. note::\n  The ``default`` lookup only supports checking if a variable is defined in\n  an environment file. It does not support other embedded lookups to see\n  if they exist. Only checking variables in the environment file is supported.\n  If you attempt to have the default lookup perform any other lookup that\n  fails, stacker will throw an exception for that lookup and will stop your\n  build before it gets a chance to fall back to the default in your config.\n\n.. 
_`kms lookup`:\n\nKMS Lookup\n----------\n\nThe ``kms`` lookup type decrypts its input value.\n\nAs an example, if you have a database and it has a parameter called\n``DBPassword`` that you don't want to store in clear text in your config\n(maybe because you want to check it into your version control system to\nshare with the team), you could instead encrypt the value using ``kms``.\n\nFor example::\n\n  # We use the aws cli to get the encrypted value for the string\n  # \"PASSWORD\" using the master key called 'myStackerKey' in us-east-1\n  $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \\\n      --plaintext \"PASSWORD\" --output text --query CiphertextBlob\n\n  CiD6bC8t2Y<...encrypted blob...>\n\n  # In stacker we would reference the encrypted value like:\n  DBPassword: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}\n\n  # The above would resolve to\n  DBPassword: PASSWORD\n\nThis requires that the person using stacker has access to the master key used\nto encrypt the value.\n\nIt is also possible to store the encrypted blob in a file (useful if the\nvalue is large) using the ``file://`` prefix, ie::\n\n  DockerConfig: ${kms file://dockercfg}\n\n.. note::\n  Lookups resolve the path specified with `file://` relative to\n  the location of the config file, not where the stacker command is run.\n\n.. _`xref lookup`:\n\nXRef Lookup\n-----------\n\nThe ``xref`` lookup type is very similar to the ``output`` lookup type, the\ndifference being that ``xref`` resolves output values from stacks that\naren't contained within the current stacker namespace, but are existing stacks\ncontaining outputs within the same region on the AWS account you are deploying\ninto. 
``xref`` allows you to lookup these outputs from the stacks already on\nyour account by specifying the stacks fully qualified name in the\nCloudFormation console.\n\nWhere the ``output`` type will take a stack name and use the current context\nto expand the fully qualified stack name based on the namespace, ``xref``\nskips this expansion because it assumes you've provided it with\nthe fully qualified stack name already. This allows you to reference\noutput values from any CloudFormation stack in the same region.\n\nAlso, unlike the ``output`` lookup type, ``xref`` doesn't impact stack\nrequirements.\n\nFor example::\n\n  ConfVariable: ${xref fully-qualified-stack::SomeOutput}\n\n.. _`rxref lookup`:\n\nRXRef Lookup\n------------\n\nThe ``rxref`` lookup type is very similar to the ``xref`` lookup type,\nthe difference being that ``rxref`` will lookup output values from stacks\nthat are relative to the current namespace but external to the stack, but\nwill not resolve them. ``rxref`` assumes the stack containing the output\nalready exists.\n\nWhere the ``xref`` type assumes you provided a fully qualified stack name,\n``rxref``, like ``output`` expands and retrieves the output from the given\nstack name within the current namespace, even if not defined in the stacker\nconfig you provided it.\n\nBecause there is no requirement to keep all stacks defined within the same\nstacker YAML config, you might need the ability to read outputs from other\nstacks deployed by stacker into your same account under the same namespace.\n``rxref`` gives you that ability. 
This is useful if you want to break up\nvery large configs into smaller groupings.\n\nAlso, unlike the ``output`` lookup type, ``rxref`` doesn't impact stack\nrequirements.\n\nFor example::\n\n  # in stacker.env\n  namespace: MyNamespace\n\n  # in stacker.yml\n  ConfVariable: ${rxref my-stack::SomeOutput}\n\n  # the above would effectively resolve to\n  ConfVariable: ${xref MyNamespace-my-stack::SomeOutput}\n\nAlthough possible, it is not recommended to use ``rxref`` for stacks defined\nwithin the same stacker YAML config.\n\n.. _`file lookup`:\n\nFile Lookup\n-----------\n\nThe ``file`` lookup type allows the loading of arbitrary data from files on\ndisk. The lookup additionally supports using a ``codec`` to manipulate or\nwrap the file contents prior to injecting it. The parameterized-b64 ``codec``\nis particularly useful to allow the interpolation of CloudFormation parameters\nin a UserData attribute of an instance or launch configuration.\n\nBasic examples::\n\n  # We've written a file to /some/path:\n  $ echo \"hello there\" > /some/path\n\n  # In stacker we would reference the contents of this file with the following\n  conf_key: ${file plain:file://some/path}\n\n  # The above would resolve to\n  conf_key: hello there\n\n  # Or, if we wanted a base64 encoded copy of the file data\n  conf_key: ${file base64:file://some/path}\n\n  # The above would resolve to\n  conf_key: aGVsbG8gdGhlcmUK\n\nSupported codecs:\n - plain - load the contents of the file untouched. This is the only codec that should be used\n   with raw CloudFormation templates (the other codecs are intended for blueprints).\n - base64 - encode the plain text file at the given path with base64 prior\n   to returning it\n - parameterized - the same as plain, but additionally supports\n   referencing CloudFormation parameters to create userdata that's\n   supplemented with information from the template, as is commonly needed\n   in EC2 UserData. 
For example, given a template parameter of BucketName,\n   the file could contain the following text::\n\n     #!/bin/sh\n     aws s3 sync s3://{{BucketName}}/somepath /somepath\n\n   and then you could use something like this in the YAML config file::\n\n     UserData: ${file parameterized:/path/to/file}\n\n   resulting in the UserData parameter being defined as::\n\n     { \"Fn::Join\" : [\"\", [\n       \"#!/bin/sh\\naws s3 sync s3://\",\n       {\"Ref\" : \"BucketName\"},\n       \"/somepath /somepath\"\n     ]] }\n\n - parameterized-b64 - the same as parameterized, with the results additionally\n   wrapped in { \"Fn::Base64\": ... } , which is what you actually need for\n   EC2 UserData\n - json - decode the file as JSON and return the resulting object\n - json-parameterized - Same as ``json``, but applying templating rules from\n   ``parameterized`` to every object *value*. Note that object *keys* are not\n   modified. Example (an external PolicyDocument)::\n\n     {\n      \"Version\": \"2012-10-17\",\n      \"Statement\": [\n        {\n          \"Effect\": \"Allow\",\n          \"Action\": [\n            \"some:Action\"\n          ],\n          \"Resource\": \"{{MyResource}}\"\n        }\n      ]\n     }\n\n - yaml - decode the file as YAML and return the resulting object. All strings\n   are returned as ``unicode`` even in Python 2.\n - yaml-parameterized - Same as ``json-parameterized``, but using YAML. 
Example::\n\n     Version: 2012-10-17\n     Statement:\n       - Effect: Allow\n         Action:\n           - \"some:Action\"\n         Resource: \"{{MyResource}}\"\n\n\nWhen using parameterized-b64 for UserData, you should use a local_parameter defined\nas such::\n\n  from troposphere import AWSHelperFn\n\n  \"UserData\": {\n    \"type\": AWSHelperFn,\n    \"description\": \"Instance user data\",\n    \"default\": Ref(\"AWS::NoValue\")\n  }\n\nand then assign UserData in a LaunchConfiguration or Instance to self.get_variables()[\"UserData\"].\nNote that we use AWSHelperFn as the type because the parameterized-b64 codec returns either a\nBase64 or a GenericHelperFn troposphere object.\n\n.. _`ssmstore lookup`:\n\nSSM Parameter Store Lookup\n--------------------------\n\nThe ``ssmstore`` lookup type retrieves a value from the Simple Systems\nManager Parameter Store.\n\nAs an example, if you have a database and it has a parameter called\n``DBUser`` that you don't want to store in clear text in your config,\nyou could instead store it as an SSM parameter named ``MyDBUser``.\n\nFor example::\n\n  # We use the aws cli to store the database username\n  $ aws ssm put-parameter --name \"MyDBUser\" --type \"String\" \\\n      --value \"root\"\n\n  # In stacker we would reference the value like:\n  DBUser: ${ssmstore us-east-1@MyDBUser}\n\n  # Which would resolve to:\n  DBUser: root\n\nEncrypted values (\"SecureStrings\") can also be used, which will be\nautomatically decrypted (assuming the Stacker user has access to the\nassociated KMS key). Care should be taken when using this with encrypted\nvalues (i.e. a safe policy is to only use it with ``no_echo`` CFNString\nvalues)\n\nThe region can be omitted (e.g. ``DBUser: ${ssmstore MyDBUser}``), in which\ncase ``us-east-1`` will be assumed.\n\n.. 
_`dynamodb lookup`:\n\nDynamoDb Lookup\n--------------------------\n\nThe ``dynamodb`` lookup type retrieves a value from a DynamoDb table.\n\nAs an example, if you have a Dynamo Table named ``TestTable`` and it has an Item\nwith a Primary Partition key called ``TestKey`` and a value named ``BucketName``\n, you can look it up by using Stacker. The lookup key in this case is TestVal\n\nFor example::\n\n  # We can reference that dynamo value\n  BucketName: ${dynamodb us-east-1:TestTable@TestKey:TestVal.BucketName}\n\n  # Which would resolve to:\n  BucketName: stacker-test-bucket\n\nYou can lookup other data types by putting the data type in the lookup. Valid\nvalues are \"S\"(String), \"N\"(Number), \"M\"(Map), \"L\"(List).\n\nFor example::\n\n  ServerCount: ${dynamodb us-east-1:TestTable@TestKey:TestVal.ServerCount[N]}\n\n  This would return an int value, rather than a string\n\nYou can lookup values inside of a map:\n\nFor example::\n\n  ServerCount: ${dynamodb us-east-1:TestTable@TestKey:TestVal.ServerInfo[M].\n                                                                ServerCount[N]}\n\n\n.. _`envvar lookup`:\n\nShell Environment Lookup\n------------------------\n\nThe ``envvar`` lookup type retrieves a value from a variable in the shell's\nenvironment.\n\nExample::\n\n  # Set an environment variable in the current shell.\n  $ export DATABASE_USER=root\n\n  # In the stacker config we could reference the value:\n  DBUser: ${envvar DATABASE_USER}\n\n  # Which would resolve to:\n  DBUser: root\n\nYou can also get the variable name from a file, by using the ``file://`` prefix\nin the lookup, like so::\n\n  DBUser: ${envvar file://dbuser_file.txt}\n\n.. _`ami lookup`:\n\nEC2 AMI Lookup\n--------------\n\nThe ``ami`` lookup is meant to search for the most recent AMI created that\nmatches the given filters.\n\nValid arguments::\n\n  region OPTIONAL ONCE:\n      e.g. 
us-east-1@\n\n  owners (comma delimited) REQUIRED ONCE:\n      aws_account_id | amazon | self\n\n  name_regex (a regex) REQUIRED ONCE:\n      e.g. my-ubuntu-server-[0-9]+\n\n  executable_users (comma delimited) OPTIONAL ONCE:\n      aws_account_id | amazon | self\n\nAny other arguments specified are sent as filters to the aws api\nFor example, \"architecture:x86_64\" will add a filter.\n\nExample::\n\n  # Grabs the most recently created AMI that is owned by either this account,\n  # amazon, or the account id 888888888888 that has a name that matches\n  # the regex \"server[0-9]+\" and has \"i386\" as its architecture.\n\n  # Note: The region is optional, and defaults to the current stacker region\n  ImageId: ${ami [<region>@]owners:self,888888888888,amazon name_regex:server[0-9]+ architecture:i386}\n\n.. _`hook_data lookup`:\n\nHook Data Lookup\n----------------\n\nWhen using hooks, you can have the hook store results in the\n`hook_data`_ dictionary on the context by setting *data_key* in the hook\nconfig.\n\nThis lookup lets you look up values in that dictionary. A good example of this\nis when you use the `aws_lambda hook`_ to upload AWS Lambda code, then need to\npass that code object as the *Code* variable in the `aws_lambda blueprint`_\ndictionary.\n\nExample::\n\n  # If you set the \"data_key\" config on the aws_lambda hook to be \"myfunction\"\n  # and you name the function package \"TheCode\" you can get the troposphere\n  # awslambda.Code object with:\n\n  Code: ${hook_data myfunction::TheCode}\n\n.. _`custom lookup`:\n\nCustom Lookup\n--------------\n\nA custom lookup may be registered within the config.\nFor more information see `Configuring Lookups <config.html#lookups>`_.\n\n\n.. _`hook_data`: http://stacker.readthedocs.io/en/latest/config.html#pre-post-hooks\n.. _`aws_lambda hook`: http://stacker.readthedocs.io/en/latest/api/stacker.hooks.html#stacker.hooks.aws_lambda.upload_lambda_functions\n.. 
_`aws_lambda blueprint`: https://github.com/cloudtools/stacker_blueprints/blob/master/stacker_blueprints/aws_lambda.py\n"
  },
  {
    "path": "docs/organizations_using_stacker.rst",
    "content": "===========================\nOrganizations using stacker\n===========================\n\nBelow is a list of organizations that currently use stacker in some sense. If\nyou are using stacker, please submit a PR and add your company below!\n\nRemind_\n\n  Remind helps educators send quick, simple messages to students and parents on\n  any device. We believe that when communication improves, relationships get\n  stronger. Education gets better. \n\n  Remind is the original author of stacker, and has been using it to manage the\n  infrastructure in multiple environments (including production) since early\n  2015.\n\n\n.. _Remind: https://www.remind.com/\n\n`Onica`_\n\n  Onica is a global technology consulting company at the forefront of \n  cloud computing. Through collaboration with Amazon Web Services, \n  we help customers embrace a broad spectrum of innovative solutions. \n  From migration strategy to operational excellence, cloud native \n  development, and immersive transformation. Onica is a full spectrum \n  AWS integrator.\n\n.. _`Onica`: https://www.onica.com\n\nAltoStack_\n\n  AltoStack is a technology and services consultancy specialising in Cloud\n  Consultancy, DevOps, Continuous Delivery and Configuration Management.\n\n  From strategy and operations to culture and technology, AltoStack helps\n  businesses identify and address opportunities for growth and profitability.\n\n  We are an Amazon Web Services - (AWS) APN Consulting Partner.\n\n.. _AltoStack: https://altostack.io/\n\nCobli_\n\n  Cobli develops cutting-edge solutions for fleet management efficiency and\n  intelligence in South America. We bring advanced tracking, analysis and\n  predictions to fleets of any size by connecting vehicles to an easy to use\n  platform through smart devices.\n\n  Cobli manages most of its AWS infrastructure using stacker, and we encourage\n  our developers to contribute to free-software whenever possible.\n\n.. _Cobli: https://cobli.co/\n"
  },
  {
    "path": "docs/templates.rst",
    "content": "==========\nTemplates\n==========\n\nCloudFormation templates can be provided via python Blueprints_ or JSON/YAML.\nJSON/YAML templates are specified for stacks via the ``template_path`` config\noption (see `Stacks <config.html#stacks>`_).\n\nJinja2 Templating\n=================\n\nTemplates with a ``.j2`` extension will be parsed using `Jinja2 \n<http://jinja.pocoo.org/>`_. The stacker ``context`` and ``mappings`` objects\nand stack ``variables`` objects are available for use in the template:\n\n.. code-block:: yaml\n\n    Description: TestTemplate\n    Resources:\n      Bucket:\n        Type: AWS::S3::Bucket\n        Properties:\n          BucketName: {{ context.environment.foo }}-{{ variables.myparamname }}\n"
  },
  {
    "path": "docs/terminology.rst",
    "content": "===========\nTerminology\n===========\n\nblueprint\n=========\n\n.. _blueprints:\n\nA python class that is responsible for creating a CloudFormation template.\nUsually this is built using troposphere_.\n\nconfig\n======\n\nA YAML config file that defines the `stack definitions`_ for all of the\nstacks you want stacker to manage.\n\nenvironment\n===========\n\nA set of variables that can be used inside the config, allowing you to\nslightly adjust configs based on which environment you are launching.\n\nnamespace\n=========\n\nA way to uniquely identify a stack. Used to determine the naming of many\nthings, such as the S3 bucket where compiled templates are stored, as well\nas the prefix for stack names.\n\nstack definition\n================\n\n.. _stack definitions:\n\nDefines the stack_ you want to build, usually there are multiple of these in\nthe config_. It also defines the variables_ to be used when building the\nstack_.\n\nstack\n=====\n\n.. _stacks:\n\nThe resulting stack of resources that is created by CloudFormation when it\nexecutes a template. Each stack managed by stacker is defined by a\n`stack definition`_ in the config_.\n\noutput\n======\n\nA CloudFormation Template concept. Stacks can output values, allowing easy\naccess to those values. Often used to export the unique ID's of resources that\ntemplates create. Stacker makes it simple to pull outputs from one stack and\nthen use them as a variable_ in another stack.\n\nvariable\n========\n\n.. _variables:\n\nDynamic variables that are passed into stacks when they are being built.\nVariables are defined within the config_.\n\nlookup\n======\n\nA method for expanding values in the config_ at build time. By default\nlookups are used to reference Output values from other stacks_ within the\nsame namespace_.\n\nprovider\n========\n\nProvider that supports provisioning rendered blueprints_. 
By default, an\nAWS provider is used.\n\ncontext\n=======\n\nContext is responsible for translating the values passed in via the\ncommand line and specified in the config_ to stacks_.\n\n.. _troposphere: https://github.com/cloudtools/troposphere\n.. _CloudFormation Parameters: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html\n"
  },
  {
    "path": "docs/translators.rst",
    "content": "===========\nTranslators\n===========\n\n.. note::\n  Translators have been deprecated in favor of `Lookups <lookups.html>`_\n  and will be removed in a future release.\n\nStacker provides the ability to dynamically replace values in the config via a\nconcept called translators. A translator is meant to take a value and convert\nit by calling out to another service or system. This is initially meant to\ndeal with encrypting fields in your config.\n\nTranslators are custom YAML constructors. As an example, if you have a\ndatabase and it has a parameter called ``DBPassword`` that you don't want to\nstore in clear text in your config (maybe because you want to check it into\nyour version control system to share with the team), you could instead\nencrypt the value using ``kms``. For example::\n\n  # We use the aws cli to get the encrypted value for the string\n  # \"PASSWORD\" using the master key called 'myStackerKey' in us-east-1\n  $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \\\n      --plaintext \"PASSWORD\" --output text --query CiphertextBlob\n\n  CiD6bC8t2Y<...encrypted blob...>\n\n  # In stacker we would reference the encrypted value like:\n  DBPassword: !kms us-east-1@CiD6bC8t2Y<...encrypted blob...>\n\n  # The above would resolve to\n  DBPassword: PASSWORD\n\nThis requires that the person using stacker has access to the master key used\nto encrypt the value.\n\nIt is also possible to store the encrypted blob in a file (useful if the\nvalue is large) using the `file://` prefix, ie::\n\n  DockerConfig: !kms file://dockercfg\n\n.. note::\n  Translators resolve the path specified with `file://` relative to\n  the location of the config file, not where the stacker command is run.\n"
  },
  {
    "path": "examples/cross-account/.aws/config",
    "content": "# The master account is like the root of our AWS account tree. It's the\n# entrypoint for all other profiles to sts.AssumeRole from.\n[profile master]\nregion = us-east-1\nrole_arn = arn:aws:iam::<master account id>:role/Stacker\nrole_session_name = stacker\ncredential_source = Environment\n\n[profile prod]\nregion = us-east-1\nrole_arn = arn:aws:iam::<prod account id>:role/Stacker\nrole_session_name = stacker\nsource_profile = master\n\n[profile stage]\nregion = us-east-1\nrole_arn = arn:aws:iam::<stage account id>:role/Stacker\nrole_session_name = stacker\nsource_profile = master\n"
  },
  {
    "path": "examples/cross-account/README.md",
    "content": "This is a secure example setup to support cross-account provisioning of stacks with stacker. It:\n\n1. Sets up an appropriate [AWS Config File](https://docs.aws.amazon.com/cli/latest/topic/config-vars.html) in [.aws/config] for stacker to use, with profiles for a \"master\", \"prod\" and \"stage\" AWS account.\n2. Configures a stacker bucket in the \"master\" account, with permissions that allows CloudFormation in \"sub\" accounts to fetch templates.\n\n## Setup\n\n### Create IAM roles\n\nFirst things first, we need to create some IAM roles that stacker can assume to make changes in each AWS account. This is generally a manual step after you've created a new AWS account.\n\nIn each account, create a new stack using the [stacker-role.yaml](./templates/stacker-role.yaml) CloudFormation template. This will create an IAM role called `Stacker` in the target account, with a trust policy that will allow the `Stacker` role in the master account to `sts:AssumeRole` it.\n\nOnce the roles have been created, update the `role_arn`'s in [.aws/config] to match the ones that were just created.\n\n```console\n$ aws cloudformation describe-stacks \\\n  --profile <profile> \\\n  --stack-name <stack name> \\\n  --query 'Stacks[0].Outputs' --output text\nStackerRole     arn:aws:iam::<account id>:role/Stacker\n```\n\n### GetSessionToken\n\nIn order for stacker to be able to call `sts:AssumeRole` with the roles we've specified in [.aws/config], we'll need to pass it credentials via environment variables (see [`credential_source = Environment`](./.aws/config)) with appropriate permissions. 
Generally, the best way to do this is to obtain temporary credentials via the `sts:GetSessionToken` API, while passing an MFA OTP.\n\nAssuming you have an IAM user in your master account, you can get temporary credentials using the AWS CLI:\n\n```console\n$ aws sts get-session-token \\\n  --serial-number arn:aws:iam::<master account id>:mfa/<iam username> \\\n  --token-code <mfa otp>\n```\n\nAt Remind, we like to use [aws-vault], which allows us to simplify this to:\n\n```console\n$ aws-vault exec default -- env\nAWS_VAULT=default\nAWS_DEFAULT_REGION=us-east-1\nAWS_REGION=us-east-1\nAWS_ACCESS_KEY_ID=ASIAJ...ICSXSQ\nAWS_SECRET_ACCESS_KEY=4oFx...LSNjpFq\nAWS_SESSION_TOKEN=FQoDYXdzED...V6Wrdko2KjW1QU=\nAWS_SECURITY_TOKEN=FQoDYXdzED...V6Wrdko2KjW1QU=\n```\n\nFor the rest of this guide, I'll use `aws-vault` for simplicity.\n\n**NOTE**: You'll need to ensure that this IAM user has access to call `sts:AssumeRole` on the `Stacker` IAM role in the \"master\" account.\n\n### Bootstrap Stacker Bucket\n\nAfter we have some IAM roles that stacker can assume, and some temporary credentials, we'll want to create a stacker bucket in the master account, and allow the Stacker roles in sub-accounts access to fetch templates from it.\n\nTo do that, first, change the \"Roles\" variable in [stacker.yaml], then:\n\n```console\n$ aws-vault exec default # GetSessionToken + MFA\n$ AWS_CONFIG_FILE=.aws/config stacker build --profile master --stacks stacker-bucket stacker.yaml\n```\n\nOnce the bucket has been created, replace `stacker_bucket` with the name of the bucket in [stacker.yaml].\n\n```console\n$ aws cloudformation describe-stacks \\\n  --profile master \\\n  --stack-name stacker-bucket \\\n  --query 'Stacks[0].Outputs' --output text\nBucketId     stacker-bucket-1234\n```\n\n### Provision stacks\n\nNow that everything is setup, you can add new stacks to your config file, and target them to a specific AWS account using the `profile` option. 
For example, if I wanted to create a new VPC in both the \"production\" and \"staging\" accounts:\n\n```yaml\nstacks:\n  - name: prod/vpc\n    stack_name: vpc\n    class_path: stacker_blueprints.vpc.VPC\n    profile: prod # target this to the production account\n  - name: stage/vpc\n    stack_name: vpc\n    class_path: stacker_blueprints.vpc.VPC\n    profile: stage # target this to the staging account\n```\n\n```console\n$ AWS_CONFIG_FILE=.aws/config stacker build --profile master stacker.yaml\n```\n\n[.aws/config]: ./.aws/config\n[stacker.yaml]: ./stacker.yaml\n[aws-vault]: https://github.com/99designs/aws-vault\n"
  },
  {
    "path": "examples/cross-account/stacker.yaml",
    "content": "---\nnamespace: ''\n\n# We'll set this to an empty string until we've provisioned the\n# \"stacker-bucket\" stack below.\nstacker_bucket: ''\n\nstacks:\n  # This stack will provision an S3 bucket for stacker to use to upload\n  # templates. This will also configure the bucket with a bucket policy\n  # allowing CloudFormation in other accounts to fetch templates from it.\n  - name: stacker-bucket\n    # We're going to \"target\" this stack in our \"master\" account.\n    profile: master\n    template_path: templates/stacker-bucket.yaml\n    variables:\n      # Change these to the correct AWS account IDs, must be comma seperated list\n      Roles: arn:aws:iam::<prod account id>:role/Stacker, arn:aws:iam::<stage account id>:role/Stacker\n"
  },
  {
    "path": "examples/cross-account/templates/stacker-bucket.yaml",
    "content": "---\nAWSTemplateFormatVersion: \"2010-09-09\"\nDescription: A bucket for stacker to store CloudFormation templates\nParameters:\n  Roles:\n    Type: CommaDelimitedList\n    Description: A list of IAM roles that will be given read access on the bucket.\n\nResources:\n  StackerBucket:\n    Type: AWS::S3::Bucket\n    Properties:\n      BucketEncryption:\n        ServerSideEncryptionConfiguration:\n        - ServerSideEncryptionByDefault:\n            SSEAlgorithm: AES256\n\n  BucketPolicy:\n    Type: AWS::S3::BucketPolicy\n    Properties:\n      Bucket:\n        Ref: StackerBucket\n      PolicyDocument:\n        Statement:\n        - Action:\n          - s3:GetObject\n          Effect: Allow\n          Principal:\n            AWS:\n              Ref: Roles\n          Resource:\n          - Fn::Sub: arn:aws:s3:::${StackerBucket}/*\n\nOutputs:\n  BucketId:\n    Value:\n      Ref: StackerBucket\n"
  },
  {
    "path": "examples/cross-account/templates/stacker-role.yaml",
    "content": "---\nAWSTemplateFormatVersion: \"2010-09-09\"\nDescription: A role that stacker can assume\nParameters:\n  MasterAccountId:\n    Type: String\n    Description: The 12-digit ID for the master account\n    MinLength: 12\n    MaxLength: 12\n    AllowedPattern: \"[0-9]+\"\n    ConstraintDescription: Must contain a 12 digit account ID\n  RoleName:\n    Type: String\n    Description: The name of the stacker role.\n    Default: Stacker\n\n\nConditions:\n  # Check if we're creating this role in the master account.\n  InMasterAccount:\n    Fn::Equals:\n      - { Ref: \"AWS::AccountId\" }\n      - { Ref: \"MasterAccountId\" }\n\nResources:\n  StackerRole:\n    Type: AWS::IAM::Role\n    Properties:\n      RoleName:\n        Ref: RoleName\n      AssumeRolePolicyDocument:\n        Version: \"2012-10-17\"\n        Statement:\n          Fn::If:\n            - InMasterAccount\n            - Effect: Allow\n              Principal:\n                AWS:\n                  Fn::Sub: \"arn:aws:iam::${MasterAccountId}:root\"\n              Action: sts:AssumeRole\n              Condition:\n                'Null':\n                  aws:MultiFactorAuthAge: false\n            - Effect: Allow\n              Principal:\n                AWS:\n                  Fn::Sub: \"arn:aws:iam::${MasterAccountId}:role/${RoleName}\"\n              Action: sts:AssumeRole\n              Condition:\n                'Null':\n                  aws:MultiFactorAuthAge: false\n\n  # Generally, Stacker will need fairly wide open permissions, since it will be\n  # managing all resources in an account.\n  StackerPolicies:\n    Type: AWS::IAM::Policy\n    Properties:\n      PolicyName: Stacker\n      PolicyDocument:\n        Version: \"2012-10-17\"\n        Statement:\n          - Effect: Allow\n            Action: [\"*\"]\n            Resource: \"*\"\n      Roles:\n        - Ref: StackerRole\n\nOutputs:\n  StackerRole:\n    Value:\n      Fn::GetAtt:\n        - StackerRole\n        - Arn\n"
  },
  {
    "path": "requirements.in",
    "content": "troposphere>=3.0.0\nbotocore>=1.12.111\nboto3>=1.9.111,<2.0\nPyYAML>=3.13b1\nawacs>=0.6.0\ngitpython>=3.0\njinja2>=2.7\nschematics>=2.1.0\nformic2\npython-dateutil>=2.0,<3.0\nMarkupSafe>=2\nmore-itertools\nrsa>=4.7\npython-jose\nfuture\n"
  },
  {
    "path": "scripts/compare_env",
    "content": "#!/usr/bin/env python\n\"\"\" A script to compare environment files. \"\"\"\n\nimport argparse\nimport os.path\n\nfrom stacker.environment import parse_environment\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument(\n        \"-i\", \"--ignore-changed\", action=\"store_true\",\n        help=\"Only print added & deleted keys, not changed keys.\")\n    parser.add_argument(\n        \"-s\", \"--show-changes\", action=\"store_true\",\n        help=\"Print content changes.\")\n    parser.add_argument(\n        \"first_env\", type=str,\n        help=\"The first environment file to compare.\")\n    parser.add_argument(\n        \"second_env\", type=str,\n        help=\"The second environment file to compare.\")\n\n    return parser.parse_args()\n\n\ndef parse_env_file(path):\n    expanded_path = os.path.expanduser(path)\n    with open(expanded_path) as fd:\n        return parse_environment(fd.read())\n\n\ndef main():\n    args = parse_args()\n\n    first_env = parse_env_file(args.first_env)\n    second_env = parse_env_file(args.second_env)\n\n    first_env_keys = set(first_env.keys())\n    second_env_keys = set(second_env.keys())\n\n    common_keys = first_env_keys & second_env_keys\n    removed_keys = first_env_keys - second_env_keys\n    added_keys = second_env_keys - first_env_keys\n\n    changed_keys = set()\n\n    for k in common_keys:\n        if first_env[k] != second_env[k]:\n            changed_keys.add(k)\n\n    print \"-- Added keys:\"\n    print \"  %s\" % \", \".join(added_keys)\n    print\n    print \"-- Removed keys:\"\n    print \"  %s\" % \", \".join(removed_keys)\n    print\n    print \"-- Changed keys:\"\n    if not args.show_changes:\n        print \"  %s\" % \", \".join(changed_keys)\n    if args.show_changes:\n        for k in changed_keys:\n            print \"  %s:\" % (k)\n            print \"    < %s\" % (first_env[k])\n            print \"    > %s\" % (second_env[k])\n\nif 
__name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "scripts/docker-stacker",
    "content": "#!/bin/bash\n\n# This script is meant to be used from within the Docker image for stacker. It\n# simply installs the stacks at /stacks and then runs stacker.\n\nset -e\n\ncd /stacks\npython setup.py install\n\nexec stacker $@\n"
  },
  {
    "path": "scripts/stacker",
    "content": "#!/usr/bin/env python\n\nfrom stacker.logger import setup_logging\nfrom stacker.commands import Stacker\n\nif __name__ == \"__main__\":\n    stacker = Stacker(setup_logging=setup_logging)\n    args = stacker.parse_args()\n    stacker.configure(args)\n    args.run(args)\n"
  },
  {
    "path": "scripts/stacker.cmd",
    "content": "@echo OFF\nREM=\"\"\"\nsetlocal\nset PythonExe=\"\"\nset PythonExeFlags=\n\nfor %%i in (cmd bat exe) do (\n    for %%j in (python.%%i) do (\n        call :SetPythonExe \"%%~$PATH:j\"\n    )\n)\nfor /f \"tokens=2 delims==\" %%i in ('assoc .py') do (\n    for /f \"tokens=2 delims==\" %%j in ('ftype %%i') do (\n        for /f \"tokens=1\" %%k in (\"%%j\") do (\n            call :SetPythonExe %%k\n        )\n    )\n)\n%PythonExe% -x %PythonExeFlags% \"%~f0\" %*\nexit /B %ERRORLEVEL%\ngoto :EOF\n\n:SetPythonExe\nif not [\"%~1\"]==[\"\"] (\n    if [%PythonExe%]==[\"\"] (\n        set PythonExe=\"%~1\"\n    )\n)\ngoto :EOF\n\"\"\"\n\n# ===================================================\n# Python script starts here\n# Above helper adapted from https://github.com/aws/aws-cli/blob/1.11.121/bin/aws.cmd\n# ===================================================\n\n#!/usr/bin/env python\n\nfrom stacker.logger import setup_logging\nfrom stacker.commands import Stacker\n\nif __name__ == \"__main__\":\n    stacker = Stacker(setup_logging=setup_logging)\n    args = stacker.parse_args()\n    stacker.configure(args)\n    args.run(args)\n"
  },
  {
    "path": "setup.cfg",
    "content": "[metadata]\ndescription-file = README.rst\n\n[aliases]\ntest = pytest\n\n[tool:pytest]\ntestpaths = stacker/tests\ncov = stacker\nfilterwarnings =\n  ignore::DeprecationWarning\n"
  },
  {
    "path": "setup.py",
    "content": "import os\nfrom setuptools import setup, find_packages\n\nVERSION = \"1.7.2\"\n\nsrc_dir = os.path.dirname(__file__)\n\ndef get_install_requirements(path):\n    content = open(os.path.join(os.path.dirname(__file__), path)).read()\n    return [req for req in content.split(\"\\n\") if req != \"\" and not req.startswith(\"#\")]\n\ninstall_requires = get_install_requirements(\"requirements.in\")\n\nsetup_requires = ['pytest-runner']\n\ntests_require = get_install_requirements(\"test-requirements.in\")\n\nscripts = [\n    \"scripts/compare_env\",\n    \"scripts/docker-stacker\",\n    \"scripts/stacker.cmd\",\n    \"scripts/stacker\",\n]\n\n\ndef read(filename):\n    full_path = os.path.join(src_dir, filename)\n    with open(full_path) as fd:\n        return fd.read()\n\n\nif __name__ == \"__main__\":\n    setup(\n        name=\"stacker\",\n        version=VERSION,\n        author=\"Michael Barrett\",\n        author_email=\"loki77@gmail.com\",\n        license=\"New BSD license\",\n        url=\"https://github.com/cloudtools/stacker\",\n        description=\"AWS CloudFormation Stack manager\",\n        long_description=read(\"README.rst\"),\n        packages=find_packages(),\n        scripts=scripts,\n        install_requires=install_requires,\n        tests_require=tests_require,\n        setup_requires=setup_requires,\n        extras_require=dict(testing=tests_require),\n        classifiers=[\n            \"Development Status :: 5 - Production/Stable\",\n            \"Environment :: Console\",\n            \"License :: OSI Approved :: BSD License\",\n            \"Programming Language :: Python :: 3.7\",\n            \"Programming Language :: Python :: 3.8\",\n            \"Programming Language :: Python :: 3.9\",\n            \"Programming Language :: Python :: 3.10\",\n        ],\n    )\n"
  },
  {
    "path": "stacker/__init__.py",
    "content": "\n__version__ = \"1.7.2\"\n"
  },
  {
    "path": "stacker/actions/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/actions/base.py",
    "content": "import os\nimport sys\nimport logging\nimport threading\n\nfrom ..dag import walk, ThreadedWalker, UnlimitedSemaphore\nfrom ..plan import Step, build_plan, build_graph\n\nimport botocore.exceptions\nfrom stacker.session_cache import get_session\nfrom stacker.exceptions import PlanFailed\n\nfrom ..status import (\n    COMPLETE\n)\n\nfrom stacker.util import (\n    ensure_s3_bucket,\n    get_s3_endpoint,\n)\n\nlogger = logging.getLogger(__name__)\n\n# After submitting a stack update/create, this controls how long we'll wait\n# between calls to DescribeStacks to check on it's status. Most stack updates\n# take at least a couple minutes, so 30 seconds is pretty reasonable and inline\n# with the suggested value in\n# https://github.com/boto/botocore/blob/1.6.1/botocore/data/cloudformation/2010-05-15/waiters-2.json#L22\n#\n# This can be controlled via an environment variable, mostly for testing.\nSTACK_POLL_TIME = int(os.environ.get(\"STACKER_STACK_POLL_TIME\", 30))\n\n\ndef build_walker(concurrency):\n    \"\"\"This will return a function suitable for passing to\n    :class:`stacker.plan.Plan` for walking the graph.\n\n    If concurrency is 1 (no parallelism) this will return a simple topological\n    walker that doesn't use any multithreading.\n\n    If concurrency is 0, this will return a walker that will walk the graph as\n    fast as the graph topology allows.\n\n    If concurrency is greater than 1, it will return a walker that will only\n    execute a maximum of concurrency steps at any given time.\n\n    Returns:\n        func: returns a function to walk a :class:`stacker.dag.DAG`.\n    \"\"\"\n    if concurrency == 1:\n        return walk\n\n    semaphore = UnlimitedSemaphore()\n    if concurrency > 1:\n        semaphore = threading.Semaphore(concurrency)\n\n    return ThreadedWalker(semaphore).walk\n\n\ndef plan(description, stack_action, context,\n         tail=None, reverse=False):\n    \"\"\"A simple helper that builds a graph based plan from 
a set of stacks.\n\n    Args:\n        description (str): a description of the plan.\n        action (func): a function to call for each stack.\n        context (:class:`stacker.context.Context`): a\n            :class:`stacker.context.Context` to build the plan from.\n        tail (func): an optional function to call to tail the stack progress.\n        reverse (bool): if True, execute the graph in reverse (useful for\n            destroy actions).\n\n    Returns:\n        :class:`plan.Plan`: The resulting plan object\n    \"\"\"\n\n    def target_fn(*args, **kwargs):\n        return COMPLETE\n\n    steps = [\n        Step(stack, fn=stack_action, watch_func=tail)\n        for stack in context.get_stacks()]\n\n    steps += [\n        Step(target, fn=target_fn) for target in context.get_targets()]\n\n    graph = build_graph(steps)\n\n    return build_plan(\n        description=description,\n        graph=graph,\n        targets=context.stack_names,\n        reverse=reverse)\n\n\ndef stack_template_key_name(blueprint):\n    \"\"\"Given a blueprint, produce an appropriate key name.\n\n    Args:\n        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\n            object to create the key from.\n\n    Returns:\n        string: Key name resulting from blueprint.\n    \"\"\"\n    name = blueprint.name\n    return \"stack_templates/%s/%s-%s.json\" % (blueprint.context.get_fqn(name),\n                                              name,\n                                              blueprint.version)\n\n\ndef stack_template_url(bucket_name, blueprint, endpoint):\n    \"\"\"Produces an s3 url for a given blueprint.\n\n    Args:\n        bucket_name (string): The name of the S3 bucket where the resulting\n            templates are stored.\n        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\n            object to create the URL to.\n        endpoint (string): The s3 endpoint used for the bucket.\n\n    Returns:\n        string: 
S3 URL.\n    \"\"\"\n    key_name = stack_template_key_name(blueprint)\n    return \"%s/%s/%s\" % (endpoint, bucket_name, key_name)\n\n\nclass BaseAction(object):\n\n    \"\"\"Actions perform the actual work of each Command.\n\n    Each action is tied to a :class:`stacker.commands.base.BaseCommand`, and\n    is responsible for building the :class:`stacker.plan.Plan` that will be\n    executed to perform that command.\n\n    Args:\n        context (:class:`stacker.context.Context`): The stacker context for\n            the current run.\n        provider_builder (:class:`stacker.providers.base.BaseProviderBuilder`,\n            optional): An object that will build a provider that will be\n            interacted with in order to perform the necessary actions.\n    \"\"\"\n\n    def __init__(self, context, provider_builder=None, cancel=None):\n        self.context = context\n        self.provider_builder = provider_builder\n        self.bucket_name = context.bucket_name\n        self.cancel = cancel or threading.Event()\n        self.bucket_region = context.config.stacker_bucket_region\n        if not self.bucket_region and provider_builder:\n            self.bucket_region = provider_builder.region\n        self.s3_conn = get_session(self.bucket_region).client('s3')\n\n    def ensure_cfn_bucket(self):\n        \"\"\"The CloudFormation bucket where templates will be stored.\"\"\"\n        if self.bucket_name:\n            ensure_s3_bucket(self.s3_conn,\n                             self.bucket_name,\n                             self.bucket_region)\n\n    def stack_template_url(self, blueprint):\n        return stack_template_url(\n            self.bucket_name, blueprint, get_s3_endpoint(self.s3_conn)\n        )\n\n    def s3_stack_push(self, blueprint, force=False):\n        \"\"\"Pushes the rendered blueprint's template to S3.\n\n        Verifies that the template doesn't already exist in S3 before\n        pushing.\n\n        Returns the URL to the template in S3.\n  
      \"\"\"\n        key_name = stack_template_key_name(blueprint)\n        template_url = self.stack_template_url(blueprint)\n        try:\n            template_exists = self.s3_conn.head_object(\n                Bucket=self.bucket_name, Key=key_name) is not None\n        except botocore.exceptions.ClientError as e:\n            if e.response['Error']['Code'] == '404':\n                template_exists = False\n            else:\n                raise\n\n        if template_exists and not force:\n            logger.debug(\"Cloudformation template %s already exists.\",\n                         template_url)\n            return template_url\n        self.s3_conn.put_object(Bucket=self.bucket_name,\n                                Key=key_name,\n                                Body=blueprint.rendered,\n                                ServerSideEncryption='AES256',\n                                ACL='bucket-owner-full-control')\n        logger.debug(\"Blueprint %s pushed to %s.\", blueprint.name,\n                     template_url)\n        return template_url\n\n    def execute(self, *args, **kwargs):\n        try:\n            self.pre_run(*args, **kwargs)\n            self.run(*args, **kwargs)\n            self.post_run(*args, **kwargs)\n        except PlanFailed as e:\n            logger.error(str(e))\n            sys.exit(1)\n\n    def pre_run(self, *args, **kwargs):\n        pass\n\n    def run(self, *args, **kwargs):\n        raise NotImplementedError(\"Subclass must implement \\\"run\\\" method\")\n\n    def post_run(self, *args, **kwargs):\n        pass\n\n    def build_provider(self, stack):\n        \"\"\"Builds a :class:`stacker.providers.base.Provider` suitable for\n        operating on the given :class:`stacker.Stack`.\"\"\"\n        return self.provider_builder.build(region=stack.region,\n                                           profile=stack.profile)\n\n    @property\n    def provider(self):\n        \"\"\"Some actions need a generic provider 
using the default region (e.g.\n        hooks).\"\"\"\n        return self.provider_builder.build()\n\n    def _tail_stack(self, stack, cancel, retries=0, **kwargs):\n        provider = self.build_provider(stack)\n        return provider.tail_stack(stack, cancel, retries, **kwargs)\n"
  },
  {
    "path": "stacker/actions/build.py",
    "content": "import logging\n\nfrom .base import BaseAction, plan, build_walker\nfrom .base import STACK_POLL_TIME\n\nfrom ..providers.base import Template\nfrom stacker.hooks import utils\nfrom ..exceptions import (\n    MissingParameterException,\n    StackDidNotChange,\n    StackDoesNotExist,\n    CancelExecution,\n)\n\nfrom ..status import (\n    NotSubmittedStatus,\n    NotUpdatedStatus,\n    DidNotChangeStatus,\n    SubmittedStatus,\n    CompleteStatus,\n    FailedStatus,\n    SkippedStatus,\n    PENDING,\n    WAITING,\n    SUBMITTED,\n    INTERRUPTED\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_stack_tags(stack):\n    \"\"\"Builds a common set of tags to attach to a stack\"\"\"\n    return [{'Key': t[0], 'Value': t[1]} for t in stack.tags.items()]\n\n\ndef should_update(stack):\n    \"\"\"Tests whether a stack should be submitted for updates to CF.\n\n    Args:\n        stack (:class:`stacker.stack.Stack`): The stack object to check.\n\n    Returns:\n        bool: If the stack should be updated, return True.\n\n    \"\"\"\n    if stack.locked:\n        if not stack.force:\n            logger.debug(\"Stack %s locked and not in --force list. \"\n                         \"Refusing to update.\", stack.name)\n            return False\n        else:\n            logger.debug(\"Stack %s locked, but is in --force \"\n                         \"list.\", stack.name)\n    return True\n\n\ndef should_submit(stack):\n    \"\"\"Tests whether a stack should be submitted to CF for update/create\n\n    Args:\n        stack (:class:`stacker.stack.Stack`): The stack object to check.\n\n    Returns:\n        bool: If the stack should be submitted, return True.\n\n    \"\"\"\n    if stack.enabled:\n        return True\n\n    logger.debug(\"Stack %s is not enabled.  
Skipping.\", stack.name)\n    return False\n\n\ndef should_ensure_cfn_bucket(outline, dump):\n    \"\"\"Test whether access to the cloudformation template bucket is required\n\n    Args:\n        outline (bool): The outline action.\n        dump (bool): The dump action.\n\n    Returns:\n        bool: If access to CF bucket is needed, return True.\n\n    \"\"\"\n    return not outline and not dump\n\n\ndef _resolve_parameters(parameters, blueprint):\n    \"\"\"Resolves CloudFormation Parameters for a given blueprint.\n\n    Given a list of parameters, handles:\n        - discard any parameters that the blueprint does not use\n        - discard any empty values\n        - convert booleans to strings suitable for CloudFormation\n\n    Args:\n        parameters (dict): A dictionary of parameters provided by the\n            stack definition\n        blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint\n            object that is having the parameters applied to it.\n\n    Returns:\n        dict: The resolved parameters.\n\n    \"\"\"\n    params = {}\n    param_defs = blueprint.get_parameter_definitions()\n\n    for key, value in parameters.items():\n        if key not in param_defs:\n            logger.debug(\"Blueprint %s does not use parameter %s.\",\n                         blueprint.name, key)\n            continue\n        if value is None:\n            logger.debug(\"Got None value for parameter %s, not submitting it \"\n                         \"to cloudformation, default value should be used.\",\n                         key)\n            continue\n        if isinstance(value, bool):\n            logger.debug(\"Converting parameter %s boolean \\\"%s\\\" to string.\",\n                         key, value)\n            value = str(value).lower()\n        params[key] = value\n    return params\n\n\nclass UsePreviousParameterValue(object):\n    \"\"\" A simple class used to indicate a Parameter should use it's existng\n    value.\n    \"\"\"\n    
pass\n\n\ndef _handle_missing_parameters(parameter_values, all_params, required_params,\n                               existing_stack=None):\n    \"\"\"Handles any missing parameters.\n\n    If an existing_stack is provided, look up missing parameters there.\n\n    Args:\n        parameter_values (dict): key/value dictionary of stack definition\n            parameters\n        all_params (list): A list of all the parameters used by the\n            template/blueprint.\n        required_params (list): A list of all the parameters required by the\n            template/blueprint.\n        existing_stack (dict): A dict representation of the stack. If\n            provided, will be searched for any missing parameters.\n\n    Returns:\n        list of tuples: The final list of key/value pairs returned as a\n            list of tuples.\n\n    Raises:\n        MissingParameterException: Raised if a required parameter is\n            still missing.\n\n    \"\"\"\n    missing_params = list(set(all_params) - set(parameter_values.keys()))\n    if existing_stack and 'Parameters' in existing_stack:\n        stack_parameters = [\n            p[\"ParameterKey\"] for p in existing_stack[\"Parameters\"]\n        ]\n        for p in missing_params:\n            if p in stack_parameters:\n                logger.debug(\n                    \"Using previous value for parameter %s from existing \"\n                    \"stack\",\n                    p\n                )\n                parameter_values[p] = UsePreviousParameterValue\n    final_missing = list(set(required_params) - set(parameter_values.keys()))\n    if final_missing:\n        raise MissingParameterException(final_missing)\n\n    return list(parameter_values.items())\n\n\ndef handle_hooks(stage, hooks, provider, context, dump, outline):\n    \"\"\"Handle pre/post hooks.\n\n    Args:\n        stage (str): The name of the hook stage - pre_build/post_build.\n        hooks (list): A list of dictionaries containing the hooks 
to execute.\n        provider (:class:`stacker.provider.base.BaseProvider`): The provider\n            the current stack is using.\n        context (:class:`stacker.context.Context`): The current stacker\n            context.\n        dump (bool): Whether running with dump set or not.\n        outline (bool): Whether running with outline set or not.\n\n    \"\"\"\n    if not outline and not dump and hooks:\n        utils.handle_hooks(\n            stage=stage,\n            hooks=hooks,\n            provider=provider,\n            context=context\n        )\n\n\nclass Action(BaseAction):\n    \"\"\"Responsible for building & coordinating CloudFormation stacks.\n\n    Generates the build plan based on stack dependencies (these dependencies\n    are determined automatically based on output lookups from other stacks).\n\n    The plan can then either be printed out as an outline or executed. If\n    executed, each stack will get launched in order which entails:\n\n        - Pushing the generated CloudFormation template to S3 if it has changed\n        - Submitting either a build or update of the given stack to the\n            :class:`stacker.provider.base.Provider`.\n\n    \"\"\"\n\n    def build_parameters(self, stack, provider_stack=None):\n        \"\"\"Builds the CloudFormation Parameters for our stack.\n\n        Args:\n            stack (:class:`stacker.stack.Stack`): A stacker stack\n            provider_stack (dict): An optional Stacker provider object\n\n        Returns:\n            dict: The parameters for the given stack\n\n        \"\"\"\n        resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)\n        required_parameters = list(stack.required_parameter_definitions)\n        all_parameters = list(stack.all_parameter_definitions)\n        parameters = _handle_missing_parameters(resolved, all_parameters,\n                                                required_parameters,\n                                                
provider_stack)\n\n        param_list = []\n\n        for key, value in parameters:\n            param_dict = {\"ParameterKey\": key}\n            if value is UsePreviousParameterValue:\n                param_dict[\"UsePreviousValue\"] = True\n            else:\n                param_dict[\"ParameterValue\"] = str(value)\n\n            param_list.append(param_dict)\n\n        return param_list\n\n    def _launch_stack(self, stack, **kwargs):\n        \"\"\"Handles the creating or updating of a stack in CloudFormation.\n\n        Also makes sure that we don't try to create or update a stack while\n        it is already updating or creating.\n\n        \"\"\"\n        old_status = kwargs.get(\"status\")\n        wait_time = 0 if old_status is PENDING else STACK_POLL_TIME\n        if self.cancel.wait(wait_time):\n            return INTERRUPTED\n\n        if not should_submit(stack):\n            return NotSubmittedStatus()\n\n        provider = self.build_provider(stack)\n\n        try:\n            provider_stack = provider.get_stack(stack.fqn)\n        except StackDoesNotExist:\n            provider_stack = None\n\n        if provider_stack and not should_update(stack):\n            stack.set_outputs(\n                self.provider.get_output_dict(provider_stack))\n            return NotUpdatedStatus()\n\n        recreate = False\n        if provider_stack and old_status == SUBMITTED:\n            logger.debug(\n                \"Stack %s provider status: %s\",\n                stack.fqn,\n                provider.get_stack_status(provider_stack),\n            )\n\n            if provider.is_stack_rolling_back(provider_stack):\n                if 'rolling back' in old_status.reason:\n                    return old_status\n\n                logger.debug(\"Stack %s entered a roll back\", stack.fqn)\n                if 'updating' in old_status.reason:\n                    reason = 'rolling back update'\n                else:\n                    reason = 'rolling back 
new stack'\n\n                return SubmittedStatus(reason)\n            elif provider.is_stack_in_progress(provider_stack):\n                logger.debug(\"Stack %s in progress.\", stack.fqn)\n                return old_status\n            elif provider.is_stack_destroyed(provider_stack):\n                logger.debug(\"Stack %s finished deleting\", stack.fqn)\n                recreate = True\n                # Continue with creation afterwards\n            # Failure must be checked *before* completion, as both will be true\n            # when completing a rollback, and we don't want to consider it as\n            # a successful update.\n            elif provider.is_stack_failed(provider_stack):\n                reason = old_status.reason\n                if 'rolling' in reason:\n                    reason = reason.replace('rolling', 'rolled')\n                status_reason = provider.get_rollback_status_reason(stack.fqn)\n                logger.info(\n                    \"%s Stack Roll Back Reason: \" + status_reason, stack.fqn)\n                return FailedStatus(reason)\n\n            elif provider.is_stack_completed(provider_stack):\n                stack.set_outputs(\n                    provider.get_output_dict(provider_stack))\n                return CompleteStatus(old_status.reason)\n            else:\n                return old_status\n\n        logger.debug(\"Resolving stack %s\", stack.fqn)\n        stack.resolve(self.context, self.provider)\n\n        logger.debug(\"Launching stack %s now.\", stack.fqn)\n        template = self._template(stack.blueprint)\n        stack_policy = self._stack_policy(stack)\n        tags = build_stack_tags(stack)\n        parameters = self.build_parameters(stack, provider_stack)\n        force_change_set = stack.blueprint.requires_change_set\n\n        if recreate:\n            logger.debug(\"Re-creating stack: %s\", stack.fqn)\n            provider.create_stack(stack.fqn, template, parameters,\n                         
         tags, stack_policy=stack_policy)\n            return SubmittedStatus(\"re-creating stack\")\n        elif not provider_stack:\n            logger.debug(\"Creating new stack: %s\", stack.fqn)\n            provider.create_stack(stack.fqn, template, parameters, tags,\n                                  force_change_set,\n                                  stack_policy=stack_policy,\n                                  notification_arns=stack.notification_arns)\n            return SubmittedStatus(\"creating new stack\")\n\n        try:\n            wait = stack.in_progress_behavior == \"wait\"\n            if wait and provider.is_stack_in_progress(provider_stack):\n                return WAITING\n            if provider.prepare_stack_for_update(provider_stack, tags):\n                existing_params = provider_stack.get('Parameters', [])\n                provider.update_stack(\n                    stack.fqn,\n                    template,\n                    existing_params,\n                    parameters,\n                    tags,\n                    force_interactive=stack.protected,\n                    force_change_set=force_change_set,\n                    stack_policy=stack_policy,\n                    notification_arns=stack.notification_arns\n                )\n\n                logger.debug(\"Updating existing stack: %s\", stack.fqn)\n                return SubmittedStatus(\"updating existing stack\")\n            else:\n                return SubmittedStatus(\"destroying stack for re-creation\")\n        except CancelExecution:\n            stack.set_outputs(provider.get_output_dict(provider_stack))\n            return SkippedStatus(reason=\"canceled execution\")\n        except StackDidNotChange:\n            stack.set_outputs(provider.get_output_dict(provider_stack))\n            return DidNotChangeStatus()\n\n    def _template(self, blueprint):\n        \"\"\"Generates a suitable template based on whether or not an S3 bucket\n        is set.\n\n   
     If an S3 bucket is set, then the template will be uploaded to S3 first,\n        and CreateStack/UpdateStack operations will use the uploaded template.\n        If not bucket is set, then the template will be inlined.\n        \"\"\"\n        if self.bucket_name:\n            return Template(url=self.s3_stack_push(blueprint))\n        else:\n            return Template(body=blueprint.rendered)\n\n    def _stack_policy(self, stack):\n        \"\"\"Returns a Template object for the stacks stack policy, or None if\n        the stack doesn't have a stack policy.\"\"\"\n        if stack.stack_policy:\n            return Template(body=stack.stack_policy)\n\n    def _generate_plan(self, tail=False):\n        return plan(\n            description=\"Create/Update stacks\",\n            stack_action=self._launch_stack,\n            tail=self._tail_stack if tail else None,\n            context=self.context)\n\n    def pre_run(self, outline=False, dump=False, *args, **kwargs):\n        \"\"\"Any steps that need to be taken prior to running the action.\"\"\"\n        if should_ensure_cfn_bucket(outline, dump):\n            self.ensure_cfn_bucket()\n        hooks = self.context.config.pre_build\n        handle_hooks(\n            \"pre_build\",\n            hooks,\n            self.provider,\n            self.context,\n            dump,\n            outline\n        )\n\n    def run(self, concurrency=0, outline=False,\n            tail=False, dump=False, *args, **kwargs):\n        \"\"\"Kicks off the build/update of the stacks in the stack_definitions.\n\n        This is the main entry point for the Builder.\n\n        \"\"\"\n        plan = self._generate_plan(tail=tail)\n        if not plan.keys():\n            logger.warn('WARNING: No stacks detected (error in config?)')\n        if not outline and not dump:\n            plan.outline(logging.DEBUG)\n            logger.debug(\"Launching stacks: %s\", \", \".join(plan.keys()))\n            walker = 
build_walker(concurrency)\n            plan.execute(walker)\n        else:\n            if outline:\n                plan.outline()\n            if dump:\n                plan.dump(directory=dump, context=self.context,\n                          provider=self.provider)\n\n    def post_run(self, outline=False, dump=False, *args, **kwargs):\n        \"\"\"Any steps that need to be taken after running the action.\"\"\"\n        hooks = self.context.config.post_build\n        handle_hooks(\n            \"post_build\",\n            hooks,\n            self.provider,\n            self.context,\n            dump,\n            outline\n        )\n"
  },
  {
    "path": "stacker/actions/destroy.py",
    "content": "import logging\n\nfrom .base import BaseAction, plan, build_walker\nfrom .base import STACK_POLL_TIME\nfrom ..exceptions import StackDoesNotExist\nfrom stacker.hooks.utils import handle_hooks\nfrom ..status import (\n    CompleteStatus,\n    SubmittedStatus,\n    PENDING,\n    SUBMITTED,\n    INTERRUPTED\n)\n\nfrom ..status import StackDoesNotExist as StackDoesNotExistStatus\n\nlogger = logging.getLogger(__name__)\n\nDestroyedStatus = CompleteStatus(\"stack destroyed\")\nDestroyingStatus = SubmittedStatus(\"submitted for destruction\")\n\n\nclass Action(BaseAction):\n    \"\"\"Responsible for destroying CloudFormation stacks.\n\n    Generates a destruction plan based on stack dependencies. Stack\n    dependencies are reversed from the build action. For example, if a Stack B\n    requires Stack A during build, during destroy Stack A requires Stack B be\n    destroyed first.\n\n    The plan defaults to printing an outline of what will be destroyed. If\n    forced to execute, each stack will get destroyed in order.\n\n    \"\"\"\n\n    def _generate_plan(self, tail=False):\n        return plan(\n            description=\"Destroy stacks\",\n            stack_action=self._destroy_stack,\n            tail=self._tail_stack if tail else None,\n            context=self.context,\n            reverse=True)\n\n    def _destroy_stack(self, stack, **kwargs):\n        old_status = kwargs.get(\"status\")\n        wait_time = 0 if old_status is PENDING else STACK_POLL_TIME\n        if self.cancel.wait(wait_time):\n            return INTERRUPTED\n\n        provider = self.build_provider(stack)\n\n        try:\n            provider_stack = provider.get_stack(stack.fqn)\n        except StackDoesNotExist:\n            logger.debug(\"Stack %s does not exist.\", stack.fqn)\n            # Once the stack has been destroyed, it doesn't exist. 
If the\n            # status of the step was SUBMITTED, we know we just deleted it,\n            # otherwise it should be skipped\n            if kwargs.get(\"status\", None) == SUBMITTED:\n                return DestroyedStatus\n            else:\n                return StackDoesNotExistStatus()\n\n        logger.debug(\n            \"Stack %s provider status: %s\",\n            provider.get_stack_name(provider_stack),\n            provider.get_stack_status(provider_stack),\n        )\n        if provider.is_stack_destroyed(provider_stack):\n            return DestroyedStatus\n        elif provider.is_stack_in_progress(provider_stack):\n            return DestroyingStatus\n        else:\n            logger.debug(\"Destroying stack: %s\", stack.fqn)\n            provider.destroy_stack(provider_stack)\n        return DestroyingStatus\n\n    def pre_run(self, outline=False, *args, **kwargs):\n        \"\"\"Any steps that need to be taken prior to running the action.\"\"\"\n        pre_destroy = self.context.config.pre_destroy\n        if not outline and pre_destroy:\n            handle_hooks(\n                stage=\"pre_destroy\",\n                hooks=pre_destroy,\n                provider=self.provider,\n                context=self.context)\n\n    def run(self, force, concurrency=0, tail=False, *args, **kwargs):\n        plan = self._generate_plan(tail=tail)\n        if not plan.keys():\n            logger.warn('WARNING: No stacks detected (error in config?)')\n        if force:\n            # need to generate a new plan to log since the outline sets the\n            # steps to COMPLETE in order to log them\n            plan.outline(logging.DEBUG)\n            walker = build_walker(concurrency)\n            plan.execute(walker)\n        else:\n            plan.outline(message=\"To execute this plan, run with \\\"--force\\\" \"\n                                 \"flag.\")\n\n    def post_run(self, outline=False, *args, **kwargs):\n        \"\"\"Any steps that 
need to be taken after running the action.\"\"\"\n        post_destroy = self.context.config.post_destroy\n        if not outline and post_destroy:\n            handle_hooks(\n                stage=\"post_destroy\",\n                hooks=post_destroy,\n                provider=self.provider,\n                context=self.context)\n"
  },
  {
    "path": "stacker/actions/diff.py",
    "content": "import logging\nfrom operator import attrgetter\n\nfrom .base import plan, build_walker\nfrom . import build\nfrom .. import exceptions\nfrom ..status import (\n    NotSubmittedStatus,\n    NotUpdatedStatus,\n    COMPLETE,\n    INTERRUPTED,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DictValue(object):\n    ADDED = \"ADDED\"\n    REMOVED = \"REMOVED\"\n    MODIFIED = \"MODIFIED\"\n    UNMODIFIED = \"UNMODIFIED\"\n\n    formatter = \"%s%s = %s\"\n\n    def __init__(self, key, old_value, new_value):\n        self.key = key\n        self.old_value = old_value\n        self.new_value = new_value\n\n    def __eq__(self, other):\n        return self.__dict__ == other.__dict__\n\n    def changes(self):\n        \"\"\"Returns a list of changes to represent the diff between\n        old and new value.\n\n        Returns:\n            list: [string] representation of the change (if any)\n                between old and new value\n        \"\"\"\n        output = []\n        if self.status() is self.UNMODIFIED:\n            output = [self.formatter % (' ', self.key, self.old_value)]\n        elif self.status() is self.ADDED:\n            output.append(self.formatter % ('+', self.key, self.new_value))\n        elif self.status() is self.REMOVED:\n            output.append(self.formatter % ('-', self.key, self.old_value))\n        elif self.status() is self.MODIFIED:\n            output.append(self.formatter % ('-', self.key, self.old_value))\n            output.append(self.formatter % ('+', self.key, self.new_value))\n        return output\n\n    def status(self):\n        if self.old_value == self.new_value:\n            return self.UNMODIFIED\n        elif self.old_value is None:\n            return self.ADDED\n        elif self.new_value is None:\n            return self.REMOVED\n        else:\n            return self.MODIFIED\n\n\ndef diff_dictionaries(old_dict, new_dict):\n    \"\"\"Diffs two single dimension dictionaries\n\n    Returns the 
number of changes and an unordered list\n    expressing the common entries and changes.\n\n    Args:\n        old_dict(dict): old dictionary\n        new_dict(dict): new dictionary\n\n    Returns: list()\n        int: number of changed records\n        list: [DictValue]\n    \"\"\"\n\n    old_set = set(old_dict)\n    new_set = set(new_dict)\n\n    added_set = new_set - old_set\n    removed_set = old_set - new_set\n    common_set = old_set & new_set\n\n    changes = 0\n    output = []\n    for key in added_set:\n        changes += 1\n        output.append(DictValue(key, None, new_dict[key]))\n\n    for key in removed_set:\n        changes += 1\n        output.append(DictValue(key, old_dict[key], None))\n\n    for key in common_set:\n        output.append(DictValue(key, old_dict[key], new_dict[key]))\n        if str(old_dict[key]) != str(new_dict[key]):\n            changes += 1\n\n    output.sort(key=attrgetter(\"key\"))\n    return [changes, output]\n\n\ndef format_params_diff(parameter_diff):\n    \"\"\"Handles the formatting of differences in parameters.\n\n    Args:\n        parameter_diff (list): A list of DictValues detailing the\n            differences between two dicts returned by\n            :func:`stacker.actions.diff.diff_dictionaries`\n    Returns:\n        string: A formatted string that represents a parameter diff\n    \"\"\"\n\n    params_output = '\\n'.join([line for v in parameter_diff\n                               for line in v.changes()])\n    return \"\"\"--- Old Parameters\n+++ New Parameters\n******************\n%s\\n\"\"\" % params_output\n\n\ndef diff_parameters(old_params, new_params):\n    \"\"\"Compares the old vs. 
new parameters and returns a \"diff\"\n\n    If there are no changes, we return an empty list.\n\n    Args:\n        old_params(dict): old parameters\n        new_params(dict): new parameters\n\n    Returns:\n        list: A list of differences\n    \"\"\"\n    [changes, diff] = diff_dictionaries(old_params, new_params)\n    if changes == 0:\n        return []\n    return diff\n\n\nclass Action(build.Action):\n    \"\"\" Responsible for diff'ing CF stacks in AWS and on disk\n\n    Generates the build plan based on stack dependencies (these dependencies\n    are determined automatically based on references to output values from\n    other stacks).\n\n    The plan is then used to create a changeset for a stack using a\n    generated template based on the current config.\n    \"\"\"\n\n    def _diff_stack(self, stack, **kwargs):\n        \"\"\"Handles the diffing of a stack in CloudFormation vs our config\"\"\"\n        if self.cancel.wait(0):\n            return INTERRUPTED\n\n        if not build.should_submit(stack):\n            return NotSubmittedStatus()\n\n        provider = self.build_provider(stack)\n\n        if not build.should_update(stack):\n            stack.set_outputs(provider.get_outputs(stack.fqn))\n            return NotUpdatedStatus()\n\n        tags = build.build_stack_tags(stack)\n\n        stack.resolve(self.context, provider)\n        parameters = self.build_parameters(stack)\n\n        try:\n            outputs = provider.get_stack_changes(\n                stack, self._template(stack.blueprint), parameters, tags\n            )\n            stack.set_outputs(outputs)\n        except exceptions.StackDidNotChange:\n            logger.info('No changes: %s', stack.fqn)\n            stack.set_outputs(provider.get_outputs(stack.fqn))\n\n        return COMPLETE\n\n    def _generate_plan(self):\n        return plan(\n            description=\"Diff stacks\",\n            stack_action=self._diff_stack,\n            context=self.context)\n\n    def run(self, 
concurrency=0, *args, **kwargs):\n        plan = self._generate_plan()\n        plan.outline(logging.DEBUG)\n        if plan.keys():\n            logger.info(\"Diffing stacks: %s\", \", \".join(plan.keys()))\n        else:\n            logger.warn('WARNING: No stacks detected (error in config?)')\n        walker = build_walker(concurrency)\n        plan.execute(walker)\n\n    \"\"\"Don't ever do anything for pre_run or post_run\"\"\"\n\n    def pre_run(self, *args, **kwargs):\n        pass\n\n    def post_run(self, *args, **kwargs):\n        pass\n"
  },
  {
    "path": "stacker/actions/graph.py",
    "content": "import logging\nimport sys\nimport json\n\nfrom .base import BaseAction, plan\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef each_step(graph):\n    \"\"\"Returns an iterator that yields each step and it's direct\n    dependencies.\n    \"\"\"\n\n    steps = graph.topological_sort()\n    steps.reverse()\n\n    for step in steps:\n        deps = graph.downstream(step.name)\n        yield (step, deps)\n\n\ndef dot_format(out, graph, name=\"digraph\"):\n    \"\"\"Outputs the graph using the graphviz \"dot\" format.\"\"\"\n\n    out.write(\"digraph %s {\\n\" % name)\n    for step, deps in each_step(graph):\n        for dep in deps:\n            out.write(\"  \\\"%s\\\" -> \\\"%s\\\";\\n\" % (step, dep))\n\n    out.write(\"}\\n\")\n\n\ndef json_format(out, graph):\n    \"\"\"Outputs the graph in a machine readable JSON format.\"\"\"\n    steps = {}\n    for step, deps in each_step(graph):\n        steps[step.name] = {}\n        steps[step.name][\"deps\"] = [dep.name for dep in deps]\n\n    json.dump({\"steps\": steps}, out, indent=4)\n    out.write(\"\\n\")\n\n\nFORMATTERS = {\n    \"dot\": dot_format,\n    \"json\": json_format,\n}\n\n\nclass Action(BaseAction):\n\n    def _generate_plan(self):\n        return plan(\n            description=\"Print graph\",\n            stack_action=None,\n            context=self.context)\n\n    def run(self, format=None, reduce=False, *args, **kwargs):\n        \"\"\"Generates the underlying graph and prints it.\n\n        \"\"\"\n        plan = self._generate_plan()\n        if reduce:\n            # This will performa a transitive reduction on the underlying\n            # graph, producing less edges. Mostly useful for the \"dot\" format,\n            # when converting to PNG, so it creates a prettier/cleaner\n            # dependency graph.\n            plan.graph.transitive_reduction()\n\n        fn = FORMATTERS[format]\n        fn(sys.stdout, plan.graph)\n        sys.stdout.flush()\n"
  },
  {
    "path": "stacker/actions/info.py",
    "content": "import logging\n\nfrom .base import BaseAction\nfrom .. import exceptions\n\nlogger = logging.getLogger(__name__)\n\n\nclass Action(BaseAction):\n    \"\"\"Get information on CloudFormation stacks.\n\n    Displays the outputs for the set of CloudFormation stacks.\n\n    \"\"\"\n\n    def run(self, *args, **kwargs):\n        logger.info('Outputs for stacks: %s', self.context.get_fqn())\n        if not self.context.get_stacks():\n            logger.warn('WARNING: No stacks detected (error in config?)')\n        for stack in self.context.get_stacks():\n            provider = self.build_provider(stack)\n\n            try:\n                provider_stack = provider.get_stack(stack.fqn)\n            except exceptions.StackDoesNotExist:\n                logger.info('Stack \"%s\" does not exist.' % (stack.fqn,))\n                continue\n\n            logger.info('%s:', stack.fqn)\n            if 'Outputs' in provider_stack:\n                for output in provider_stack['Outputs']:\n                    logger.info(\n                        '\\t%s: %s',\n                        output['OutputKey'],\n                        output['OutputValue']\n                    )\n"
  },
  {
    "path": "stacker/awscli_yamlhelper.py",
    "content": "# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport json\nimport yaml\nfrom yaml.resolver import ScalarNode, SequenceNode\n\nfrom botocore.compat import six\n\n\ndef intrinsics_multi_constructor(loader, tag_prefix, node):\n    \"\"\"\n    YAML constructor to parse CloudFormation intrinsics.\n    This will return a dictionary with key being the instrinsic name\n    \"\"\"\n\n    # Get the actual tag name excluding the first exclamation\n    tag = node.tag[1:]\n\n    # Some intrinsic functions doesn't support prefix \"Fn::\"\n    prefix = \"Fn::\"\n    if tag in [\"Ref\", \"Condition\"]:\n        prefix = \"\"\n\n    cfntag = prefix + tag\n\n    if tag == \"GetAtt\" and isinstance(node.value, six.string_types):\n        # ShortHand notation for !GetAtt accepts Resource.Attribute format\n        # while the standard notation is to use an array\n        # [Resource, Attribute]. 
Convert shorthand to standard format\n        value = node.value.split(\".\", 1)\n\n    elif isinstance(node, ScalarNode):\n        # Value of this node is scalar\n        value = loader.construct_scalar(node)\n\n    elif isinstance(node, SequenceNode):\n        # Value of this node is an array (Ex: [1,2])\n        value = loader.construct_sequence(node)\n\n    else:\n        # Value of this node is an mapping (ex: {foo: bar})\n        value = loader.construct_mapping(node)\n\n    return {cfntag: value}\n\n\ndef yaml_dump(dict_to_dump):\n    \"\"\"\n    Dumps the dictionary as a YAML document\n    :param dict_to_dump:\n    :return:\n    \"\"\"\n    return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n\n\ndef yaml_parse(yamlstr):\n    \"\"\"Parse a yaml string\"\"\"\n    try:\n        # PyYAML doesn't support json as well as it should, so if the input\n        # is actually just json it is better to parse it with the standard\n        # json parser.\n        return json.loads(yamlstr)\n    except ValueError:\n        yaml.SafeLoader.add_multi_constructor(\n            \"!\", intrinsics_multi_constructor)\n        return yaml.safe_load(yamlstr)\n"
  },
  {
    "path": "stacker/blueprints/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/blueprints/base.py",
    "content": "from past.builtins import basestring\nimport copy\nimport hashlib\nimport logging\nimport string\nfrom stacker.util import read_value_from_path\nfrom stacker.variables import Variable\n\nfrom troposphere import (\n    Output,\n    Parameter,\n    Ref,\n    Template,\n)\n\nfrom ..exceptions import (\n    MissingVariable,\n    UnresolvedVariable,\n    UnresolvedVariables,\n    ValidatorError,\n    VariableTypeRequired,\n    InvalidUserdataPlaceholder\n)\nfrom .variables.types import (\n    CFNType,\n    TroposphereType,\n)\n\nlogger = logging.getLogger(__name__)\n\nPARAMETER_PROPERTIES = {\n    \"default\": \"Default\",\n    \"description\": \"Description\",\n    \"no_echo\": \"NoEcho\",\n    \"allowed_values\": \"AllowedValues\",\n    \"allowed_pattern\": \"AllowedPattern\",\n    \"max_length\": \"MaxLength\",\n    \"min_length\": \"MinLength\",\n    \"max_value\": \"MaxValue\",\n    \"min_value\": \"MinValue\",\n    \"constraint_description\": \"ConstraintDescription\"\n}\n\n\nclass CFNParameter(object):\n\n    def __init__(self, name, value):\n        \"\"\"Wrapper around a value to indicate a CloudFormation Parameter.\n\n        Args:\n            name (str): the name of the CloudFormation Parameter\n            value (str, list, int or bool): the value we're going to submit as\n                a CloudFormation Parameter.\n\n        \"\"\"\n        acceptable_types = [basestring, bool, list, int]\n        acceptable = False\n        for acceptable_type in acceptable_types:\n            if isinstance(value, acceptable_type):\n                acceptable = True\n                if acceptable_type == bool:\n                    logger.debug(\"Converting parameter %s boolean '%s' \"\n                                 \"to string.\", name, value)\n                    value = str(value).lower()\n                    break\n\n                if acceptable_type == int:\n                    logger.debug(\"Converting parameter %s integer '%s' \"\n               
                  \"to string.\", name, value)\n                    value = str(value)\n                    break\n\n        if not acceptable:\n            raise ValueError(\n                \"CFNParameter (%s) value must be one of %s got: %s\" % (\n                    name, \"str, int, bool, or list\", value))\n\n        self.name = name\n        self.value = value\n\n    def __repr__(self):\n        return \"CFNParameter({}: {})\".format(self.name, self.value)\n\n    def to_parameter_value(self):\n        \"\"\"Return the value to be submitted to CloudFormation\"\"\"\n        return self.value\n\n    @property\n    def ref(self):\n        return Ref(self.name)\n\n\ndef build_parameter(name, properties):\n    \"\"\"Builds a troposphere Parameter with the given properties.\n\n    Args:\n        name (string): The name of the parameter.\n        properties (dict): Contains the properties that will be applied to the\n            parameter. See:\n            http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html\n\n    Returns:\n        :class:`troposphere.Parameter`: The created parameter object.\n    \"\"\"\n    p = Parameter(name, Type=properties.get(\"type\"))\n    for name, attr in PARAMETER_PROPERTIES.items():\n        if name in properties:\n            setattr(p, attr, properties[name])\n    return p\n\n\ndef validate_variable_type(var_name, var_type, value):\n    \"\"\"Ensures the value is the correct variable type.\n\n    Args:\n        var_name (str): The name of the defined variable on a blueprint.\n        var_type (type): The type that the value should be.\n        value (obj): The object representing the value provided for the\n            variable\n\n    Returns:\n        object: Returns the appropriate value object. 
If the original value\n            was of CFNType, the returned value will be wrapped in CFNParameter.\n\n    Raises:\n        ValueError: If the `value` isn't of `var_type` and can't be cast as\n            that type, this is raised.\n    \"\"\"\n\n    if isinstance(var_type, CFNType):\n        value = CFNParameter(name=var_name, value=value)\n    elif isinstance(var_type, TroposphereType):\n        try:\n            value = var_type.create(value)\n        except Exception as exc:\n            name = \"{}.create\".format(var_type.resource_name)\n            raise ValidatorError(var_name, name, value, exc)\n    else:\n        if not isinstance(value, var_type):\n            raise ValueError(\n                \"Value for variable %s must be of type %s. Actual \"\n                \"type: %s.\" % (var_name, var_type, type(value))\n            )\n\n    return value\n\n\ndef validate_allowed_values(allowed_values, value):\n    \"\"\"Support a variable defining which values it allows.\n\n    Args:\n        allowed_values (Optional[list]): A list of allowed values from the\n            variable definition\n        value (obj): The object representing the value provided for the\n            variable\n\n    Returns:\n        bool: Boolean for whether or not the value is valid.\n\n    \"\"\"\n    # ignore CFNParameter, troposphere handles these for us\n    if not allowed_values or isinstance(value, CFNParameter):\n        return True\n\n    return value in allowed_values\n\n\ndef resolve_variable(var_name, var_def, provided_variable, blueprint_name):\n    \"\"\"Resolve a provided variable value against the variable definition.\n\n    Args:\n        var_name (str): The name of the defined variable on a blueprint.\n        var_def (dict): A dictionary representing the defined variables\n            attributes.\n        provided_variable (:class:`stacker.variables.Variable`): The variable\n            value provided to the blueprint.\n        blueprint_name (str): The name of 
the blueprint that the variable is\n            being applied to.\n\n    Returns:\n        object: The resolved variable value, could be any python object.\n\n    Raises:\n        MissingVariable: Raised when a variable with no default is not\n            provided a value.\n        UnresolvedVariable: Raised when the provided variable is not already\n            resolved.\n        ValueError: Raised when the value is not the right type and cannot be\n            cast as the correct type. Raised by\n            :func:`stacker.blueprints.base.validate_variable_type`\n        ValidatorError: Raised when a validator raises an exception. Wraps the\n            original exception.\n    \"\"\"\n\n    try:\n        var_type = var_def[\"type\"]\n    except KeyError:\n        raise VariableTypeRequired(blueprint_name, var_name)\n\n    if provided_variable:\n        if not provided_variable.resolved:\n            raise UnresolvedVariable(blueprint_name, provided_variable)\n\n        value = provided_variable.value\n    else:\n        # Variable value not provided, try using the default, if it exists\n        # in the definition\n        try:\n            value = var_def[\"default\"]\n        except KeyError:\n            raise MissingVariable(blueprint_name, var_name)\n\n    # If no validator, return the value as is, otherwise apply validator\n    validator = var_def.get(\"validator\", lambda v: v)\n    try:\n        value = validator(value)\n    except Exception as exc:\n        raise ValidatorError(var_name, validator.__name__, value, exc)\n\n    # Ensure that the resulting value is the correct type\n    value = validate_variable_type(var_name, var_type, value)\n\n    allowed_values = var_def.get(\"allowed_values\")\n    if not validate_allowed_values(allowed_values, value):\n        message = (\n            \"Invalid value passed to '%s' in blueprint: %s. 
Got: '%s', \"\n            \"expected one of %s\"\n        ) % (var_name, blueprint_name, value, allowed_values)\n        raise ValueError(message)\n\n    return value\n\n\ndef parse_user_data(variables, raw_user_data, blueprint_name):\n    \"\"\"Parse the given user data and renders it as a template\n\n    It supports referencing template variables to create userdata\n    that's supplemented with information from the stack, as commonly\n    required when creating EC2 userdata files.\n\n    For example:\n        Given a raw_user_data string: 'open file ${file}'\n        And a variables dictionary with: {'file': 'test.txt'}\n        parse_user_data would output: open file test.txt\n\n    Args:\n        variables (dict): variables available to the template\n        raw_user_data (str): the user_data to be parsed\n        blueprint_name (str): the name of the blueprint\n\n    Returns:\n        str: The parsed user data, with all the variables values and\n             refs replaced with their resolved values.\n\n    Raises:\n        InvalidUserdataPlaceholder: Raised when a placeholder name in\n                                    raw_user_data is not valid.\n                                    E.g ${100} would raise this.\n        MissingVariable: Raised when a variable is in the raw_user_data that\n                         is not given in the blueprint\n\n    \"\"\"\n    variable_values = {}\n\n    for key, value in variables.items():\n        if type(value) is CFNParameter:\n            variable_values[key] = value.to_parameter_value()\n        else:\n            variable_values[key] = value\n\n    template = string.Template(raw_user_data)\n\n    res = \"\"\n\n    try:\n        res = template.substitute(variable_values)\n    except ValueError as exp:\n        raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])\n    except KeyError as key:\n        raise MissingVariable(blueprint_name, key)\n\n    return res\n\n\nclass Blueprint(object):\n\n    \"\"\"Base 
implementation for rendering a troposphere template.\n\n    Args:\n        name (str): A name for the blueprint.\n        context (:class:`stacker.context.Context`): the context the blueprint\n            is being executed under.\n        mappings (dict, optional): Cloudformation Mappings to be used in the\n            template.\n\n    \"\"\"\n\n    def __init__(self, name, context, mappings=None, description=None):\n        self.name = name\n        self.context = context\n        self.mappings = mappings\n        self.outputs = {}\n        self.reset_template()\n        self.resolved_variables = None\n        self.description = description\n\n        if hasattr(self, \"PARAMETERS\") or hasattr(self, \"LOCAL_PARAMETERS\"):\n            raise AttributeError(\"DEPRECATION WARNING: Blueprint %s uses \"\n                                 \"deprecated PARAMETERS or \"\n                                 \"LOCAL_PARAMETERS, rather than VARIABLES. \"\n                                 \"Please update your blueprints. See https://\"\n                                 \"stacker.readthedocs.io/en/latest/blueprints.\"\n                                 \"html#variables for aditional information.\"\n                                 % name)\n\n    def get_parameter_definitions(self):\n        \"\"\"Get the parameter definitions to submit to CloudFormation.\n\n        Any variable definition whose `type` is an instance of `CFNType` will\n        be returned as a CloudFormation Parameter.\n\n        Returns:\n            dict: parameter definitions. 
Keys are parameter names, the values\n                are dicts containing key/values for various parameter\n                properties.\n\n        \"\"\"\n        output = {}\n        for var_name, attrs in self.defined_variables().items():\n            var_type = attrs.get(\"type\")\n            if isinstance(var_type, CFNType):\n                cfn_attrs = copy.deepcopy(attrs)\n                cfn_attrs[\"type\"] = var_type.parameter_type\n                output[var_name] = cfn_attrs\n        return output\n\n    def get_output_definitions(self):\n        \"\"\"Gets the output definitions.\n\n        Returns:\n            dict: output definitions. Keys are output names, the values\n                are dicts containing key/values for various output\n                properties.\n\n        \"\"\"\n        return {k: output.to_dict() for k, output in\n                self.template.outputs.items()}\n\n    def get_required_parameter_definitions(self):\n        \"\"\"Returns all template parameters that do not have a default value.\n\n        Returns:\n            dict: dict of required CloudFormation Parameters for the blueprint.\n                Will be a dictionary of <parameter name>: <parameter\n                attributes>.\n\n        \"\"\"\n        required = {}\n        for name, attrs in self.get_parameter_definitions().items():\n            if \"Default\" not in attrs:\n                required[name] = attrs\n        return required\n\n    def get_parameter_values(self):\n        \"\"\"Return a dictionary of variables with `type` :class:`CFNType`.\n\n        Returns:\n            dict: variables that need to be submitted as CloudFormation\n                Parameters. 
Will be a dictionary of <parameter name>:\n                <parameter value>.\n\n        \"\"\"\n        variables = self.get_variables()\n        output = {}\n        for key, value in variables.items():\n            try:\n                output[key] = value.to_parameter_value()\n            except AttributeError:\n                continue\n\n        return output\n\n    def setup_parameters(self):\n        \"\"\"Add any CloudFormation parameters to the template\"\"\"\n        t = self.template\n        parameters = self.get_parameter_definitions()\n\n        if not parameters:\n            logger.debug(\"No parameters defined.\")\n            return\n\n        for name, attrs in parameters.items():\n            p = build_parameter(name, attrs)\n            t.add_parameter(p)\n\n    def defined_variables(self):\n        \"\"\"Return a dictionary of variables defined by the blueprint.\n\n        By default, this will just return the values from `VARIABLES`, but this\n        makes it easy for subclasses to add variables.\n\n        Returns:\n            dict: variables defined by the blueprint\n\n        \"\"\"\n        return copy.deepcopy(getattr(self, \"VARIABLES\", {}))\n\n    def get_variables(self):\n        \"\"\"Return a dictionary of variables available to the template.\n\n        These variables will have been defined within `VARIABLES` or\n        `self.defined_variables`. 
Any variable value that contains a lookup\n        will have been resolved.\n\n        Returns:\n            dict: variables available to the template\n\n        Raises:\n\n        \"\"\"\n        if self.resolved_variables is None:\n            raise UnresolvedVariables(self.name)\n        return self.resolved_variables\n\n    def get_cfn_parameters(self):\n        \"\"\"Return a dictionary of variables with `type` :class:`CFNType`.\n\n        Returns:\n            dict: variables that need to be submitted as CloudFormation\n                Parameters.\n\n        \"\"\"\n        variables = self.get_variables()\n        output = {}\n        for key, value in variables.items():\n            if hasattr(value, \"to_parameter_value\"):\n                output[key] = value.to_parameter_value()\n        return output\n\n    def resolve_variables(self, provided_variables):\n        \"\"\"Resolve the values of the blueprint variables.\n\n        This will resolve the values of the `VARIABLES` with values from the\n        env file, the config, and any lookups resolved.\n\n        Args:\n            provided_variables (list of :class:`stacker.variables.Variable`):\n                list of provided variables\n\n        \"\"\"\n        self.resolved_variables = {}\n        defined_variables = self.defined_variables()\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, var_def in defined_variables.items():\n            value = resolve_variable(\n                var_name,\n                var_def,\n                variable_dict.get(var_name),\n                self.name\n            )\n            self.resolved_variables[var_name] = value\n\n    def import_mappings(self):\n        if not self.mappings:\n            return\n\n        for name, mapping in self.mappings.items():\n            logger.debug(\"Adding mapping %s.\", name)\n            self.template.add_mapping(name, mapping)\n\n    def reset_template(self):\n        
self.template = Template()\n        self._rendered = None\n        self._version = None\n\n    def render_template(self):\n        \"\"\"Render the Blueprint to a CloudFormation template\"\"\"\n        self.import_mappings()\n        self.create_template()\n        if self.description:\n            self.set_template_description(self.description)\n        self.setup_parameters()\n        rendered = self.template.to_json(indent=self.context.template_indent)\n        version = hashlib.md5(rendered.encode()).hexdigest()[:8]\n        return (version, rendered)\n\n    def to_json(self, variables=None):\n        \"\"\"Render the blueprint and return the template in json form.\n\n        Args:\n            variables (dict):\n                Optional dictionary providing/overriding variable values.\n\n        Returns:\n            str: the rendered CFN JSON template\n        \"\"\"\n\n        variables_to_resolve = []\n        if variables:\n            for key, value in variables.items():\n                variables_to_resolve.append(Variable(key, value))\n        for k in self.get_parameter_definitions():\n            if not variables or k not in variables:\n                # The provided value for a CFN parameter has no effect in this\n                # context (generating the CFN template), so any string can be\n                # provided for its value - just needs to be something\n                variables_to_resolve.append(Variable(k, 'unused_value'))\n        self.resolve_variables(variables_to_resolve)\n\n        return self.render_template()[1]\n\n    def read_user_data(self, user_data_path):\n        \"\"\"Reads and parses a user_data file.\n\n        Args:\n            user_data_path (str):\n                path to the userdata file\n\n        Returns:\n            str: the parsed user data file\n\n        \"\"\"\n        raw_user_data = read_value_from_path(user_data_path)\n\n        variables = self.get_variables()\n\n        return parse_user_data(variables, 
raw_user_data, self.name)\n\n    def set_template_description(self, description):\n        \"\"\"Adds a description to the Template\n\n        Args:\n            description (str): A description to be added to the resulting\n                template.\n\n        \"\"\"\n        self.template.set_description(description)\n\n    def add_output(self, name, value):\n        \"\"\"Simple helper for adding outputs.\n\n        Args:\n            name (str): The name of the output to create.\n            value (str): The value to put in the output.\n        \"\"\"\n        self.template.add_output(Output(name, Value=value))\n\n    @property\n    def requires_change_set(self):\n        \"\"\"Returns true if the underlying template has transforms.\"\"\"\n        return self.template.transform is not None\n\n    @property\n    def rendered(self):\n        if not self._rendered:\n            self._version, self._rendered = self.render_template()\n        return self._rendered\n\n    @property\n    def version(self):\n        if not self._version:\n            self._version, self._rendered = self.render_template()\n        return self._version\n\n    def create_template(self):\n        raise NotImplementedError\n"
  },
  {
    "path": "stacker/blueprints/raw.py",
    "content": "\"\"\"Blueprint representing raw template module.\"\"\"\n\nimport hashlib\nimport json\nimport os\nimport sys\n\nfrom jinja2 import Template\n\nfrom ..util import parse_cloudformation_template\nfrom ..exceptions import InvalidConfig, UnresolvedVariable\nfrom .base import Blueprint\n\n\ndef get_template_path(filename):\n    \"\"\"Find raw template in working directory or in sys.path.\n\n    template_path from config may refer to templates colocated with the Stacker\n    config, or files in remote package_sources. Here, we emulate python module\n    loading to find the path to the template.\n\n    Args:\n        filename (str): Template filename.\n\n    Returns:\n        Optional[str]: Path to file, or None if no file found\n\n    \"\"\"\n    if os.path.isfile(filename):\n        return os.path.abspath(filename)\n    for i in sys.path:\n        if os.path.isfile(os.path.join(i, filename)):\n            return os.path.abspath(os.path.join(i, filename))\n\n    return None\n\n\ndef get_template_params(template):\n    \"\"\"Parse a CFN template for defined parameters.\n\n    Args:\n        template (dict): Parsed CFN template.\n\n    Returns:\n        dict: Template parameters.\n\n    \"\"\"\n    params = {}\n\n    if 'Parameters' in template:\n        params = template['Parameters']\n    return params\n\n\ndef resolve_variable(provided_variable, blueprint_name):\n    \"\"\"Resolve a provided variable value against the variable definition.\n\n    This acts as a subset of resolve_variable logic in the base module, leaving\n    out everything that doesn't apply to CFN parameters.\n\n    Args:\n        provided_variable (:class:`stacker.variables.Variable`): The variable\n            value provided to the blueprint.\n        blueprint_name (str): The name of the blueprint that the variable is\n            being applied to.\n\n    Returns:\n        object: The resolved variable string value.\n\n    Raises:\n        UnresolvedVariable: Raised when the provided 
variable is not already\n            resolved.\n\n    \"\"\"\n    value = None\n    if provided_variable:\n        if not provided_variable.resolved:\n            raise UnresolvedVariable(blueprint_name, provided_variable)\n\n        value = provided_variable.value\n\n    return value\n\n\nclass RawTemplateBlueprint(Blueprint):\n    \"\"\"Blueprint class for blueprints auto-generated from raw templates.\"\"\"\n\n    def __init__(self, name, context, raw_template_path, mappings=None, # noqa pylint: disable=too-many-arguments\n                 description=None):  # pylint: disable=unused-argument\n        \"\"\"Initialize RawTemplateBlueprint object.\"\"\"\n        self.name = name\n        self.context = context\n        self.mappings = mappings\n        self.resolved_variables = None\n        self.raw_template_path = raw_template_path\n        self._rendered = None\n        self._version = None\n\n    def to_json(self, variables=None):  # pylint: disable=unused-argument\n        \"\"\"Return the template in JSON.\n\n        Args:\n            variables (dict):\n                Unused in this subclass (variables won't affect the template).\n\n        Returns:\n            str: the rendered CFN JSON template\n\n        \"\"\"\n        # load -> dumps will produce json from json or yaml templates\n        return json.dumps(self.to_dict(), sort_keys=True, indent=4)\n\n    def to_dict(self):\n        \"\"\"Return the template as a python dictionary.\n\n        Returns:\n            dict: the loaded template as a python dictionary\n\n        \"\"\"\n        return parse_cloudformation_template(self.rendered)\n\n    def render_template(self):\n        \"\"\"Load template and generate its md5 hash.\"\"\"\n        return (self.version, self.rendered)\n\n    def get_parameter_definitions(self):\n        \"\"\"Get the parameter definitions to submit to CloudFormation.\n\n        Returns:\n            dict: parameter definitions. 
Keys are parameter names, the values\n                are dicts containing key/values for various parameter\n                properties.\n\n        \"\"\"\n        return get_template_params(self.to_dict())\n\n    def get_output_definitions(self):\n        \"\"\"Gets the output definitions.\n\n        Returns:\n            dict: output definitions. Keys are output names, the values\n                are dicts containing key/values for various output\n                properties.\n\n        \"\"\"\n        return self.to_dict().get('Outputs', {})\n\n    def resolve_variables(self, provided_variables):\n        \"\"\"Resolve the values of the blueprint variables.\n\n        This will resolve the values of the template parameters with values\n        from the env file, the config, and any lookups resolved. The\n        resolution is run twice, in case the blueprint is jinja2 templated\n        and requires provided variables to render.\n\n        Args:\n            provided_variables (list of :class:`stacker.variables.Variable`):\n                list of provided variables\n\n        \"\"\"\n        # Pass 1 to set resolved_variables to provided variables\n        self.resolved_variables = {}\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, _var_def in variable_dict.items():\n            value = resolve_variable(\n                variable_dict.get(var_name),\n                self.name\n            )\n            if value is not None:\n                self.resolved_variables[var_name] = value\n\n        # Pass 2 to render the blueprint and set resolved_variables according\n        # to defined variables\n        defined_variables = self.get_parameter_definitions()\n        self.resolved_variables = {}\n        variable_dict = dict((var.name, var) for var in provided_variables)\n        for var_name, _var_def in defined_variables.items():\n            value = resolve_variable(\n                
variable_dict.get(var_name),\n                self.name\n            )\n            if value is not None:\n                self.resolved_variables[var_name] = value\n\n    def get_parameter_values(self):\n        \"\"\"Return a dictionary of variables with `type` :class:`CFNType`.\n\n        Returns:\n            dict: variables that need to be submitted as CloudFormation\n                Parameters. Will be a dictionary of <parameter name>:\n                <parameter value>.\n\n        \"\"\"\n        return self.resolved_variables\n\n    @property\n    def requires_change_set(self):\n        \"\"\"Return True if the underlying template has transforms.\"\"\"\n        return bool(\"Transform\" in self.to_dict())\n\n    @property\n    def rendered(self):\n        \"\"\"Return (generating first if needed) rendered template.\"\"\"\n        if not self._rendered:\n            template_path = get_template_path(self.raw_template_path)\n            if template_path:\n                with open(template_path, 'r') as template:\n                    if len(os.path.splitext(template_path)) == 2 and (\n                            os.path.splitext(template_path)[1] == '.j2'):\n                        self._rendered = Template(template.read()).render(\n                            context=self.context,\n                            mappings=self.mappings,\n                            name=self.name,\n                            variables=self.resolved_variables\n                        )\n                    else:\n                        self._rendered = template.read()\n            else:\n                raise InvalidConfig(\n                    'Could not find template %s' % self.raw_template_path\n                )\n\n        return self._rendered\n\n    @property\n    def version(self):\n        \"\"\"Return (generating first if needed) version hash.\"\"\"\n        if not self._version:\n            self._version = hashlib.md5(self.rendered.encode()).hexdigest()[:8]\n        
return self._version\n"
  },
  {
    "path": "stacker/blueprints/testutil.py",
    "content": "import difflib\nimport json\nimport unittest\nimport os.path\nfrom glob import glob\n\nfrom stacker.config import parse as parse_config\nfrom stacker.context import Context\nfrom stacker.util import load_object_from_string\nfrom stacker.variables import Variable\n\n\ndef diff(a, b):\n    \"\"\"A human readable differ.\"\"\"\n    return '\\n'.join(\n        list(\n            difflib.Differ().compare(\n                a.splitlines(),\n                b.splitlines()\n            )\n        )\n    )\n\n\nclass BlueprintTestCase(unittest.TestCase):\n    OUTPUT_PATH = \"tests/fixtures/blueprints\"\n\n    def assertRenderedBlueprint(self, blueprint):  # noqa: N802\n        expected_output = \"%s/%s.json\" % (self.OUTPUT_PATH, blueprint.name)\n\n        rendered_dict = blueprint.template.to_dict()\n        rendered_text = json.dumps(rendered_dict, indent=4, sort_keys=True)\n\n        with open(expected_output + \"-result\", \"w\") as fd:\n            fd.write(rendered_text)\n\n        with open(expected_output) as fd:\n            expected_dict = json.loads(fd.read())\n            expected_text = json.dumps(expected_dict, indent=4, sort_keys=True)\n\n        self.assertEquals(rendered_dict, expected_dict,\n                          diff(rendered_text, expected_text))\n\n\nclass YamlDirTestGenerator(object):\n    \"\"\"Generate blueprint tests from yaml config files.\n\n    This class creates blueprint tests from yaml files with a syntax similar to\n    stackers' configuration syntax. 
For example,\n\n       ---\n       namespace: test\n       stacks:\n         - name: test_sample\n           class_path: stacker_blueprints.test.Sample\n           variables:\n             var1: value1\n\n    will create a test for the specified blueprint, passing that variable as\n    part of the test.\n\n    The test will generate a .json file for this blueprint, and compare it with\n    the stored result.\n\n\n    By default, the generator looks for files named 'test_*.yaml' in its same\n    directory. In order to use it, subclass it in a directory containing such\n    tests, and name the class with a pattern that will include it in nosetests'\n    tests (for example, TestGenerator).\n\n    The subclass may override some properties:\n\n    @property base_class: by default, the generated tests are subclasses of\n    stacker.blueprints.testutil.BlueprintTestCase. In order to change this,\n    set this property to the desired base class.\n\n    @property yaml_dirs: by default, the directory where the generator is\n    subclassed is searched for test files. Override this array for specifying\n    more directories. These must be relative to the directory in which the\n    subclass lives in. Globs may be used.\n        Default: [ '.' ]. Example override: [ '.', 'tests/*/' ]\n\n    @property yaml_filename: by default, the generator looks for files named\n    'test_*.yaml'. Use this to change this pattern. 
Globs may be used.\n\n\n    There's an example of this use in the tests/ subdir of stacker_blueprints.\n\n    \"\"\"\n\n    def __init__(self):\n        self.classdir = os.path.relpath(\n            self.__class__.__module__.replace('.', '/'))\n        if not os.path.isdir(self.classdir):\n            self.classdir = os.path.dirname(self.classdir)\n\n    # These properties can be overridden from the test generator subclass.\n    @property\n    def base_class(self):\n        return BlueprintTestCase\n\n    @property\n    def yaml_dirs(self):\n        return ['.']\n\n    @property\n    def yaml_filename(self):\n        return 'test_*.yaml'\n\n    def test_generator(self):\n        # Search for tests in given paths\n        configs = []\n        for d in self.yaml_dirs:\n            configs.extend(\n                glob('%s/%s/%s' % (self.classdir, d, self.yaml_filename)))\n\n        class ConfigTest(self.base_class):\n            def __init__(self, config, stack, filepath):\n                self.config = config\n                self.stack = stack\n                self.description = \"%s (%s)\" % (stack.name, filepath)\n\n            def __call__(self):\n                # Use the context property of the baseclass, if present.\n                # If not, default to a basic context.\n                try:\n                    ctx = self.context\n                except AttributeError:\n                    ctx = Context(config=self.config,\n                                  environment={'environment': 'test'})\n\n                configvars = self.stack.variables or {}\n                variables = [Variable(k, v) for k, v in configvars.items()]\n\n                blueprint_class = load_object_from_string(\n                    self.stack.class_path)\n                blueprint = blueprint_class(self.stack.name, ctx)\n                blueprint.resolve_variables(variables or [])\n                blueprint.setup_parameters()\n                blueprint.create_template()\n
                self.assertRenderedBlueprint(blueprint)\n\n            def assertEquals(self, a, b, msg):  # noqa: N802\n                assert a == b, msg\n\n        for f in configs:\n            with open(f) as test:\n                config = parse_config(test.read())\n                config.validate()\n\n                for stack in config.stacks:\n                    # Nosetests supports \"test generators\", which allows us to\n                    # yield a callable object which will be wrapped as a test\n                    # case.\n                    #\n                    # http://nose.readthedocs.io/en/latest/writing_tests.html#test-generators\n                    yield ConfigTest(config, stack, filepath=f)\n"
  },
  {
    "path": "stacker/blueprints/variables/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/blueprints/variables/types.py",
    "content": "\n\nclass TroposphereType(object):\n\n    def __init__(self, defined_type, many=False, optional=False,\n                 validate=True):\n        \"\"\"Represents a Troposphere type.\n\n        :class:`Troposphere` will convert the value provided to the variable to\n        the specified Troposphere type.\n\n        Both resource and parameter classes (which are just used to configure\n        other resources) are acceptable as configuration values.\n\n        Complete resource definitions must be dictionaries, with the keys\n        identifying the resource titles, and the values being used as the\n        constructor parameters.\n\n        Parameter classes can be defined as dictionariy or a list of\n        dictionaries. In either case, the keys and values will be used directly\n        as constructor parameters.\n\n        Args:\n            defined_type (type): Troposphere type\n            many (bool): Whether or not multiple resources can be constructed.\n                If the defined type is a resource, multiple resources can be\n                passed as a dictionary of dictionaries.\n                If it is a parameter class, multiple resources are passed as\n                a list.\n            optional (bool): Whether an undefined/null configured value is\n                acceptable. In that case a value of ``None`` will be passed to\n                the template, even if ``many`` is enabled.\n            validate (bool): Whether to validate the generated object on\n                creation. 
Should be left enabled unless the object will be\n                augmented with mandatory parameters in the template code, such\n                that it must be validated at a later point.\n\n        \"\"\"\n\n        self._validate_type(defined_type)\n\n        self._type = defined_type\n        self._many = many\n        self._optional = optional\n        self._validate = validate\n\n    def _validate_type(self, defined_type):\n        if not hasattr(defined_type, \"from_dict\"):\n            raise ValueError(\"Type must have `from_dict` attribute\")\n\n    @property\n    def resource_name(self):\n        return (\n            getattr(self._type, 'resource_name', None) or self._type.__name__\n        )\n\n    def create(self, value):\n        \"\"\"Create the troposphere type from the value.\n\n        Args:\n            value (Union[dict, list]): A dictionary or list of dictionaries\n                (see class documentation for details) to use as parameters to\n                create the Troposphere type instance.\n                Each dictionary will be passed to the `from_dict` method of the\n                type.\n\n        Returns:\n            Union[list, type]: Returns the value converted to the troposphere\n                type\n\n        \"\"\"\n\n        # Explicitly check with len such that non-sequence types throw.\n        if self._optional and (value is None or len(value) == 0):\n            return None\n\n        if hasattr(self._type, 'resource_type'):\n            # Our type is a resource, so ensure we have a dict of title to\n            # parameters\n            if not isinstance(value, dict):\n                raise ValueError(\"Resources must be specified as a dict of \"\n                                 \"title to parameters\")\n            if not self._many and len(value) > 1:\n                raise ValueError(\"Only one resource can be provided for this \"\n                                 \"TroposphereType variable\")\n\n            
result = [\n                self._type.from_dict(title, v) for title, v in value.items()\n            ]\n        else:\n            # Our type is for properties, not a resource, so don't use\n            # titles\n            if self._many:\n                result = [self._type.from_dict(None, v) for v in value]\n            elif not isinstance(value, dict):\n                raise ValueError(\"TroposphereType for a single non-resource\"\n                                 \"type must be specified as a dict of \"\n                                 \"parameters\")\n            else:\n                result = [self._type.from_dict(None, value)]\n\n        if self._validate:\n            for v in result:\n                v._validate_props()\n\n        return result[0] if not self._many else result\n\n\nclass CFNType(object):\n\n    def __init__(self, parameter_type):\n        \"\"\"Represents a CloudFormation Parameter Type.\n\n        :class:`CFNType`` can be used as the `type` for a Blueprint variable.\n        Unlike other variables, a variable with `type` :class:`CFNType`, will\n        be submitted to CloudFormation as a Parameter.\n\n        Args:\n            parameter_type (str): An AWS specific parameter type\n                (http://goo.gl/PthovJ)\n\n        \"\"\"\n        self.parameter_type = parameter_type\n\n\n# General CFN types\nCFNString = CFNType(\"String\")\nCFNNumber = CFNType(\"Number\")\nCFNNumberList = CFNType(\"List<Number>\")\nCFNCommaDelimitedList = CFNType(\"CommaDelimitedList\")\n\n# AWS-Specific Parameter Types\n# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-specific-parameter-types\nEC2AvailabilityZoneName = CFNType(\"AWS::EC2::AvailabilityZone::Name\")\nEC2ImageId = CFNType(\"AWS::EC2::Image::Id\")\nEC2InstanceId = CFNType(\"AWS::EC2::Instance::Id\")\nEC2KeyPairKeyName = CFNType(\"AWS::EC2::KeyPair::KeyName\")\nEC2SecurityGroupGroupName = 
CFNType(\"AWS::EC2::SecurityGroup::GroupName\")\nEC2SecurityGroupId = CFNType(\"AWS::EC2::SecurityGroup::Id\")\nEC2SubnetId = CFNType(\"AWS::EC2::Subnet::Id\")\nEC2VolumeId = CFNType(\"AWS::EC2::Volume::Id\")\nEC2VPCId = CFNType(\"AWS::EC2::VPC::Id\")\nRoute53HostedZoneId = CFNType(\"AWS::Route53::HostedZone::Id\")\nEC2AvailabilityZoneNameList = CFNType(\"List<AWS::EC2::AvailabilityZone::Name>\")\nEC2ImageIdList = CFNType(\"List<AWS::EC2::Image::Id>\")\nEC2InstanceIdList = CFNType(\"List<AWS::EC2::Instance::Id>\")\nEC2SecurityGroupGroupNameList = CFNType(\n    \"List<AWS::EC2::SecurityGroup::GroupName>\")\nEC2SecurityGroupIdList = CFNType(\"List<AWS::EC2::SecurityGroup::Id>\")\nEC2SubnetIdList = CFNType(\"List<AWS::EC2::Subnet::Id>\")\nEC2VolumeIdList = CFNType(\"List<AWS::EC2::Volume::Id>\")\nEC2VPCIdList = CFNType(\"List<AWS::EC2::VPC::Id>\")\nRoute53HostedZoneIdList = CFNType(\"List<AWS::Route53::HostedZone::Id>\")\n\n# SSM Parameter Types\n# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types\nSSMParameterName = CFNType(\"AWS::SSM::Parameter::Name\")\nSSMParameterValueString = CFNType(\"AWS::SSM::Parameter::Value<String>\")\nSSMParameterValueStringList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<String>>\")\nSSMParameterValueCommaDelimitedList = CFNType(\n    \"AWS::SSM::Parameter::Value<CommaDelimitedList>\")\n# Each AWS-specific type here is repeated from the the list above\nSSMParameterValueEC2AvailabilityZoneName = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::AvailabilityZone::Name>\")\nSSMParameterValueEC2ImageId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>\")\nSSMParameterValueEC2InstanceId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::Instance::Id>\")\nSSMParameterValueEC2KeyPairKeyName = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::KeyPair::KeyName>\")\nSSMParameterValueEC2SecurityGroupGroupName = CFNType(\n    
\"AWS::SSM::Parameter::Value<AWS::EC2::SecurityGroup::GroupName>\")\nSSMParameterValueEC2SecurityGroupId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::SecurityGroup::Id>\")\nSSMParameterValueEC2SubnetId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::Subnet::Id>\")\nSSMParameterValueEC2VolumeId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::Volume::Id>\")\nSSMParameterValueEC2VPCId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::EC2::VPC::Id>\")\nSSMParameterValueRoute53HostedZoneId = CFNType(\n    \"AWS::SSM::Parameter::Value<AWS::Route53::HostedZone::Id>\")\nSSMParameterValueEC2AvailabilityZoneNameList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::AvailabilityZone::Name>>\")\nSSMParameterValueEC2ImageIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::Image::Id>>\")\nSSMParameterValueEC2InstanceIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::Instance::Id>>\")\nSSMParameterValueEC2SecurityGroupGroupNameList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::SecurityGroup::GroupName>>\")\nSSMParameterValueEC2SecurityGroupIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::SecurityGroup::Id>>\")\nSSMParameterValueEC2SubnetIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::Subnet::Id>>\")\nSSMParameterValueEC2VolumeIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::Volume::Id>>\")\nSSMParameterValueEC2VPCIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::EC2::VPC::Id>>\")\nSSMParameterValueRoute53HostedZoneIdList = CFNType(\n    \"AWS::SSM::Parameter::Value<List<AWS::Route53::HostedZone::Id>>\")\n"
  },
  {
    "path": "stacker/commands/__init__.py",
    "content": "from .stacker import Stacker  # NOQA\n"
  },
  {
    "path": "stacker/commands/stacker/__init__.py",
    "content": "import logging\n\nfrom .build import Build\nfrom .destroy import Destroy\nfrom .info import Info\nfrom .diff import Diff\nfrom .graph import Graph\nfrom .base import BaseCommand\nfrom ...config import render_parse_load as load_config\nfrom ...context import Context\nfrom ...providers.aws import default\nfrom ... import __version__\nfrom ... import session_cache\n\nlogger = logging.getLogger(__name__)\n\n\nclass Stacker(BaseCommand):\n\n    name = \"stacker\"\n    subcommands = (Build, Destroy, Info, Diff, Graph)\n\n    def configure(self, options, **kwargs):\n\n        session_cache.default_profile = options.profile\n\n        self.config = load_config(\n            options.config.read(),\n            environment=options.environment,\n            validate=True,\n        )\n\n        options.provider_builder = default.ProviderBuilder(\n            region=options.region,\n            interactive=options.interactive,\n            replacements_only=options.replacements_only,\n            recreate_failed=options.recreate_failed,\n            service_role=self.config.service_role,\n        )\n\n        options.context = Context(\n            environment=options.environment,\n            config=self.config,\n            # Allow subcommands to provide any specific kwargs to the Context\n            # that it wants.\n            **options.get_context_kwargs(options)\n        )\n\n        super(Stacker, self).configure(options, **kwargs)\n        if options.interactive:\n            logger.info(\"Using interactive AWS provider mode.\")\n        else:\n            logger.info(\"Using default AWS provider mode\")\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"--version\", action=\"version\",\n                            version=\"%%(prog)s %s\" % (__version__,))\n"
  },
  {
    "path": "stacker/commands/stacker/base.py",
    "content": "import argparse\nimport threading\nimport signal\nfrom collections.abc import Mapping\nimport logging\nimport os.path\n\nfrom ...environment import (\n    DictWithSourceType,\n    parse_environment,\n    parse_yaml_environment\n)\n\nlogger = logging.getLogger(__name__)\n\nSIGNAL_NAMES = {\n    signal.SIGINT: \"SIGINT\",\n    signal.SIGTERM: \"SIGTERM\",\n}\n\n\ndef cancel():\n    \"\"\"Returns a threading.Event() that will get set when SIGTERM, or\n    SIGINT are triggered. This can be used to cancel execution of threads.\n    \"\"\"\n    cancel = threading.Event()\n\n    def cancel_execution(signum, frame):\n        signame = SIGNAL_NAMES.get(signum, signum)\n        logger.info(\"Signal %s received, quitting \"\n                    \"(this can take some time)...\", signame)\n        cancel.set()\n\n    signal.signal(signal.SIGINT, cancel_execution)\n    signal.signal(signal.SIGTERM, cancel_execution)\n    return cancel\n\n\nclass KeyValueAction(argparse.Action):\n    def __init__(self, option_strings, dest, default=None, nargs=None,\n                 **kwargs):\n        if nargs:\n            raise ValueError(\"nargs not allowed\")\n        default = default or {}\n        super(KeyValueAction, self).__init__(option_strings, dest, nargs,\n                                             default=default, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        if not isinstance(values, Mapping):\n            raise ValueError(\"type must be \\\"key_value\\\"\")\n        if not getattr(namespace, self.dest):\n            setattr(namespace, self.dest, {})\n        getattr(namespace, self.dest).update(values)\n\n\ndef key_value_arg(string):\n    try:\n        k, v = string.split(\"=\", 1)\n    except ValueError:\n        raise argparse.ArgumentTypeError(\n            \"%s does not match KEY=VALUE format.\" % string)\n    return {k: v}\n\n\ndef environment_file(input_file):\n    \"\"\"Reads a stacker environment file and 
returns the resulting data.\"\"\"\n\n    is_yaml = os.path.splitext(input_file)[1].lower() in ['.yaml', '.yml']\n\n    with open(input_file) as fd:\n        if is_yaml:\n            return parse_yaml_environment(fd.read())\n        else:\n            return parse_environment(fd.read())\n\n\nclass BaseCommand(object):\n    \"\"\"Base class for all stacker subcommands.\n\n    The way argparse handles common arguments that should be passed to the\n    subparser is confusing. You can add arguments to the parent parser that\n    will get passed to the subparser, but these then need to be provided on the\n    command line before specifying the subparser. Furthermore, when viewing the\n    help for a subcommand, you can't view these parameters.\n\n    By including shared parameters for stacker commands within this subclass,\n    we don't have to redundantly add the parameters we want on all subclasses\n    within each subparser and these shared parameters are treated as normal\n    arguments to the subcommand.\n\n    \"\"\"\n\n    name = None\n    description = None\n    subcommands = tuple()\n    subcommands_help = None\n\n    def __init__(self, setup_logging=None, *args, **kwargs):\n        self.setup_logging = setup_logging\n        if not self.name:\n            raise ValueError(\"Subcommands must set \\\"name\\\": %s\" % (self,))\n\n    def add_subcommands(self, parser):\n        if self.subcommands:\n            subparsers = parser.add_subparsers(help=self.subcommands_help)\n            for subcommand_class in self.subcommands:\n                subcommand = subcommand_class()\n                subparser = subparsers.add_parser(\n                    subcommand.name,\n                    description=subcommand.description,\n                )\n                subcommand.add_arguments(subparser)\n                subparser.set_defaults(run=subcommand.run)\n                subparser.set_defaults(\n                    get_context_kwargs=subcommand.get_context_kwargs)\n\n    
def parse_args(self, *vargs):\n        parser = argparse.ArgumentParser(description=self.description)\n        self.add_subcommands(parser)\n        self.add_arguments(parser)\n        args = parser.parse_args(*vargs)\n        args.environment.update(args.cli_envs)\n        return args\n\n    def run(self, options, **kwargs):\n        pass\n\n    def configure(self, options, **kwargs):\n        if self.setup_logging:\n            self.setup_logging(options.verbose, self.config.log_formats)\n\n    def get_context_kwargs(self, options, **kwargs):\n        \"\"\"Return a dictionary of kwargs that will be used with the Context.\n\n        This allows commands to pass in any specific arguments they define to\n        the context.\n\n        Args:\n            options (:class:`argparse.Namespace`): arguments that have been\n                passed via the command line\n\n        Returns:\n            dict: Dictionary that will be passed to Context initializer as\n                kwargs.\n\n        \"\"\"\n        return {}\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"-e\", \"--env\", dest=\"cli_envs\", metavar=\"ENV=VALUE\",\n            type=key_value_arg, action=KeyValueAction, default={},\n            help=\"Adds environment key/value pairs from the command line. \"\n                 \"Overrides your environment file settings. Can be specified \"\n                 \"more than once.\")\n        parser.add_argument(\n            \"-r\", \"--region\",\n            help=\"The default AWS region to use for all AWS API calls.\")\n        parser.add_argument(\n            \"-p\", \"--profile\",\n            help=\"The default AWS profile to use for all AWS API calls. 
If \"\n                 \"not specified, the default will be according to http://bo\"\n                 \"to3.readthedocs.io/en/latest/guide/configuration.html.\")\n        parser.add_argument(\n            \"-v\", \"--verbose\", action=\"count\", default=0,\n            help=\"Increase output verbosity. May be specified up to twice.\")\n        parser.add_argument(\n            \"environment\", type=environment_file, nargs='?',\n            default=DictWithSourceType('simple'),\n            help=\"Path to an environment file. The file can be a simple \"\n                 \"`key: value` pair environment file, or a YAML file ending in\"\n                 \".yaml or .yml. In the simple key:value case, values in the \"\n                 \"environment file can be used in the stack config as if it \"\n                 \"were a string.Template type: \"\n                 \"https://docs.python.org/2/library/\"\n                 \"string.html#template-strings. In the YAML case, variable\"\n                 \"references in the stack config are replaced with the objects\"\n                 \"in the environment after parsing\")\n        parser.add_argument(\n            \"config\", type=argparse.FileType(),\n            help=\"The config file where stack configuration is located. Must \"\n                 \"be in yaml format. If `-` is provided, then the config will \"\n                 \"be read from stdin.\")\n        parser.add_argument(\n            \"-i\", \"--interactive\", action=\"store_true\",\n            help=\"Enable interactive mode. If specified, this will use the \"\n                 \"AWS interactive provider, which leverages Cloudformation \"\n                 \"Change Sets to display changes before running \"\n                 \"cloudformation templates. You'll be asked if you want to \"\n                 \"execute each change set. 
If you only want to authorize \"\n                 \"replacements, run with \\\"--replacements-only\\\" as well.\")\n        parser.add_argument(\n            \"--replacements-only\", action=\"store_true\",\n            help=\"If interactive mode is enabled, stacker will only prompt to \"\n                 \"authorize replacements.\")\n        parser.add_argument(\n            \"--recreate-failed\", action=\"store_true\",\n            help=\"Destroy and re-create stacks that are stuck in a failed \"\n                 \"state from an initial deployment when updating.\")\n"
  },
  {
    "path": "stacker/commands/stacker/build.py",
    "content": "\"\"\"Launches or updates CloudFormation stacks based on the given config.\n\nStacker is smart enough to figure out if anything (the template or parameters)\nhave changed for a given stack. If nothing has changed, stacker will correctly\nskip executing anything against the stack.\n\n\"\"\"\n\nfrom .base import BaseCommand, cancel\nfrom ...actions import build\n\n\nclass Build(BaseCommand):\n\n    name = \"build\"\n    description = __doc__\n\n    def add_arguments(self, parser):\n        super(Build, self).add_arguments(parser)\n        parser.add_argument(\"-o\", \"--outline\", action=\"store_true\",\n                            help=\"Print an outline of what steps will be \"\n                                 \"taken to build the stacks\")\n        parser.add_argument(\"--force\", action=\"append\", default=[],\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"If a stackname is provided to --force, it \"\n                                 \"will be updated, even if it is locked in \"\n                                 \"the config.\")\n        parser.add_argument(\"--targets\", \"--stacks\", action=\"append\",\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"Only work on the stacks given, and their \"\n                                 \"dependencies. Can be specified more than \"\n                                 \"once. If not specified then stacker will \"\n                                 \"work on all stacks in the config file.\")\n        parser.add_argument(\"-j\", \"--max-parallel\", action=\"store\", type=int,\n                            default=0,\n                            help=\"The maximum number of stacks to execute in \"\n                                 \"parallel. 
If not provided, the value will \"\n                                 \"be constrained based on the underlying \"\n                                 \"graph.\")\n        parser.add_argument(\"-t\", \"--tail\", action=\"store_true\",\n                            help=\"Tail the CloudFormation logs while working \"\n                                 \"with stacks\")\n        parser.add_argument(\"-d\", \"--dump\", action=\"store\", type=str,\n                            help=\"Dump the rendered Cloudformation templates \"\n                                 \"to a directory\")\n\n    def run(self, options, **kwargs):\n        super(Build, self).run(options, **kwargs)\n        action = build.Action(options.context,\n                              provider_builder=options.provider_builder,\n                              cancel=cancel())\n        action.execute(concurrency=options.max_parallel,\n                       outline=options.outline,\n                       tail=options.tail,\n                       dump=options.dump)\n\n    def get_context_kwargs(self, options, **kwargs):\n        return {\"stack_names\": options.targets, \"force_stacks\": options.force}\n"
  },
  {
    "path": "stacker/commands/stacker/destroy.py",
    "content": "\"\"\"Destroys CloudFormation stacks based on the given config.\n\nStacker will determine the order in which stacks should be destroyed based on\nany manual requirements they specify or output values they rely on from other\nstacks.\n\n\"\"\"\nfrom .base import BaseCommand, cancel\nfrom ...actions import destroy\n\n\nclass Destroy(BaseCommand):\n\n    name = \"destroy\"\n    description = __doc__\n\n    def add_arguments(self, parser):\n        super(Destroy, self).add_arguments(parser)\n        parser.add_argument(\"-f\", \"--force\", action=\"store_true\",\n                            help=\"Whether or not you want to go through \"\n                                 \" with destroying the stacks\")\n        parser.add_argument(\"--targets\", \"--stacks\", action=\"append\",\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"Only work on the stacks given. Can be \"\n                                 \"specified more than once. If not specified \"\n                                 \"then stacker will work on all stacks in the \"\n                                 \"config file.\")\n        parser.add_argument(\"-j\", \"--max-parallel\", action=\"store\", type=int,\n                            default=0,\n                            help=\"The maximum number of stacks to execute in \"\n                                 \"parallel. 
If not provided, the value will \"\n                                 \"be constrained based on the underlying \"\n                                 \"graph.\")\n        parser.add_argument(\"-t\", \"--tail\", action=\"store_true\",\n                            help=\"Tail the CloudFormation logs while working \"\n                                 \"with stacks\")\n\n    def run(self, options, **kwargs):\n        super(Destroy, self).run(options, **kwargs)\n        action = destroy.Action(options.context,\n                                provider_builder=options.provider_builder,\n                                cancel=cancel())\n        action.execute(concurrency=options.max_parallel,\n                       force=options.force,\n                       tail=options.tail)\n\n    def get_context_kwargs(self, options, **kwargs):\n        return {\"stack_names\": options.targets}\n"
  },
  {
    "path": "stacker/commands/stacker/diff.py",
    "content": "\"\"\" Diffs the config against the currently running CloudFormation stacks\n\nSometimes small changes can have big impacts.  Run \"stacker diff\" before\n\"stacker build\" to detect bad things(tm) from happening in advance!\n\"\"\"\n\nfrom .base import BaseCommand\nfrom ...actions import diff\n\n\nclass Diff(BaseCommand):\n    name = \"diff\"\n    description = __doc__\n\n    def add_arguments(self, parser):\n        super(Diff, self).add_arguments(parser)\n        parser.add_argument(\"--force\", action=\"append\", default=[],\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"If a stackname is provided to --force, it \"\n                                 \"will be diffed, even if it is locked in \"\n                                 \"the config.\")\n        parser.add_argument(\"--stacks\", action=\"append\",\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"Only work on the stacks given. Can be \"\n                                 \"specified more than once. If not specified \"\n                                 \"then stacker will work on all stacks in the \"\n                                 \"config file.\")\n\n    def run(self, options, **kwargs):\n        super(Diff, self).run(options, **kwargs)\n        action = diff.Action(options.context,\n                             provider_builder=options.provider_builder)\n        action.execute()\n\n    def get_context_kwargs(self, options, **kwargs):\n        return {\"stack_names\": options.stacks, \"force_stacks\": options.force}\n"
  },
  {
    "path": "stacker/commands/stacker/graph.py",
    "content": "\"\"\"Prints the the relationships between steps as a graph.\n\n\"\"\"\n\nfrom .base import BaseCommand\nfrom ...actions import graph\n\n\nclass Graph(BaseCommand):\n\n    name = \"graph\"\n    description = __doc__\n\n    def add_arguments(self, parser):\n        super(Graph, self).add_arguments(parser)\n        parser.add_argument(\"-f\", \"--format\", default=\"dot\",\n                            choices=graph.FORMATTERS,\n                            help=\"The format to print the graph in.\")\n        parser.add_argument(\"--reduce\", action=\"store_true\",\n                            help=\"When provided, this will create a \"\n                                 \"graph with less edges, by performing \"\n                                 \"a transitive reduction on the underlying \"\n                                 \"graph. While this will produce a less \"\n                                 \"noisy graph, it is slower.\")\n\n    def run(self, options, **kwargs):\n        super(Graph, self).run(options, **kwargs)\n        action = graph.Action(options.context,\n                              provider_builder=options.provider_builder)\n        action.execute(\n            format=options.format,\n            reduce=options.reduce)\n"
  },
  {
    "path": "stacker/commands/stacker/info.py",
    "content": "\"\"\"Gets information on the CloudFormation stacks based on the given config.\"\"\"\n\nfrom .base import BaseCommand\nfrom ...actions import info\n\n\nclass Info(BaseCommand):\n\n    name = \"info\"\n    description = __doc__\n\n    def add_arguments(self, parser):\n        super(Info, self).add_arguments(parser)\n        parser.add_argument(\"--stacks\", action=\"append\",\n                            metavar=\"STACKNAME\", type=str,\n                            help=\"Only work on the stacks given. Can be \"\n                                 \"specified more than once. If not specified \"\n                                 \"then stacker will work on all stacks in the \"\n                                 \"config file.\")\n\n    def run(self, options, **kwargs):\n        super(Info, self).run(options, **kwargs)\n        action = info.Action(options.context,\n                             provider_builder=options.provider_builder)\n\n        action.execute()\n\n    def get_context_kwargs(self, options, **kwargs):\n        return {\"stack_names\": options.stacks}\n"
  },
  {
    "path": "stacker/config/__init__.py",
    "content": "from past.types import basestring\nimport copy\nimport sys\nimport logging\nimport re\n\nfrom string import Template\nfrom io import StringIO\n\nfrom schematics import Model\nfrom schematics.exceptions import ValidationError\nfrom schematics.exceptions import (\n    BaseError as SchematicsError,\n    UndefinedValueError\n)\n\nfrom schematics.types import (\n    ModelType,\n    ListType,\n    StringType,\n    BooleanType,\n    DictType,\n    BaseType\n)\n\nimport yaml\n\nfrom ..lookups import register_lookup_handler\nfrom ..util import merge_map, yaml_to_ordered_dict, SourceProcessor\nfrom .. import exceptions\nfrom ..environment import DictWithSourceType\n\n# register translators (yaml constructors)\nfrom .translators import *  # NOQA\n\nlogger = logging.getLogger(__name__)\n\n\ndef render_parse_load(raw_config, environment=None, validate=True):\n    \"\"\"Encapsulates the render -> parse -> validate -> load process.\n\n    Args:\n        raw_config (str): the raw stacker configuration string.\n        environment (dict, optional): any environment values that should be\n            passed to the config\n        validate (bool): if provided, the config is validated before being\n            loaded.\n\n    Returns:\n        :class:`Config`: the parsed stacker config.\n\n    \"\"\"\n\n    pre_rendered = render(raw_config, environment)\n\n    rendered = process_remote_sources(pre_rendered, environment)\n\n    config = parse(rendered)\n\n    # For backwards compatibility, if the config doesn't specify a namespace,\n    # we fall back to fetching it from the environment, if provided.\n    if config.namespace is None:\n        namespace = environment.get(\"namespace\")\n        if namespace:\n            logger.warn(\"DEPRECATION WARNING: specifying namespace in the \"\n                        \"environment is deprecated. 
See \"\n                        \"https://stacker.readthedocs.io/en/latest/config.html\"\n                        \"#namespace \"\n                        \"for more info.\")\n            config.namespace = namespace\n\n    if validate:\n        config.validate()\n\n    return load(config)\n\n\ndef render(raw_config, environment=None):\n    \"\"\"Renders a config, using it as a template with the environment.\n\n    Args:\n        raw_config (str): the raw stacker configuration string.\n        environment (DictWithSourceType, optional): any environment values that\n            should be passed to the config\n\n    Returns:\n        str: the stacker configuration populated with any values passed from\n            the environment\n\n    \"\"\"\n    if not environment:\n        environment = {}\n    # If we have a naked dict, we got here through the old non-YAML path, so\n    # we can't have a YAML config file.\n    is_yaml = False\n    if type(environment) == DictWithSourceType:\n        is_yaml = environment.source_type == 'yaml'\n\n    if is_yaml:\n        # First, read the config as yaml\n        config = yaml.safe_load(raw_config)\n\n        # Next, we need to walk the yaml structure, and find all things which\n        # look like variable references. This regular expression is copied from\n        # string.template to match variable references identically as the\n        # simple configuration case below. 
We've got two cases of this pattern,\n        # since python 2.7 doesn't support re.fullmatch(), so we have to add\n        # the end of line anchor to the inner patterns.\n        idpattern = r'[_a-z][_a-z0-9]*'\n        pattern = r\"\"\"\n            %(delim)s(?:\n              (?P<named>%(id)s)         |   # delimiter and a Python identifier\n              {(?P<braced>%(id)s)}         # delimiter and a braced identifier\n            )\n            \"\"\" % {'delim': re.escape('$'),\n                   'id': idpattern,\n                   }\n        full_pattern = r\"\"\"\n            %(delim)s(?:\n              (?P<named>%(id)s)$         |  # delimiter and a Python identifier\n              {(?P<braced>%(id)s)}$         # delimiter and a braced identifier\n            )\n            \"\"\" % {'delim': re.escape('$'),\n                   'id': idpattern,\n                   }\n        exp = re.compile(pattern, re.IGNORECASE | re.VERBOSE)\n        full_exp = re.compile(full_pattern, re.IGNORECASE | re.VERBOSE)\n        new_config = substitute_references(config, environment, exp, full_exp)\n        # Now, re-encode the whole thing as YAML and return that.\n        return yaml.safe_dump(new_config)\n    else:\n        t = Template(raw_config)\n        buff = StringIO()\n\n        try:\n            substituted = t.substitute(environment)\n        except KeyError as e:\n            raise exceptions.MissingEnvironment(e.args[0])\n        except ValueError:\n            # Support \"invalid\" placeholders for lookup placeholders.\n            substituted = t.safe_substitute(environment)\n\n        if not isinstance(substituted, str):\n            substituted = substituted.decode('utf-8')\n\n        buff.write(substituted)\n        buff.seek(0)\n        return buff.read()\n\n\ndef substitute_references(root, environment, exp, full_exp):\n    # We need to check for something being a string in both python 2.7 and\n    # 3+. 
The aliases in the future package don't work for yaml sourced\n    # strings, so we have to spin our own.\n    def isstr(s):\n        try:\n            return isinstance(s, basestring)\n        except NameError:\n            return isinstance(s, str)\n\n    if isinstance(root, list):\n        result = []\n        for x in root:\n            result.append(substitute_references(x, environment, exp, full_exp))\n        return result\n    elif isinstance(root, dict):\n        result = {}\n        for k, v in root.items():\n            result[k] = substitute_references(v, environment, exp, full_exp)\n        return result\n    elif isstr(root):\n        # Strings are the special type where all substitutions happen. If we\n        # encounter a string object in the expression tree, we need to perform\n        # one of two different kinds of matches on it. First, if the entire\n        # string is a variable, we can replace it with an arbitrary object;\n        # dict, list, primitive. If the string contains variables within it,\n        # then we have to do string substitution.\n        match_obj = full_exp.match(root.strip())\n        if match_obj:\n            matches = match_obj.groupdict()\n            var_name = matches['named'] or matches['braced']\n            if var_name is not None:\n                value = environment.get(var_name)\n                if value is None:\n                    raise exceptions.MissingEnvironment(var_name)\n                return value\n\n        # Returns if an object is a basic type. 
Once again, the future package\n        # overrides don't work for string here, so we have to special case it\n        def is_basic_type(o):\n            if isstr(o):\n                return True\n            basic_types = [int, bool, float]\n            for t in basic_types:\n                if isinstance(o, t):\n                    return True\n            return False\n\n        # If we got here, then we didn't have any full matches, now perform\n        # partial substitutions within a string.\n        def replace(mo):\n            name = mo.groupdict()['braced'] or mo.groupdict()['named']\n            if not name:\n                return root[mo.start():mo.end()]\n            val = environment.get(name)\n            if val is None:\n                raise exceptions.MissingEnvironment(name)\n            if not is_basic_type(val):\n                raise exceptions.WrongEnvironmentType(name)\n            return str(val)\n        value = exp.sub(replace, root)\n        return value\n    # In all other unhandled cases, return a copy of the input\n    return copy.copy(root)\n\n\ndef parse(raw_config):\n    \"\"\"Parse a raw yaml formatted stacker config.\n\n    Args:\n        raw_config (str): the raw stacker configuration string in yaml format.\n\n    Returns:\n        :class:`Config`: the parsed stacker config.\n\n    \"\"\"\n\n    # Convert any applicable dictionaries back into lists\n    # This is necessary due to the move from lists for these top level config\n    # values to either lists or OrderedDicts.\n    # Eventually we should probably just make them OrderedDicts only.\n    config_dict = yaml_to_ordered_dict(raw_config)\n    if config_dict:\n        for top_level_key in ['stacks', 'pre_build', 'post_build',\n                              'pre_destroy', 'post_destroy']:\n            top_level_value = config_dict.get(top_level_key)\n            if isinstance(top_level_value, dict):\n                tmp_list = []\n                for key, value in 
top_level_value.items():\n                    tmp_dict = copy.deepcopy(value)\n                    if top_level_key == 'stacks':\n                        tmp_dict['name'] = key\n                    tmp_list.append(tmp_dict)\n                config_dict[top_level_key] = tmp_list\n\n    # Top-level excess keys are removed by Config._convert, so enabling strict\n    # mode is fine here.\n    try:\n        return Config(config_dict, strict=True)\n    except SchematicsError as e:\n        raise exceptions.InvalidConfig(e.errors)\n\n\ndef load(config):\n    \"\"\"Loads a stacker configuration by modifying sys paths, loading lookups,\n    etc.\n\n    Args:\n        config (:class:`Config`): the stacker config to load.\n\n    Returns:\n        :class:`Config`: the stacker config provided above.\n\n    \"\"\"\n\n    if config.sys_path:\n        logger.debug(\"Appending %s to sys.path.\", config.sys_path)\n        sys.path.append(config.sys_path)\n        logger.debug(\"sys.path is now %s\", sys.path)\n    if config.lookups:\n        for key, handler in config.lookups.items():\n            register_lookup_handler(key, handler)\n\n    return config\n\n\ndef dump(config):\n    \"\"\"Dumps a stacker Config object as yaml.\n\n    Args:\n        config (:class:`Config`): the stacker Config object.\n        stream (stream): an optional stream object to write to.\n\n    Returns:\n        str: the yaml formatted stacker Config.\n\n    \"\"\"\n\n    return yaml.safe_dump(\n        config.to_primitive(),\n        default_flow_style=False,\n        encoding='utf-8',\n        allow_unicode=True)\n\n\ndef process_remote_sources(raw_config, environment=None):\n    \"\"\"Stage remote package sources and merge in remote configs.\n\n    Args:\n        raw_config (str): the raw stacker configuration string.\n        environment (dict, optional): any environment values that should be\n            passed to the config\n\n    Returns:\n        str: the raw stacker configuration string\n\n    
\"\"\"\n\n    config = yaml.safe_load(raw_config)\n    if config and config.get('package_sources'):\n        processor = SourceProcessor(\n            sources=config['package_sources'],\n            stacker_cache_dir=config.get('stacker_cache_dir')\n        )\n        processor.get_package_sources()\n        if processor.configs_to_merge:\n            for i in processor.configs_to_merge:\n                logger.debug(\"Merging in remote config \\\"%s\\\"\", i)\n                remote_config = yaml.safe_load(open(i))\n                config = merge_map(remote_config, config)\n            # Call the render again as the package_sources may have merged in\n            # additional environment lookups\n            if not environment:\n                environment = {}\n            return render(str(config), environment)\n\n    return raw_config\n\n\ndef not_empty_list(value):\n    if not value or len(value) < 1:\n        raise ValidationError(\"Should have more than one element.\")\n    return value\n\n\nclass AnyType(BaseType):\n    pass\n\n\nclass LocalPackageSource(Model):\n    source = StringType(required=True)\n\n    paths = ListType(StringType, serialize_when_none=False)\n\n    configs = ListType(StringType, serialize_when_none=False)\n\n\nclass GitPackageSource(Model):\n    uri = StringType(required=True)\n\n    tag = StringType(serialize_when_none=False)\n\n    branch = StringType(serialize_when_none=False)\n\n    commit = StringType(serialize_when_none=False)\n\n    paths = ListType(StringType, serialize_when_none=False)\n\n    configs = ListType(StringType, serialize_when_none=False)\n\n\nclass S3PackageSource(Model):\n    bucket = StringType(required=True)\n\n    key = StringType(required=True)\n\n    use_latest = BooleanType(serialize_when_none=False)\n\n    requester_pays = BooleanType(serialize_when_none=False)\n\n    paths = ListType(StringType, serialize_when_none=False)\n\n    configs = ListType(StringType, serialize_when_none=False)\n\n\nclass 
PackageSources(Model):\n    local = ListType(ModelType(LocalPackageSource))\n\n    git = ListType(ModelType(GitPackageSource))\n\n    s3 = ListType(ModelType(S3PackageSource))\n\n\nclass Hook(Model):\n    path = StringType(required=True)\n\n    required = BooleanType(default=True)\n\n    enabled = BooleanType(default=True)\n\n    data_key = StringType(serialize_when_none=False)\n\n    args = DictType(AnyType)\n\n\nclass Target(Model):\n    name = StringType(required=True)\n\n    requires = ListType(StringType, serialize_when_none=False)\n\n    required_by = ListType(StringType, serialize_when_none=False)\n\n\nclass Stack(Model):\n    name = StringType(required=True)\n\n    stack_name = StringType(serialize_when_none=False)\n\n    region = StringType(serialize_when_none=False)\n\n    profile = StringType(serialize_when_none=False)\n\n    class_path = StringType(serialize_when_none=False)\n\n    template_path = StringType(serialize_when_none=False)\n\n    description = StringType(serialize_when_none=False)\n\n    requires = ListType(StringType, serialize_when_none=False)\n\n    required_by = ListType(StringType, serialize_when_none=False)\n\n    locked = BooleanType(default=False)\n\n    enabled = BooleanType(default=True)\n\n    protected = BooleanType(default=False)\n\n    variables = DictType(AnyType, serialize_when_none=False)\n\n    parameters = DictType(AnyType, serialize_when_none=False)\n\n    tags = DictType(StringType, serialize_when_none=False)\n\n    stack_policy_path = StringType(serialize_when_none=False)\n\n    in_progress_behavior = StringType(serialize_when_none=False)\n\n    notification_arns = ListType(\n        StringType, serialize_when_none=False, default=[])\n\n    def validate_class_path(self, data, value):\n        if value and data[\"template_path\"]:\n            raise ValidationError(\n                \"template_path cannot be present when \"\n                \"class_path is provided.\")\n        self.validate_stack_source(data)\n\n    def 
validate_template_path(self, data, value):\n        if value and data[\"class_path\"]:\n            raise ValidationError(\n                \"class_path cannot be present when \"\n                \"template_path is provided.\")\n        self.validate_stack_source(data)\n\n    def validate_stack_source(self, data):\n        # Locked stacks don't actually need a template, since they're\n        # read-only.\n        if data[\"locked\"]:\n            return\n\n        if not (data[\"class_path\"] or data[\"template_path\"]):\n            raise ValidationError(\n                \"class_path or template_path is required.\")\n\n    def validate_parameters(self, data, value):\n        if value:\n            stack_name = data['name']\n            raise ValidationError(\n                \"DEPRECATION: Stack definition %s contains \"\n                \"deprecated 'parameters', rather than 'variables'. You are\"\n                \" required to update your config. See https://stacker.rea\"\n                \"dthedocs.io/en/latest/config.html#variables for \"\n                \"additional information.\"\n                % stack_name)\n        return value\n\n\nclass Config(Model):\n    \"\"\"This is the Python representation of a stacker config file.\n\n    This is used internally by stacker to parse and validate a yaml formatted\n    stacker configuration file, but can also be used in scripts to generate a\n    stacker config file before handing it off to stacker to build/destroy.\n\n    Example::\n\n        from stacker.config import dump, Config, Stack\n\n        vpc = Stack({\n            \"name\": \"vpc\",\n            \"class_path\": \"blueprints.VPC\"})\n\n        config = Config()\n        config.namespace = \"prod\"\n        config.stacks = [vpc]\n\n        print dump(config)\n\n    \"\"\"\n\n    namespace = StringType(required=True)\n\n    namespace_delimiter = StringType(serialize_when_none=False)\n\n    stacker_bucket = StringType(serialize_when_none=False)\n\n    
stacker_bucket_region = StringType(serialize_when_none=False)\n\n    stacker_cache_dir = StringType(serialize_when_none=False)\n\n    sys_path = StringType(serialize_when_none=False)\n\n    package_sources = ModelType(PackageSources, serialize_when_none=False)\n\n    service_role = StringType(serialize_when_none=False)\n\n    pre_build = ListType(ModelType(Hook), serialize_when_none=False)\n\n    post_build = ListType(ModelType(Hook), serialize_when_none=False)\n\n    pre_destroy = ListType(ModelType(Hook), serialize_when_none=False)\n\n    post_destroy = ListType(ModelType(Hook), serialize_when_none=False)\n\n    tags = DictType(StringType, serialize_when_none=False)\n\n    template_indent = StringType(serialize_when_none=False)\n\n    mappings = DictType(\n        DictType(DictType(StringType)), serialize_when_none=False)\n\n    lookups = DictType(StringType, serialize_when_none=False)\n\n    targets = ListType(\n        ModelType(Target), serialize_when_none=False)\n\n    stacks = ListType(\n        ModelType(Stack), default=[])\n\n    log_formats = DictType(StringType, serialize_when_none=False)\n\n    def _remove_excess_keys(self, data):\n        excess_keys = set(data.keys())\n        excess_keys -= self._schema.valid_input_keys\n        if not excess_keys:\n            return data\n\n        logger.debug('Removing excess keys from config input: %s',\n                     excess_keys)\n        clean_data = data.copy()\n        for key in excess_keys:\n            del clean_data[key]\n\n        return clean_data\n\n    def _convert(self, raw_data=None, context=None, **kwargs):\n        if raw_data is not None:\n            # Remove excess top-level keys, since we want to allow them to be\n            # used for custom user variables to be reference later. 
This is\n            # preferable to just disabling strict mode, as we can still\n            # disallow excess keys in the inner models.\n            raw_data = self._remove_excess_keys(raw_data)\n\n        return super(Config, self)._convert(raw_data=raw_data, context=context,\n                                            **kwargs)\n\n    def validate(self, *args, **kwargs):\n        try:\n            return super(Config, self).validate(*args, **kwargs)\n        except UndefinedValueError as e:\n            raise exceptions.InvalidConfig([e.message])\n        except SchematicsError as e:\n            raise exceptions.InvalidConfig(e.errors)\n\n    def validate_stacks(self, data, value):\n        if value:\n            stack_names = [stack.name for stack in value]\n            if len(set(stack_names)) != len(stack_names):\n                # only loop / enumerate if there is an issue.\n                for i, stack_name in enumerate(stack_names):\n                    if stack_names.count(stack_name) != 1:\n                        raise ValidationError(\n                            \"Duplicate stack %s found at index %d.\"\n                            % (stack_name, i))\n"
  },
  {
    "path": "stacker/config/translators/__init__.py",
    "content": "import yaml\n\nfrom .kms import kms_simple_constructor\n\nyaml.add_constructor('!kms', kms_simple_constructor)\n"
  },
  {
    "path": "stacker/config/translators/kms.py",
    "content": "# NOTE: The translator is going to be deprecated in favor of the lookup\nfrom ...lookups.handlers.kms import KmsLookup\n\n\ndef kms_simple_constructor(loader, node):\n    value = loader.construct_scalar(node)\n    return KmsLookup.handler(value)\n"
  },
  {
    "path": "stacker/context.py",
    "content": "import collections.abc\nimport logging\n\nfrom stacker.config import Config\nfrom .stack import Stack\nfrom .target import Target\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_NAMESPACE_DELIMITER = \"-\"\nDEFAULT_TEMPLATE_INDENT = 4\n\n\ndef get_fqn(base_fqn, delimiter, name=None):\n    \"\"\"Return the fully qualified name of an object within this context.\n\n    If the name passed already appears to be a fully qualified name, it\n    will be returned with no further processing.\n\n    \"\"\"\n    if name and name.startswith(\"%s%s\" % (base_fqn, delimiter)):\n        return name\n\n    return delimiter.join([_f for _f in [base_fqn, name] if _f])\n\n\nclass Context(object):\n    \"\"\"The context under which the current stacks are being executed.\n\n    The stacker Context is responsible for translating the values passed in via\n    the command line and specified in the config to `Stack` objects.\n\n    Args:\n        environment (dict): A dictionary used to pass in information about\n            the environment. Useful for templating.\n        stack_names (list): A list of stack_names to operate on. If not passed,\n            usually all stacks defined in the config will be operated on.\n        config (:class:`stacker.config.Config`): The stacker configuration\n            being operated on.\n        force_stacks (list): A list of stacks to force work on. 
Used to work\n            on locked stacks.\n\n    \"\"\"\n\n    def __init__(self, environment=None,\n                 stack_names=None,\n                 config=None,\n                 force_stacks=None):\n        self.environment = environment\n        self.stack_names = stack_names or []\n        self.config = config or Config()\n        self.force_stacks = force_stacks or []\n        self.hook_data = {}\n\n    @property\n    def namespace(self):\n        return self.config.namespace\n\n    @property\n    def namespace_delimiter(self):\n        delimiter = self.config.namespace_delimiter\n        if delimiter is not None:\n            return delimiter\n        return DEFAULT_NAMESPACE_DELIMITER\n\n    @property\n    def template_indent(self):\n        indent = self.config.template_indent\n        if indent is not None:\n            return int(indent)\n        return DEFAULT_TEMPLATE_INDENT\n\n    @property\n    def bucket_name(self):\n        if not self.upload_templates_to_s3:\n            return None\n\n        return self.config.stacker_bucket \\\n            or \"stacker-%s\" % (self.get_fqn(),)\n\n    @property\n    def upload_templates_to_s3(self):\n        # Don't upload stack templates to S3 if `stacker_bucket` is explicitly\n        # set to an empty string.\n        if self.config.stacker_bucket == '':\n            logger.debug(\"Not uploading templates to s3 because \"\n                         \"`stacker_bucket` is explicity set to an \"\n                         \"empty string\")\n            return False\n\n        # If no namespace is specificied, and there's no explicit stacker\n        # bucket specified, don't upload to s3. 
This makes sense because we\n        # can't realistically auto generate a stacker bucket name in this case.\n        if not self.namespace and not self.config.stacker_bucket:\n            logger.debug(\"Not uploading templates to s3 because \"\n                         \"there is no namespace set, and no \"\n                         \"stacker_bucket set\")\n            return False\n\n        return True\n\n    @property\n    def tags(self):\n        tags = self.config.tags\n        if tags is not None:\n            return tags\n        if self.namespace:\n            return {\"stacker_namespace\": self.namespace}\n        return {}\n\n    @property\n    def _base_fqn(self):\n        return self.namespace.replace(\".\", \"-\").lower()\n\n    @property\n    def mappings(self):\n        return self.config.mappings or {}\n\n    def _get_stack_definitions(self):\n        return self.config.stacks\n\n    def get_targets(self):\n        \"\"\"Returns the named targets that are specified in the config.\n\n        Returns:\n            list: a list of :class:`stacker.target.Target` objects\n\n        \"\"\"\n        if not hasattr(self, \"_targets\"):\n            targets = []\n            for target_def in self.config.targets or []:\n                target = Target(target_def)\n                targets.append(target)\n            self._targets = targets\n        return self._targets\n\n    def get_stacks(self):\n        \"\"\"Get the stacks for the current action.\n\n        Handles configuring the :class:`stacker.stack.Stack` objects that will\n        be used in the current action.\n\n        Returns:\n            list: a list of :class:`stacker.stack.Stack` objects\n\n        \"\"\"\n        if not hasattr(self, \"_stacks\"):\n            stacks = []\n            definitions = self._get_stack_definitions()\n            for stack_def in definitions:\n                stack = Stack(\n                    definition=stack_def,\n                    context=self,\n            
        mappings=self.mappings,\n                    force=stack_def.name in self.force_stacks,\n                    locked=stack_def.locked,\n                    enabled=stack_def.enabled,\n                    protected=stack_def.protected,\n                    notification_arns=stack_def.notification_arns\n                )\n                stacks.append(stack)\n            self._stacks = stacks\n        return self._stacks\n\n    def get_stack(self, name):\n        for stack in self.get_stacks():\n            if stack.name == name:\n                return stack\n\n    def get_stacks_dict(self):\n        return dict((stack.fqn, stack) for stack in self.get_stacks())\n\n    def get_fqn(self, name=None):\n        \"\"\"Return the fully qualified name of an object within this context.\n\n        If the name passed already appears to be a fully qualified name, it\n        will be returned with no further processing.\n\n        \"\"\"\n        return get_fqn(self._base_fqn, self.namespace_delimiter, name)\n\n    def set_hook_data(self, key, data):\n        \"\"\"Set hook data for the given key.\n\n        Args:\n            key(str): The key to store the hook data in.\n            data(:class:`collections.Mapping`): A dictionary of data to store,\n                as returned from a hook.\n        \"\"\"\n\n        if not isinstance(data, collections.abc.Mapping):\n            raise ValueError(\"Hook (key: %s) data must be an instance of \"\n                             \"collections.Mapping (a dictionary for \"\n                             \"example).\" % key)\n\n        if key in self.hook_data:\n            raise KeyError(\"Hook data for key %s already exists, each hook \"\n                           \"must have a unique data_key.\", key)\n\n        self.hook_data[key] = data\n"
  },
  {
    "path": "stacker/dag/__init__.py",
    "content": "import logging\nfrom threading import Thread\nfrom copy import copy, deepcopy\nimport collections.abc\nfrom collections import deque, OrderedDict\n\nlogger = logging.getLogger(__name__)\n\n\nclass DAGValidationError(Exception):\n    pass\n\n\nclass DAG(object):\n    \"\"\" Directed acyclic graph implementation. \"\"\"\n\n    def __init__(self):\n        \"\"\" Construct a new DAG with no nodes or edges. \"\"\"\n        self.reset_graph()\n\n    def add_node(self, node_name):\n        \"\"\" Add a node if it does not exist yet, or error out.\n\n        Args:\n            node_name (str): The unique name of the node to add.\n\n        Raises:\n            KeyError: Raised if a node with the same name already exist in the\n                      graph\n        \"\"\"\n        graph = self.graph\n        if node_name in graph:\n            raise KeyError('node %s already exists' % node_name)\n        graph[node_name] = set()\n\n    def add_node_if_not_exists(self, node_name):\n        \"\"\" Add a node if it does not exist yet, ignoring duplicates.\n\n        Args:\n            node_name (str): The name of the node to add.\n        \"\"\"\n        try:\n            self.add_node(node_name)\n        except KeyError:\n            pass\n\n    def delete_node(self, node_name):\n        \"\"\" Deletes this node and all edges referencing it.\n\n        Args:\n            node_name (str): The name of the node to delete.\n\n        Raises:\n            KeyError: Raised if the node does not exist in the graph.\n        \"\"\"\n        graph = self.graph\n        if node_name not in graph:\n            raise KeyError('node %s does not exist' % node_name)\n        graph.pop(node_name)\n\n        for node, edges in graph.items():\n            if node_name in edges:\n                edges.remove(node_name)\n\n    def delete_node_if_exists(self, node_name):\n        \"\"\" Deletes this node and all edges referencing it.\n\n        Ignores any node that is not in the 
graph, rather than throwing an\n        exception.\n\n        Args:\n            node_name (str): The name of the node to delete.\n        \"\"\"\n\n        try:\n            self.delete_node(node_name)\n        except KeyError:\n            pass\n\n    def add_edge(self, ind_node, dep_node):\n        \"\"\" Add an edge (dependency) between the specified nodes.\n\n        Args:\n            ind_node (str): The independent node to add an edge to.\n            dep_node (str): The dependent node that has a dependency on the\n                            ind_node.\n\n        Raises:\n            KeyError: Either the ind_node, or dep_node do not exist.\n            DAGValidationError: Raised if the resulting graph is invalid.\n        \"\"\"\n        graph = self.graph\n        if ind_node not in graph:\n            raise KeyError('independent node %s does not exist' % ind_node)\n        if dep_node not in graph:\n            raise KeyError('dependent node %s does not exist' % dep_node)\n        test_graph = deepcopy(graph)\n        test_graph[ind_node].add(dep_node)\n        test_dag = DAG()\n        test_dag.graph = test_graph\n        is_valid, message = test_dag.validate()\n        if is_valid:\n            graph[ind_node].add(dep_node)\n        else:\n            raise DAGValidationError(message)\n\n    def delete_edge(self, ind_node, dep_node):\n        \"\"\" Delete an edge from the graph.\n\n        Args:\n            ind_node (str): The independent node to delete an edge from.\n            dep_node (str): The dependent node that has a dependency on the\n                            ind_node.\n\n        Raises:\n            KeyError: Raised when the edge doesn't already exist.\n        \"\"\"\n        graph = self.graph\n        if dep_node not in graph.get(ind_node, []):\n            raise KeyError(\n                \"No edge exists between %s and %s.\" % (ind_node, dep_node)\n            )\n        graph[ind_node].remove(dep_node)\n\n    def transpose(self):\n   
     \"\"\" Builds a new graph with the edges reversed.\n\n        Returns:\n            :class:`stacker.dag.DAG`: The transposed graph.\n        \"\"\"\n        graph = self.graph\n        transposed = DAG()\n        for node, edges in graph.items():\n            transposed.add_node(node)\n        for node, edges in graph.items():\n            # for each edge A -> B, transpose it so that B -> A\n            for edge in edges:\n                transposed.add_edge(edge, node)\n        return transposed\n\n    def walk(self, walk_func):\n        \"\"\" Walks each node of the graph in reverse topological order.\n        This can be used to perform a set of operations, where the next\n        operation depends on the previous operation. It's important to note\n        that walking happens serially, and is not paralellized.\n\n        Args:\n            walk_func (:class:`types.FunctionType`): The function to be called\n                on each node of the graph.\n        \"\"\"\n        nodes = self.topological_sort()\n        # Reverse so we start with nodes that have no dependencies.\n        nodes.reverse()\n\n        for n in nodes:\n            walk_func(n)\n\n    def transitive_reduction(self):\n        \"\"\" Performs a transitive reduction on the DAG. 
The transitive\n        reduction of a graph is a graph with as few edges as possible with the\n        same reachability as the original graph.\n\n        See https://en.wikipedia.org/wiki/Transitive_reduction\n        \"\"\"\n        combinations = []\n        for node, edges in self.graph.items():\n            combinations += [[node, edge] for edge in edges]\n\n        while True:\n            new_combinations = []\n            for comb1 in combinations:\n                for comb2 in combinations:\n                    if not comb1[-1] == comb2[0]:\n                        continue\n                    new_entry = comb1 + comb2[1:]\n                    if new_entry not in combinations:\n                        new_combinations.append(new_entry)\n            if not new_combinations:\n                break\n            combinations += new_combinations\n\n        constructed = {(c[0], c[-1]) for c in combinations if len(c) != 2}\n        for node, edges in self.graph.items():\n            bad_nodes = {e for n, e in constructed if node == n}\n            self.graph[node] = edges - bad_nodes\n\n    def rename_edges(self, old_node_name, new_node_name):\n        \"\"\" Change references to a node in existing edges.\n\n        Args:\n            old_node_name (str): The old name for the node.\n            new_node_name (str): The new name for the node.\n        \"\"\"\n        graph = self.graph\n        for node, edges in graph.items():\n            if node == old_node_name:\n                graph[new_node_name] = copy(edges)\n                del graph[old_node_name]\n\n            else:\n                if old_node_name in edges:\n                    edges.remove(old_node_name)\n                    edges.add(new_node_name)\n\n    def predecessors(self, node):\n        \"\"\" Returns a list of all immediate predecessors of the given node\n\n        Args:\n            node (str): The node whose predecessors you want to find.\n\n        Returns:\n            list: A list 
of nodes that are immediate predecessors to node.\n        \"\"\"\n        graph = self.graph\n        return [key for key in graph if node in graph[key]]\n\n    def downstream(self, node):\n        \"\"\" Returns a list of all nodes this node has edges towards.\n\n        Args:\n            node (str): The node whose downstream nodes you want to find.\n\n        Returns:\n            list: A list of nodes that are immediately downstream from the\n                  node.\n        \"\"\"\n        graph = self.graph\n        if node not in graph:\n            raise KeyError('node %s is not in graph' % node)\n        return list(graph[node])\n\n    def all_downstreams(self, node):\n        \"\"\"Returns a list of all nodes ultimately downstream\n        of the given node in the dependency graph, in\n        topological order.\n\n        Args:\n             node (str): The node whose downstream nodes you want to find.\n\n        Returns:\n            list: A list of nodes that are downstream from the node.\n        \"\"\"\n        nodes = [node]\n        nodes_seen = set()\n        i = 0\n        while i < len(nodes):\n            downstreams = self.downstream(nodes[i])\n            for downstream_node in downstreams:\n                if downstream_node not in nodes_seen:\n                    nodes_seen.add(downstream_node)\n                    nodes.append(downstream_node)\n            i += 1\n        return [\n            node_ for node_ in self.topological_sort() if node_ in nodes_seen\n        ]\n\n    def filter(self, nodes):\n        \"\"\" Returns a new DAG with only the given nodes and their\n        dependencies.\n\n        Args:\n            nodes (list): The nodes you are interested in.\n\n        Returns:\n            :class:`stacker.dag.DAG`: The filtered graph.\n        \"\"\"\n\n        filtered_dag = DAG()\n\n        # Add only the nodes we need.\n        for node in nodes:\n            filtered_dag.add_node_if_not_exists(node)\n            for edge in 
self.all_downstreams(node):\n                filtered_dag.add_node_if_not_exists(edge)\n\n        # Now, rebuild the graph for each node that's present.\n        for node, edges in self.graph.items():\n            if node in filtered_dag.graph:\n                filtered_dag.graph[node] = edges\n\n        return filtered_dag\n\n    def all_leaves(self):\n        \"\"\" Return a list of all leaves (nodes with no downstreams)\n\n        Returns:\n            list: A list of all the nodes with no downstreams.\n        \"\"\"\n        graph = self.graph\n        return [key for key in graph if not graph[key]]\n\n    def from_dict(self, graph_dict):\n        \"\"\" Reset the graph and build it from the passed dictionary.\n\n        The dictionary takes the form of {node_name: [directed edges]}\n\n        Args:\n            graph_dict (dict): The dictionary used to create the graph.\n\n        Raises:\n            TypeError: Raised if the value of items in the dict are not lists.\n        \"\"\"\n\n        self.reset_graph()\n        for new_node in graph_dict:\n            self.add_node(new_node)\n        for ind_node, dep_nodes in graph_dict.items():\n            if not isinstance(dep_nodes, collections.abc.Iterable):\n                raise TypeError('%s: dict values must be lists' % ind_node)\n            for dep_node in dep_nodes:\n                self.add_edge(ind_node, dep_node)\n\n    def reset_graph(self):\n        \"\"\" Restore the graph to an empty state. 
\"\"\"\n        self.graph = OrderedDict()\n\n    def ind_nodes(self):\n        \"\"\" Returns a list of all nodes in the graph with no dependencies.\n\n        Returns:\n            list: A list of all independent nodes.\n        \"\"\"\n        graph = self.graph\n\n        dependent_nodes = set(\n            node for dependents\n            in graph.values() for node in dependents)\n        return [node_ for node_ in graph if node_ not in dependent_nodes]\n\n    def validate(self):\n        \"\"\" Returns (Boolean, message) of whether DAG is valid. \"\"\"\n        if len(self.ind_nodes()) == 0:\n            return (False, 'no independent nodes detected')\n        try:\n            self.topological_sort()\n        except ValueError as e:\n            return (False, str(e))\n        return (True, 'valid')\n\n    def topological_sort(self):\n        \"\"\" Returns a topological ordering of the DAG.\n\n        Returns:\n            list: A list of topologically sorted nodes in the graph.\n\n        Raises:\n            ValueError: Raised if the graph is not acyclic.\n        \"\"\"\n        graph = self.graph\n\n        in_degree = {}\n        for u in graph:\n            in_degree[u] = 0\n\n        for u in graph:\n            for v in graph[u]:\n                in_degree[v] += 1\n\n        queue = deque()\n        for u in in_degree:\n            if in_degree[u] == 0:\n                queue.appendleft(u)\n\n        sorted_graph = []\n        while queue:\n            u = queue.pop()\n            sorted_graph.append(u)\n            for v in sorted(graph[u]):\n                in_degree[v] -= 1\n                if in_degree[v] == 0:\n                    queue.appendleft(v)\n\n        if len(sorted_graph) == len(graph):\n            return sorted_graph\n        else:\n            raise ValueError('graph is not acyclic')\n\n    def size(self):\n        return len(self)\n\n    def __len__(self):\n        return len(self.graph)\n\n\ndef walk(dag, walk_func):\n    return 
dag.walk(walk_func)\n\n\nclass UnlimitedSemaphore(object):\n    \"\"\"UnlimitedSemaphore implements the same interface as threading.Semaphore,\n    but acquire's always succeed.\n    \"\"\"\n\n    def acquire(self, *args):\n        pass\n\n    def release(self):\n        pass\n\n\nclass ThreadedWalker(object):\n    \"\"\"A DAG walker that walks the graph as quickly as the graph topology\n    allows, using threads.\n\n    Args:\n        semaphore (threading.Semaphore): a semaphore object which\n            can be used to control how many steps are executed in parallel.\n    \"\"\"\n\n    def __init__(self, semaphore):\n        self.semaphore = semaphore\n\n    def walk(self, dag, walk_func):\n        \"\"\" Walks each node of the graph, in parallel if it can.\n        The walk_func is only called when the nodes dependencies have been\n        satisfied\n        \"\"\"\n\n        # First, we'll topologically sort all of the nodes, with nodes that\n        # have no dependencies first. We do this to ensure that we don't call\n        # .join on a thread that hasn't yet been started.\n        #\n        # TODO(ejholmes): An alternative would be to ensure that Thread.join\n        # blocks if the thread has not yet been started.\n        nodes = dag.topological_sort()\n        nodes.reverse()\n\n        # This maps a node name to a thread of execution.\n        threads = {}\n\n        # Blocks until all of the given nodes have completed execution (whether\n        # successfully, or errored). Returns True if all nodes returned True.\n        def wait_for(nodes):\n            for node in nodes:\n                thread = threads[node]\n                while thread.is_alive():\n                    threads[node].join(0.5)\n\n        # For each node in the graph, we're going to allocate a thread to\n        # execute. 
The thread will block executing walk_func, until all of the\n        # nodes dependencies have executed.\n        for node in nodes:\n            def fn(n, deps):\n                if deps:\n                    logger.debug(\n                        \"%s waiting for %s to complete\",\n                        n,\n                        \", \".join(deps))\n\n                # Wait for all dependencies to complete.\n                wait_for(deps)\n\n                logger.debug(\"%s starting\", n)\n\n                self.semaphore.acquire()\n                try:\n                    return walk_func(n)\n                finally:\n                    self.semaphore.release()\n\n            deps = dag.all_downstreams(node)\n            threads[node] = Thread(target=fn, args=(node, deps), name=node)\n\n        # Start up all of the threads.\n        for node in nodes:\n            threads[node].start()\n\n        # Wait for all threads to complete executing.\n        wait_for(nodes)\n"
  },
  {
    "path": "stacker/environment.py",
    "content": "\nimport yaml\n\n\nclass DictWithSourceType(dict):\n    \"\"\"An environment dict which keeps track of its source.\n\n    Environment files may be loaded from simple key/value files, or from\n    structured YAML files, and we need to render them using a different\n    strategy based on their source. This class adds a source_type property\n    to a dict which keeps track of whether the source for the dict is\n    yaml or simple.\n    \"\"\"\n    def __init__(self, source_type, *args):\n        dict.__init__(self, args)\n        if source_type not in ['yaml', 'simple']:\n            raise ValueError('source_type must be yaml or simple')\n        self.source_type = source_type\n\n\ndef parse_environment(raw_environment):\n    environment = DictWithSourceType('simple')\n    for line in raw_environment.split('\\n'):\n        line = line.strip()\n        if not line:\n            continue\n\n        if line.startswith('#'):\n            continue\n\n        try:\n            key, value = line.split(':', 1)\n        except ValueError:\n            raise ValueError('Environment must be in key: value format')\n\n        environment[key] = value.strip()\n    return environment\n\n\ndef parse_yaml_environment(raw_environment):\n    environment = DictWithSourceType('yaml')\n    parsed_env = yaml.safe_load(raw_environment)\n\n    if type(parsed_env) != dict:\n        raise ValueError('Environment must be valid YAML')\n    environment.update(parsed_env)\n    return environment\n"
  },
  {
    "path": "stacker/exceptions.py",
    "content": "\n\nclass InvalidConfig(Exception):\n    def __init__(self, errors):\n        super(InvalidConfig, self).__init__(errors)\n        self.errors = errors\n\n\nclass InvalidLookupCombination(Exception):\n\n    def __init__(self, lookup, lookups, value, *args, **kwargs):\n        message = (\n            \"Lookup: \\\"{}\\\" has non-string return value, must be only lookup \"\n            \"present (not {}) in \\\"{}\\\"\"\n        ).format(str(lookup), len(lookups), value)\n        super(InvalidLookupCombination, self).__init__(message,\n                                                       *args,\n                                                       **kwargs)\n\n\nclass InvalidLookupConcatenation(Exception):\n    \"\"\"\n    Intermediary Exception to be converted to InvalidLookupCombination once it\n    bubbles up there\n    \"\"\"\n    def __init__(self, lookup, lookups, *args, **kwargs):\n        self.lookup = lookup\n        self.lookups = lookups\n        super(InvalidLookupConcatenation, self).__init__(\"\", *args, **kwargs)\n\n\nclass UnknownLookupType(Exception):\n\n    def __init__(self, lookup_type, *args, **kwargs):\n        message = \"Unknown lookup type: \\\"{}\\\"\".format(lookup_type)\n        super(UnknownLookupType, self).__init__(message, *args, **kwargs)\n\n\nclass FailedVariableLookup(Exception):\n\n    def __init__(self, variable_name, lookup, error, *args, **kwargs):\n        self.lookup = lookup\n        self.error = error\n        message = \"Couldn't resolve lookup in variable `%s`, \" % variable_name\n        message += \"lookup: ${%s}: \" % repr(lookup)\n        message += \"(%s) %s\" % (error.__class__, error)\n        super(FailedVariableLookup, self).__init__(message, *args, **kwargs)\n\n\nclass FailedLookup(Exception):\n    \"\"\"\n    Intermediary Exception to be converted to FailedVariableLookup once it\n    bubbles up there\n    \"\"\"\n    def __init__(self, lookup, error, *args, **kwargs):\n        self.lookup = 
lookup\n        self.error = error\n        super(FailedLookup, self).__init__(\"Failed lookup\", *args, **kwargs)\n\n\nclass InvalidUserdataPlaceholder(Exception):\n\n    def __init__(self, blueprint_name, exception_message, *args, **kwargs):\n        message = exception_message + \". \"\n        message += \"Could not parse userdata in blueprint \\\"%s\\\". \" % (\n            blueprint_name)\n        message += \"Make sure to escape all $ symbols with a $$.\"\n        super(InvalidUserdataPlaceholder, self).__init__(\n            message, *args, **kwargs)\n\n\nclass UnresolvedVariables(Exception):\n\n    def __init__(self, blueprint_name, *args, **kwargs):\n        message = \"Blueprint: \\\"%s\\\" hasn't resolved it's variables\" % (\n            blueprint_name)\n        super(UnresolvedVariables, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariable(Exception):\n\n    def __init__(self, blueprint_name, variable, *args, **kwargs):\n        message = (\n            \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" hasn't been resolved\" % (\n                variable.name, blueprint_name\n            )\n        )\n        super(UnresolvedVariable, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariableValue(Exception):\n    \"\"\"\n    Intermediary Exception to be converted to UnresolvedVariable once it\n    bubbles up there\n    \"\"\"\n    def __init__(self, lookup, *args, **kwargs):\n        self.lookup = lookup\n        super(UnresolvedVariableValue, self).__init__(\n            \"Unresolved lookup\", *args, **kwargs)\n\n\nclass MissingVariable(Exception):\n\n    def __init__(self, blueprint_name, variable_name, *args, **kwargs):\n        message = \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" is missing\" % (\n            variable_name, blueprint_name)\n        super(MissingVariable, self).__init__(message, *args, **kwargs)\n\n\nclass VariableTypeRequired(Exception):\n\n    def __init__(self, blueprint_name, variable_name, *args, 
**kwargs):\n        message = (\n            \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" does not have a type\" % (\n                variable_name, blueprint_name)\n        )\n        super(VariableTypeRequired, self).__init__(message, *args, **kwargs)\n\n\nclass StackDoesNotExist(Exception):\n\n    def __init__(self, stack_name, *args, **kwargs):\n        message = (\"Stack: \\\"%s\\\" does not exist in outputs or the lookup is \"\n                   \"not available in this stacker run\") % (stack_name,)\n        super(StackDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass MissingParameterException(Exception):\n\n    def __init__(self, parameters, *args, **kwargs):\n        self.parameters = parameters\n        message = \"Missing required cloudformation parameters: %s\" % (\n            \", \".join(parameters),\n        )\n        super(MissingParameterException, self).__init__(message, *args,\n                                                        **kwargs)\n\n\nclass OutputDoesNotExist(Exception):\n\n    def __init__(self, stack_name, output, *args, **kwargs):\n        self.stack_name = stack_name\n        self.output = output\n\n        message = \"Output %s does not exist on stack %s\" % (output,\n                                                            stack_name)\n        super(OutputDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass MissingEnvironment(Exception):\n\n    def __init__(self, key, *args, **kwargs):\n        self.key = key\n        message = \"Environment missing key %s.\" % (key,)\n        super(MissingEnvironment, self).__init__(message, *args, **kwargs)\n\n\nclass WrongEnvironmentType(Exception):\n\n    def __init__(self, key, *args, **kwargs):\n        self.key = key\n        message = \"Environment key %s can't be merged into a string\" % (key,)\n        super(WrongEnvironmentType, self).__init__(message, *args, **kwargs)\n\n\nclass ImproperlyConfigured(Exception):\n\n    def __init__(self, cls, error, 
*args, **kwargs):\n        message = \"Class \\\"%s\\\" is improperly configured: %s\" % (\n            cls,\n            error,\n        )\n        super(ImproperlyConfigured, self).__init__(message, *args, **kwargs)\n\n\nclass StackDidNotChange(Exception):\n\n    \"\"\"Exception raised when there are no changes to be made by the\n    provider.\n    \"\"\"\n\n\nclass CancelExecution(Exception):\n\n    \"\"\"Exception raised when we want to cancel executing the plan.\"\"\"\n\n\nclass ValidatorError(Exception):\n\n    \"\"\"Used for errors raised by custom validators of blueprint variables.\n    \"\"\"\n\n    def __init__(self, variable, validator, value, exception=None):\n        self.variable = variable\n        self.validator = validator\n        self.value = value\n        self.exception = exception\n        self.message = (\"Validator '%s' failed for variable '%s' with value \"\n                        \"'%s'\") % (self.validator, self.variable, self.value)\n\n        if self.exception:\n            self.message += \": %s: %s\" % (self.exception.__class__.__name__,\n                                          str(self.exception))\n\n    def __str__(self):\n        return self.message\n\n\nclass ChangesetDidNotStabilize(Exception):\n    def __init__(self, change_set_id):\n        self.id = change_set_id\n        message = \"Changeset '%s' did not reach a completed state.\" % (\n            change_set_id\n        )\n\n        super(ChangesetDidNotStabilize, self).__init__(message)\n\n\nclass UnhandledChangeSetStatus(Exception):\n    def __init__(self, stack_name, change_set_id, status, status_reason):\n        self.stack_name = stack_name\n        self.id = change_set_id\n        self.status = status\n        self.status_reason = status_reason\n        message = (\n            \"Changeset '%s' on stack '%s' returned an unhandled status \"\n            \"'%s: %s'.\" % (change_set_id, stack_name, status,\n                           status_reason)\n        )\n\n       
 super(UnhandledChangeSetStatus, self).__init__(message)\n\n\nclass UnableToExecuteChangeSet(Exception):\n    def __init__(self, stack_name, change_set_id, execution_status):\n        self.stack_name = stack_name\n        self.id = change_set_id\n        self.execution_status = execution_status\n\n        message = (\"Changeset '%s' on stack '%s' had bad execution status: \"\n                   \"%s\" % (change_set_id, stack_name, execution_status))\n\n        super(UnableToExecuteChangeSet, self).__init__(message)\n\n\nclass StackUpdateBadStatus(Exception):\n\n    def __init__(self, stack_name, stack_status, reason, *args, **kwargs):\n        self.stack_name = stack_name\n        self.stack_status = stack_status\n\n        message = (\"Stack: \\\"%s\\\" cannot be updated nor re-created from state \"\n                   \"%s: %s\" % (stack_name, stack_status, reason))\n        super(StackUpdateBadStatus, self).__init__(message, *args, **kwargs)\n\n\nclass PlanFailed(Exception):\n\n    def __init__(self, failed_steps, *args, **kwargs):\n        self.failed_steps = failed_steps\n\n        step_names = ', '.join(step.name for step in failed_steps)\n        message = \"The following steps failed: %s\" % (step_names,)\n\n        super(PlanFailed, self).__init__(message, *args, **kwargs)\n\n\nclass GraphError(Exception):\n    \"\"\"Raised when the graph is invalid (e.g. acyclic dependencies)\n    \"\"\"\n\n    def __init__(self, exception, stack, dependency):\n        self.stack = stack\n        self.dependency = dependency\n        self.exception = exception\n        message = (\n            \"Error detected when adding '%s' \"\n            \"as a dependency of '%s': %s\"\n        ) % (dependency, stack, str(exception))\n        super(GraphError, self).__init__(message)\n"
  },
  {
    "path": "stacker/hooks/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/hooks/aws_lambda.py",
    "content": "from past.builtins import basestring\nimport os\nimport os.path\nimport stat\nimport logging\nimport hashlib\nfrom io import BytesIO as StringIO\nfrom zipfile import ZipFile, ZIP_DEFLATED\nimport botocore\nimport formic\nfrom troposphere.awslambda import Code\nfrom stacker.session_cache import get_session\n\nfrom stacker.util import (\n    get_config_directory,\n    ensure_s3_bucket,\n)\n\n\n\"\"\"Mask to retrieve only UNIX file permissions from the external attributes\nfield of a ZIP entry.\n\"\"\"\nZIP_PERMS_MASK = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) << 16\n\nlogger = logging.getLogger(__name__)\n\n\ndef _zip_files(files, root):\n    \"\"\"Generates a ZIP file in-memory from a list of files.\n\n    Files will be stored in the archive with relative names, and have their\n    UNIX permissions forced to 755 or 644 (depending on whether they are\n    user-executable in the source filesystem).\n\n    Args:\n        files (list[str]): file names to add to the archive, relative to\n            ``root``.\n        root (str): base directory to retrieve files from.\n\n    Returns:\n        str: content of the ZIP file as a byte string.\n        str: A calculated hash of all the files.\n\n    \"\"\"\n    zip_data = StringIO()\n    with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file:\n        for fname in files:\n            zip_file.write(os.path.join(root, fname), fname)\n\n        # Fix file permissions to avoid any issues - only care whether a file\n        # is executable or not, choosing between modes 755 and 644 accordingly.\n        for zip_entry in zip_file.filelist:\n            perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16\n            if perms & stat.S_IXUSR != 0:\n                new_perms = 0o755\n            else:\n                new_perms = 0o644\n\n            if new_perms != perms:\n                logger.debug(\"lambda: fixing perms: %s: %o => %o\",\n                             zip_entry.filename, perms, new_perms)\n 
               new_attr = ((zip_entry.external_attr & ~ZIP_PERMS_MASK) |\n                            (new_perms << 16))\n                zip_entry.external_attr = new_attr\n\n    contents = zip_data.getvalue()\n    zip_data.close()\n    content_hash = _calculate_hash(files, root)\n\n    return contents, content_hash\n\n\ndef _calculate_hash(files, root):\n    \"\"\" Returns a hash of all of the given files at the given root.\n\n    Args:\n        files (list[str]): file names to include in the hash calculation,\n            relative to ``root``.\n        root (str): base directory to analyze files in.\n\n    Returns:\n        str: A hash of the hashes of the given files.\n    \"\"\"\n    file_hash = hashlib.md5()\n    for fname in sorted(files):\n        f = os.path.join(root, fname)\n        file_hash.update((fname + \"\\0\").encode())\n        with open(f, \"rb\") as fd:\n            for chunk in iter(lambda: fd.read(4096), \"\"):\n                if not chunk:\n                    break\n                file_hash.update(chunk)\n            file_hash.update(\"\\0\".encode())\n\n    return file_hash.hexdigest()\n\n\ndef _calculate_prebuilt_hash(f):\n    file_hash = hashlib.md5()\n    while True:\n        chunk = f.read(4096)\n        if not chunk:\n            break\n\n        file_hash.update(chunk)\n\n    return file_hash.hexdigest()\n\n\ndef _find_files(root, includes, excludes, follow_symlinks):\n    \"\"\"List files inside a directory based on include and exclude rules.\n\n    This is a more advanced version of `glob.glob`, that accepts multiple\n    complex patterns.\n\n    Args:\n        root (str): base directory to list files from.\n        includes (list[str]): inclusion patterns. Only files matching those\n            patterns will be included in the result.\n        excludes (list[str]): exclusion patterns. Files matching those\n            patterns will be excluded from the result. 
Exclusions take\n            precedence over inclusions.\n        follow_symlinks (bool): If true, symlinks will be included in the\n            resulting zip file\n\n    Yields:\n        str: a file name relative to the root.\n\n    Note:\n        Documentation for the patterns can be found at\n        http://www.aviser.asia/formic/doc/index.html\n    \"\"\"\n\n    root = os.path.abspath(root)\n    file_set = formic.FileSet(\n        directory=root, include=includes,\n        exclude=excludes, symlinks=follow_symlinks,\n    )\n\n    for filename in file_set.qualified_files(absolute=False):\n        yield filename\n\n\ndef _zip_from_file_patterns(root, includes, excludes, follow_symlinks):\n    \"\"\"Generates a ZIP file in-memory from file search patterns.\n\n    Args:\n        root (str): base directory to list files from.\n        includes (list[str]): inclusion patterns. Only files  matching those\n            patterns will be included in the result.\n        excludes (list[str]): exclusion patterns. Files matching those\n            patterns will be excluded from the result. Exclusions take\n            precedence over inclusions.\n        follow_symlinks (bool): If true, symlinks will be included in the\n            resulting zip file\n\n    See Also:\n        :func:`_zip_files`, :func:`_find_files`.\n\n    Raises:\n        RuntimeError: when the generated archive would be empty.\n\n    \"\"\"\n    logger.info('lambda: base directory: %s', root)\n\n    files = list(_find_files(root, includes, excludes, follow_symlinks))\n    if not files:\n        raise RuntimeError('Empty list of files for Lambda payload. 
Check '\n                           'your include/exclude options for errors.')\n\n    logger.info('lambda: adding %d files:', len(files))\n\n    for fname in files:\n        logger.debug('lambda: + %s', fname)\n\n    return _zip_files(files, root)\n\n\ndef _head_object(s3_conn, bucket, key):\n    \"\"\"Retrieve information about an object in S3 if it exists.\n\n    Args:\n        s3_conn (botocore.client.S3): S3 connection to use for operations.\n        bucket (str): name of the bucket containing the key.\n        key (str): name of the key to lookup.\n\n    Returns:\n        dict: S3 object information, or None if the object does not exist.\n            See the AWS documentation for explanation of the contents.\n\n    Raises:\n        botocore.exceptions.ClientError: any error from boto3 other than key\n            not found is passed through.\n    \"\"\"\n    try:\n        return s3_conn.head_object(Bucket=bucket, Key=key)\n    except botocore.exceptions.ClientError as e:\n        if e.response['Error']['Code'] == '404':\n            return None\n        else:\n            raise\n\n\ndef _upload_code(s3_conn, bucket, prefix, name, contents, content_hash,\n                 payload_acl):\n    \"\"\"Upload a ZIP file to S3 for use by Lambda.\n\n    The key used for the upload will be unique based on the checksum of the\n    contents. No changes will be made if the contents in S3 already match the\n    expected contents.\n\n    Args:\n        s3_conn (botocore.client.S3): S3 connection to use for operations.\n        bucket (str): name of the bucket to create.\n        prefix (str): S3 prefix to prepend to the constructed key name for\n            the uploaded file\n        name (str): desired name of the Lambda function. 
Will be used to\n            construct a key name for the uploaded file.\n        contents (str): byte string with the content of the file upload.\n        content_hash (str): md5 hash of the contents to be uploaded.\n        payload_acl (str): The canned S3 object ACL to be applied to the\n            uploaded payload\n\n    Returns:\n        troposphere.awslambda.Code: CloudFormation Lambda Code object,\n        pointing to the uploaded payload in S3.\n\n    Raises:\n        botocore.exceptions.ClientError: any error from boto3 is passed\n            through.\n    \"\"\"\n\n    logger.debug('lambda: ZIP hash: %s', content_hash)\n    key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash)\n\n    if _head_object(s3_conn, bucket, key):\n        logger.info('lambda: object %s already exists, not uploading', key)\n    else:\n        logger.info('lambda: uploading object %s', key)\n        s3_conn.put_object(Bucket=bucket, Key=key, Body=contents,\n                           ContentType='application/zip',\n                           ACL=payload_acl)\n\n    return Code(S3Bucket=bucket, S3Key=key)\n\n\ndef _check_pattern_list(patterns, key, default=None):\n    \"\"\"Validates file search patterns from user configuration.\n\n    Acceptable input is a string (which will be converted to a singleton list),\n    a list of strings, or anything falsy (such as None or an empty dictionary).\n    Empty or unset input will be converted to a default.\n\n    Args:\n        patterns: input from user configuration (YAML).\n        key (str): name of the configuration key the input came from,\n            used for error display purposes.\n\n    Keyword Args:\n        default: value to return in case the input is empty or unset.\n\n    Returns:\n        list[str]: validated list of patterns\n\n    Raises:\n        ValueError: if the input is unacceptable.\n    \"\"\"\n    if not patterns:\n        return default\n\n    if isinstance(patterns, basestring):\n        return 
[patterns]\n\n    if isinstance(patterns, list):\n        if all(isinstance(p, basestring) for p in patterns):\n            return patterns\n\n    raise ValueError(\"Invalid file patterns in key '{}': must be a string or \"\n                     'list of strings'.format(key))\n\n\ndef _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options, path,\n                         payload_acl):\n    logging.debug('lambda: using prebuilt ZIP %s', path)\n\n    with open(path, 'rb') as zip_file:\n        # Default to the MD5 of the ZIP if no explicit version is provided\n        version = options.get('version')\n        if not version:\n            version = _calculate_prebuilt_hash(zip_file)\n            zip_file.seek(0)\n\n        return _upload_code(s3_conn, bucket, prefix, name, zip_file,\n                            version, payload_acl)\n\n\ndef _build_and_upload_zip(s3_conn, bucket, prefix, name, options, path,\n                          follow_symlinks, payload_acl):\n    includes = _check_pattern_list(options.get('include'), 'include',\n                                   default=['**'])\n    excludes = _check_pattern_list(options.get('exclude'), 'exclude',\n                                   default=[])\n\n    # os.path.join will ignore other parameters if the right-most one is an\n    # absolute path, which is exactly what we want.\n    zip_contents, zip_version = _zip_from_file_patterns(\n        path, includes, excludes, follow_symlinks)\n    version = options.get('version') or zip_version\n\n    return _upload_code(s3_conn, bucket, prefix, name, zip_contents, version,\n                        payload_acl)\n\n\ndef _upload_function(s3_conn, bucket, prefix, name, options, follow_symlinks,\n                     payload_acl):\n    \"\"\"Builds a Lambda payload from user configuration and uploads it to S3.\n\n    Args:\n        s3_conn (botocore.client.S3): S3 connection to use for operations.\n        bucket (str): name of the bucket to upload to.\n        prefix 
(str): S3 prefix to prepend to the constructed key name for\n            the uploaded file\n        name (str): desired name of the Lambda function. Will be used to\n            construct a key name for the uploaded file.\n        options (dict): configuration for how to build the payload.\n            Consists of the following keys:\n                * path:\n                    base path to retrieve files from (mandatory). If not\n                    absolute, it will be interpreted as relative to the stacker\n                    configuration file directory, then converted to an absolute\n                    path. See :func:`stacker.util.get_config_directory`.\n                * include:\n                    file patterns to include in the payload (optional).\n                * exclude:\n                    file patterns to exclude from the payload (optional).\n        follow_symlinks  (bool): If true, symlinks will be included in the\n            resulting zip file\n        payload_acl (str): The canned S3 object ACL to be applied to the\n            uploaded payload\n\n    Returns:\n        troposphere.awslambda.Code: CloudFormation AWS Lambda Code object,\n        pointing to the uploaded object in S3.\n\n    Raises:\n        ValueError: if any configuration is invalid.\n        botocore.exceptions.ClientError: any error from boto3 is passed\n            through.\n    \"\"\"\n    try:\n        path = os.path.expanduser(options['path'])\n    except KeyError as e:\n        raise ValueError(\n            \"missing required property '{}' in function '{}'\".format(\n                e.args[0], name))\n\n    if not os.path.isabs(path):\n        path = os.path.abspath(os.path.join(get_config_directory(), path))\n\n    if path.endswith('.zip') and os.path.isfile(path):\n        logging.debug('lambda: using prebuilt zip: %s', path)\n\n        return _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options,\n                                    path, payload_acl)\n    
elif os.path.isdir(path):\n        logging.debug('lambda: building from directory: %s', path)\n\n        return _build_and_upload_zip(s3_conn, bucket, prefix, name, options,\n                                     path, follow_symlinks, payload_acl)\n    else:\n        raise ValueError('Path must be an existing ZIP file or directory')\n\n\ndef select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,\n                         provider_region):\n    \"\"\"Returns the appropriate region to use when uploading functions.\n\n    Select the appropriate region for the bucket where lambdas are uploaded in.\n\n    Args:\n        custom_bucket (str, None): The custom bucket name provided by the\n            `bucket` kwarg of the aws_lambda hook, if provided.\n        hook_region (str): The contents of the `bucket_region` argument to\n            the hook.\n        stacker_bucket_region (str): The contents of the\n            `stacker_bucket_region` global setting.\n        provider_region (str): The region being used by the provider.\n\n    Returns:\n        str: The appropriate region string.\n    \"\"\"\n    region = None\n    if custom_bucket:\n        region = hook_region\n    else:\n        region = stacker_bucket_region\n    return region or provider_region\n\n\ndef upload_lambda_functions(context, provider, **kwargs):\n    \"\"\"Builds Lambda payloads from user configuration and uploads them to S3.\n\n    Constructs ZIP archives containing files matching specified patterns for\n    each function, uploads the result to Amazon S3, then stores objects (of\n    type :class:`troposphere.awslambda.Code`) in the context's hook data,\n    ready to be referenced in blueprints.\n\n    Configuration consists of some global options, and a dictionary of function\n    specifications. 
In the specifications, each key indicates the name of the\n    function (used for generating names for artifacts), and the value\n    determines what files to include in the ZIP (see more details below).\n\n    Payloads are uploaded to either a custom bucket or stacker's default\n    bucket, with the key containing its checksum, to allow repeated uploads to\n    be skipped in subsequent runs.\n\n    The configuration settings are documented as keyword arguments below.\n\n    Keyword Arguments:\n        bucket (str, optional): Custom bucket to upload functions to.\n            Omitting it will cause the default stacker bucket to be used.\n        bucket_region (str, optional): The region in which the bucket should\n            exist. If not given, the region will either be that of the\n            global `stacker_bucket_region` setting, or else the region in\n            use by the provider.\n        prefix (str, optional): S3 key prefix to prepend to the uploaded\n            zip name.\n        follow_symlinks (bool, optional): Will determine if symlinks should\n            be followed and included with the zip artifact. Default: False\n        payload_acl (str, optional): The canned S3 object ACL to be applied to\n            the uploaded payload. Default: private\n        functions (dict):\n            Configurations of desired payloads to build. Keys correspond to\n            function names, used to derive key names for the payload. 
Each\n            value should itself be a dictionary, with the following data:\n\n                * path (str):\n\n                    Base directory or path of a ZIP file of the Lambda function\n                    payload content.\n\n                    If it not an absolute path, it will be considered relative\n                    to the directory containing the stacker configuration file\n                    in use.\n\n                    When a directory, files contained will be added to the\n                    payload ZIP, according to the include and exclude patterns.\n                    If not patterns are provided, all files in the directory\n                    (respecting default exclusions) will be used.\n\n                    Files are stored in the archive with path names relative to\n                    this directory. So, for example, all the files contained\n                    directly under this directory will be added to the root of\n                    the ZIP file.\n\n                    When a ZIP file, it will be uploaded directly to S3.\n                    The hash of whole ZIP file will be used as the version key\n                    by default, which may cause spurious rebuilds when building\n                    the ZIP in different environments. To avoid that,\n                    explicitly provide a `version` option.\n\n                * include(str or list[str], optional):\n\n                    Pattern or list of patterns of files to include in the\n                    payload. If provided, only files that match these\n                    patterns will be included in the payload.\n\n                    Omitting it is equivalent to accepting all files that are\n                    not otherwise excluded.\n\n                * exclude(str or list[str], optional):\n                    Pattern or list of patterns of files to exclude from the\n                    payload. 
If provided, any files that match will be ignored,\n                    regardless of whether they match an inclusion pattern.\n\n                    Commonly ignored files are already excluded by default,\n                    such as ``.git``, ``.svn``, ``__pycache__``, ``*.pyc``,\n                    ``.gitignore``, etc.\n\n                * version(str, optional):\n                    Value to use as the version for the current function, which\n                    will be used to determine if a payload already exists in\n                    S3. The value can be any string, such as a version number\n                    or a git commit.\n\n                    Note that when setting this value, to re-build/re-upload a\n                    payload you must change the version manually.\n\n    Examples:\n        .. Hook configuration.\n        .. code-block:: yaml\n\n            pre_build:\n              - path: stacker.hooks.aws_lambda.upload_lambda_functions\n                required: true\n                enabled: true\n                data_key: lambda\n                args:\n                  bucket: custom-bucket\n                  follow_symlinks: true\n                  prefix: cloudformation-custom-resources/\n                  payload_acl: authenticated-read\n                  functions:\n                    MyFunction:\n                      path: ./lambda_functions\n                      include:\n                        - '*.py'\n                        - '*.txt'\n                      exclude:\n                        - '*.pyc'\n                        - test/\n\n        .. Blueprint usage\n        .. 
code-block:: python\n\n            from troposphere.awslambda import Function\n            from stacker.blueprints.base import Blueprint\n\n            class LambdaBlueprint(Blueprint):\n                def create_template(self):\n                    code = self.context.hook_data['lambda']['MyFunction']\n\n                    self.template.add_resource(\n                        Function(\n                            'MyFunction',\n                            Code=code,\n                            Handler='my_function.handler',\n                            Role='...',\n                            Runtime='python2.7'\n                        )\n                    )\n    \"\"\"\n    custom_bucket = kwargs.get('bucket')\n    if not custom_bucket:\n        bucket_name = context.bucket_name\n        logger.info(\"lambda: using default bucket from stacker: %s\",\n                    bucket_name)\n    else:\n        bucket_name = custom_bucket\n        logger.info(\"lambda: using custom bucket: %s\", bucket_name)\n\n    custom_bucket_region = kwargs.get(\"bucket_region\")\n    if not custom_bucket and custom_bucket_region:\n        raise ValueError(\"Cannot specify `bucket_region` without specifying \"\n                         \"`bucket`.\")\n\n    bucket_region = select_bucket_region(\n        custom_bucket,\n        custom_bucket_region,\n        context.config.stacker_bucket_region,\n        provider.region\n    )\n\n    # Check if we should walk / follow symlinks\n    follow_symlinks = kwargs.get('follow_symlinks', False)\n    if not isinstance(follow_symlinks, bool):\n        raise ValueError('follow_symlinks option must be a boolean')\n\n    # Check for S3 object acl. 
Valid values from:\n    # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl\n    payload_acl = kwargs.get('payload_acl', 'private')\n\n    # Always use the global client for s3\n    session = get_session(bucket_region)\n    s3_client = session.client('s3')\n\n    ensure_s3_bucket(s3_client, bucket_name, bucket_region)\n\n    prefix = kwargs.get('prefix', '')\n\n    results = {}\n    for name, options in kwargs['functions'].items():\n        results[name] = _upload_function(s3_client, bucket_name, prefix, name,\n                                         options, follow_symlinks, payload_acl)\n\n    return results\n"
  },
  {
    "path": "stacker/hooks/command.py",
    "content": "\nimport logging\nimport os\nfrom subprocess import PIPE, Popen\n\nfrom stacker.exceptions import ImproperlyConfigured\n\nlogger = logging.getLogger(__name__)\n\n\ndef _devnull():\n    return open(os.devnull, 'wb')\n\n\ndef run_command(provider, context, command, capture=False, interactive=False,\n                ignore_status=False, quiet=False, stdin=None, env=None,\n                **kwargs):\n    \"\"\"Run a custom command as a hook\n\n    Keyword Arguments:\n        command (list or str):\n            Command to run\n        capture (bool, optional):\n            If enabled, capture the command's stdout and stderr, and return\n            them in the hook result. Default: false\n        interactive (bool, optional):\n            If enabled, allow the command to interact with stdin. Otherwise,\n            stdin will be set to the null device. Default: false\n        ignore_status (bool, optional):\n            Don't fail the hook if the command returns a non-zero status.\n            Default: false\n        quiet (bool, optional):\n            Redirect the command's stdout and stderr to the null device,\n            silencing all output. Should not be enaled if `capture` is also\n            enabled. Default: false\n        stdin (str, optional):\n            String to send to the stdin of the command. Implicitly disables\n            `interactive`.\n        env (dict, optional):\n            Dictionary of environment variable overrides for the command\n            context. Will be merged with the current environment.\n        **kwargs:\n            Any other arguments will be forwarded to the `subprocess.Popen`\n            function. Interesting ones include: `cwd` and `shell`.\n\n    Examples:\n        .. 
code-block:: yaml\n\n            pre_build:\n              - path: stacker.hooks.command.run_command\n                required: true\n                enabled: true\n                data_key: copy_env\n                args:\n                  command: ['cp', 'environment.template', 'environment']\n              - path: stacker.hooks.command.run_command\n                required: true\n                enabled: true\n                data_key: get_git_commit\n                args:\n                  command: ['git', 'rev-parse', 'HEAD']\n                  cwd: ./my-git-repo\n                  capture: true\n              - path: stacker.hooks.command.run_command\n                args:\n                  command: `cd $PROJECT_DIR/project; npm install'\n                  env:\n                    PROJECT_DIR: ./my-project\n                  shell: true\n    \"\"\"\n\n    if quiet and capture:\n        raise ImproperlyConfigured(\n            __name__ + '.run_command',\n            'Cannot enable `quiet` and `capture` options simultaneously')\n\n    if quiet:\n        out_err_type = _devnull()\n    elif capture:\n        out_err_type = PIPE\n    else:\n        out_err_type = None\n\n    if interactive:\n        in_type = None\n    elif stdin:\n        in_type = PIPE\n    else:\n        in_type = _devnull()\n\n    if env:\n        full_env = os.environ.copy()\n        full_env.update(env)\n        env = full_env\n\n    logger.info('Running command: %s', command)\n\n    proc = Popen(command, stdin=in_type, stdout=out_err_type,\n                 stderr=out_err_type, env=env, **kwargs)\n    try:\n        out, err = proc.communicate(stdin)\n        status = proc.wait()\n\n        if status == 0 or ignore_status:\n            return {\n                'returncode': proc.returncode,\n                'stdout': out,\n                'stderr': err\n            }\n\n        # Don't print the command line again if we already did earlier\n        if 
logger.isEnabledFor(logging.INFO):\n            logger.warn('Command failed with returncode %d', status)\n        else:\n            logger.warn('Command failed with returncode %d: %s', status,\n                        command)\n\n        return None\n    finally:\n        if proc.returncode is None:\n            proc.kill()\n"
  },
  {
    "path": "stacker/hooks/ecs.py",
    "content": "# A lot of this code exists to deal w/ the broken ECS connect_to_region\n# function, and will be removed once this pull request is accepted:\n#   https://github.com/boto/boto/pull/3143\nfrom past.builtins import basestring\nimport logging\n\nfrom stacker.session_cache import get_session\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_clusters(provider, context, **kwargs):\n    \"\"\"Creates ECS clusters.\n\n    Expects a \"clusters\" argument, which should contain a list of cluster\n    names to create.\n\n    Args:\n        provider (:class:`stacker.providers.base.BaseProvider`): provider\n            instance\n        context (:class:`stacker.context.Context`): context instance\n\n    Returns: boolean for whether or not the hook succeeded.\n\n    \"\"\"\n    conn = get_session(provider.region).client('ecs')\n\n    try:\n        clusters = kwargs[\"clusters\"]\n    except KeyError:\n        logger.error(\"setup_clusters hook missing \\\"clusters\\\" argument\")\n        return False\n\n    if isinstance(clusters, basestring):\n        clusters = [clusters]\n\n    cluster_info = {}\n    for cluster in clusters:\n        logger.debug(\"Creating ECS cluster: %s\", cluster)\n        r = conn.create_cluster(clusterName=cluster)\n        cluster_info[r[\"cluster\"][\"clusterName\"]] = r\n    return {\"clusters\": cluster_info}\n"
  },
  {
    "path": "stacker/hooks/iam.py",
    "content": "import copy\nimport logging\n\nfrom stacker.session_cache import get_session\nfrom botocore.exceptions import ClientError\n\nfrom awacs.aws import Statement, Allow, Policy\nfrom awacs import ecs\nfrom awacs.helpers.trust import get_ecs_assumerole_policy\n\nfrom . import utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_ecs_service_role(provider, context, **kwargs):\n    \"\"\"Used to create the ecsServieRole, which has to be named exactly that\n    currently, so cannot be created via CloudFormation. See:\n\n    http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role\n\n    Args:\n        provider (:class:`stacker.providers.base.BaseProvider`): provider\n            instance\n        context (:class:`stacker.context.Context`): context instance\n\n    Returns: boolean for whether or not the hook succeeded.\n\n    \"\"\"\n    role_name = kwargs.get(\"role_name\", \"ecsServiceRole\")\n    client = get_session(provider.region).client('iam')\n\n    try:\n        client.create_role(\n            RoleName=role_name,\n            AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()\n        )\n    except ClientError as e:\n        if \"already exists\" in str(e):\n            pass\n        else:\n            raise\n\n    policy = Policy(\n        Version='2012-10-17',\n        Statement=[\n            Statement(\n                Effect=Allow,\n                Resource=[\"*\"],\n                Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance,\n                        ecs.DiscoverPollEndpoint, ecs.Poll,\n                        ecs.Action(\"Submit*\")]\n            )\n        ])\n    client.put_role_policy(\n        RoleName=role_name,\n        PolicyName=\"AmazonEC2ContainerServiceRolePolicy\",\n        PolicyDocument=policy.to_json()\n    )\n    return True\n\n\ndef _get_cert_arn_from_response(response):\n    result = copy.deepcopy(response)\n    # GET response returns this extra key\n    
if \"ServerCertificate\" in response:\n        result = response[\"ServerCertificate\"]\n    return result[\"ServerCertificateMetadata\"][\"Arn\"]\n\n\ndef get_cert_contents(kwargs):\n    \"\"\"Builds parameters with server cert file contents.\n\n    Args:\n        kwargs(dict): The keyword args passed to ensure_server_cert_exists,\n            optionally containing the paths to the cert, key and chain files.\n\n    Returns:\n        dict: A dictionary containing the appropriate parameters to supply to\n            upload_server_certificate. An empty dictionary if there is a\n            problem.\n    \"\"\"\n    paths = {\n        \"certificate\": kwargs.get(\"path_to_certificate\"),\n        \"private_key\": kwargs.get(\"path_to_private_key\"),\n        \"chain\": kwargs.get(\"path_to_chain\"),\n    }\n\n    for key, value in paths.items():\n        if value is not None:\n            continue\n\n        path = input(\"Path to %s (skip): \" % (key,))\n        if path == \"skip\" or not path.strip():\n            continue\n\n        paths[key] = path\n\n    parameters = {\n        \"ServerCertificateName\": kwargs.get(\"cert_name\"),\n    }\n\n    for key, path in paths.items():\n        if not path:\n            continue\n\n        # Allow passing of file like object for tests\n        try:\n            contents = path.read()\n        except AttributeError:\n            with open(utils.full_path(path)) as read_file:\n                contents = read_file.read()\n\n        if key == \"certificate\":\n            parameters[\"CertificateBody\"] = contents\n        elif key == \"private_key\":\n            parameters[\"PrivateKey\"] = contents\n        elif key == \"chain\":\n            parameters[\"CertificateChain\"] = contents\n\n    return parameters\n\n\ndef ensure_server_cert_exists(provider, context, **kwargs):\n    client = get_session(provider.region).client('iam')\n    cert_name = kwargs[\"cert_name\"]\n    status = \"unknown\"\n    try:\n        response = 
client.get_server_certificate(\n            ServerCertificateName=cert_name\n        )\n        cert_arn = _get_cert_arn_from_response(response)\n        status = \"exists\"\n        logger.info(\"certificate exists: %s (%s)\", cert_name, cert_arn)\n    except ClientError:\n        if kwargs.get(\"prompt\", True):\n            upload = input(\n                \"Certificate '%s' wasn't found. Upload it now? (yes/no) \" % (\n                    cert_name,\n                )\n            )\n            if upload != \"yes\":\n                return False\n\n        parameters = get_cert_contents(kwargs)\n        if not parameters:\n            return False\n        response = client.upload_server_certificate(**parameters)\n        cert_arn = _get_cert_arn_from_response(response)\n        status = \"uploaded\"\n        logger.info(\n            \"uploaded certificate: %s (%s)\",\n            cert_name,\n            cert_arn,\n        )\n\n    return {\n        \"status\": status,\n        \"cert_name\": cert_name,\n        \"cert_arn\": cert_arn,\n    }\n"
  },
  {
    "path": "stacker/hooks/keypair.py",
    "content": "\nimport logging\nimport os\nimport sys\n\nfrom botocore.exceptions import ClientError\n\nfrom stacker.session_cache import get_session\nfrom stacker.hooks import utils\nfrom stacker.ui import get_raw_input\n\n\nlogger = logging.getLogger(__name__)\n\nKEYPAIR_LOG_MESSAGE = \"keypair: %s (%s) %s\"\n\n\ndef get_existing_key_pair(ec2, keypair_name):\n    resp = ec2.describe_key_pairs()\n    keypair = next((kp for kp in resp[\"KeyPairs\"]\n                    if kp[\"KeyName\"] == keypair_name), None)\n\n    if keypair:\n        logger.info(KEYPAIR_LOG_MESSAGE,\n                    keypair[\"KeyName\"],\n                    keypair[\"KeyFingerprint\"],\n                    \"exists\")\n        return {\n            \"status\": \"exists\",\n            \"key_name\": keypair[\"KeyName\"],\n            \"fingerprint\": keypair[\"KeyFingerprint\"],\n        }\n\n    logger.info(\"keypair: \\\"%s\\\" not found\", keypair_name)\n    return None\n\n\ndef import_key_pair(ec2, keypair_name, public_key_data):\n    keypair = ec2.import_key_pair(\n        KeyName=keypair_name,\n        PublicKeyMaterial=public_key_data.strip(),\n        DryRun=False)\n    logger.info(KEYPAIR_LOG_MESSAGE,\n                keypair[\"KeyName\"],\n                keypair[\"KeyFingerprint\"],\n                \"imported\")\n    return keypair\n\n\ndef read_public_key_file(path):\n    try:\n        with open(utils.full_path(path), 'rb') as f:\n            data = f.read()\n\n        if not data.startswith(b\"ssh-rsa\"):\n            raise ValueError(\n                \"Bad public key data, must be an RSA key in SSH authorized \"\n                \"keys format (beginning with `ssh-rsa`)\")\n\n        return data.strip()\n    except (ValueError, IOError, OSError) as e:\n        logger.error(\"Failed to read public key file {}: {}\".format(\n            path, e))\n        return None\n\n\ndef create_key_pair_from_public_key_file(ec2, keypair_name, public_key_path):\n    public_key_data = 
read_public_key_file(public_key_path)\n    if not public_key_data:\n        return None\n\n    keypair = import_key_pair(ec2, keypair_name, public_key_data)\n    return {\n        \"status\": \"imported\",\n        \"key_name\": keypair[\"KeyName\"],\n        \"fingerprint\": keypair[\"KeyFingerprint\"],\n    }\n\n\ndef create_key_pair_in_ssm(ec2, ssm, keypair_name, parameter_name,\n                           kms_key_id=None):\n    keypair = create_key_pair(ec2, keypair_name)\n    try:\n        kms_key_label = 'default'\n        kms_args = {}\n        if kms_key_id:\n            kms_key_label = kms_key_id\n            kms_args = {\"KeyId\": kms_key_id}\n\n        logger.info(\"Storing generated key in SSM parameter \\\"%s\\\" \"\n                    \"using KMS key \\\"%s\\\"\", parameter_name, kms_key_label)\n\n        ssm.put_parameter(\n            Name=parameter_name,\n            Description=\"SSH private key for KeyPair \\\"{}\\\" \"\n                        \"(generated by Stacker)\".format(keypair_name),\n            Value=keypair[\"KeyMaterial\"],\n            Type=\"SecureString\",\n            Overwrite=False,\n            **kms_args)\n    except ClientError:\n        # Erase the key pair if we failed to store it in SSM, since the\n        # private key will be lost anyway\n\n        logger.exception(\"Failed to store generated key in SSM, deleting \"\n                         \"created key pair as private key will be lost\")\n        ec2.delete_key_pair(KeyName=keypair_name, DryRun=False)\n        return None\n\n    return {\n        \"status\": \"created\",\n        \"key_name\": keypair[\"KeyName\"],\n        \"fingerprint\": keypair[\"KeyFingerprint\"],\n    }\n\n\ndef create_key_pair(ec2, keypair_name):\n    keypair = ec2.create_key_pair(KeyName=keypair_name, DryRun=False)\n    logger.info(KEYPAIR_LOG_MESSAGE,\n                keypair[\"KeyName\"],\n                keypair[\"KeyFingerprint\"],\n                \"created\")\n    return 
keypair\n\n\ndef create_key_pair_local(ec2, keypair_name, dest_dir):\n    dest_dir = utils.full_path(dest_dir)\n    if not os.path.isdir(dest_dir):\n        logger.error(\"\\\"%s\\\" is not a valid directory\", dest_dir)\n        return None\n\n    file_name = \"{0}.pem\".format(keypair_name)\n    key_path = os.path.join(dest_dir, file_name)\n    if os.path.isfile(key_path):\n        # This mimics the old boto2 keypair.save error\n        logger.error(\"\\\"%s\\\" already exists in \\\"%s\\\" directory\",\n                     file_name, dest_dir)\n        return None\n\n    # Open the file before creating the key pair to catch errors early\n    with open(key_path, \"wb\") as f:\n        keypair = create_key_pair(ec2, keypair_name)\n        f.write(keypair[\"KeyMaterial\"].encode(\"ascii\"))\n\n    return {\n        \"status\": \"created\",\n        \"key_name\": keypair[\"KeyName\"],\n        \"fingerprint\": keypair[\"KeyFingerprint\"],\n        \"file_path\": key_path\n    }\n\n\ndef interactive_prompt(keypair_name, ):\n    if not sys.stdin.isatty():\n        return None, None\n\n    try:\n        while True:\n            action = get_raw_input(\n                \"import or create keypair \\\"%s\\\"? 
(import/create/cancel) \" % (\n                    keypair_name,\n                )\n            )\n\n            if action.lower() == \"cancel\":\n                break\n\n            if action.lower() in (\"i\", \"import\"):\n                path = get_raw_input(\"path to keypair file: \")\n                return \"import\", path.strip()\n\n            if action.lower() == \"create\":\n                path = get_raw_input(\"directory to save keyfile: \")\n                return \"create\", path.strip()\n    except (EOFError, KeyboardInterrupt):\n        return None, None\n\n    return None, None\n\n\ndef ensure_keypair_exists(provider, context, **kwargs):\n    \"\"\"Ensure a specific keypair exists within AWS.\n\n    If the key doesn't exist, upload it.\n\n    Args:\n        provider (:class:`stacker.providers.base.BaseProvider`): provider\n            instance\n        context (:class:`stacker.context.Context`): context instance\n        keypair (str): name of the key pair to create\n        ssm_parameter_name (str, optional): path to an SSM store parameter to\n            receive the generated private key, instead of importing it or\n            storing it locally.\n        ssm_key_id (str, optional): ID of a KMS key to encrypt the SSM\n            parameter with. If omitted, the default key will be used.\n        public_key_path (str, optional): path to a public key file to be\n            imported instead of generating a new key. 
Incompatible with the SSM\n            options, as the private key will not be available for storing.\n\n    Returns:\n        In case of failure ``False``, otherwise a dict containing:\n            status (str): one of \"exists\", \"imported\" or \"created\"\n            key_name (str): name of the key pair\n            fingerprint (str): fingerprint of the key pair\n            file_path (str, optional): if a new key was created, the path to\n                the file where the private key was stored\n\n    \"\"\"\n\n    keypair_name = kwargs[\"keypair\"]\n    ssm_parameter_name = kwargs.get(\"ssm_parameter_name\")\n    ssm_key_id = kwargs.get(\"ssm_key_id\")\n    public_key_path = kwargs.get(\"public_key_path\")\n\n    if public_key_path and ssm_parameter_name:\n        logger.error(\"public_key_path and ssm_parameter_name cannot be \"\n                     \"specified at the same time\")\n        return False\n\n    session = get_session(region=provider.region,\n                          profile=kwargs.get(\"profile\"))\n    ec2 = session.client(\"ec2\")\n\n    keypair = get_existing_key_pair(ec2, keypair_name)\n    if keypair:\n        return keypair\n\n    if public_key_path:\n        keypair = create_key_pair_from_public_key_file(\n            ec2, keypair_name, public_key_path)\n\n    elif ssm_parameter_name:\n        ssm = session.client('ssm')\n        keypair = create_key_pair_in_ssm(\n            ec2, ssm, keypair_name, ssm_parameter_name, ssm_key_id)\n    else:\n        action, path = interactive_prompt(keypair_name)\n        if action == \"import\":\n            keypair = create_key_pair_from_public_key_file(\n                ec2, keypair_name, path)\n        elif action == \"create\":\n            keypair = create_key_pair_local(ec2, keypair_name, path)\n        else:\n            logger.warning(\"no action to find keypair, failing\")\n\n    if not keypair:\n        return False\n\n    return keypair\n"
  },
  {
    "path": "stacker/hooks/route53.py",
    "content": "import logging\n\nfrom stacker.session_cache import get_session\n\nfrom stacker.util import create_route53_zone\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_domain(provider, context, **kwargs):\n    \"\"\"Create a domain within route53.\n\n    Args:\n        provider (:class:`stacker.providers.base.BaseProvider`): provider\n            instance\n        context (:class:`stacker.context.Context`): context instance\n\n    Returns: boolean for whether or not the hook succeeded.\n\n    \"\"\"\n    session = get_session(provider.region)\n    client = session.client(\"route53\")\n    domain = kwargs.get(\"domain\")\n    if not domain:\n        logger.error(\"domain argument or BaseDomain variable not provided.\")\n        return False\n    zone_id = create_route53_zone(client, domain)\n    return {\"domain\": domain, \"zone_id\": zone_id}\n"
  },
  {
    "path": "stacker/hooks/utils.py",
    "content": "import os\nimport sys\nimport collections.abc\nimport logging\n\nfrom stacker.util import load_object_from_string\n\nlogger = logging.getLogger(__name__)\n\n\ndef full_path(path):\n    return os.path.abspath(os.path.expanduser(path))\n\n\ndef handle_hooks(stage, hooks, provider, context):\n    \"\"\" Used to handle pre/post_build hooks.\n\n    These are pieces of code that we want to run before/after the builder\n    builds the stacks.\n\n    Args:\n        stage (string): The current stage (pre_run, post_run, etc).\n        hooks (list): A list of :class:`stacker.config.Hook` containing the\n            hooks to execute.\n        provider (:class:`stacker.provider.base.BaseProvider`): The provider\n            the current stack is using.\n        context (:class:`stacker.context.Context`): The current stacker\n            context.\n    \"\"\"\n    if not hooks:\n        logger.debug(\"No %s hooks defined.\", stage)\n        return\n\n    hook_paths = []\n    for i, h in enumerate(hooks):\n        try:\n            hook_paths.append(h.path)\n        except KeyError:\n            raise ValueError(\"%s hook #%d missing path.\" % (stage, i))\n\n    logger.info(\"Executing %s hooks: %s\", stage, \", \".join(hook_paths))\n    for hook in hooks:\n        data_key = hook.data_key\n        required = hook.required\n        kwargs = hook.args or {}\n        enabled = hook.enabled\n        if not enabled:\n            logger.debug(\"hook with method %s is disabled, skipping\",\n                         hook.path)\n            continue\n        try:\n            method = load_object_from_string(hook.path)\n        except (AttributeError, ImportError):\n            logger.exception(\"Unable to load method at %s:\", hook.path)\n            if required:\n                raise\n            continue\n        try:\n            result = method(context=context, provider=provider, **kwargs)\n        except Exception:\n            logger.exception(\"Method %s threw an 
exception:\", hook.path)\n            if required:\n                raise\n            continue\n        if not result:\n            if required:\n                logger.error(\"Required hook %s failed. Return value: %s\",\n                             hook.path, result)\n                sys.exit(1)\n            logger.warning(\"Non-required hook %s failed. Return value: %s\",\n                           hook.path, result)\n        else:\n            if isinstance(result, collections.abc.Mapping):\n                if data_key:\n                    logger.debug(\"Adding result for hook %s to context in \"\n                                 \"data_key %s.\", hook.path, data_key)\n                    context.set_hook_data(data_key, result)\n                else:\n                    logger.debug(\"Hook %s returned result data, but no data \"\n                                 \"key set, so ignoring.\", hook.path)\n"
  },
  {
    "path": "stacker/logger/__init__.py",
    "content": "import sys\nimport logging\n\nDEBUG_FORMAT = (\"[%(asctime)s] %(levelname)s %(threadName)s \"\n                \"%(name)s:%(lineno)d(%(funcName)s): %(message)s\")\nINFO_FORMAT = (\"[%(asctime)s] %(message)s\")\nCOLOR_FORMAT = (\"[%(asctime)s] \\033[%(color)sm%(message)s\\033[39m\")\n\nISO_8601 = \"%Y-%m-%dT%H:%M:%S\"\n\n\nclass ColorFormatter(logging.Formatter):\n    \"\"\" Handles colorizing formatted log messages if color provided. \"\"\"\n    def format(self, record):\n        if 'color' not in record.__dict__:\n            record.__dict__['color'] = 37\n        msg = super(ColorFormatter, self).format(record)\n        return msg\n\n\ndef setup_logging(verbosity, formats=None):\n    \"\"\"\n    Configure a proper logger based on verbosity and optional log formats.\n\n    Args:\n        verbosity (int): 0, 1, 2\n        formats (dict): Optional, looks for `info`, `color`, and `debug` keys\n                        which may override the associated default log formats.\n    \"\"\"\n    if formats is None:\n        formats = {}\n\n    log_level = logging.INFO\n\n    log_format = formats.get(\"info\", INFO_FORMAT)\n\n    if sys.stdout.isatty():\n        log_format = formats.get(\"color\", COLOR_FORMAT)\n\n    if verbosity > 0:\n        log_level = logging.DEBUG\n        log_format = formats.get(\"debug\", DEBUG_FORMAT)\n\n    if verbosity < 2:\n        logging.getLogger(\"botocore\").setLevel(logging.CRITICAL)\n\n    hdlr = logging.StreamHandler()\n    hdlr.setFormatter(ColorFormatter(log_format, ISO_8601))\n    logging.root.addHandler(hdlr)\n    logging.root.setLevel(log_level)\n"
  },
  {
    "path": "stacker/lookups/__init__.py",
    "content": "from past.builtins import basestring\nfrom collections import namedtuple\nimport re\n\n# export resolve_lookups at this level\nfrom .registry import resolve_lookups  # NOQA\nfrom .registry import register_lookup_handler  # NOQA\n\n# TODO: we can remove the optionality of of the type in a later release, it\n#       is only included to allow for an error to be thrown while people are\n#       converting their configuration files to 1.0\n\nLOOKUP_REGEX = re.compile(\"\"\"\n\\$\\{                                   # opening brace for the lookup\n((?P<type>[._\\-a-zA-Z0-9]*(?=\\s))      # type of lookup, must be followed by a\n                                       # space\n?\\s*                                   # any number of spaces separating the\n                                       # type from the input\n(?P<input>[@\\+\\/,\\.\\?_\\-a-zA-Z0-9\\:\\s=\\[\\]\\*]+) # the input value to the lookup\n)\\}                                    # closing brace of the lookup\n\"\"\", re.VERBOSE)\n\nLookup = namedtuple(\"Lookup\", (\"type\", \"input\", \"raw\"))\n\n\ndef extract_lookups_from_string(value):\n    \"\"\"Extract any lookups within a string.\n\n    Args:\n        value (str): string value we're extracting lookups from\n\n    Returns:\n        list: list of :class:`stacker.lookups.Lookup` if any\n\n    \"\"\"\n    lookups = set()\n    for match in LOOKUP_REGEX.finditer(value):\n        groupdict = match.groupdict()\n        raw = match.groups()[0]\n        lookup_type = groupdict[\"type\"]\n        lookup_input = groupdict[\"input\"]\n        lookups.add(Lookup(lookup_type, lookup_input, raw))\n    return lookups\n\n\ndef extract_lookups(value):\n    \"\"\"Recursively extracts any stack lookups within the data structure.\n\n    Args:\n        value (one of str, list, dict): a structure that contains lookups to\n            output values\n\n    Returns:\n        list: list of lookups if any\n\n    \"\"\"\n    lookups = set()\n    if isinstance(value, 
basestring):\n        lookups = lookups.union(extract_lookups_from_string(value))\n    elif isinstance(value, list):\n        for v in value:\n            lookups = lookups.union(extract_lookups(v))\n    elif isinstance(value, dict):\n        for v in value.values():\n            lookups = lookups.union(extract_lookups(v))\n    return lookups\n"
  },
  {
    "path": "stacker/lookups/handlers/__init__.py",
    "content": "\n\nclass LookupHandler(object):\n    @classmethod\n    def handle(cls, value, context, provider):\n        \"\"\"\n        Perform the actual lookup\n\n        :param value: Parameter(s) given to this lookup\n        :type value: str\n        :param context:\n        :param provider:\n        :return: Looked-up value\n        :rtype: str\n        \"\"\"\n        raise NotImplementedError()\n\n    @classmethod\n    def dependencies(cls, lookup_data):\n        \"\"\"\n        Calculate any dependencies required to perform this lookup.\n\n        Note that lookup_data may not be (completely) resolved at this time.\n\n        :param lookup_data: Parameter(s) given to this lookup\n        :type lookup_data VariableValue\n        :return: Set of stack names (str) this lookup depends on\n        :rtype: set\n        \"\"\"\n        del lookup_data  # unused in this implementation\n        return set()\n"
  },
  {
    "path": "stacker/lookups/handlers/ami.py",
    "content": "from stacker.session_cache import get_session\nimport re\nimport operator\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\nTYPE_NAME = \"ami\"\n\n\nclass ImageNotFound(Exception):\n    def __init__(self, search_string):\n        self.search_string = search_string\n        message = (\"Unable to find ec2 image with search string: {}\").format(\n            search_string\n        )\n        super(ImageNotFound, self).__init__(message)\n\n\nclass AmiLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, provider, **kwargs):\n        \"\"\"Fetch the most recent AMI Id using a filter\n    \n        For example:\n    \n            ${ami [<region>@]owners:self,account,amazon name_regex:serverX-[0-9]+ architecture:x64,i386}\n    \n            The above fetches the most recent AMI where owner is self\n            account or amazon and the ami name matches the regex described,\n            the architecture will be either x64 or i386\n    \n            You can also optionally specify the region in which to perform the\n            AMI lookup.\n    \n            Valid arguments:\n    \n            owners (comma delimited) REQUIRED ONCE:\n                aws_account_id | amazon | self\n    \n            name_regex (a regex) REQUIRED ONCE:\n                e.g. 
my-ubuntu-server-[0-9]+\n    \n            executable_users (comma delimited) OPTIONAL ONCE:\n                aws_account_id | amazon | self\n    \n            Any other arguments specified are sent as filters to the aws api\n            For example, \"architecture:x86_64\" will add a filter\n        \"\"\"  # noqa\n        value = read_value_from_path(value)\n\n        if \"@\" in value:\n            region, value = value.split(\"@\", 1)\n        else:\n            region = provider.region\n\n        ec2 = get_session(region).client('ec2')\n\n        values = {}\n        describe_args = {}\n\n        # now find any other arguments that can be filters\n        matches = re.findall('([0-9a-zA-z_-]+:[^\\s$]+)', value)\n        for match in matches:\n            k, v = match.split(':', 1)\n            values[k] = v\n\n        if not values.get('owners'):\n            raise Exception(\"'owners' value required when using ami\")\n        owners = values.pop('owners').split(',')\n        describe_args[\"Owners\"] = owners\n\n        if not values.get('name_regex'):\n            raise Exception(\"'name_regex' value required when using ami\")\n        name_regex = values.pop('name_regex')\n\n        executable_users = None\n        if values.get('executable_users'):\n            executable_users = values.pop('executable_users').split(',')\n            describe_args[\"ExecutableUsers\"] = executable_users\n\n        filters = []\n        for k, v in values.items():\n            filters.append({\"Name\": k, \"Values\": v.split(',')})\n        describe_args[\"Filters\"] = filters\n\n        result = ec2.describe_images(**describe_args)\n\n        images = sorted(result['Images'],\n                        key=operator.itemgetter('CreationDate'),\n                        reverse=True)\n        for image in images:\n            if re.match(\"^%s$\" % name_regex, image.get('Name', '')):\n                return image['ImageId']\n\n        raise ImageNotFound(value)\n"
  },
  {
    "path": "stacker/lookups/handlers/default.py",
    "content": "\nfrom . import LookupHandler\n\n\nTYPE_NAME = \"default\"\n\n\nclass DefaultLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Use a value from the environment or fall back to a default if the\n           environment doesn't contain the variable.\n\n        Format of value:\n\n            <env_var>::<default value>\n\n        For example:\n\n            Groups: ${default app_security_groups::sg-12345,sg-67890}\n\n        If `app_security_groups` is defined in the environment, its defined\n        value will be returned. Otherwise, `sg-12345,sg-67890` will be the\n        returned value.\n\n        This allows defaults to be set at the config file level.\n        \"\"\"\n\n        try:\n            env_var_name, default_val = value.split(\"::\", 1)\n        except ValueError:\n            raise ValueError(\"Invalid value for default: %s. Must be in \"\n                             \"<env_var>::<default value> format.\" % value)\n\n        if env_var_name in kwargs['context'].environment:\n            return kwargs['context'].environment[env_var_name]\n        else:\n            return default_val\n"
  },
  {
    "path": "stacker/lookups/handlers/dynamodb.py",
    "content": "from botocore.exceptions import ClientError\nimport re\nfrom stacker.session_cache import get_session\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\nTYPE_NAME = 'dynamodb'\n\n\nclass DynamodbLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Get a value from a dynamodb table\n\n        dynamodb field types should be in the following format:\n\n            [<region>:]<tablename>@<primarypartionkey>:<keyvalue>.<keyvalue>...\n\n        Note: The region is optional, and defaults to the environment's\n        `AWS_DEFAULT_REGION` if not specified.\n        \"\"\"\n        value = read_value_from_path(value)\n        table_info = None\n        table_keys = None\n        region = None\n        table_name = None\n        if '@' in value:\n            table_info, table_keys = value.split('@', 1)\n            if ':' in table_info:\n                region, table_name = table_info.split(':', 1)\n            else:\n                table_name = table_info\n        else:\n            raise ValueError('Please make sure to include a tablename')\n\n        if not table_name:\n            raise ValueError('Please make sure to include a dynamodb table '\n                             'name')\n\n        table_lookup, table_keys = table_keys.split(':', 1)\n\n        table_keys = table_keys.split('.')\n\n        key_dict = _lookup_key_parse(table_keys)\n        new_keys = key_dict['new_keys']\n        clean_table_keys = key_dict['clean_table_keys']\n\n        projection_expression = _build_projection_expression(clean_table_keys)\n\n        # lookup the data from dynamodb\n        dynamodb = get_session(region).client('dynamodb')\n        try:\n            response = dynamodb.get_item(\n                TableName=table_name,\n                Key={\n                    table_lookup: new_keys[0]\n                },\n                ProjectionExpression=projection_expression\n            )\n        except 
ClientError as e:\n            if e.response['Error']['Code'] == 'ResourceNotFoundException':\n                raise ValueError(\n                    'Cannot find the dynamodb table: {}'.format(table_name))\n            elif e.response['Error']['Code'] == 'ValidationException':\n                raise ValueError(\n                    'No dynamodb record matched the partition key: '\n                    '{}'.format(table_lookup))\n            else:\n                raise ValueError('The dynamodb lookup {} had an error: '\n                                 '{}'.format(value, e))\n        # find and return the key from the dynamo data returned\n        if 'Item' in response:\n            return (_get_val_from_ddb_data(response['Item'], new_keys[1:]))\n        else:\n            raise ValueError(\n                'The dynamodb record could not be found using the following '\n                'key: {}'.format(new_keys[0]))\n\n\ndef _lookup_key_parse(table_keys):\n    \"\"\"Return the order in which the stacks should be executed.\n\n    Args:\n        dependencies (dict): a dictionary where each key should be the\n            fully qualified name of a stack whose value is an array of\n            fully qualified stack names that the stack depends on. 
This is\n            used to generate the order in which the stacks should be\n            executed.\n\n    Returns:\n        dict: includes a dict of lookup types with data types ('new_keys')\n              and a list of the lookups with without ('clean_table_keys')\n\n    \"\"\"\n    # we need to parse the key lookup passed in\n    regex_matcher = '\\[([^\\]]+)]'\n    valid_dynamodb_datatypes = ['M', 'S', 'N', 'L']\n    clean_table_keys = []\n    new_keys = []\n\n    for key in table_keys:\n        match = re.search(regex_matcher, key)\n        if match:\n            # the datatypes are pulled from the dynamodb docs\n            if match.group(1) in valid_dynamodb_datatypes:\n                match_val = str(match.group(1))\n                key = key.replace(match.group(0), '')\n                new_keys.append({match_val: key})\n                clean_table_keys.append(key)\n            else:\n                raise ValueError(\n                    ('Stacker does not support looking up the datatype: {}')\n                    .format(str(match.group(1))))\n        else:\n            new_keys.append({'S': key})\n            clean_table_keys.append(key)\n    key_dict = {}\n    key_dict['new_keys'] = new_keys\n    key_dict['clean_table_keys'] = clean_table_keys\n\n    return key_dict\n\n\ndef _build_projection_expression(clean_table_keys):\n    \"\"\"Given cleaned up keys, this will return a projection expression for\n    the dynamodb lookup.\n\n    Args:\n        clean_table_keys (dict): keys without the data types attached\n\n    Returns:\n        str: A projection expression for the dynamodb lookup.\n    \"\"\"\n    projection_expression = ''\n    for key in clean_table_keys[:-1]:\n        projection_expression += ('{},').format(key)\n    projection_expression += clean_table_keys[-1]\n    return projection_expression\n\n\ndef _get_val_from_ddb_data(data, keylist):\n    \"\"\"Given a dictionary of dynamodb data (including the datatypes) and a\n    properly structured 
keylist, it will return the value of the lookup\n\n    Args:\n        data (dict): the raw dynamodb data\n            keylist(list): a list of keys to lookup. This must include the\n                datatype\n\n    Returns:\n        various: It returns the value from the dynamodb record, and casts it\n            to a matching python datatype\n    \"\"\"\n    next_type = None\n    # iterate through the keylist to find the matching key/datatype\n    for k in keylist:\n        for k1 in k:\n            if next_type is None:\n                data = data[k[k1]]\n            else:\n                temp_dict = data[next_type]\n                data = temp_dict[k[k1]]\n            next_type = k1\n    if next_type == 'L':\n        # if type is list, convert it to a list and return\n        return _convert_ddb_list_to_list(data[next_type])\n    if next_type == 'N':\n        # TODO: handle various types of 'number' datatypes, (e.g. int, double)\n        # if a number, convert to an int and return\n        return int(data[next_type])\n    # else, just assume its a string and return\n    return str(data[next_type])\n\n\ndef _convert_ddb_list_to_list(conversion_list):\n    \"\"\"Given a dynamodb list, it will return a python list without the dynamodb\n        datatypes\n\n    Args:\n        conversion_list (dict): a dynamodb list which includes the\n            datatypes\n\n    Returns:\n        list: Returns a sanitized list without the dynamodb datatypes\n    \"\"\"\n    ret_list = []\n    for v in conversion_list:\n        for v1 in v:\n            ret_list.append(v[v1])\n    return ret_list\n"
  },
  {
    "path": "stacker/lookups/handlers/envvar.py",
    "content": "import os\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\nTYPE_NAME = \"envvar\"\n\n\nclass EnvvarLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Retrieve an environment variable.\n\n        For example:\n\n            # In stacker we would reference the environment variable like this:\n            conf_key: ${envvar ENV_VAR_NAME}\n\n            You can optionally store the value in a file, ie:\n\n            $ cat envvar_value.txt\n            ENV_VAR_NAME\n\n            and reference it within stacker (NOTE: the path should be relative\n            to the stacker config file):\n\n            conf_key: ${envvar file://envvar_value.txt}\n\n            # Both of the above would resolve to\n            conf_key: ENV_VALUE\n        \"\"\"\n        value = read_value_from_path(value)\n\n        try:\n            return os.environ[value]\n        except KeyError:\n            raise ValueError('EnvVar \"{}\" does not exist'.format(value))\n"
  },
  {
    "path": "stacker/lookups/handlers/file.py",
    "content": "\nimport base64\nimport json\nimport re\nfrom collections.abc import Mapping, Sequence\n\nimport yaml\n\nfrom troposphere import GenericHelperFn, Base64\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\n\nTYPE_NAME = \"file\"\n\n_PARAMETER_PATTERN = re.compile(r'{{([::|\\w]+)}}')\n\n\nclass FileLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Translate a filename into the file contents.\n\n        Fields should use the following format::\n\n            <codec>:<path>\n\n        For example::\n\n            # We've written a file to /some/path:\n            $ echo \"hello there\" > /some/path\n\n            # In stacker we would reference the contents of this file with the\n            # following\n            conf_key: ${file plain:file://some/path}\n\n            # The above would resolve to\n            conf_key: hello there\n\n            # Or, if we used wanted a base64 encoded copy of the file data\n            conf_key: ${file base64:file://some/path}\n\n            # The above would resolve to\n            conf_key: aGVsbG8gdGhlcmUK\n\n        Supported codecs:\n\n            - plain\n\n            - base64 - encode the plain text file at the given path with base64\n              prior to returning it\n\n            - parameterized - the same as plain, but additionally supports\n              referencing template parameters to create userdata that's\n              supplemented with information from the template, as is commonly\n              needed in EC2 UserData. 
For example, given a template parameter\n              of BucketName, the file could contain the following text::\n\n                #!/bin/sh\n                aws s3 sync s3://{{BucketName}}/somepath /somepath\n\n              and then you could use something like this in the YAML config\n              file::\n\n                UserData: ${file parameterized:/path/to/file}\n\n              resulting in the UserData parameter being defined as::\n\n                  { \"Fn::Join\" : [\"\", [\n                      \"#!/bin/sh\\\\naws s3 sync s3://\",\n                      {\"Ref\" : \"BucketName\"},\n                      \"/somepath /somepath\"\n                  ]] }\n\n            - parameterized-b64 - the same as parameterized, with the results\n              additionally wrapped in *{ \"Fn::Base64\": ... }* , which is what\n              you actually need for EC2 UserData\n\n        When using parameterized-b64 for UserData, you should use a variable\n        defined as such:\n\n        .. code-block:: python\n\n            from troposphere import AWSHelperFn\n\n              \"UserData\": {\n                  \"type\": AWSHelperFn,\n                  \"description\": \"Instance user data\",\n                  \"default\": Ref(\"AWS::NoValue\")\n              }\n\n        and then assign UserData in a LaunchConfiguration or Instance to\n        *self.get_variables()[\"UserData\"]*. 
Note that we use AWSHelperFn as the\n        type because the parameterized-b64 codec returns either a Base64 or a\n        GenericHelperFn troposphere object\n        \"\"\"\n\n        try:\n            codec, path = value.split(\":\", 1)\n        except ValueError:\n            raise TypeError(\n                \"File value must be of the format\"\n                \" \\\"<codec>:<path>\\\" (got %s)\" % (value)\n            )\n\n        value = read_value_from_path(path)\n\n        return CODECS[codec](value)\n\n\ndef _parameterize_string(raw):\n    \"\"\"Substitute placeholders in a string using CloudFormation references\n\n    Args:\n        raw (`str`): String to be processed. Byte strings are not\n        supported; decode them before passing them to this function.\n\n    Returns:\n        `str` | :class:`troposphere.GenericHelperFn`: An expression with\n            placeholders from the input replaced, suitable to be passed to\n            Troposphere to be included in CloudFormation template. This will\n            be the input string without modification if no substitutions are\n            found, and a composition of CloudFormation calls otherwise.\n    \"\"\"\n\n    parts = []\n    s_index = 0\n\n    for match in _PARAMETER_PATTERN.finditer(raw):\n        parts.append(raw[s_index:match.start()])\n        parts.append({u\"Ref\": match.group(1)})\n        s_index = match.end()\n\n    if not parts:\n        return GenericHelperFn(raw)\n\n    parts.append(raw[s_index:])\n    return GenericHelperFn({u\"Fn::Join\": [u\"\", parts]})\n\n\ndef parameterized_codec(raw, b64):\n    \"\"\"Parameterize a string, possibly encoding it as Base64 afterwards\n\n    Args:\n        raw (`str` | `bytes`): String to be processed. 
Byte strings will be\n            interpreted as UTF-8.\n        b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation\n            call\n\n    Returns:\n        :class:`troposphere.AWSHelperFn`: output to be included in a\n        CloudFormation template.\n    \"\"\"\n\n    if isinstance(raw, bytes):\n        raw = raw.decode('utf-8')\n\n    result = _parameterize_string(raw)\n\n    # Note, since we want a raw JSON object (not a string) output in the\n    # template, we wrap the result in GenericHelperFn (not needed if we're\n    # using Base64)\n    return Base64(result.data) if b64 else result\n\n\ndef _parameterize_obj(obj):\n    \"\"\"Recursively parameterize all strings contained in an object.\n\n    Parameterizes all values of a Mapping, all items of a Sequence, an\n    unicode string, or pass other objects through unmodified.\n\n    Byte strings will be interpreted as UTF-8.\n\n    Args:\n        obj: data to parameterize\n\n    Return:\n        A parameterized object to be included in a CloudFormation template.\n        Mappings are converted to `dict`, Sequences are converted to  `list`,\n        and strings possibly replaced by compositions of function calls.\n    \"\"\"\n\n    if isinstance(obj, Mapping):\n        return dict((key, _parameterize_obj(value))\n                    for key, value in obj.items())\n    elif isinstance(obj, bytes):\n        return _parameterize_string(obj.decode('utf8'))\n    elif isinstance(obj, str):\n        return _parameterize_string(obj)\n    elif isinstance(obj, Sequence):\n        return list(_parameterize_obj(item) for item in obj)\n    else:\n        return obj\n\n\nclass SafeUnicodeLoader(yaml.SafeLoader):\n    def construct_yaml_str(self, node):\n        return self.construct_scalar(node)\n\n\ndef yaml_codec(raw, parameterized=False):\n    data = yaml.load(raw, Loader=SafeUnicodeLoader)\n    return _parameterize_obj(data) if parameterized else data\n\n\ndef json_codec(raw, parameterized=False):\n    
data = json.loads(raw)\n    return _parameterize_obj(data) if parameterized else data\n\n\nCODECS = {\n    \"plain\": lambda x: x,\n    \"base64\": lambda x: base64.b64encode(x.encode('utf8')).decode('utf-8'),\n    \"parameterized\": lambda x: parameterized_codec(x, False),\n    \"parameterized-b64\": lambda x: parameterized_codec(x, True),\n    \"yaml\": lambda x: yaml_codec(x, parameterized=False),\n    \"yaml-parameterized\": lambda x: yaml_codec(x, parameterized=True),\n    \"json\": lambda x: json_codec(x, parameterized=False),\n    \"json-parameterized\": lambda x: json_codec(x, parameterized=True),\n}\n"
  },
  {
    "path": "stacker/lookups/handlers/hook_data.py",
    "content": "\nfrom . import LookupHandler\n\n\nTYPE_NAME = \"hook_data\"\n\n\nclass HookDataLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, context, **kwargs):\n        \"\"\"Returns the value of a key for a given hook in hook_data.\n\n        Format of value:\n\n            <hook_name>::<key>\n        \"\"\"\n        try:\n            hook_name, key = value.split(\"::\")\n        except ValueError:\n            raise ValueError(\"Invalid value for hook_data: %s. Must be in \"\n                             \"<hook_name>::<key> format.\" % value)\n\n        return context.hook_data[hook_name][key]\n"
  },
  {
    "path": "stacker/lookups/handlers/kms.py",
    "content": "import codecs\nimport sys\nfrom stacker.session_cache import get_session\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\nTYPE_NAME = \"kms\"\n\n\nclass KmsLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Decrypt the specified value with a master key in KMS.\n\n        kmssimple field types should be in the following format:\n\n            [<region>@]<base64 encrypted value>\n\n        Note: The region is optional, and defaults to the environment's\n        `AWS_DEFAULT_REGION` if not specified.\n\n        For example:\n\n            # We use the aws cli to get the encrypted value for the string\n            # \"PASSWORD\" using the master key called \"myStackerKey\" in\n            # us-east-1\n            $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \\\n                    --plaintext \"PASSWORD\" --output text --query CiphertextBlob\n\n            CiD6bC8t2Y<...encrypted blob...>\n\n            # In stacker we would reference the encrypted value like:\n            conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}\n\n            You can optionally store the encrypted value in a file, ie:\n\n            kms_value.txt\n            us-east-1@CiD6bC8t2Y<...encrypted blob...>\n\n            and reference it within stacker (NOTE: the path should be relative\n            to the stacker config file):\n\n            conf_key: ${kms file://kms_value.txt}\n\n            # Both of the above would resolve to\n            conf_key: PASSWORD\n\n        \"\"\"\n        value = read_value_from_path(value)\n\n        region = None\n        if \"@\" in value:\n            region, value = value.split(\"@\", 1)\n\n        kms = get_session(region).client('kms')\n\n        # encode str value as an utf-8 bytestring for use with codecs.decode.\n        value = value.encode('utf-8')\n\n        # get raw but still encrypted value from base64 version.\n        decoded = 
codecs.decode(value, 'base64')\n\n        # check python version in your system\n        python3_or_later = sys.version_info[0] >= 3\n\n        # decrypt and return the plain text raw value.\n        if python3_or_later:\n            return kms.decrypt(CiphertextBlob=decoded)[\"Plaintext\"]\\\n                .decode('utf-8')\n        else:\n            return kms.decrypt(CiphertextBlob=decoded)[\"Plaintext\"]\n"
  },
  {
    "path": "stacker/lookups/handlers/output.py",
    "content": "\nimport re\nfrom collections import namedtuple\n\nfrom . import LookupHandler\n\nTYPE_NAME = \"output\"\n\nOutput = namedtuple(\"Output\", (\"stack_name\", \"output_name\"))\n\n\nclass OutputLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, context=None, **kwargs):\n        \"\"\"Fetch an output from the designated stack.\n\n        Args:\n            value (str): string with the following format:\n                <stack_name>::<output_name>, ie. some-stack::SomeOutput\n            context (:class:`stacker.context.Context`): stacker context\n\n        Returns:\n            str: output from the specified stack\n\n        \"\"\"\n\n        if context is None:\n            raise ValueError('Context is required')\n\n        d = deconstruct(value)\n        stack = context.get_stack(d.stack_name)\n        return stack.outputs[d.output_name]\n\n    @classmethod\n    def dependencies(cls, lookup_data):\n        # try to get the stack name\n        stack_name = ''\n        for data_item in lookup_data:\n            if not data_item.resolved():\n                # We encountered an unresolved substitution.\n                # StackName is calculated dynamically based on context:\n                #  e.g. ${output ${default var::source}::name}\n                # Stop here\n                return set()\n            stack_name = stack_name + data_item.value()\n            match = re.search(r'::', stack_name)\n            if match:\n                stack_name = stack_name[0:match.start()]\n                return {stack_name}\n            # else: try to append the next item\n\n        # We added all lookup_data, and still couldn't find a `::`...\n        # Probably an error...\n        return set()\n\n\ndef deconstruct(value):\n\n    try:\n        stack_name, output_name = value.split(\"::\")\n    except ValueError:\n        raise ValueError(\"output handler requires syntax \"\n                         \"of <stack>::<output>.  
Got: %s\" % value)\n\n    return Output(stack_name, output_name)\n"
  },
  {
    "path": "stacker/lookups/handlers/rxref.py",
    "content": "\"\"\"Handler for fetching outputs from fully qualified stacks.\n\nThe `output` handler supports fetching outputs from stacks created within a\nsigle config file. Sometimes it's useful to fetch outputs from stacks created\noutside of the current config file. `rxref` supports this by not using the\n:class:`stacker.context.Context` to expand the fqn of the stack.\n\nExample:\n\n    conf_value: ${rxref\n        some-relative-fully-qualified-stack-name::SomeOutputName}\n\n\"\"\"\nfrom . import LookupHandler\nfrom .output import deconstruct\n\nTYPE_NAME = \"rxref\"\n\n\nclass RxrefLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, provider=None, context=None, **kwargs):\n        \"\"\"Fetch an output from the designated stack.\n\n        Args:\n            value (str): string with the following format:\n                <stack_name>::<output_name>, ie. some-stack::SomeOutput\n            provider (:class:`stacker.provider.base.BaseProvider`): subclass of\n                the base provider\n            context (:class:`stacker.context.Context`): stacker context\n\n        Returns:\n            str: output from the specified stack\n        \"\"\"\n\n        if provider is None:\n            raise ValueError('Provider is required')\n        if context is None:\n            raise ValueError('Context is required')\n\n        d = deconstruct(value)\n        stack_fqn = context.get_fqn(d.stack_name)\n        output = provider.get_output(stack_fqn, d.output_name)\n        return output\n"
  },
  {
    "path": "stacker/lookups/handlers/split.py",
    "content": "from . import LookupHandler\nTYPE_NAME = \"split\"\n\n\nclass SplitLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Split the supplied string on the given delimiter, providing a list.\n\n        Format of value:\n\n            <delimiter>::<value>\n\n        For example:\n\n            Subnets: ${split ,::subnet-1,subnet-2,subnet-3}\n\n        Would result in the variable `Subnets` getting a list consisting of:\n\n            [\"subnet-1\", \"subnet-2\", \"subnet-3\"]\n\n        This is particularly useful when getting an output from another stack\n        that contains a list. For example, the standard vpc blueprint outputs\n        the list of Subnets it creates as a pair of Outputs (PublicSubnets,\n        PrivateSubnets) that are comma separated, so you could use this in your\n        config:\n\n            Subnets: ${split ,::${output vpc::PrivateSubnets}}\n        \"\"\"\n\n        try:\n            delimiter, text = value.split(\"::\", 1)\n        except ValueError:\n            raise ValueError(\"Invalid value for split: %s. Must be in \"\n                             \"<delimiter>::<text> format.\" % value)\n\n        return text.split(delimiter)\n"
  },
  {
    "path": "stacker/lookups/handlers/ssmstore.py",
    "content": "\nfrom stacker.session_cache import get_session\n\nfrom . import LookupHandler\nfrom ...util import read_value_from_path\n\nTYPE_NAME = \"ssmstore\"\n\n\nclass SsmstoreLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, **kwargs):\n        \"\"\"Retrieve (and decrypt if applicable) a parameter from\n        AWS SSM Parameter Store.\n\n        ssmstore field types should be in the following format:\n\n            [<region>@]ssmkey\n\n        Note: The region is optional, and defaults to us-east-1 if not given.\n\n        For example:\n\n            # In stacker we would reference the encrypted value like:\n            conf_key: ${ssmstore us-east-1@ssmkey}\n\n            You can optionally store the value in a file, ie:\n\n            ssmstore_value.txt\n            us-east-1@ssmkey\n\n            and reference it within stacker (NOTE: the path should be relative\n            to the stacker config file):\n\n            conf_key: ${ssmstore file://ssmstore_value.txt}\n\n            # Both of the above would resolve to\n            conf_key: PASSWORD\n\n        \"\"\"\n        value = read_value_from_path(value)\n\n        region = \"us-east-1\"\n        if \"@\" in value:\n            region, value = value.split(\"@\", 1)\n\n        client = get_session(region).client(\"ssm\")\n        response = client.get_parameters(\n            Names=[\n                value,\n            ],\n            WithDecryption=True\n        )\n        if 'Parameters' in response:\n            return str(response['Parameters'][0]['Value'])\n\n        raise ValueError('SSMKey \"{}\" does not exist in region {}'.format(\n            value, region))\n"
  },
  {
    "path": "stacker/lookups/handlers/xref.py",
    "content": "\"\"\"Handler for fetching outputs from fully qualified stacks.\n\nThe `output` handler supports fetching outputs from stacks created within a\nsigle config file. Sometimes it's useful to fetch outputs from stacks created\noutside of the current config file. `xref` supports this by not using the\n:class:`stacker.context.Context` to expand the fqn of the stack.\n\nExample:\n\n    conf_value: ${xref some-fully-qualified-stack-name::SomeOutputName}\n\n\"\"\"\nfrom . import LookupHandler\nfrom .output import deconstruct\n\nTYPE_NAME = \"xref\"\n\n\nclass XrefLookup(LookupHandler):\n    @classmethod\n    def handle(cls, value, provider=None, **kwargs):\n        \"\"\"Fetch an output from the designated stack.\n\n        Args:\n            value (str): string with the following format:\n                <stack_name>::<output_name>, ie. some-stack::SomeOutput\n            provider (:class:`stacker.provider.base.BaseProvider`): subclass of\n                the base provider\n\n        Returns:\n            str: output from the specified stack\n        \"\"\"\n\n        if provider is None:\n            raise ValueError('Provider is required')\n\n        d = deconstruct(value)\n        stack_fqn = d.stack_name\n        output = provider.get_output(stack_fqn, d.output_name)\n        return output\n"
  },
  {
    "path": "stacker/lookups/registry.py",
    "content": "\nimport logging\nimport warnings\n\nfrom past.builtins import basestring\n\nfrom ..exceptions import UnknownLookupType, FailedVariableLookup\nfrom ..util import load_object_from_string\n\nfrom .handlers import output\nfrom .handlers import kms\nfrom .handlers import xref\nfrom .handlers import ssmstore\nfrom .handlers import dynamodb\nfrom .handlers import envvar\nfrom .handlers import rxref\nfrom .handlers import ami\nfrom .handlers import file as file_handler\nfrom .handlers import split\nfrom .handlers import default\nfrom .handlers import hook_data\n\nLOOKUP_HANDLERS = {}\n\n\ndef register_lookup_handler(lookup_type, handler_or_path):\n    \"\"\"Register a lookup handler.\n\n    Args:\n        lookup_type (str): Name to register the handler under\n        handler_or_path (OneOf[func, str]): a function or a path to a handler\n\n    \"\"\"\n    handler = handler_or_path\n    if isinstance(handler_or_path, basestring):\n        handler = load_object_from_string(handler_or_path)\n    LOOKUP_HANDLERS[lookup_type] = handler\n    if type(handler) != type:\n        # Hander is a not a new-style handler\n        logger = logging.getLogger(__name__)\n        logger.warning(\"Registering lookup `%s`: Please upgrade to use the \"\n                       \"new style of Lookups.\" % lookup_type)\n        warnings.warn(\n            # For some reason, this does not show up...\n            # Leaving it in anyway\n            \"Lookup `%s`: Please upgrade to use the new style of Lookups\"\n            \".\" % lookup_type,\n            DeprecationWarning,\n            stacklevel=2,\n        )\n\n\ndef unregister_lookup_handler(lookup_type):\n    \"\"\"Unregister the specified lookup type.\n\n    This is useful when testing various lookup types if you want to unregister\n    the lookup type after the test runs.\n\n    Args:\n        lookup_type (str): Name of the lookup type to unregister\n\n    \"\"\"\n    LOOKUP_HANDLERS.pop(lookup_type, None)\n\n\ndef 
resolve_lookups(variable, context, provider):\n    \"\"\"Resolve a set of lookups.\n\n    Args:\n        variable (:class:`stacker.variables.Variable`): The variable resolving\n            it's lookups.\n        context (:class:`stacker.context.Context`): stacker context\n        provider (:class:`stacker.provider.base.BaseProvider`): subclass of the\n            base provider\n\n    Returns:\n        dict: dict of Lookup -> resolved value\n\n    \"\"\"\n    resolved_lookups = {}\n    for lookup in variable.lookups:\n        try:\n            handler = LOOKUP_HANDLERS[lookup.type]\n        except KeyError:\n            raise UnknownLookupType(lookup)\n        try:\n            resolved_lookups[lookup] = handler(\n                value=lookup.input,\n                context=context,\n                provider=provider,\n            )\n        except Exception as e:\n            raise FailedVariableLookup(variable.name, lookup, e)\n    return resolved_lookups\n\n\nregister_lookup_handler(output.TYPE_NAME, output.OutputLookup)\nregister_lookup_handler(kms.TYPE_NAME, kms.KmsLookup)\nregister_lookup_handler(ssmstore.TYPE_NAME, ssmstore.SsmstoreLookup)\nregister_lookup_handler(envvar.TYPE_NAME, envvar.EnvvarLookup)\nregister_lookup_handler(xref.TYPE_NAME, xref.XrefLookup)\nregister_lookup_handler(rxref.TYPE_NAME, rxref.RxrefLookup)\nregister_lookup_handler(ami.TYPE_NAME, ami.AmiLookup)\nregister_lookup_handler(file_handler.TYPE_NAME, file_handler.FileLookup)\nregister_lookup_handler(split.TYPE_NAME, split.SplitLookup)\nregister_lookup_handler(default.TYPE_NAME, default.DefaultLookup)\nregister_lookup_handler(hook_data.TYPE_NAME, hook_data.HookDataLookup)\nregister_lookup_handler(dynamodb.TYPE_NAME, dynamodb.DynamodbLookup)\n"
  },
  {
    "path": "stacker/plan.py",
    "content": "import os\nimport logging\nimport time\nimport uuid\nimport threading\n\nfrom .util import stack_template_key_name\nfrom .exceptions import (\n    GraphError,\n    PlanFailed,\n)\nfrom .ui import ui\nfrom .dag import DAG, DAGValidationError, walk\nfrom .status import (\n    FailedStatus,\n    PENDING,\n    SUBMITTED,\n    COMPLETE,\n    SKIPPED,\n    FAILED,\n)\n\nlogger = logging.getLogger(__name__)\n\nCOLOR_CODES = {\n    SUBMITTED.code: 33,  # yellow\n    COMPLETE.code: 32,   # green\n    FAILED.code: 31,     # red\n}\n\n\ndef log_step(step):\n    msg = \"%s: %s\" % (step, step.status.name)\n    if step.status.reason:\n        msg += \" (%s)\" % (step.status.reason)\n    color_code = COLOR_CODES.get(step.status.code, 37)\n    ui.info(msg, extra={\"color\": color_code})\n\n\nclass Step(object):\n    \"\"\"State machine for executing generic actions related to stacks.\n    Args:\n        stack (:class:`stacker.stack.Stack`): the stack associated\n            with this step\n        fn (func): the function to run to execute the step. 
This function will\n            be ran multiple times until the step is \"done\".\n        watch_func (func): an optional function that will be called to \"tail\"\n            the step action.\n    \"\"\"\n\n    def __init__(self, stack, fn, watch_func=None):\n        self.stack = stack\n        self.status = PENDING\n        self.last_updated = time.time()\n        self.fn = fn\n        self.watch_func = watch_func\n\n    def __repr__(self):\n        return \"<stacker.plan.Step:%s>\" % (self.stack.name,)\n\n    def __str__(self):\n        return self.stack.name\n\n    def run(self):\n        \"\"\"Runs this step until it has completed successfully, or been\n        skipped.\n        \"\"\"\n\n        stop_watcher = threading.Event()\n        watcher = None\n        if self.watch_func:\n            watcher = threading.Thread(\n                target=self.watch_func,\n                args=(self.stack, stop_watcher)\n            )\n            watcher.start()\n\n        try:\n            while not self.done:\n                self._run_once()\n        finally:\n            if watcher:\n                stop_watcher.set()\n                watcher.join()\n        return self.ok\n\n    def _run_once(self):\n        try:\n            status = self.fn(self.stack, status=self.status)\n        except Exception as e:\n            logger.exception(e)\n            status = FailedStatus(reason=str(e))\n        self.set_status(status)\n        return status\n\n    @property\n    def name(self):\n        return self.stack.name\n\n    @property\n    def requires(self):\n        return self.stack.requires\n\n    @property\n    def required_by(self):\n        return self.stack.required_by\n\n    @property\n    def completed(self):\n        \"\"\"Returns True if the step is in a COMPLETE state.\"\"\"\n        return self.status == COMPLETE\n\n    @property\n    def skipped(self):\n        \"\"\"Returns True if the step is in a SKIPPED state.\"\"\"\n        return self.status == 
SKIPPED\n\n    @property\n    def failed(self):\n        \"\"\"Returns True if the step is in a FAILED state.\"\"\"\n        return self.status == FAILED\n\n    @property\n    def done(self):\n        \"\"\"Returns True if the step is finished (either COMPLETE, SKIPPED or FAILED)\n        \"\"\"\n        return self.completed or self.skipped or self.failed\n\n    @property\n    def ok(self):\n        \"\"\"Returns True if the step is finished (either COMPLETE or SKIPPED)\"\"\"\n        return self.completed or self.skipped\n\n    @property\n    def submitted(self):\n        \"\"\"Returns True if the step is SUBMITTED, COMPLETE, or SKIPPED.\"\"\"\n        return self.status >= SUBMITTED\n\n    def set_status(self, status):\n        \"\"\"Sets the current step's status.\n        Args:\n            status (:class:`Status <Status>` object): The status to set the\n                step to.\n        \"\"\"\n        if status is not self.status:\n            logger.debug(\"Setting %s state to %s.\", self.stack.name,\n                         status.name)\n            self.status = status\n            self.last_updated = time.time()\n            if self.stack.logging:\n                log_step(self)\n\n    def complete(self):\n        \"\"\"A shortcut for set_status(COMPLETE)\"\"\"\n        self.set_status(COMPLETE)\n\n    def skip(self):\n        \"\"\"A shortcut for set_status(SKIPPED)\"\"\"\n        self.set_status(SKIPPED)\n\n    def submit(self):\n        \"\"\"A shortcut for set_status(SUBMITTED)\"\"\"\n        self.set_status(SUBMITTED)\n\n\ndef build_plan(description, graph,\n               targets=None, reverse=False):\n    \"\"\"Builds a plan from a list of steps.\n    Args:\n        description (str): an arbitrary string to\n            describe the plan.\n        graph (:class:`Graph`): a list of :class:`Graph` to execute.\n        targets (list): an optional list of step names to filter the graph to.\n            If provided, only these steps, and their 
transitive dependencies\n            will be executed. If no targets are specified, every node in the\n            graph will be executed.\n        reverse (bool): If provided, the graph will be walked in reverse order\n            (dependencies last).\n    \"\"\"\n\n    # If we want to execute the plan in reverse (e.g. Destroy), transpose the\n    # graph.\n    if reverse:\n        graph = graph.transposed()\n\n    # If we only want to build a specific target, filter the graph.\n    if targets:\n        nodes = []\n        for target in targets:\n            for k, step in graph.steps.items():\n                if step.name == target:\n                    nodes.append(step.name)\n        graph = graph.filtered(nodes)\n\n    return Plan(description=description, graph=graph)\n\n\ndef build_graph(steps):\n    \"\"\"Builds a graph of steps.\n    Args:\n        steps (list): a list of :class:`Step` objects to execute.\n    \"\"\"\n\n    graph = Graph()\n\n    for step in steps:\n        graph.add_step(step)\n\n    for step in steps:\n        for dep in step.requires:\n            graph.connect(step.name, dep)\n\n        for parent in step.required_by:\n            graph.connect(parent, step.name)\n\n    return graph\n\n\nclass Graph(object):\n    \"\"\"Graph represents a graph of steps.\n\n    The :class:`Graph` helps organize the steps needed to execute a particular\n    action for a set of :class:`stacker.stack.Stack` objects. When initialized\n    with a set of steps, it will first build a Directed Acyclic Graph from the\n    steps and their dependencies.\n\n    Example:\n\n    >>> dag = DAG()\n    >>> a = Step(\"a\", fn=build)\n    >>> b = Step(\"b\", fn=build)\n    >>> dag.add_step(a)\n    >>> dag.add_step(b)\n    >>> dag.connect(a, b)\n\n    Args:\n        steps (list): an optional list of :class:`Step` objects to execute.\n        dag (:class:`stacker.dag.DAG`): an optional :class:`stacker.dag.DAG`\n            object. 
If one is not provided, a new one will be initialized.\n    \"\"\"\n\n    def __init__(self, steps=None, dag=None):\n        self.steps = steps or {}\n        self.dag = dag or DAG()\n\n    def add_step(self, step):\n        self.steps[step.name] = step\n        self.dag.add_node(step.name)\n\n    def connect(self, step, dep):\n        try:\n            self.dag.add_edge(step, dep)\n        except KeyError as e:\n            raise GraphError(e, step, dep)\n        except DAGValidationError as e:\n            raise GraphError(e, step, dep)\n\n    def transitive_reduction(self):\n        self.dag.transitive_reduction()\n\n    def walk(self, walker, walk_func):\n        def fn(step_name):\n            step = self.steps[step_name]\n            return walk_func(step)\n\n        return walker(self.dag, fn)\n\n    def downstream(self, step_name):\n        \"\"\"Returns the direct dependencies of the given step\"\"\"\n        return list(self.steps[dep] for dep in self.dag.downstream(step_name))\n\n    def transposed(self):\n        \"\"\"Returns a \"transposed\" version of this graph. 
Useful for walking in\n        reverse.\n        \"\"\"\n        return Graph(steps=self.steps, dag=self.dag.transpose())\n\n    def filtered(self, step_names):\n        \"\"\"Returns a \"filtered\" version of this graph.\"\"\"\n        return Graph(steps=self.steps, dag=self.dag.filter(step_names))\n\n    def topological_sort(self):\n        nodes = self.dag.topological_sort()\n        return [self.steps[step_name] for step_name in nodes]\n\n    def to_dict(self):\n        return self.dag.graph\n\n\nclass Plan(object):\n    \"\"\"A convenience class for working on a Graph.\n    Args:\n        description (str): description of the plan.\n        graph (:class:`Graph`): a graph of steps.\n    \"\"\"\n\n    def __init__(self, description, graph):\n        self.id = uuid.uuid4()\n        self.description = description\n        self.graph = graph\n\n    def outline(self, level=logging.INFO, message=\"\"):\n        \"\"\"Print an outline of the actions the plan is going to take.\n        The outline will represent the rough ordering of the steps that will be\n        taken.\n        Args:\n            level (int, optional): a valid log level that should be used to log\n                the outline\n            message (str, optional): a message that will be logged to\n                the user after the outline has been logged.\n        \"\"\"\n        steps = 1\n        logger.log(level, \"Plan \\\"%s\\\":\", self.description)\n        for step in self.steps:\n            logger.log(\n                level,\n                \"  - step: %s: target: \\\"%s\\\", action: \\\"%s\\\"\",\n                steps,\n                step.name,\n                step.fn.__name__,\n            )\n            steps += 1\n\n        if message:\n            logger.log(level, message)\n\n    def dump(self, directory, context, provider=None):\n        logger.info(\"Dumping \\\"%s\\\"...\", self.description)\n        directory = os.path.expanduser(directory)\n        if not 
os.path.exists(directory):\n            os.makedirs(directory)\n\n        def walk_func(step):\n            step.stack.resolve(\n                context=context,\n                provider=provider,\n            )\n            blueprint = step.stack.blueprint\n            filename = stack_template_key_name(blueprint)\n            path = os.path.join(directory, filename)\n\n            blueprint_dir = os.path.dirname(path)\n            if not os.path.exists(blueprint_dir):\n                os.makedirs(blueprint_dir)\n\n            logger.info(\"Writing stack \\\"%s\\\" -> %s\", step.name, path)\n            with open(path, \"w\") as f:\n                f.write(blueprint.rendered)\n\n            return True\n\n        return self.graph.walk(walk, walk_func)\n\n    def execute(self, *args, **kwargs):\n        \"\"\"Walks each step in the underlying graph, and raises an exception if\n        any of the steps fail.\n\n        Raises:\n            PlanFailed: Raised if any of the steps fail.\n        \"\"\"\n        self.walk(*args, **kwargs)\n\n        failed_steps = [step for step in self.steps if step.status == FAILED]\n        if failed_steps:\n            raise PlanFailed(failed_steps)\n\n    def walk(self, walker):\n        \"\"\"Walks each step in the underlying graph, in topological order.\n\n        Args:\n            walker (func): a walker function to be passed to\n                :class:`stacker.dag.DAG` to walk the graph.\n        \"\"\"\n\n        def walk_func(step):\n            # Before we execute the step, we need to ensure that it's\n            # transitive dependencies are all in an \"ok\" state. 
If not, we\n            # won't execute this step.\n            for dep in self.graph.downstream(step.name):\n                if not dep.ok:\n                    step.set_status(FailedStatus(\"dependency has failed\"))\n                    return step.ok\n\n            return step.run()\n\n        return self.graph.walk(walker, walk_func)\n\n    @property\n    def steps(self):\n        steps = self.graph.topological_sort()\n        steps.reverse()\n        return steps\n\n    @property\n    def step_names(self):\n        return [step.name for step in self.steps]\n\n    def keys(self):\n        return self.step_names\n"
  },
  {
    "path": "stacker/providers/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/providers/aws/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/providers/aws/default.py",
    "content": "import json\nimport yaml\nimport logging\nimport time\nimport urllib.parse\nimport sys\n\n# thread safe, memoized, provider builder.\nfrom threading import Lock\n\nimport botocore.exceptions\nfrom botocore.config import Config\n\nfrom ..base import BaseProvider\nfrom ... import exceptions\nfrom ...ui import ui\nfrom ...util import parse_cloudformation_template\nfrom stacker.session_cache import get_session\n\nfrom ...actions.diff import (\n    DictValue,\n    diff_parameters,\n    format_params_diff as format_diff\n)\n\nlogger = logging.getLogger(__name__)\n\n# This value controls the maximum number of times a CloudFormation API call\n# will be attempted, after being throttled. This value is used in an\n# exponential backoff algorithm to determine how long the client should wait\n# until attempting a retry:\n#\n#   base * growth_factor ^ (attempts - 1)\n#\n# A value of 10 here would cause the worst case wait time for the last retry to\n# be ~8 mins:\n#\n#   1 * 2 ^ (10 - 1) = 512 seconds\n#\n# References:\n# https://github.com/boto/botocore/blob/1.6.1/botocore/retryhandler.py#L39-L58\n# https://github.com/boto/botocore/blob/1.6.1/botocore/data/_retry.json#L97-L121\nMAX_ATTEMPTS = 10\n\n# Updated this to 15 retries with a 1 second sleep between retries. This is\n# only used when a call to `get_events` fails due to the stack not being\n# found. This is often the case because Cloudformation is taking too long\n# to create the stack. 
15 seconds should, hopefully, be plenty of time for\n# the stack to start showing up in the API.\nMAX_TAIL_RETRIES = 15\nTAIL_RETRY_SLEEP = 1\nGET_EVENTS_SLEEP = 1\nDEFAULT_CAPABILITIES = [\"CAPABILITY_NAMED_IAM\",\n                        \"CAPABILITY_AUTO_EXPAND\"]\n\n\ndef get_cloudformation_client(session):\n    config = Config(\n        retries=dict(\n            max_attempts=MAX_ATTEMPTS\n        )\n    )\n    return session.client('cloudformation', config=config)\n\n\ndef get_output_dict(stack):\n    \"\"\"Returns a dict of key/values for the outputs for a given CF stack.\n\n    Args:\n        stack (dict): The stack object to get\n            outputs from.\n\n    Returns:\n        dict: A dictionary with key/values for each output on the stack.\n\n    \"\"\"\n    outputs = {}\n    if 'Outputs' not in stack:\n        return outputs\n\n    for output in stack['Outputs']:\n        logger.debug(\"    %s %s: %s\", stack['StackName'], output['OutputKey'],\n                     output['OutputValue'])\n        outputs[output['OutputKey']] = output['OutputValue']\n    return outputs\n\n\ndef s3_fallback(fqn, template, parameters, tags, method,\n                change_set_name=None, service_role=None):\n    logger.warn(\"DEPRECATION WARNING: Falling back to legacy \"\n                \"stacker S3 bucket region for templates. 
See \"\n                \"http://stacker.readthedocs.io/en/latest/config.html#s3-bucket\"\n                \" for more information.\")\n    # extra line break on purpose to avoid status updates removing URL\n    # from view\n    logger.warn(\"\\n\")\n    logger.debug(\"Modifying the S3 TemplateURL to point to \"\n                 \"us-east-1 endpoint\")\n    template_url = template.url\n    template_url_parsed = urllib.parse.urlparse(template_url)\n    template_url_parsed = template_url_parsed._replace(\n        netloc=\"s3.amazonaws.com\")\n    template_url = urllib.parse.urlunparse(template_url_parsed)\n    logger.debug(\"Using template_url: %s\", template_url)\n    args = generate_cloudformation_args(\n        fqn, parameters, tags, template,\n        service_role=service_role,\n        change_set_name=get_change_set_name()\n    )\n\n    response = method(**args)\n    return response\n\n\ndef get_change_set_name():\n    \"\"\"Return a valid Change Set Name.\n\n    The name has to satisfy the following regex:\n        [a-zA-Z][-a-zA-Z0-9]*\n\n    And must be unique across all change sets.\n\n    \"\"\"\n    return 'change-set-{}'.format(int(time.time()))\n\n\ndef requires_replacement(changeset):\n    \"\"\"Return the changes within the changeset that require replacement.\n\n    Args:\n        changeset (list): List of changes\n\n    Returns:\n        list: A list of changes that require replacement, if any.\n\n    \"\"\"\n    return [r for r in changeset if r[\"ResourceChange\"].get(\n            \"Replacement\", False) == \"True\"]\n\n\ndef output_full_changeset(full_changeset=None, params_diff=None,\n                          answer=None, fqn=None):\n    \"\"\"Optionally output full changeset.\n\n    Args:\n        full_changeset (list, optional): A list of the full changeset that will\n            be output if the user specifies verbose.\n        params_diff (list, optional): A list of DictValue detailing the\n            differences between two parameters 
returned by\n            :func:`stacker.actions.diff.diff_dictionaries`\n        answer (str, optional): predetermined answer to the prompt if it has\n            already been answered or inferred.\n        fqn (str, optional): fully qualified name of the stack.\n\n    \"\"\"\n    if not answer:\n        answer = ui.ask('Show full change set? [y/n] ').lower()\n    if answer == 'n':\n        return\n    if answer in ['y', 'v']:\n        if fqn:\n            msg = '%s full changeset' % (fqn)\n        else:\n            msg = 'Full changeset'\n        if params_diff:\n            logger.info(\n                \"%s:\\n\\n%s\\n%s\",\n                msg,\n                format_params_diff(params_diff),\n                yaml.safe_dump(full_changeset),\n            )\n        else:\n            logger.info(\n                \"%s:\\n%s\",\n                msg,\n                yaml.safe_dump(full_changeset),\n            )\n        return\n    raise exceptions.CancelExecution\n\n\ndef ask_for_approval(full_changeset=None, params_diff=None,\n                     include_verbose=False, fqn=None):\n    \"\"\"Prompt the user for approval to execute a change set.\n\n    Args:\n        full_changeset (list, optional): A list of the full changeset that will\n            be output if the user specifies verbose.\n        params_diff (list, optional): A list of DictValue detailing the\n            differences between two parameters returned by\n            :func:`stacker.actions.diff.diff_dictionaries`\n        include_verbose (bool, optional): Boolean for whether or not to include\n            the verbose option.\n        fqn (str): fully qualified name of the stack.\n\n    \"\"\"\n    approval_options = ['y', 'n']\n    if include_verbose:\n        approval_options.append('v')\n\n    approve = ui.ask(\"Execute the above changes? 
[{}] \".format(\n        '/'.join(approval_options))).lower()\n\n    if include_verbose and approve == \"v\":\n        output_full_changeset(full_changeset=full_changeset,\n                              params_diff=params_diff, answer=approve, fqn=fqn)\n        return ask_for_approval(fqn=fqn)\n    elif approve != \"y\":\n        raise exceptions.CancelExecution\n\n\ndef output_summary(fqn, action, changeset, params_diff,\n                   replacements_only=False):\n    \"\"\"Log a summary of the changeset.\n\n    Args:\n        fqn (string): fully qualified name of the stack\n        action (string): action to include in the log message\n        changeset (list): AWS changeset\n        params_diff (list): A list of dictionaries detailing the differences\n            between two parameters returned by\n            :func:`stacker.actions.diff.diff_dictionaries`\n        replacements_only (bool, optional): boolean for whether or not we only\n            want to list replacements\n\n    \"\"\"\n    replacements = []\n    changes = []\n    for change in changeset:\n        resource = change['ResourceChange']\n        replacement = resource.get('Replacement') == 'True'\n        summary = '- %s %s (%s)' % (\n            resource['Action'],\n            resource['LogicalResourceId'],\n            resource['ResourceType'],\n        )\n        if replacement:\n            replacements.append(summary)\n        else:\n            changes.append(summary)\n\n    summary = ''\n    if params_diff:\n        summary += summarize_params_diff(params_diff)\n    if replacements:\n        if not replacements_only:\n            summary += 'Replacements:\\n'\n        summary += '\\n'.join(replacements)\n    if changes:\n        if summary:\n            summary += '\\n'\n        summary += 'Changes:\\n%s' % ('\\n'.join(changes))\n    logger.info('%s %s:\\n%s', fqn, action, summary)\n\n\ndef format_params_diff(params_diff):\n    \"\"\" Just a wrapper for 
stacker.actions.diff.format_params_diff\n    for testing purposes.\n    \"\"\"\n    return format_diff(params_diff)\n\n\ndef summarize_params_diff(params_diff):\n    summary = ''\n\n    added_summary = [v.key for v in params_diff\n                     if v.status() is DictValue.ADDED]\n    if added_summary:\n        summary += 'Parameters Added: %s\\n' % ', '.join(added_summary)\n\n    removed_summary = [v.key for v in params_diff\n                       if v.status() is DictValue.REMOVED]\n    if removed_summary:\n        summary += 'Parameters Removed: %s\\n' % ', '.join(removed_summary)\n\n    modified_summary = [v.key for v in params_diff\n                        if v.status() is DictValue.MODIFIED]\n    if modified_summary:\n        summary += 'Parameters Modified: %s\\n' % ', '.join(modified_summary)\n\n    return summary\n\n\ndef wait_till_change_set_complete(cfn_client, change_set_id, try_count=25,\n                                  sleep_time=.5, max_sleep=3):\n    \"\"\" Checks state of a changeset, returning when it is in a complete state.\n\n    Since changesets can take a little bit of time to get into a complete\n    state, we need to poll it until it does so. This will try to get the\n    state `try_count` times, waiting `sleep_time` * 2 seconds between each try\n    up to the `max_sleep` number of seconds. If, after that time, the changeset\n    is not in a complete state it fails. 
These default settings will wait a\n    little over one minute.\n\n    Args:\n        cfn_client (:class:`botocore.client.CloudFormation`): Used to query\n            cloudformation.\n        change_set_id (str): The unique changeset id to wait for.\n        try_count (int): Number of times to try the call.\n        sleep_time (int): Time to sleep between attempts.\n        max_sleep (int): Max time to sleep during backoff\n\n    Return:\n        dict: The response from cloudformation for the describe_change_set\n            call.\n    \"\"\"\n    complete = False\n    response = None\n    for i in range(try_count):\n        response = cfn_client.describe_change_set(\n            ChangeSetName=change_set_id,\n        )\n        complete = response[\"Status\"] in (\"FAILED\", \"CREATE_COMPLETE\")\n        if complete:\n            break\n        if sleep_time == max_sleep:\n            logger.debug(\n                \"Still waiting on changeset for another %s seconds\",\n                sleep_time\n            )\n        time.sleep(sleep_time)\n\n        # exponential backoff with max\n        sleep_time = min(sleep_time * 2, max_sleep)\n    if not complete:\n        raise exceptions.ChangesetDidNotStabilize(change_set_id)\n    return response\n\n\ndef create_change_set(\n    cfn_client,\n    fqn,\n    template,\n    parameters,\n    tags,\n    change_set_type='UPDATE',\n    replacements_only=False,\n    service_role=None,\n    notification_arns=None\n):\n    logger.debug(\"Attempting to create change set of type %s for stack: %s.\",\n                 change_set_type,\n                 fqn)\n    args = generate_cloudformation_args(\n        fqn, parameters, tags, template,\n        change_set_type=change_set_type,\n        service_role=service_role,\n        change_set_name=get_change_set_name(),\n        notification_arns=notification_arns\n    )\n    try:\n        response = cfn_client.create_change_set(**args)\n    except botocore.exceptions.ClientError as e:\n   
     if e.response['Error']['Message'] == ('TemplateURL must reference '\n                                              'a valid S3 object to which '\n                                              'you have access.'):\n            response = s3_fallback(fqn, template, parameters,\n                                   tags, cfn_client.create_change_set,\n                                   get_change_set_name(),\n                                   service_role)\n        else:\n            raise\n    change_set_id = response[\"Id\"]\n    response = wait_till_change_set_complete(\n        cfn_client, change_set_id\n    )\n    status = response[\"Status\"]\n    if status == \"FAILED\":\n        status_reason = response[\"StatusReason\"]\n        if (\"didn't contain changes\" in response[\"StatusReason\"] or\n                \"No updates are to be performed\" in response[\"StatusReason\"]):\n            logger.debug(\n                \"Stack %s did not change, not updating and removing \"\n                \"changeset.\",\n                fqn,\n            )\n            cfn_client.delete_change_set(ChangeSetName=change_set_id)\n            raise exceptions.StackDidNotChange()\n        logger.warn(\n            \"Got strange status, '%s' for changeset '%s'. 
Not deleting for \"\n            \"further investigation - you will need to delete the changeset \"\n            \"manually.\",\n            status, change_set_id\n        )\n        raise exceptions.UnhandledChangeSetStatus(\n            fqn, change_set_id, status, status_reason\n        )\n\n    execution_status = response[\"ExecutionStatus\"]\n    if execution_status != \"AVAILABLE\":\n        raise exceptions.UnableToExecuteChangeSet(fqn,\n                                                  change_set_id,\n                                                  execution_status)\n\n    changes = response[\"Changes\"]\n    return changes, change_set_id\n\n\ndef check_tags_contain(actual, expected):\n    \"\"\"Check if a set of AWS resource tags is contained in another\n\n    Every tag key in `expected` must be present in `actual`, and have the same\n    value. Extra keys in `actual` but not in `expected` are ignored.\n\n    Args:\n        actual (list): Set of tags to be verified, usually from the description\n            of a resource. 
Each item must be a `dict` containing `Key` and\n            `Value` items.\n        expected (list): Set of tags that must be present in `actual` (in the\n            same format).\n    \"\"\"\n\n    actual_set = set((item[\"Key\"], item[\"Value\"]) for item in actual)\n    expected_set = set((item[\"Key\"], item[\"Value\"]) for item in expected)\n\n    return actual_set >= expected_set\n\n\ndef generate_cloudformation_args(\n    stack_name,\n    parameters,\n    tags,\n    template,\n    capabilities=DEFAULT_CAPABILITIES,\n    change_set_type=None,\n    service_role=None,\n    stack_policy=None,\n    change_set_name=None,\n    notification_arns=None,\n):\n    \"\"\"Used to generate the args for common cloudformation API interactions.\n\n    This is used for create_stack/update_stack/create_change_set calls in\n    cloudformation.\n\n    Args:\n        stack_name (str): The fully qualified stack name in Cloudformation.\n        parameters (list): A list of dictionaries that defines the\n            parameter list to be applied to the Cloudformation stack.\n        tags (list): A list of dictionaries that defines the tags\n            that should be applied to the Cloudformation stack.\n        template (:class:`stacker.provider.base.Template`): The template\n            object.\n        capabilities (list, optional): A list of capabilities to use when\n            updating Cloudformation.\n        change_set_type (str, optional): An optional change set type to use\n            with create_change_set.\n        service_role (str, optional): An optional service role to use when\n            interacting with Cloudformation.\n        stack_policy (:class:`stacker.providers.base.Template`): A template\n            object representing a stack policy.\n        change_set_name (str, optional): An optional change set name to use\n            with create_change_set.\n        notification_arns (list, optional): An optional list of SNS topic ARNs\n            to send 
CloudFormation Events to.\n\n    Returns:\n        dict: A dictionary of arguments to be used in the Cloudformation API\n            call.\n    \"\"\"\n    args = {\n        \"StackName\": stack_name,\n        \"Parameters\": parameters,\n        \"Tags\": tags,\n        \"Capabilities\": capabilities,\n    }\n\n    if service_role:\n        args[\"RoleARN\"] = service_role\n\n    if change_set_name:\n        args[\"ChangeSetName\"] = change_set_name\n\n    if notification_arns:\n        args[\"NotificationARNs\"] = notification_arns\n\n    if change_set_type:\n        args[\"ChangeSetType\"] = change_set_type\n\n    if template.url:\n        args[\"TemplateURL\"] = template.url\n    else:\n        args[\"TemplateBody\"] = template.body\n\n    # When creating args for CreateChangeSet, don't include the stack policy,\n    # since ChangeSets don't support it.\n    if not change_set_name:\n        args.update(generate_stack_policy_args(stack_policy))\n\n    return args\n\n\ndef generate_stack_policy_args(stack_policy=None):\n    \"\"\" Converts a stack policy object into keyword args.\n\n    Args:\n        stack_policy (:class:`stacker.providers.base.Template`): A template\n            object representing a stack policy.\n\n    Returns:\n        dict: A dictionary of keyword arguments to be used elsewhere.\n    \"\"\"\n\n    args = {}\n    if stack_policy:\n        logger.debug(\"Stack has a stack policy\")\n        if stack_policy.url:\n            # stacker currently does not support uploading stack policies to\n            # S3, so this will never get hit (unless your implementing S3\n            # uploads, and then you're probably reading this comment about why\n            # the exception below was raised :))\n            #\n            # args[\"StackPolicyURL\"] = stack_policy.url\n            raise NotImplementedError\n        else:\n            args[\"StackPolicyBody\"] = stack_policy.body\n    return args\n\n\nclass ProviderBuilder(object):\n    
\"\"\"Implements a Memoized ProviderBuilder for the AWS provider.\"\"\"\n\n    def __init__(self, region=None, **kwargs):\n        self.region = region\n        self.kwargs = kwargs\n        self.providers = {}\n        self.lock = Lock()\n\n    def build(self, region=None, profile=None):\n        \"\"\"Get or create the provider for the given region and profile.\"\"\"\n\n        with self.lock:\n            # memoization lookup key derived from region + profile.\n            key = \"{}-{}\".format(profile, region)\n            try:\n                # assume provider is in provider dictionary.\n                provider = self.providers[key]\n            except KeyError:\n                msg = \"Missed memoized lookup ({}), creating new AWS Provider.\"\n                logger.debug(msg.format(key))\n                if not region:\n                    region = self.region\n                # memoize the result for later.\n                self.providers[key] = Provider(\n                    get_session(region=region, profile=profile),\n                    region=region,\n                    **self.kwargs\n                )\n                provider = self.providers[key]\n\n        return provider\n\n\nclass Provider(BaseProvider):\n\n    \"\"\"AWS CloudFormation Provider\"\"\"\n\n    DELETED_STATUS = \"DELETE_COMPLETE\"\n\n    IN_PROGRESS_STATUSES = (\n        \"CREATE_IN_PROGRESS\",\n        \"IMPORT_IN_PROGRESS\",\n        \"UPDATE_IN_PROGRESS\",\n        \"DELETE_IN_PROGRESS\",\n        \"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\",\n    )\n\n    ROLLING_BACK_STATUSES = (\n        \"ROLLBACK_IN_PROGRESS\",\n        \"IMPORT_ROLLBACK_IN_PROGRESS\",\n        \"UPDATE_ROLLBACK_IN_PROGRESS\"\n    )\n\n    FAILED_STATUSES = (\n        \"CREATE_FAILED\",\n        \"ROLLBACK_FAILED\",\n        \"ROLLBACK_COMPLETE\",\n        \"DELETE_FAILED\",\n        \"IMPORT_ROLLBACK_FAILED\",\n        \"UPDATE_ROLLBACK_FAILED\",\n        # Note: UPDATE_ROLLBACK_COMPLETE is in both the FAILED 
and COMPLETE\n        # sets, because we need to wait for it when a rollback is triggered,\n        # but still mark the stack as failed.\n        \"UPDATE_ROLLBACK_COMPLETE\",\n    )\n\n    COMPLETE_STATUSES = (\n        \"CREATE_COMPLETE\",\n        \"DELETE_COMPLETE\",\n        \"IMPORT_COMPLETE\",\n        \"UPDATE_COMPLETE\",\n        \"IMPORT_ROLLBACK_COMPLETE\",\n        \"UPDATE_ROLLBACK_COMPLETE\",\n    )\n\n    RECREATION_STATUSES = (\n        \"CREATE_FAILED\",\n        \"ROLLBACK_FAILED\",\n        \"ROLLBACK_COMPLETE\"\n    )\n\n    REVIEW_STATUS = \"REVIEW_IN_PROGRESS\"\n\n    def __init__(self, session, region=None, interactive=False,\n                 replacements_only=False, recreate_failed=False,\n                 service_role=None, **kwargs):\n        self._outputs = {}\n        self.region = region\n        self.cloudformation = get_cloudformation_client(session)\n        self.interactive = interactive\n        # replacements only is only used in interactive mode\n        self.replacements_only = interactive and replacements_only\n        self.recreate_failed = interactive or recreate_failed\n        self.service_role = service_role\n\n    def get_stack(self, stack_name, **kwargs):\n        try:\n            return self.cloudformation.describe_stacks(\n                StackName=stack_name)['Stacks'][0]\n        except botocore.exceptions.ClientError as e:\n            if \"does not exist\" not in str(e):\n                raise\n            raise exceptions.StackDoesNotExist(stack_name)\n\n    def get_stack_status(self, stack, **kwargs):\n        return stack['StackStatus']\n\n    def is_stack_completed(self, stack, **kwargs):\n        return self.get_stack_status(stack) in self.COMPLETE_STATUSES\n\n    def is_stack_in_progress(self, stack, **kwargs):\n        return self.get_stack_status(stack) in self.IN_PROGRESS_STATUSES\n\n    def is_stack_destroyed(self, stack, **kwargs):\n        return self.get_stack_status(stack) == 
self.DELETED_STATUS\n\n    def is_stack_recreatable(self, stack, **kwargs):\n        return self.get_stack_status(stack) in self.RECREATION_STATUSES\n\n    def is_stack_rolling_back(self, stack, **kwargs):\n        return self.get_stack_status(stack) in self.ROLLING_BACK_STATUSES\n\n    def is_stack_failed(self, stack, **kwargs):\n        return self.get_stack_status(stack) in self.FAILED_STATUSES\n\n    def is_stack_in_review(self, stack, **kwargs):\n        return self.get_stack_status(stack) == self.REVIEW_STATUS\n\n    def tail_stack(self, stack, cancel, log_func=None, **kwargs):\n        def _log_func(e):\n            event_args = [e['ResourceStatus'], e['ResourceType'],\n                          e.get('ResourceStatusReason', None)]\n            # filter out any values that are empty\n            event_args = [arg for arg in event_args if arg]\n            template = \" \".join([\"[%s]\"] + [\"%s\" for _ in event_args])\n            logger.info(template, *([stack.fqn] + event_args))\n\n        log_func = log_func or _log_func\n\n        logger.info(\"Tailing stack: %s\", stack.fqn)\n\n        attempts = 0\n        while True:\n            attempts += 1\n            try:\n                self.tail(stack.fqn, cancel=cancel, log_func=log_func,\n                          include_initial=False)\n                break\n            except botocore.exceptions.ClientError as e:\n                if \"does not exist\" in str(e) and attempts < MAX_TAIL_RETRIES:\n                    # stack might be in the process of launching, wait for a\n                    # second and try again\n                    if cancel.wait(TAIL_RETRY_SLEEP):\n                        return\n                    continue\n                else:\n                    raise\n\n    @staticmethod\n    def _tail_print(e):\n        print(\"%s %s %s\" % (e['ResourceStatus'],\n                            e['ResourceType'],\n                            e['EventId']))\n\n    def get_events(self, stack_name, 
chronological=True):\n        \"\"\"Get the events in batches and return in chronological order\"\"\"\n        next_token = None\n        event_list = []\n        while True:\n            if next_token is not None:\n                events = self.cloudformation.describe_stack_events(\n                    StackName=stack_name, NextToken=next_token\n                )\n            else:\n                events = self.cloudformation.describe_stack_events(\n                    StackName=stack_name\n                )\n            event_list.append(events['StackEvents'])\n            next_token = events.get('NextToken', None)\n            if next_token is None:\n                break\n            time.sleep(GET_EVENTS_SLEEP)\n        if chronological:\n            return reversed(sum(event_list, []))\n        else:\n            return sum(event_list, [])\n\n    def get_rollback_status_reason(self, stack_name):\n        \"\"\"Process events and returns latest roll back reason\"\"\"\n        event = next((item for item in self.get_events(stack_name,\n                      False) if item[\"ResourceStatus\"] ==\n                      \"UPDATE_ROLLBACK_IN_PROGRESS\"), None)\n        if event:\n            reason = event[\"ResourceStatusReason\"]\n            return reason\n        else:\n            event = next((item for item in self.get_events(stack_name)\n                          if item[\"ResourceStatus\"] ==\n                          \"ROLLBACK_IN_PROGRESS\"), None)\n            reason = event[\"ResourceStatusReason\"]\n            return reason\n\n    def tail(self, stack_name, cancel, log_func=_tail_print, sleep_time=5,\n             include_initial=True):\n        \"\"\"Show and then tail the event log\"\"\"\n        # First dump the full list of events in chronological order and keep\n        # track of the events we've seen already\n        seen = set()\n        initial_events = self.get_events(stack_name)\n        for e in initial_events:\n            if 
include_initial:\n                log_func(e)\n            seen.add(e['EventId'])\n\n        # Now keep looping through and dump the new events\n        while True:\n            events = self.get_events(stack_name)\n            for e in events:\n                if e['EventId'] not in seen:\n                    log_func(e)\n                    seen.add(e['EventId'])\n            if cancel.wait(sleep_time):\n                return\n\n    def destroy_stack(self, stack, **kwargs):\n        logger.debug(\"Destroying stack: %s\" % (self.get_stack_name(stack)))\n        args = {\"StackName\": self.get_stack_name(stack)}\n        if self.service_role:\n            args[\"RoleARN\"] = self.service_role\n\n        self.cloudformation.delete_stack(**args)\n        return True\n\n    def create_stack(\n        self, fqn, template, parameters, tags,\n        force_change_set=False,\n        stack_policy=None,\n        notification_arns=None,\n        **kwargs\n    ):\n        \"\"\"Create a new Cloudformation stack.\n\n        Args:\n            fqn (str): The fully qualified name of the Cloudformation stack.\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to use when creating the stack.\n            parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation stack.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n            force_change_set (bool): Whether or not to force change set use.\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n            notification_arns (list, optional): An optional list of SNS topic\n                ARNs to send CloudFormation Events to.\n        \"\"\"\n\n        logger.debug(\"Attempting to create stack %s:.\", fqn)\n        logger.debug(\"    
parameters: %s\", parameters)\n        logger.debug(\"    tags: %s\", tags)\n        if template.url:\n            logger.debug(\"    template_url: %s\", template.url)\n        else:\n            logger.debug(\"    no template url, uploading template \"\n                         \"directly.\")\n        if force_change_set:\n            logger.debug(\"force_change_set set to True, creating stack with \"\n                         \"changeset.\")\n            _changes, change_set_id = create_change_set(\n                self.cloudformation, fqn, template, parameters, tags,\n                'CREATE', service_role=self.service_role, **kwargs\n            )\n\n            self.cloudformation.execute_change_set(\n                ChangeSetName=change_set_id,\n            )\n        else:\n            args = generate_cloudformation_args(\n                fqn, parameters, tags, template,\n                service_role=self.service_role,\n                stack_policy=stack_policy,\n                notification_arns=notification_arns\n            )\n\n            try:\n                self.cloudformation.create_stack(**args)\n            except botocore.exceptions.ClientError as e:\n                if e.response['Error']['Message'] == ('TemplateURL must '\n                                                      'reference a valid S3 '\n                                                      'object to which you '\n                                                      'have access.'):\n                    s3_fallback(fqn, template, parameters, tags,\n                                self.cloudformation.create_stack,\n                                self.service_role)\n                else:\n                    raise\n\n    def select_update_method(self, force_interactive, force_change_set):\n        \"\"\"Select the correct update method when updating a stack.\n\n        Args:\n            force_interactive (str): Whether or not to force interactive mode\n                no matter 
what mode the provider is in.\n            force_change_set (bool): Whether or not to force change set use.\n\n        Returns:\n            function: The correct object method to use when updating.\n        \"\"\"\n        if self.interactive or force_interactive:\n            return self.interactive_update_stack\n        elif force_change_set:\n            return self.noninteractive_changeset_update\n        else:\n            return self.default_update_stack\n\n    def prepare_stack_for_update(self, stack, tags):\n        \"\"\"Prepare a stack for updating\n\n        It may involve deleting the stack if is has failed it's initial\n        creation. The deletion is only allowed if:\n          - The stack contains all the tags configured in the current context;\n          - The stack is in one of the statuses considered safe to re-create\n          - ``recreate_failed`` is enabled, due to either being explicitly\n            enabled by the user, or because interactive mode is on.\n\n        Args:\n            stack (dict): a stack object returned from get_stack\n            tags (list): list of expected tags that must be present in the\n                stack if it must be re-created\n\n        Returns:\n            bool: True if the stack can be updated, False if it must be\n                re-created\n        \"\"\"\n\n        if self.is_stack_destroyed(stack):\n            return False\n        elif self.is_stack_completed(stack):\n            return True\n\n        stack_name = self.get_stack_name(stack)\n        stack_status = self.get_stack_status(stack)\n\n        if self.is_stack_in_progress(stack):\n            raise exceptions.StackUpdateBadStatus(\n                stack_name, stack_status,\n                'Update already in-progress')\n\n        if not self.is_stack_recreatable(stack):\n            raise exceptions.StackUpdateBadStatus(\n                stack_name, stack_status,\n                'Unsupported state for re-creation')\n\n        if not 
self.recreate_failed:\n            raise exceptions.StackUpdateBadStatus(\n                stack_name, stack_status,\n                'Stack re-creation is disabled. Run stacker again with the '\n                '--recreate-failed option to force it to be deleted and '\n                'created from scratch.')\n\n        stack_tags = self.get_stack_tags(stack)\n        if not check_tags_contain(stack_tags, tags):\n            raise exceptions.StackUpdateBadStatus(\n                stack_name, stack_status,\n                'Tags differ from current configuration, possibly not created '\n                'with stacker')\n\n        if self.interactive:\n            sys.stdout.write(\n                'The \\\"%s\\\" stack is in a failed state (%s).\\n'\n                'It cannot be updated, but it can be deleted and re-created.\\n'\n                'All its current resources will IRREVERSIBLY DESTROYED.\\n'\n                'Proceed carefully!\\n\\n' % (stack_name, stack_status))\n            sys.stdout.flush()\n\n            ask_for_approval(include_verbose=False, fqn=stack_name)\n\n        logger.warn('Destroying stack \\\"%s\\\" for re-creation', stack_name)\n        self.destroy_stack(stack)\n\n        return False\n\n    def update_stack(self, fqn, template, old_parameters, parameters, tags,\n                     force_interactive=False, force_change_set=False,\n                     stack_policy=None, **kwargs):\n        \"\"\"Update a Cloudformation stack.\n\n        Args:\n            fqn (str): The fully qualified name of the Cloudformation stack.\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to use when updating the stack.\n            old_parameters (list): A list of dictionaries that defines the\n                parameter list on the existing Cloudformation stack.\n            parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation 
stack.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n            force_interactive (bool): A flag that indicates whether the update\n                should be interactive. If set to True, interactive mode will\n                be used no matter if the provider is in interactive mode or\n                not. False will follow the behavior of the provider.\n            force_change_set (bool): A flag that indicates whether the update\n                must be executed with a change set.\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n        \"\"\"\n        logger.debug(\"Attempting to update stack %s:\", fqn)\n        logger.debug(\"    parameters: %s\", parameters)\n        logger.debug(\"    tags: %s\", tags)\n        if template.url:\n            logger.debug(\"    template_url: %s\", template.url)\n        else:\n            logger.debug(\"    no template url, uploading template directly.\")\n        update_method = self.select_update_method(force_interactive,\n                                                  force_change_set)\n\n        return update_method(fqn, template, old_parameters, parameters,\n                             stack_policy=stack_policy, tags=tags, **kwargs)\n\n    def deal_with_changeset_stack_policy(self, fqn, stack_policy):\n        \"\"\" Set a stack policy when using changesets.\n\n        ChangeSets don't allow you to set stack policies in the same call to\n        update them. 
This sets it before executing the changeset if the\n        stack policy is passed in.\n\n        Args:\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n        \"\"\"\n        if stack_policy:\n            kwargs = generate_stack_policy_args(stack_policy)\n            kwargs[\"StackName\"] = fqn\n            logger.debug(\"Setting stack policy on %s.\", fqn)\n            self.cloudformation.set_stack_policy(**kwargs)\n\n    def interactive_update_stack(self, fqn, template, old_parameters,\n                                 parameters, stack_policy, tags,\n                                 **kwargs):\n        \"\"\"Update a Cloudformation stack in interactive mode.\n\n        Args:\n            fqn (str): The fully qualified name of the Cloudformation stack.\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to use when updating the stack.\n            old_parameters (list): A list of dictionaries that defines the\n                parameter list on the existing Cloudformation stack.\n            parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation stack.\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n        \"\"\"\n        logger.debug(\"Using interactive provider mode for %s.\", fqn)\n        changes, change_set_id = create_change_set(\n            self.cloudformation, fqn, template, parameters, tags,\n            'UPDATE', service_role=self.service_role, **kwargs\n        )\n        old_parameters_as_dict = self.params_as_dict(old_parameters)\n        new_parameters_as_dict = self.params_as_dict(\n            [x\n             if 
'ParameterValue' in x\n             else {'ParameterKey': x['ParameterKey'],\n                   'ParameterValue': old_parameters_as_dict[x['ParameterKey']]}\n             for x in parameters]\n        )\n        params_diff = diff_parameters(\n            old_parameters_as_dict,\n            new_parameters_as_dict)\n\n        action = \"replacements\" if self.replacements_only else \"changes\"\n        full_changeset = changes\n        if self.replacements_only:\n            changes = requires_replacement(changes)\n\n        if changes or params_diff:\n            ui.lock()\n            try:\n                output_summary(fqn, action, changes, params_diff,\n                               replacements_only=self.replacements_only)\n                ask_for_approval(\n                    full_changeset=full_changeset,\n                    params_diff=params_diff,\n                    include_verbose=True,\n                    fqn=fqn,\n                )\n            finally:\n                ui.unlock()\n\n        self.deal_with_changeset_stack_policy(fqn, stack_policy)\n\n        self.cloudformation.execute_change_set(\n            ChangeSetName=change_set_id,\n        )\n\n    def noninteractive_changeset_update(self, fqn, template, old_parameters,\n                                        parameters, stack_policy, tags,\n                                        **kwargs):\n        \"\"\"Update a Cloudformation stack using a change set.\n\n        This is required for stacks with a defined Transform (i.e. 
SAM), as the\n        default update_stack API cannot be used with them.\n\n        Args:\n            fqn (str): The fully qualified name of the Cloudformation stack.\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to use when updating the stack.\n            old_parameters (list): A list of dictionaries that defines the\n                parameter list on the existing Cloudformation stack.\n            parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation stack.\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n        \"\"\"\n        logger.debug(\"Using noninterative changeset provider mode \"\n                     \"for %s.\", fqn)\n        _changes, change_set_id = create_change_set(\n            self.cloudformation, fqn, template, parameters, tags,\n            'UPDATE', service_role=self.service_role, **kwargs\n        )\n\n        self.deal_with_changeset_stack_policy(fqn, stack_policy)\n\n        self.cloudformation.execute_change_set(\n            ChangeSetName=change_set_id,\n        )\n\n    def default_update_stack(self, fqn, template, old_parameters, parameters,\n                             tags, stack_policy=None,\n                             notification_arns=[], **kwargs):\n        \"\"\"Update a Cloudformation stack in default mode.\n\n        Args:\n            fqn (str): The fully qualified name of the Cloudformation stack.\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to use when updating the stack.\n            old_parameters (list): A list of dictionaries that defines the\n                parameter list on the existing Cloudformation stack.\n 
           parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation stack.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n            stack_policy (:class:`stacker.providers.base.Template`): A template\n                object representing a stack policy.\n        \"\"\"\n\n        logger.debug(\"Using default provider mode for %s.\", fqn)\n        args = generate_cloudformation_args(\n            fqn, parameters, tags, template,\n            service_role=self.service_role,\n            stack_policy=stack_policy,\n            notification_arns=notification_arns\n        )\n\n        try:\n            self.cloudformation.update_stack(**args)\n        except botocore.exceptions.ClientError as e:\n            if \"No updates are to be performed.\" in str(e):\n                logger.debug(\n                    \"Stack %s did not change, not updating.\",\n                    fqn,\n                )\n                raise exceptions.StackDidNotChange\n            elif e.response['Error']['Message'] == ('TemplateURL must '\n                                                    'reference a valid '\n                                                    'S3 object to which '\n                                                    'you have access.'):\n                s3_fallback(fqn, template, parameters, tags,\n                            self.cloudformation.update_stack,\n                            self.service_role)\n            else:\n                raise\n\n    def get_stack_name(self, stack, **kwargs):\n        return stack['StackName']\n\n    def get_stack_tags(self, stack, **kwargs):\n        return stack['Tags']\n\n    def get_outputs(self, stack_name, *args, **kwargs):\n        if stack_name not in self._outputs:\n            stack = self.get_stack(stack_name)\n            self._outputs[stack_name] = 
get_output_dict(stack)\n        return self._outputs[stack_name]\n\n    def get_output_dict(self, stack):\n        return get_output_dict(stack)\n\n    def get_stack_info(self, stack):\n        \"\"\" Get the template and parameters of the stack currently in AWS\n\n        Returns [ template, parameters ]\n        \"\"\"\n        stack_name = stack['StackId']\n\n        try:\n            template = self.cloudformation.get_template(\n                StackName=stack_name)['TemplateBody']\n        except botocore.exceptions.ClientError as e:\n            if \"does not exist\" not in str(e):\n                raise\n            raise exceptions.StackDoesNotExist(stack_name)\n\n        parameters = self.params_as_dict(stack.get('Parameters', []))\n\n        if isinstance(template, str):  # handle yaml templates\n            template = parse_cloudformation_template(template)\n\n        return [json.dumps(template), parameters]\n\n    def get_stack_changes(self, stack, template, parameters,\n                          tags, **kwargs):\n        \"\"\"Get the changes from a ChangeSet.\n\n        Args:\n            stack (:class:`stacker.stack.Stack`): the stack to get changes\n            template (:class:`stacker.providers.base.Template`): A Template\n                object to compaired to.\n            parameters (list): A list of dictionaries that defines the\n                parameter list to be applied to the Cloudformation stack.\n            tags (list): A list of dictionaries that defines the tags\n                that should be applied to the Cloudformation stack.\n\n        Returns:\n            dict: Stack outputs with inferred changes.\n\n        \"\"\"\n        try:\n            stack_details = self.get_stack(stack.fqn)\n            # handling for orphaned changeset temp stacks\n            if self.get_stack_status(\n                    stack_details) == self.REVIEW_STATUS:\n                raise exceptions.StackDoesNotExist(stack.fqn)\n            _old_template, 
old_params = self.get_stack_info(\n                stack_details\n            )\n            old_template = parse_cloudformation_template(_old_template)\n            change_type = 'UPDATE'\n        except exceptions.StackDoesNotExist:\n            old_params = {}\n            old_template = {}\n            change_type = 'CREATE'\n\n        changes, change_set_id = create_change_set(\n            self.cloudformation, stack.fqn, template, parameters, tags,\n            change_type, service_role=self.service_role, **kwargs\n        )\n        new_parameters_as_dict = self.params_as_dict(\n            [x\n             if 'ParameterValue' in x\n             else {'ParameterKey': x['ParameterKey'],\n                   'ParameterValue': old_params[x['ParameterKey']]}\n             for x in parameters]\n        )\n        params_diff = diff_parameters(old_params, new_parameters_as_dict)\n\n        if changes or params_diff:\n            ui.lock()\n            try:\n                if self.interactive:\n                    output_summary(stack.fqn, 'changes', changes,\n                                   params_diff,\n                                   replacements_only=self.replacements_only)\n                    output_full_changeset(full_changeset=changes,\n                                          params_diff=params_diff,\n                                          fqn=stack.fqn)\n                else:\n                    output_full_changeset(full_changeset=changes,\n                                          params_diff=params_diff,\n                                          answer='y', fqn=stack.fqn)\n            finally:\n                ui.unlock()\n\n        self.cloudformation.delete_change_set(\n            ChangeSetName=change_set_id\n        )\n\n        # ensure current stack outputs are loaded\n        self.get_outputs(stack.fqn)\n\n        # infer which outputs may have changed\n        refs_to_invalidate = []\n        for change in changes:\n            
resc_change = change.get('ResourceChange', {})\n            if resc_change.get('Type') == 'Add':\n                continue  # we don't care about anything new\n            # scope of changes that can invalidate a change\n            if resc_change and (resc_change.get('Replacement') == 'True' or\n                                'Properties' in resc_change['Scope']):\n                logger.debug('%s added to invalidation list for %s',\n                             resc_change['LogicalResourceId'], stack.fqn)\n                refs_to_invalidate.append(resc_change['LogicalResourceId'])\n\n        # invalidate cached outputs with inferred changes\n        for output, props in old_template.get('Outputs', {}).items():\n            if any(r in str(props['Value']) for r in refs_to_invalidate):\n                self._outputs[stack.fqn].pop(output)\n                logger.debug('Removed %s from the outputs of %s',\n                             output, stack.fqn)\n\n        # push values for new + invalidated outputs to outputs\n        for output_name, output_params in \\\n                stack.blueprint.get_output_definitions().items():\n            if output_name not in self._outputs[stack.fqn]:\n                self._outputs[stack.fqn][output_name] = (\n                    '<inferred-change: {}.{}={}>'.format(\n                        stack.fqn, output_name,\n                        str(output_params['Value'])\n                    )\n                )\n\n        # when creating a changeset for a new stack, CFN creates a temporary\n        # stack with a status of REVIEW_IN_PROGRESS. 
this is only removed if\n        # the changeset is executed or it is manually deleted.\n        if change_type == 'CREATE':\n            try:\n                temp_stack = self.get_stack(stack.fqn)\n                if self.is_stack_in_review(temp_stack):\n                    logger.debug('Removing temporary stack that is created '\n                                 'with a ChangeSet of type \"CREATE\"')\n                    self.destroy_stack(temp_stack)\n            except exceptions.StackDoesNotExist:\n                # not an issue if the stack was already cleaned up\n                logger.debug('Stack does not exist: %s', stack.fqn)\n\n        return self.get_outputs(stack.fqn)\n\n    @staticmethod\n    def params_as_dict(parameters_list):\n        parameters = dict()\n        for p in parameters_list:\n            parameters[p['ParameterKey']] = p['ParameterValue']\n        return parameters\n"
  },
  {
    "path": "stacker/providers/base.py",
    "content": "\n\ndef not_implemented(method):\n    raise NotImplementedError(\"Provider does not support '%s' \"\n                              \"method.\" % method)\n\n\nclass BaseProviderBuilder(object):\n    def build(self, region=None):\n        not_implemented(\"build\")\n\n\nclass BaseProvider(object):\n    def get_stack(self, stack_name, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"get_stack\")\n\n    def create_stack(self, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"create_stack\")\n\n    def update_stack(self, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"update_stack\")\n\n    def destroy_stack(self, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"destroy_stack\")\n\n    def get_stack_status(self, stack_name, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"get_stack_status\")\n\n    def get_outputs(self, stack_name, *args, **kwargs):\n        # pylint: disable=unused-argument\n        not_implemented(\"get_outputs\")\n\n    def get_output(self, stack_name, output):\n        # pylint: disable=unused-argument\n        return self.get_outputs(stack_name)[output]\n\n\nclass Template(object):\n    \"\"\"A value object that represents a CloudFormation stack template, which\n    could be optionally uploaded to s3.\n\n    Presence of the url attribute indicates that the template was uploaded to\n    S3, and the uploaded template should be used for CreateStack/UpdateStack\n    calls.\n    \"\"\"\n    def __init__(self, url=None, body=None):\n        self.url = url\n        self.body = body\n"
  },
  {
    "path": "stacker/session_cache.py",
    "content": "import boto3\nimport logging\nfrom .ui import ui\n\n\nlogger = logging.getLogger(__name__)\n\n\n# A global credential cache that can be shared among boto3 sessions. This is\n# inherently threadsafe thanks to the GIL:\n# https://docs.python.org/3/glossary.html#term-global-interpreter-lock\ncredential_cache = {}\n\ndefault_profile = None\n\n\ndef get_session(region, profile=None):\n    \"\"\"Creates a boto3 session with a cache\n\n    Args:\n        region (str): The region for the session\n        profile (str): The profile for the session\n\n    Returns:\n        :class:`boto3.session.Session`: A boto3 session with\n            credential caching\n    \"\"\"\n    if profile is None:\n        logger.debug(\"No AWS profile explicitly provided. \"\n                     \"Falling back to default.\")\n        profile = default_profile\n\n    logger.debug(\"Building session using profile \\\"%s\\\" in region \\\"%s\\\"\"\n                 % (profile, region))\n\n    session = boto3.Session(region_name=region, profile_name=profile)\n    c = session._session.get_component('credential_provider')\n    provider = c.get_provider('assume-role')\n    provider.cache = credential_cache\n    provider._prompter = ui.getpass\n    return session\n"
  },
  {
    "path": "stacker/stack.py",
    "content": "import copy\n\nfrom . import util\nfrom .variables import (\n    Variable,\n    resolve_variables,\n)\n\nfrom .blueprints.raw import RawTemplateBlueprint\n\n\ndef _gather_variables(stack_def):\n    \"\"\"Merges context provided & stack defined variables.\n\n    If multiple stacks have a variable with the same name, we can specify the\n    value for a specific stack by passing in the variable name as: `<stack\n    name>::<variable name>`. This variable value will only be used for that\n    specific stack.\n\n    Order of precedence:\n        - context defined stack specific variables (ie.\n            SomeStack::SomeVariable)\n        - context defined non-specific variables\n        - variable defined within the stack definition\n\n    Args:\n        stack_def (dict): The stack definition being worked on.\n\n    Returns:\n        dict: Contains key/value pairs of the collected variables.\n\n    Raises:\n        AttributeError: Raised when the stack definitition contains an invalid\n            attribute. 
Currently only when using old parameters, rather than\n            variables.\n    \"\"\"\n    variable_values = copy.deepcopy(stack_def.variables or {})\n    return [Variable(k, v) for k, v in variable_values.items()]\n\n\nclass Stack(object):\n\n    \"\"\"Represents gathered information about a stack to be built/updated.\n\n    Args:\n        definition (:class:`stacker.config.Stack`): A stack definition.\n        context (:class:`stacker.context.Context`): Current context for\n            building the stack.\n        mappings (dict, optional): Cloudformation mappings passed to the\n            blueprint.\n        locked (bool, optional): Whether or not the stack is locked.\n        force (bool, optional): Whether to force updates on this stack.\n        enabled (bool, optional): Whether this stack is enabled.\n        protected (boot, optional): Whether this stack is protected.\n        notification_arns (list, optional): An optional list of SNS topic ARNs\n            to send CloudFormation Events to.\n\n    \"\"\"\n\n    def __init__(\n        self, definition, context,\n        variables=None,\n        mappings=None,\n        locked=False,\n        force=False,\n        enabled=True,\n        protected=False,\n        notification_arns=None,\n    ):\n        self.logging = True\n        self.name = definition.name\n        self.fqn = context.get_fqn(definition.stack_name or self.name)\n        self.region = definition.region\n        self.profile = definition.profile\n        self.definition = definition\n        self.variables = _gather_variables(definition)\n        self.mappings = mappings\n        self.locked = locked\n        self.force = force\n        self.enabled = enabled\n        self.protected = protected\n        self.context = context\n        self.outputs = None\n        self.in_progress_behavior = definition.in_progress_behavior\n        self.notification_arns = notification_arns\n\n    def __repr__(self):\n        return self.fqn\n\n    
@property\n    def required_by(self):\n        return self.definition.required_by or []\n\n    @property\n    def requires(self):\n        requires = set(self.definition.requires or [])\n\n        # Add any dependencies based on output lookups\n        for variable in self.variables:\n            deps = variable.dependencies()\n            if self.name in deps:\n                message = (\n                    \"Variable %s in stack %s has a circular reference\"\n                ) % (variable.name, self.name)\n                raise ValueError(message)\n            requires.update(deps)\n        return requires\n\n    @property\n    def stack_policy(self):\n        if not hasattr(self, \"_stack_policy\"):\n            self._stack_policy = None\n            if self.definition.stack_policy_path:\n                with open(self.definition.stack_policy_path) as f:\n                    self._stack_policy = f.read()\n\n        return self._stack_policy\n\n    @property\n    def blueprint(self):\n        if not hasattr(self, \"_blueprint\"):\n            kwargs = {}\n            blueprint_class = None\n            if self.definition.class_path:\n                class_path = self.definition.class_path\n                blueprint_class = util.load_object_from_string(class_path)\n                if not hasattr(blueprint_class, \"rendered\"):\n                    raise AttributeError(\"Stack class %s does not have a \"\n                                         \"\\\"rendered\\\" \"\n                                         \"attribute.\" % (class_path,))\n            elif self.definition.template_path:\n                blueprint_class = RawTemplateBlueprint\n                kwargs[\"raw_template_path\"] = self.definition.template_path\n            else:\n                raise AttributeError(\"Stack does not have a defined class or \"\n                                     \"template path.\")\n\n            self._blueprint = blueprint_class(\n                name=self.name,\n     
           context=self.context,\n                mappings=self.mappings,\n                description=self.definition.description,\n                **kwargs\n            )\n        return self._blueprint\n\n    @property\n    def tags(self):\n        \"\"\"Returns the tags that should be set on this stack. Includes both the\n        global tags, as well as any stack specific tags or overrides.\n\n        Returns:\n\n            dict: dictionary of tags\n\n        \"\"\"\n        tags = self.definition.tags or {}\n        return dict(self.context.tags, **tags)\n\n    @property\n    def parameter_values(self):\n        \"\"\"Return all CloudFormation Parameters for the stack.\n\n        CloudFormation Parameters can be specified via Blueprint Variables with\n        a :class:`stacker.blueprints.variables.types.CFNType` `type`.\n\n        Returns:\n            dict: dictionary of <parameter name>: <parameter value>.\n\n        \"\"\"\n        return self.blueprint.get_parameter_values()\n\n    @property\n    def all_parameter_definitions(self):\n        \"\"\"Return a list of all parameters in the blueprint/template.\"\"\"\n        return self.blueprint.get_parameter_definitions()\n\n    @property\n    def required_parameter_definitions(self):\n        \"\"\"Return all the required CloudFormation Parameters for the stack.\"\"\"\n        return self.blueprint.get_required_parameter_definitions()\n\n    def resolve(self, context, provider):\n        \"\"\"Resolve the Stack variables.\n\n        This resolves the Stack variables and then prepares the Blueprint for\n        rendering by passing the resolved variables to the Blueprint.\n\n        Args:\n            context (:class:`stacker.context.Context`): stacker context\n            provider (:class:`stacker.provider.base.BaseProvider`): subclass of\n                the base provider\n\n        \"\"\"\n        resolve_variables(self.variables, context, provider)\n        
self.blueprint.resolve_variables(self.variables)\n\n    def set_outputs(self, outputs):\n        self.outputs = outputs\n"
  },
  {
    "path": "stacker/status.py",
    "content": "import operator\n\n\nclass Status(object):\n    def __init__(self, name, code, reason=None):\n        self.name = name\n        self.code = code\n        self.reason = reason or getattr(self, \"reason\", None)\n\n    def _comparison(self, operator, other):\n        if hasattr(other, \"code\"):\n            return operator(self.code, other.code)\n        return NotImplemented\n\n    def __eq__(self, other):\n        return self._comparison(operator.eq, other)\n\n    def __ne__(self, other):\n        return self._comparison(operator.ne, other)\n\n    def __lt__(self, other):\n        return self._comparison(operator.lt, other)\n\n    def __gt__(self, other):\n        return self._comparison(operator.gt, other)\n\n    def __le__(self, other):\n        return self._comparison(operator.le, other)\n\n    def __ge__(self, other):\n        return self._comparison(operator.ge, other)\n\n\nclass PendingStatus(Status):\n    def __init__(self, reason=None):\n        super(PendingStatus, self).__init__(\"pending\", 0, reason)\n\n\nclass SubmittedStatus(Status):\n    def __init__(self, reason=None):\n        super(SubmittedStatus, self).__init__(\"submitted\", 1, reason)\n\n\nclass CompleteStatus(Status):\n    def __init__(self, reason=None):\n        super(CompleteStatus, self).__init__(\"complete\", 2, reason)\n\n\nclass SkippedStatus(Status):\n    def __init__(self, reason=None):\n        super(SkippedStatus, self).__init__(\"skipped\", 3, reason)\n\n\nclass FailedStatus(Status):\n    def __init__(self, reason=None):\n        super(FailedStatus, self).__init__(\"failed\", 4, reason)\n\n\nclass NotSubmittedStatus(SkippedStatus):\n    reason = \"disabled\"\n\n\nclass NotUpdatedStatus(SkippedStatus):\n    reason = \"locked\"\n\n\nclass DidNotChangeStatus(SkippedStatus):\n    reason = \"nochange\"\n\n\nclass StackDoesNotExist(SkippedStatus):\n    reason = \"does not exist in cloudformation\"\n\n\nPENDING = PendingStatus()\nWAITING = 
PendingStatus(reason=\"waiting\")\nSUBMITTED = SubmittedStatus()\nCOMPLETE = CompleteStatus()\nSKIPPED = SkippedStatus()\nFAILED = FailedStatus()\nINTERRUPTED = FailedStatus(reason=\"interrupted\")\n"
  },
  {
    "path": "stacker/target.py",
    "content": "\n\nclass Target(object):\n    \"\"\"A \"target\" is just a node in the stacker graph that does nothing, except\n    specify dependencies. These can be useful as a means of logically grouping\n    a set of stacks together that can be targeted with the `--targets` flag.\n    \"\"\"\n\n    def __init__(self, definition):\n        self.name = definition.name\n        self.requires = definition.requires or []\n        self.required_by = definition.required_by or []\n        self.logging = False\n"
  },
  {
    "path": "stacker/tests/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/actions/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/actions/test_base.py",
    "content": "\nimport unittest\n\nimport mock\n\nimport botocore.exceptions\nfrom botocore.stub import Stubber, ANY\n\nfrom stacker.actions.base import (\n    BaseAction\n)\nfrom stacker.blueprints.base import Blueprint\nfrom stacker.providers.aws.default import Provider\nfrom stacker.session_cache import get_session\n\nfrom stacker.tests.factories import (\n    MockProviderBuilder,\n    mock_context,\n)\n\nMOCK_VERSION = \"01234abcdef\"\n\n\nclass TestBlueprint(Blueprint):\n    @property\n    def version(self):\n        return MOCK_VERSION\n\n    VARIABLES = {\n        \"Param1\": {\"default\": \"default\", \"type\": str},\n    }\n\n\nclass TestBaseAction(unittest.TestCase):\n    def test_ensure_cfn_bucket_exists(self):\n        session = get_session(\"us-east-1\")\n        provider = Provider(session)\n        action = BaseAction(\n            context=mock_context(\"mynamespace\"),\n            provider_builder=MockProviderBuilder(provider)\n        )\n        stubber = Stubber(action.s3_conn)\n        stubber.add_response(\n            \"head_bucket\",\n            service_response={},\n            expected_params={\n                \"Bucket\": ANY,\n            }\n        )\n        with stubber:\n            action.ensure_cfn_bucket()\n\n    def test_ensure_cfn_bucket_doesnt_exist_us_east(self):\n        session = get_session(\"us-east-1\")\n        provider = Provider(session)\n        action = BaseAction(\n            context=mock_context(\"mynamespace\"),\n            provider_builder=MockProviderBuilder(provider)\n        )\n        stubber = Stubber(action.s3_conn)\n        stubber.add_client_error(\n            \"head_bucket\",\n            service_error_code=\"NoSuchBucket\",\n            service_message=\"Not Found\",\n            http_status_code=404,\n        )\n        stubber.add_response(\n            \"create_bucket\",\n            service_response={},\n            expected_params={\n                \"Bucket\": ANY,\n            }\n        )\n 
       with stubber:\n            action.ensure_cfn_bucket()\n\n    def test_ensure_cfn_bucket_doesnt_exist_us_west(self):\n        session = get_session(\"us-west-1\")\n        provider = Provider(session)\n        action = BaseAction(\n            context=mock_context(\"mynamespace\"),\n            provider_builder=MockProviderBuilder(provider, region=\"us-west-1\")\n        )\n        stubber = Stubber(action.s3_conn)\n        stubber.add_client_error(\n            \"head_bucket\",\n            service_error_code=\"NoSuchBucket\",\n            service_message=\"Not Found\",\n            http_status_code=404,\n        )\n        stubber.add_response(\n            \"create_bucket\",\n            service_response={},\n            expected_params={\n                \"Bucket\": ANY,\n                \"CreateBucketConfiguration\": {\n                    \"LocationConstraint\": \"us-west-1\",\n                }\n            }\n        )\n        with stubber:\n            action.ensure_cfn_bucket()\n\n    def test_ensure_cfn_forbidden(self):\n        session = get_session(\"us-west-1\")\n        provider = Provider(session)\n        action = BaseAction(\n            context=mock_context(\"mynamespace\"),\n            provider_builder=MockProviderBuilder(provider)\n        )\n        stubber = Stubber(action.s3_conn)\n        stubber.add_client_error(\n            \"head_bucket\",\n            service_error_code=\"AccessDenied\",\n            service_message=\"Forbidden\",\n            http_status_code=403,\n        )\n        with stubber:\n            with self.assertRaises(botocore.exceptions.ClientError):\n                action.ensure_cfn_bucket()\n\n    def test_stack_template_url(self):\n        context = mock_context(\"mynamespace\")\n        blueprint = TestBlueprint(name=\"myblueprint\", context=context)\n\n        region = \"us-east-1\"\n        endpoint = \"https://example.com\"\n        session = get_session(region)\n        provider = Provider(session)\n   
     action = BaseAction(\n            context=context,\n            provider_builder=MockProviderBuilder(provider, region=region)\n        )\n\n        with mock.patch('stacker.actions.base.get_s3_endpoint', autospec=True,\n                        return_value=endpoint):\n            self.assertEqual(\n                action.stack_template_url(blueprint),\n                \"%s/%s/stack_templates/%s/%s-%s.json\" % (\n                    endpoint,\n                    \"stacker-mynamespace\",\n                    \"mynamespace-myblueprint\",\n                    \"myblueprint\",\n                    MOCK_VERSION\n                )\n            )\n"
  },
  {
    "path": "stacker/tests/actions/test_build.py",
    "content": "import unittest\nfrom collections import namedtuple\n\nimport mock\n\nfrom stacker import exceptions\nfrom stacker.actions import build\nfrom stacker.session_cache import get_session\nfrom stacker.actions.build import (\n    _resolve_parameters,\n    _handle_missing_parameters,\n    UsePreviousParameterValue,\n)\nfrom stacker.blueprints.variables.types import CFNString\nfrom stacker.context import Context, Config\nfrom stacker.exceptions import StackDidNotChange, StackDoesNotExist\nfrom stacker.providers.base import BaseProvider\nfrom stacker.providers.aws.default import Provider\nfrom stacker.status import (\n    NotSubmittedStatus,\n    COMPLETE,\n    PENDING,\n    SKIPPED,\n    SUBMITTED,\n    FAILED\n)\n\nfrom ..factories import MockThreadingEvent, MockProviderBuilder\n\n\ndef mock_stack_parameters(parameters):\n    return {\n        'Parameters': [\n            {'ParameterKey': k, 'ParameterValue': v}\n            for k, v in parameters.items()\n        ]\n    }\n\n\nclass TestProvider(BaseProvider):\n    def __init__(self, outputs=None, *args, **kwargs):\n        self._outputs = outputs or {}\n\n    def set_outputs(self, outputs):\n        self._outputs = outputs\n\n    def get_stack(self, stack_name, **kwargs):\n        if stack_name not in self._outputs:\n            raise exceptions.StackDoesNotExist(stack_name)\n        return {\"name\": stack_name, \"outputs\": self._outputs[stack_name]}\n\n    def get_outputs(self, stack_name, *args, **kwargs):\n        stack = self.get_stack(stack_name)\n        return stack[\"outputs\"]\n\n\nclass TestBuildAction(unittest.TestCase):\n    def setUp(self):\n        self.context = Context(config=Config({\"namespace\": \"namespace\"}))\n        self.provider = TestProvider()\n        self.build_action = build.Action(\n            self.context,\n            provider_builder=MockProviderBuilder(self.provider))\n\n    def _get_context(self, **kwargs):\n        config = Config({\n            \"namespace\": 
\"namespace\",\n            \"stacks\": [\n                {\"name\": \"vpc\"},\n                {\"name\": \"bastion\",\n                    \"variables\": {\n                        \"test\": \"${output vpc::something}\"}},\n                {\"name\": \"db\",\n                    \"variables\": {\n                        \"test\": \"${output vpc::something}\",\n                        \"else\": \"${output bastion::something}\"}},\n                {\"name\": \"other\", \"variables\": {}}\n            ],\n        })\n        return Context(config=config, **kwargs)\n\n    def test_handle_missing_params(self):\n        existing_stack_param_dict = {\n            \"StackName\": \"teststack\",\n            \"Address\": \"192.168.0.1\"\n        }\n        existing_stack_params = mock_stack_parameters(\n            existing_stack_param_dict\n        )\n        all_params = existing_stack_param_dict.keys()\n        required = [\"Address\"]\n        parameter_values = {\"Address\": \"192.168.0.1\"}\n        expected_params = {\"StackName\": UsePreviousParameterValue,\n                           \"Address\": \"192.168.0.1\"}\n        result = _handle_missing_parameters(parameter_values, all_params,\n                                            required, existing_stack_params)\n        self.assertEqual(sorted(result), sorted(list(expected_params.items())))\n\n    def test_missing_params_no_existing_stack(self):\n        all_params = [\"Address\", \"StackName\"]\n        required = [\"Address\"]\n        parameter_values = {}\n        with self.assertRaises(exceptions.MissingParameterException) as cm:\n            _handle_missing_parameters(parameter_values, all_params, required)\n\n        self.assertEqual(cm.exception.parameters, required)\n\n    def test_existing_stack_params_dont_override_given_params(self):\n        existing_stack_param_dict = {\n            \"StackName\": \"teststack\",\n            \"Address\": \"192.168.0.1\"\n        }\n        existing_stack_params = 
mock_stack_parameters(\n            existing_stack_param_dict\n        )\n        all_params = existing_stack_param_dict.keys()\n        required = [\"Address\"]\n        parameter_values = {\"Address\": \"10.0.0.1\"}\n        result = _handle_missing_parameters(parameter_values, all_params,\n                                            required, existing_stack_params)\n        self.assertEqual(\n            sorted(result),\n            sorted(list(parameter_values.items()))\n        )\n\n    def test_generate_plan(self):\n        context = self._get_context()\n        build_action = build.Action(context, cancel=MockThreadingEvent())\n        plan = build_action._generate_plan()\n        self.assertEqual(\n            {\n                'db': set(['bastion', 'vpc']),\n                'bastion': set(['vpc']),\n                'other': set([]),\n                'vpc': set([])},\n            plan.graph.to_dict()\n        )\n\n    def test_dont_execute_plan_when_outline_specified(self):\n        context = self._get_context()\n        build_action = build.Action(context, cancel=MockThreadingEvent())\n        with mock.patch.object(build_action, \"_generate_plan\") as \\\n                mock_generate_plan:\n            build_action.run(outline=True)\n            self.assertEqual(mock_generate_plan().execute.call_count, 0)\n\n    def test_execute_plan_when_outline_not_specified(self):\n        context = self._get_context()\n        build_action = build.Action(context, cancel=MockThreadingEvent())\n        with mock.patch.object(build_action, \"_generate_plan\") as \\\n                mock_generate_plan:\n            build_action.run(outline=False)\n            self.assertEqual(mock_generate_plan().execute.call_count, 1)\n\n    def test_should_update(self):\n        test_scenario = namedtuple(\"test_scenario\",\n                                   [\"locked\", \"force\", \"result\"])\n        test_scenarios = (\n            test_scenario(locked=False, force=False, 
result=True),\n            test_scenario(locked=False, force=True, result=True),\n            test_scenario(locked=True, force=False, result=False),\n            test_scenario(locked=True, force=True, result=True)\n        )\n        mock_stack = mock.MagicMock([\"locked\", \"force\", \"name\"])\n        mock_stack.name = \"test-stack\"\n        for t in test_scenarios:\n            mock_stack.locked = t.locked\n            mock_stack.force = t.force\n            self.assertEqual(build.should_update(mock_stack), t.result)\n\n    def test_should_ensure_cfn_bucket(self):\n        test_scenarios = [\n            {\"outline\": False, \"dump\": False, \"result\": True},\n            {\"outline\": True, \"dump\": False, \"result\": False},\n            {\"outline\": False, \"dump\": True, \"result\": False},\n            {\"outline\": True, \"dump\": True, \"result\": False},\n            {\"outline\": True, \"dump\": \"DUMP\", \"result\": False}\n        ]\n\n        for scenario in test_scenarios:\n            outline = scenario[\"outline\"]\n            dump = scenario[\"dump\"]\n            result = scenario[\"result\"]\n            try:\n                self.assertEqual(\n                    build.should_ensure_cfn_bucket(outline, dump), result)\n            except AssertionError as e:\n                e.args += (\"scenario\", str(scenario))\n                raise\n\n    def test_should_submit(self):\n        test_scenario = namedtuple(\"test_scenario\",\n                                   [\"enabled\", \"result\"])\n        test_scenarios = (\n            test_scenario(enabled=False, result=False),\n            test_scenario(enabled=True, result=True),\n        )\n\n        mock_stack = mock.MagicMock([\"enabled\", \"name\"])\n        mock_stack.name = \"test-stack\"\n        for t in test_scenarios:\n            mock_stack.enabled = t.enabled\n            self.assertEqual(build.should_submit(mock_stack), t.result)\n\n\nclass TestLaunchStack(TestBuildAction):\n    
def setUp(self):\n        self.context = self._get_context()\n        self.session = get_session(region=None)\n        self.provider = Provider(self.session, interactive=False,\n                                 recreate_failed=False)\n        provider_builder = MockProviderBuilder(self.provider)\n        self.build_action = build.Action(self.context,\n                                         provider_builder=provider_builder,\n                                         cancel=MockThreadingEvent())\n\n        self.stack = mock.MagicMock()\n        self.stack.region = None\n        self.stack.name = 'vpc'\n        self.stack.fqn = 'vpc'\n        self.stack.blueprint.rendered = '{}'\n        self.stack.locked = False\n        self.stack_status = None\n\n        plan = self.build_action._generate_plan()\n        self.step = plan.steps[0]\n        self.step.stack = self.stack\n\n        def patch_object(*args, **kwargs):\n            m = mock.patch.object(*args, **kwargs)\n            self.addCleanup(m.stop)\n            m.start()\n\n        def get_stack(name, *args, **kwargs):\n            if name != self.stack.name or not self.stack_status:\n                raise StackDoesNotExist(name)\n\n            return {'StackName': self.stack.name,\n                    'StackStatus': self.stack_status,\n                    'Outputs': [],\n                    'Tags': []}\n\n        def get_events(name, *args, **kwargs):\n            return [{'ResourceStatus': 'ROLLBACK_IN_PROGRESS',\n                    'ResourceStatusReason': 'CFN fail'}]\n\n        patch_object(self.provider, 'get_stack', side_effect=get_stack)\n        patch_object(self.provider, 'update_stack')\n        patch_object(self.provider, 'create_stack')\n        patch_object(self.provider, 'destroy_stack')\n        patch_object(self.provider, 'get_events', side_effect=get_events)\n\n        patch_object(self.build_action, \"s3_stack_push\")\n\n    def _advance(self, new_provider_status, expected_status, 
expected_reason):\n        self.stack_status = new_provider_status\n        status = self.step._run_once()\n        self.assertEqual(status, expected_status)\n        self.assertEqual(status.reason, expected_reason)\n\n    def test_launch_stack_disabled(self):\n        self.assertEqual(self.step.status, PENDING)\n\n        self.stack.enabled = False\n        self._advance(None, NotSubmittedStatus(), \"disabled\")\n\n    def test_launch_stack_create(self):\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # initial run should return SUBMITTED since we've passed off to CF\n        self._advance(None, SUBMITTED, \"creating new stack\")\n\n        # status should stay as SUBMITTED when the stack becomes available\n        self._advance('CREATE_IN_PROGRESS', SUBMITTED, \"creating new stack\")\n\n        # status should become COMPLETE once the stack finishes\n        self._advance('CREATE_COMPLETE', COMPLETE, \"creating new stack\")\n\n    def test_launch_stack_create_rollback(self):\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # initial run should return SUBMITTED since we've passed off to CF\n        self._advance(None, SUBMITTED, \"creating new stack\")\n\n        # provider should now return the CF stack since it exists\n        self._advance(\"CREATE_IN_PROGRESS\", SUBMITTED,\n                      \"creating new stack\")\n\n        # rollback should be noticed\n        self._advance(\"ROLLBACK_IN_PROGRESS\", SUBMITTED,\n                      \"rolling back new stack\")\n\n        # rollback should not be added twice to the reason\n        self._advance(\"ROLLBACK_IN_PROGRESS\", SUBMITTED,\n                      \"rolling back new stack\")\n\n        # rollback should finish with failure\n        self._advance(\"ROLLBACK_COMPLETE\", FAILED,\n                      \"rolled back new stack\")\n\n    def test_launch_stack_recreate(self):\n        
self.provider.recreate_failed = True\n\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # first action with an existing failed stack should be deleting it\n        self._advance(\"ROLLBACK_COMPLETE\", SUBMITTED,\n                      \"destroying stack for re-creation\")\n\n        # status should stay as submitted during deletion\n        self._advance(\"DELETE_IN_PROGRESS\", SUBMITTED,\n                      \"destroying stack for re-creation\")\n\n        # deletion being complete must trigger re-creation\n        self._advance(\"DELETE_COMPLETE\", SUBMITTED,\n                      \"re-creating stack\")\n\n        # re-creation should continue as SUBMITTED\n        self._advance(\"CREATE_IN_PROGRESS\", SUBMITTED,\n                      \"re-creating stack\")\n\n        # re-creation should finish with success\n        self._advance(\"CREATE_COMPLETE\", COMPLETE,\n                      \"re-creating stack\")\n\n    def test_launch_stack_update_skipped(self):\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # start the upgrade, that will be skipped\n        self.provider.update_stack.side_effect = StackDidNotChange\n        self._advance(\"CREATE_COMPLETE\", SKIPPED,\n                      \"nochange\")\n\n    def test_launch_stack_update_rollback(self):\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # initial run should return SUBMITTED since we've passed off to CF\n        self._advance(\"CREATE_COMPLETE\", SUBMITTED,\n                      \"updating existing stack\")\n\n        # update should continue as SUBMITTED\n        self._advance(\"UPDATE_IN_PROGRESS\", SUBMITTED,\n                      \"updating existing stack\")\n\n        # rollback should be noticed\n        self._advance(\"UPDATE_ROLLBACK_IN_PROGRESS\", SUBMITTED,\n                      \"rolling back update\")\n\n     
   # rollback should finish with failure\n        self._advance(\"UPDATE_ROLLBACK_COMPLETE\", FAILED,\n                      \"rolled back update\")\n\n    def test_launch_stack_update_success(self):\n        # initial status should be PENDING\n        self.assertEqual(self.step.status, PENDING)\n\n        # initial run should return SUBMITTED since we've passed off to CF\n        self._advance(\"CREATE_COMPLETE\", SUBMITTED,\n                      \"updating existing stack\")\n\n        # update should continue as SUBMITTED\n        self._advance(\"UPDATE_IN_PROGRESS\", SUBMITTED,\n                      \"updating existing stack\")\n\n        # update should finish with success\n        self._advance(\"UPDATE_COMPLETE\", COMPLETE,\n                      \"updating existing stack\")\n\n\nclass TestFunctions(unittest.TestCase):\n    \"\"\" test module level functions \"\"\"\n\n    def setUp(self):\n        self.ctx = Context({\"namespace\": \"test\"})\n        self.prov = mock.MagicMock()\n        self.bp = mock.MagicMock()\n\n    def test_resolve_parameters_unused_parameter(self):\n        self.bp.get_parameter_definitions.return_value = {\n            \"a\": {\n                \"type\": CFNString,\n                \"description\": \"A\"},\n            \"b\": {\n                \"type\": CFNString,\n                \"description\": \"B\"}\n        }\n        params = {\"a\": \"Apple\", \"c\": \"Carrot\"}\n        p = _resolve_parameters(params, self.bp)\n        self.assertNotIn(\"c\", p)\n        self.assertIn(\"a\", p)\n\n    def test_resolve_parameters_none_conversion(self):\n        self.bp.get_parameter_definitions.return_value = {\n            \"a\": {\n                \"type\": CFNString,\n                \"description\": \"A\"},\n            \"b\": {\n                \"type\": CFNString,\n                \"description\": \"B\"}\n        }\n        params = {\"a\": None, \"c\": \"Carrot\"}\n        p = _resolve_parameters(params, self.bp)\n        
self.assertNotIn(\"a\", p)\n\n    def test_resolve_parameters_booleans(self):\n        self.bp.get_parameter_definitions.return_value = {\n            \"a\": {\n                \"type\": CFNString,\n                \"description\": \"A\"},\n            \"b\": {\n                \"type\": CFNString,\n                \"description\": \"B\"},\n        }\n        params = {\"a\": True, \"b\": False}\n        p = _resolve_parameters(params, self.bp)\n        self.assertEquals(\"true\", p[\"a\"])\n        self.assertEquals(\"false\", p[\"b\"])\n"
  },
  {
    "path": "stacker/tests/actions/test_destroy.py",
    "content": "import unittest\n\nimport mock\n\nfrom stacker.actions import destroy\nfrom stacker.context import Context, Config\nfrom stacker.exceptions import StackDoesNotExist\nfrom stacker.status import (\n    COMPLETE,\n    PENDING,\n    SKIPPED,\n    SUBMITTED,\n)\n\nfrom ..factories import MockThreadingEvent, MockProviderBuilder\n\n\nclass MockStack(object):\n    \"\"\"Mock our local Stacker stack and an AWS provider stack\"\"\"\n\n    def __init__(self, name, tags=None, **kwargs):\n        self.name = name\n        self.fqn = name\n        self.region = None\n        self.profile = None\n        self.requires = []\n\n\nclass TestDestroyAction(unittest.TestCase):\n\n    def setUp(self):\n        config = Config({\n            \"namespace\": \"namespace\",\n            \"stacks\": [\n                {\"name\": \"vpc\"},\n                {\"name\": \"bastion\", \"requires\": [\"vpc\"]},\n                {\"name\": \"instance\", \"requires\": [\"vpc\", \"bastion\"]},\n                {\"name\": \"db\", \"requires\": [\"instance\", \"vpc\", \"bastion\"]},\n                {\"name\": \"other\", \"requires\": [\"db\"]},\n            ],\n        })\n        self.context = Context(config=config)\n        self.action = destroy.Action(self.context,\n                                     cancel=MockThreadingEvent())\n\n    def test_generate_plan(self):\n        plan = self.action._generate_plan()\n        self.assertEqual(\n            {\n                'vpc': set(\n                    ['db', 'instance', 'bastion']),\n                'other': set([]),\n                'bastion': set(\n                    ['instance', 'db']),\n                'instance': set(\n                    ['db']),\n                'db': set(\n                    ['other'])},\n            plan.graph.to_dict()\n        )\n\n    def test_only_execute_plan_when_forced(self):\n        with mock.patch.object(self.action, \"_generate_plan\") as \\\n                mock_generate_plan:\n            
self.action.run(force=False)\n            self.assertEqual(mock_generate_plan().execute.call_count, 0)\n\n    def test_execute_plan_when_forced(self):\n        with mock.patch.object(self.action, \"_generate_plan\") as \\\n                mock_generate_plan:\n            self.action.run(force=True)\n            self.assertEqual(mock_generate_plan().execute.call_count, 1)\n\n    def test_destroy_stack_complete_if_state_submitted(self):\n        # Simulate the provider not being able to find the stack (a result of\n        # it being successfully deleted)\n        provider = mock.MagicMock()\n        provider.get_stack.side_effect = StackDoesNotExist(\"mock\")\n        self.action.provider_builder = MockProviderBuilder(provider)\n        status = self.action._destroy_stack(MockStack(\"vpc\"), status=PENDING)\n        # if we haven't processed the step (ie. has never been SUBMITTED,\n        # should be skipped)\n        self.assertEqual(status, SKIPPED)\n        status = self.action._destroy_stack(MockStack(\"vpc\"), status=SUBMITTED)\n        # if we have processed the step and then can't find the stack, it means\n        # we successfully deleted it\n        self.assertEqual(status, COMPLETE)\n\n    def test_destroy_stack_step_statuses(self):\n        mock_provider = mock.MagicMock()\n        stacks_dict = self.context.get_stacks_dict()\n\n        def get_stack(stack_name):\n            return stacks_dict.get(stack_name)\n\n        plan = self.action._generate_plan()\n        step = plan.steps[0]\n        # we need the AWS provider to generate the plan, but swap it for\n        # the mock one to make the test easier\n        self.action.provider_builder = MockProviderBuilder(mock_provider)\n\n        # simulate stack doesn't exist and we haven't submitted anything for\n        # deletion\n        mock_provider.get_stack.side_effect = StackDoesNotExist(\"mock\")\n\n        step.run()\n        self.assertEqual(step.status, SKIPPED)\n\n        # simulate stack getting 
successfully deleted\n        mock_provider.get_stack.side_effect = get_stack\n        mock_provider.is_stack_destroyed.return_value = False\n        mock_provider.is_stack_in_progress.return_value = False\n\n        step._run_once()\n        self.assertEqual(step.status, SUBMITTED)\n        mock_provider.is_stack_destroyed.return_value = False\n        mock_provider.is_stack_in_progress.return_value = True\n\n        step._run_once()\n        self.assertEqual(step.status, SUBMITTED)\n        mock_provider.is_stack_destroyed.return_value = True\n        mock_provider.is_stack_in_progress.return_value = False\n\n        step._run_once()\n        self.assertEqual(step.status, COMPLETE)\n"
  },
  {
    "path": "stacker/tests/actions/test_diff.py",
    "content": "import unittest\n\nfrom operator import attrgetter\nfrom stacker.actions.diff import (\n    diff_dictionaries,\n    diff_parameters,\n    DictValue\n)\n\n\nclass TestDictValueFormat(unittest.TestCase):\n    def test_status(self):\n        added = DictValue(\"k0\", None, \"value_0\")\n        self.assertEqual(added.status(), DictValue.ADDED)\n        removed = DictValue(\"k1\", \"value_1\", None)\n        self.assertEqual(removed.status(), DictValue.REMOVED)\n        modified = DictValue(\"k2\", \"value_1\", \"value_2\")\n        self.assertEqual(modified.status(), DictValue.MODIFIED)\n        unmodified = DictValue(\"k3\", \"value_1\", \"value_1\")\n        self.assertEqual(unmodified.status(), DictValue.UNMODIFIED)\n\n    def test_format(self):\n        added = DictValue(\"k0\", None, \"value_0\")\n        self.assertEqual(added.changes(),\n                         ['+%s = %s' % (added.key, added.new_value)])\n        removed = DictValue(\"k1\", \"value_1\", None)\n        self.assertEqual(removed.changes(),\n                         ['-%s = %s' % (removed.key, removed.old_value)])\n        modified = DictValue(\"k2\", \"value_1\", \"value_2\")\n        self.assertEqual(modified.changes(), [\n            '-%s = %s' % (modified.key, modified.old_value),\n            '+%s = %s' % (modified.key, modified.new_value)\n        ])\n        unmodified = DictValue(\"k3\", \"value_1\", \"value_1\")\n        self.assertEqual(unmodified.changes(), [' %s = %s' % (\n            unmodified.key, unmodified.old_value)])\n        self.assertEqual(unmodified.changes(), [' %s = %s' % (\n            unmodified.key, unmodified.new_value)])\n\n\nclass TestDiffDictionary(unittest.TestCase):\n    def test_diff_dictionaries(self):\n        old_dict = {\n            \"a\": \"Apple\",\n            \"b\": \"Banana\",\n            \"c\": \"Corn\",\n        }\n        new_dict = {\n            \"a\": \"Apple\",\n            \"b\": \"Bob\",\n            \"d\": \"Doug\",\n        
}\n\n        [count, changes] = diff_dictionaries(old_dict, new_dict)\n        self.assertEqual(count, 3)\n        expected_output = [\n            DictValue(\"a\", \"Apple\", \"Apple\"),\n            DictValue(\"b\", \"Banana\", \"Bob\"),\n            DictValue(\"c\", \"Corn\", None),\n            DictValue(\"d\", None, \"Doug\"),\n        ]\n        expected_output.sort(key=attrgetter(\"key\"))\n\n        # compare all the outputs to the expected change\n        for expected_change in expected_output:\n            change = changes.pop(0)\n            self.assertEqual(change, expected_change)\n\n        # No extra output\n        self.assertEqual(len(changes), 0)\n\n\nclass TestDiffParameters(unittest.TestCase):\n    def test_diff_parameters_no_changes(self):\n        old_params = {\n            \"a\": \"Apple\"\n        }\n        new_params = {\n            \"a\": \"Apple\"\n        }\n\n        param_diffs = diff_parameters(old_params, new_params)\n        self.assertEquals(param_diffs, [])\n"
  },
  {
    "path": "stacker/tests/blueprints/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/blueprints/test_base.py",
    "content": "import unittest\nimport sys\nfrom mock import patch\n\nfrom mock import MagicMock\nfrom troposphere import (\n    Base64,\n    Ref,\n    s3,\n    sns\n)\n\nfrom stacker.blueprints.base import (\n    Blueprint,\n    CFNParameter,\n    build_parameter,\n    validate_allowed_values,\n    validate_variable_type,\n    resolve_variable,\n    parse_user_data\n)\nfrom stacker.blueprints.variables.types import (\n    CFNCommaDelimitedList,\n    CFNNumber,\n    CFNString,\n    EC2AvailabilityZoneNameList,\n    TroposphereType,\n)\nfrom stacker.exceptions import (\n    InvalidLookupCombination,\n    MissingVariable,\n    UnresolvedVariable,\n    UnresolvedVariables,\n    ValidatorError,\n    VariableTypeRequired,\n    InvalidUserdataPlaceholder\n)\nfrom stacker.variables import Variable\nfrom stacker.lookups import register_lookup_handler\n\nfrom ..factories import mock_context\n\n\ndef mock_lookup_handler(value, provider=None, context=None, fqn=False,\n                        **kwargs):\n    return value\n\n\nregister_lookup_handler(\"mock\", mock_lookup_handler)\n\n\nclass TestBuildParameter(unittest.TestCase):\n\n    def test_base_parameter(self):\n        p = build_parameter(\"BasicParam\", {\"type\": \"String\"})\n        p.validate()\n        self.assertEquals(p.Type, \"String\")\n\n\nclass TestBlueprintRendering(unittest.TestCase):\n\n    def test_to_json(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"default\": \"default\", \"type\": CFNString},\n                \"Param2\": {\"type\": CFNNumber},\n                \"Param3\": {\"type\": CFNCommaDelimitedList},\n                \"Param4\": {\"default\": \"foo\", \"type\": str},\n                \"Param5\": {\"default\": 5, \"type\": int}\n            }\n\n            def create_template(self):\n                self.template.set_version('2010-09-09')\n                self.template.set_description('TestBlueprint')\n\n        expected_json = 
\"\"\"{\n    \"AWSTemplateFormatVersion\": \"2010-09-09\",\n    \"Description\": \"TestBlueprint\",\n    \"Parameters\": {\n        \"Param1\": {\n            \"Default\": \"default\",\n            \"Type\": \"String\"\n        },\n        \"Param2\": {\n            \"Type\": \"Number\"\n        },\n        \"Param3\": {\n            \"Type\": \"CommaDelimitedList\"\n        }\n    },\n    \"Resources\": {}\n}\"\"\"\n        self.assertEqual(\n            TestBlueprint(name=\"test\", context=mock_context()).to_json(),\n            expected_json,\n        )\n\n\nclass TestBaseBlueprint(unittest.TestCase):\n    def test_add_output(self):\n        output_name = \"MyOutput1\"\n        output_value = \"OutputValue\"\n\n        class TestBlueprint(Blueprint):\n            VARIABLES = {}\n\n            def create_template(self):\n                self.template.set_version('2010-09-09')\n                self.template.set_description('TestBlueprint')\n                self.add_output(output_name, output_value)\n\n        bp = TestBlueprint(name=\"test\", context=mock_context())\n        bp.render_template()\n        self.assertEqual(bp.template.outputs[output_name].properties[\"Value\"],\n                         output_value)\n\n\nclass TestVariables(unittest.TestCase):\n\n    def test_defined_variables(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"default\": \"default\", \"type\": str},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        self.assertEqual(\n            blueprint.defined_variables(),\n            blueprint.VARIABLES,\n        )\n\n    def test_defined_variables_subclass(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"default\": 0, \"type\": int},\n                \"Param2\": {\"default\": 0, \"type\": int},\n            }\n\n        class TestBlueprintSublcass(TestBlueprint):\n\n            def 
defined_variables(self):\n                variables = super(TestBlueprintSublcass,\n                                  self).defined_variables()\n                variables[\"Param2\"][\"default\"] = 1\n                variables[\"Param3\"] = {\"default\": 1, \"type\": int}\n                return variables\n\n        blueprint = TestBlueprintSublcass(name=\"test\", context=MagicMock())\n        variables = blueprint.defined_variables()\n        self.assertEqual(len(variables), 3)\n        self.assertEqual(variables[\"Param2\"][\"default\"], 1)\n\n    def test_get_variables_unresolved_variables(self):\n        class TestBlueprint(Blueprint):\n            pass\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        with self.assertRaises(UnresolvedVariables):\n            blueprint.get_variables()\n\n    def test_set_description(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"default\": \"default\", \"type\": str},\n            }\n\n            def create_template(self):\n                return\n\n        description = \"my blueprint description\"\n        context = mock_context()\n        blueprint = TestBlueprint(name=\"test\", context=context,\n                                  description=description)\n        blueprint.render_template()\n        self.assertEquals(description, blueprint.template.description)\n\n    def test_validate_variable_type_cfntype(self):\n        var_name = \"testVar\"\n        var_type = CFNString\n        provided_value = \"abc\"\n        value = validate_variable_type(var_name, var_type, provided_value)\n        self.assertIsInstance(value, CFNParameter)\n\n    def test_validate_variable_type_cfntype_none_value(self):\n        var_name = \"testVar\"\n        var_type = CFNString\n        provided_value = None\n        with self.assertRaises(ValueError):\n            validate_variable_type(var_name, var_type, provided_value)\n\n    def 
test_validate_variable_type_matching_type(self):\n        var_name = \"testVar\"\n        var_type = str\n        provided_value = \"abc\"\n        value = validate_variable_type(var_name, var_type, provided_value)\n        self.assertEqual(value, provided_value)\n\n    # This tests that validate_variable_type doesn't change the original value\n    # even if it could.  IE: A string \"1\" shouldn't be valid for an int.\n    # See: https://github.com/remind101/stacker/pull/266\n    def test_strict_validate_variable_type(self):\n        var_name = \"testVar\"\n        var_type = int\n        provided_value = \"1\"\n        with self.assertRaises(ValueError):\n            validate_variable_type(var_name, var_type, provided_value)\n\n    def test_validate_variable_type_invalid_value(self):\n        var_name = \"testVar\"\n        var_type = int\n        provided_value = \"abc\"\n        with self.assertRaises(ValueError):\n            validate_variable_type(var_name, var_type, provided_value)\n\n    def test_resolve_variable_no_type_on_variable_definition(self):\n        var_name = \"testVar\"\n        var_def = {}\n        provided_variable = None\n        blueprint_name = \"testBlueprint\"\n\n        with self.assertRaises(VariableTypeRequired):\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n    def test_resolve_variable_no_provided_with_default(self):\n        var_name = \"testVar\"\n        default_value = \"foo\"\n        var_def = {\"default\": default_value, \"type\": str}\n        provided_variable = None\n        blueprint_name = \"testBlueprint\"\n\n        value = resolve_variable(var_name, var_def, provided_variable,\n                                 blueprint_name)\n\n        self.assertEqual(default_value, value)\n\n    def test_resolve_variable_no_provided_without_default(self):\n        var_name = \"testVar\"\n        var_def = {\"type\": str}\n        provided_variable = None\n        
blueprint_name = \"testBlueprint\"\n\n        with self.assertRaises(MissingVariable):\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n    def test_resolve_variable_provided_not_resolved(self):\n        var_name = \"testVar\"\n        var_def = {\"type\": str}\n        provided_variable = Variable(var_name, \"${mock abc}\")\n        blueprint_name = \"testBlueprint\"\n\n        with self.assertRaises(UnresolvedVariable):\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n    def _resolve_troposphere_var(self, tpe, value, **kwargs):\n        var_name = \"testVar\"\n        var_def = {\"type\": TroposphereType(tpe, **kwargs)}\n        provided_variable = Variable(var_name, value)\n        blueprint_name = \"testBlueprint\"\n\n        return resolve_variable(var_name, var_def, provided_variable,\n                                blueprint_name)\n\n    def test_resolve_variable_troposphere_type_resource_single(self):\n        bucket_defs = {\"MyBucket\": {\"BucketName\": \"some-bucket\"}}\n        bucket = self._resolve_troposphere_var(s3.Bucket, bucket_defs)\n\n        self.assertTrue(isinstance(bucket, s3.Bucket))\n        self.assertEqual(bucket.properties, bucket_defs[bucket.title])\n        self.assertEqual(bucket.title, \"MyBucket\")\n\n    def test_resolve_variable_troposphere_type_resource_optional(self):\n        bucket = self._resolve_troposphere_var(s3.Bucket, None, optional=True)\n        self.assertEqual(bucket, None)\n\n    def test_resolve_variable_troposphere_type_value_blank_required(self):\n        with self.assertRaises(ValidatorError):\n            self._resolve_troposphere_var(s3.Bucket, None)\n\n    def test_resolve_variable_troposphere_type_resource_many(self):\n        bucket_defs = {\n            \"FirstBucket\": {\"BucketName\": \"some-bucket\"},\n            \"SecondBucket\": {\"BucketName\": 
\"some-other-bucket\"}\n        }\n        buckets = self._resolve_troposphere_var(s3.Bucket, bucket_defs,\n                                                many=True)\n\n        for bucket in buckets:\n            self.assertTrue(isinstance(bucket, s3.Bucket))\n            self.assertEqual(bucket.properties, bucket_defs[bucket.title])\n\n    def test_resolve_variable_troposphere_type_resource_many_empty(self):\n        buckets = self._resolve_troposphere_var(s3.Bucket, {}, many=True)\n        self.assertEqual(buckets, [])\n\n    def test_resolve_variable_troposphere_type_resource_fail(self):\n        # Do this to silence the error reporting here:\n        # https://github.com/cloudtools/troposphere/commit/dc8abd5c\n        with open(\"/dev/null\", \"w\") as devnull:\n            _stderr = sys.stderr\n            sys.stderr = devnull\n            with self.assertRaises(ValidatorError):\n                self._resolve_troposphere_var(s3.Bucket,\n                                              {\"MyBucket\": {\"BucketName\": 1}})\n            sys.stderr = _stderr\n\n    def test_resolve_variable_troposphere_type_props_single(self):\n        sub_defs = {\"Endpoint\": \"test\", \"Protocol\": \"lambda\"}\n        # Note that sns.Subscription != sns.SubscriptionResource. 
The former\n        # is a property type, the latter is a complete resource.\n        sub = self._resolve_troposphere_var(sns.Subscription, sub_defs)\n\n        self.assertTrue(isinstance(sub, sns.Subscription))\n        self.assertEqual(sub.properties, sub_defs)\n\n    def test_resolve_variable_troposphere_type_props_optional(self):\n        sub = self._resolve_troposphere_var(sns.Subscription, None,\n                                            optional=True)\n        self.assertEqual(sub, None)\n\n    def test_resolve_variable_troposphere_type_props_many(self):\n        sub_defs = [\n            {\"Endpoint\": \"test1\", \"Protocol\": \"lambda\"},\n            {\"Endpoint\": \"test2\", \"Protocol\": \"lambda\"}\n        ]\n        subs = self._resolve_troposphere_var(sns.Subscription, sub_defs,\n                                             many=True)\n\n        for i, sub in enumerate(subs):\n            self.assertTrue(isinstance(sub, sns.Subscription))\n            self.assertEqual(sub.properties, sub_defs[i])\n\n    def test_resolve_variable_troposphere_type_props_many_empty(self):\n        subs = self._resolve_troposphere_var(sns.Subscription, [], many=True)\n        self.assertEqual(subs, [])\n\n    def test_resolve_variable_troposphere_type_props_fail(self):\n        with self.assertRaises(ValidatorError):\n            self._resolve_troposphere_var(sns.Subscription, {})\n\n    def test_resolve_variable_troposphere_type_unvalidated(self):\n        self._resolve_troposphere_var(sns.Subscription, {}, validate=False)\n\n    def test_resolve_variable_troposphere_type_optional_many(self):\n        res = self._resolve_troposphere_var(sns.Subscription, {},\n                                            many=True, optional=True)\n        self.assertIsNone(res)\n\n    def test_resolve_variable_provided_resolved(self):\n        var_name = \"testVar\"\n        var_def = {\"type\": str}\n        provided_variable = Variable(var_name, \"${mock 1}\")\n        
provided_variable.resolve(context=MagicMock(), provider=MagicMock())\n        blueprint_name = \"testBlueprint\"\n\n        value = resolve_variable(var_name, var_def, provided_variable,\n                                 blueprint_name)\n        self.assertEqual(value, \"1\")\n\n    def test_resolve_variable_allowed_values(self):\n        var_name = \"testVar\"\n        var_def = {\"type\": str, \"allowed_values\": [\"allowed\"]}\n        provided_variable = Variable(var_name, \"not_allowed\")\n        blueprint_name = \"testBlueprint\"\n        with self.assertRaises(ValueError):\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n        provided_variable = Variable(var_name, \"allowed\")\n        value = resolve_variable(var_name, var_def, provided_variable,\n                                 blueprint_name)\n        self.assertEqual(value, \"allowed\")\n\n    def test_resolve_variable_validator_valid_value(self):\n        def triple_validator(value):\n            if len(value) != 3:\n                raise ValueError\n            return value\n\n        var_name = \"testVar\"\n        var_def = {\"type\": list, \"validator\": triple_validator}\n        var_value = [1, 2, 3]\n        provided_variable = Variable(var_name, var_value)\n        blueprint_name = \"testBlueprint\"\n\n        value = resolve_variable(var_name, var_def, provided_variable,\n                                 blueprint_name)\n        self.assertEqual(value, var_value)\n\n    def test_resolve_variable_validator_invalid_value(self):\n        def triple_validator(value):\n            if len(value) != 3:\n                raise ValueError(\"Must be a triple.\")\n            return value\n\n        var_name = \"testVar\"\n        var_def = {\"type\": list, \"validator\": triple_validator}\n        var_value = [1, 2]\n        provided_variable = Variable(var_name, var_value)\n        blueprint_name = \"testBlueprint\"\n\n        with 
self.assertRaises(ValidatorError) as cm:\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n        exc = cm.exception.exception  # The wrapped exception\n        self.assertIsInstance(exc, ValueError)\n\n    def test_resolve_variables(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"default\": 0, \"type\": int},\n                \"Param2\": {\"type\": str},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [\n            Variable(\"Param1\", 1),\n            Variable(\"Param2\", \"${output other-stack::Output}\"),\n            Variable(\"Param3\", 3),\n        ]\n\n        variables[1]._value._resolve(\"Test Output\")\n\n        blueprint.resolve_variables(variables)\n        self.assertEqual(blueprint.resolved_variables[\"Param1\"], 1)\n        self.assertEqual(blueprint.resolved_variables[\"Param2\"], \"Test Output\")\n        self.assertIsNone(blueprint.resolved_variables.get(\"Param3\"))\n\n    def test_resolve_variables_lookup_returns_non_string(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": list},\n            }\n\n        def return_list_something(*_args, **_kwargs):\n            return [\"something\"]\n\n        register_lookup_handler(\"custom\", return_list_something)\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", \"${custom non-string-return-val}\")]\n        for var in variables:\n            var._value.resolve({}, {})\n\n        blueprint.resolve_variables(variables)\n        self.assertEqual(blueprint.resolved_variables[\"Param1\"], [\"something\"])\n\n    def test_resolve_variables_lookup_returns_troposphere_obj(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": Base64},\n   
         }\n\n        def return_obj(*_args, **_kwargs):\n            return Base64(\"test\")\n\n        register_lookup_handler(\"custom\", return_obj)\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", \"${custom non-string-return-val}\")]\n        for var in variables:\n            var._value.resolve({}, {})\n\n        blueprint.resolve_variables(variables)\n        self.assertEqual(blueprint.resolved_variables[\"Param1\"].data,\n                         Base64(\"test\").data)\n\n    def test_resolve_variables_lookup_returns_non_string_invalid_combo(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": list},\n            }\n\n        def return_list_something(*_args, **_kwargs):\n            return [\"something\"]\n\n        register_lookup_handler(\"custom\", return_list_something)\n        variable = Variable(\n            \"Param1\",\n            \"${custom non-string-return-val},${output some-stack::Output}\",\n        )\n        variable._value[0].resolve({}, {})\n        with self.assertRaises(InvalidLookupCombination):\n            variable.value()\n\n    def test_get_variables(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int},\n                \"Param2\": {\"type\": str},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", 1), Variable(\"Param2\", \"Test Output\")]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertEqual(variables[\"Param1\"], 1)\n        self.assertEqual(variables[\"Param2\"], \"Test Output\")\n\n    def test_resolve_variables_missing_variable(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int},\n                \"Param2\": {\"type\": 
str},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", 1)]\n        with self.assertRaises(MissingVariable):\n            blueprint.resolve_variables(variables)\n\n    def test_resolve_variables_incorrect_type(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", \"Something\")]\n        with self.assertRaises(ValueError):\n            blueprint.resolve_variables(variables)\n\n    def test_get_variables_default_value(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int, \"default\": 1},\n                \"Param2\": {\"type\": str},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param2\", \"Test Output\")]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertEqual(variables[\"Param1\"], 1)\n        self.assertEqual(variables[\"Param2\"], \"Test Output\")\n\n    def test_resolve_variables_convert_type(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", 1)]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertTrue(isinstance(variables[\"Param1\"], int))\n\n    def test_resolve_variables_cfn_type(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": CFNString},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = 
[Variable(\"Param1\", \"Value\")]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertTrue(isinstance(variables[\"Param1\"], CFNParameter))\n\n    def test_resolve_variables_cfn_number(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": CFNNumber},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", 1)]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertTrue(isinstance(variables[\"Param1\"], CFNParameter))\n        self.assertEqual(variables[\"Param1\"].value, \"1\")\n\n    def test_resolve_variables_cfn_type_list(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": EC2AvailabilityZoneNameList},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", [\"us-east-1\", \"us-west-2\"])]\n        blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n        self.assertTrue(isinstance(variables[\"Param1\"], CFNParameter))\n        self.assertEqual(variables[\"Param1\"].value, [\"us-east-1\", \"us-west-2\"])\n        self.assertEqual(variables[\"Param1\"].ref.data, Ref(\"Param1\").data)\n        parameters = blueprint.get_parameter_values()\n        self.assertEqual(parameters[\"Param1\"], [\"us-east-1\", \"us-west-2\"])\n\n    def test_resolve_variables_cfn_type_list_invalid_value(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": EC2AvailabilityZoneNameList},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", {\"main\": \"us-east-1\"})]\n        with self.assertRaises(ValueError):\n            
blueprint.resolve_variables(variables)\n        variables = blueprint.get_variables()\n\n    def test_get_parameter_definitions_cfn_type_list(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": EC2AvailabilityZoneNameList},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        parameters = blueprint.get_parameter_definitions()\n        self.assertTrue(\"Param1\" in parameters)\n        parameter = parameters[\"Param1\"]\n        self.assertEqual(parameter[\"type\"],\n                         \"List<AWS::EC2::AvailabilityZone::Name>\")\n\n    def test_get_parameter_definitions_cfn_type(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": CFNString},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        parameters = blueprint.get_parameter_definitions()\n        self.assertTrue(\"Param1\" in parameters)\n        parameter = parameters[\"Param1\"]\n        self.assertEqual(parameter[\"type\"], \"String\")\n\n    def test_get_required_parameter_definitions_cfn_type(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": CFNString},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        blueprint.setup_parameters()\n        params = blueprint.get_required_parameter_definitions()\n        self.assertEqual(list(params.keys())[0], \"Param1\")\n\n    def test_get_parameter_values(self):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {\n                \"Param1\": {\"type\": int},\n                \"Param2\": {\"type\": CFNString},\n            }\n\n        blueprint = TestBlueprint(name=\"test\", context=MagicMock())\n        variables = [Variable(\"Param1\", 1), Variable(\"Param2\", \"Value\")]\n        blueprint.resolve_variables(variables)\n      
  variables = blueprint.get_variables()\n        self.assertEqual(len(variables), 2)\n        parameters = blueprint.get_parameter_values()\n        self.assertEqual(len(parameters), 1)\n        self.assertEqual(parameters[\"Param2\"], \"Value\")\n\n    def test_validate_allowed_values(self):\n        allowed_values = ['allowed']\n        valid = validate_allowed_values(allowed_values, \"not_allowed\")\n        self.assertFalse(valid)\n        valid = validate_allowed_values(allowed_values, \"allowed\")\n        self.assertTrue(valid)\n\n    def test_blueprint_with_parameters_fails(self):\n        class TestBlueprint(Blueprint):\n            PARAMETERS = {\n                \"Param2\": {\"default\": 0, \"type\": \"Integer\"},\n            }\n\n        with self.assertRaises(AttributeError):\n            TestBlueprint(name=\"test\", context=MagicMock())\n\n        class TestBlueprint(Blueprint):\n            LOCAL_PARAMETERS = {\n                \"Param2\": {\"default\": 0, \"type\": \"Integer\"},\n            }\n\n        with self.assertRaises(AttributeError):\n            TestBlueprint(name=\"test\", context=MagicMock())\n\n    def test_variable_exists_but_value_is_none(self):\n        var_name = \"testVar\"\n        var_def = {\"type\": str}\n        var_value = None\n        provided_variable = Variable(var_name, var_value)\n        blueprint_name = \"testBlueprint\"\n\n        with self.assertRaises(ValueError):\n            resolve_variable(var_name, var_def, provided_variable,\n                             blueprint_name)\n\n\nclass TestCFNParameter(unittest.TestCase):\n    def test_cfnparameter_convert_boolean(self):\n        p = CFNParameter(\"myParameter\", True)\n        self.assertEqual(p.value, \"true\")\n        p = CFNParameter(\"myParameter\", False)\n        self.assertEqual(p.value, \"false\")\n        # Test to make sure other types aren't affected\n        p = CFNParameter(\"myParameter\", 0)\n        self.assertEqual(p.value, \"0\")\n        p = 
CFNParameter(\"myParameter\", \"myString\")\n        self.assertEqual(p.value, \"myString\")\n\n    def test_parse_user_data(self):\n        expected = 'name: tom, last: taubkin and $'\n        variables = {\n            'name': 'tom',\n            'last': 'taubkin'\n        }\n\n        raw_user_data = 'name: ${name}, last: $last and $$'\n        blueprint_name = 'test'\n        res = parse_user_data(variables, raw_user_data, blueprint_name)\n        self.assertEqual(res, expected)\n\n    def test_parse_user_data_missing_variable(self):\n        variables = {\n            'name': 'tom',\n        }\n\n        raw_user_data = 'name: ${name}, last: $last and $$'\n        blueprint_name = 'test'\n        with self.assertRaises(MissingVariable):\n            parse_user_data(variables, raw_user_data, blueprint_name)\n\n    def test_parse_user_data_invaled_placeholder(self):\n        raw_user_data = '$100'\n        blueprint_name = 'test'\n        with self.assertRaises(InvalidUserdataPlaceholder):\n            parse_user_data({}, raw_user_data, blueprint_name)\n\n    @patch('stacker.blueprints.base.read_value_from_path',\n           return_value='contents')\n    @patch('stacker.blueprints.base.parse_user_data')\n    def test_read_user_data(self, parse_mock, file_mock):\n        class TestBlueprint(Blueprint):\n            VARIABLES = {}\n\n        blueprint = TestBlueprint(name=\"blueprint_name\", context=MagicMock())\n        blueprint.resolve_variables({})\n        blueprint.read_user_data('file://test.txt')\n        file_mock.assert_called_with('file://test.txt')\n        parse_mock.assert_called_with({}, 'contents', 'blueprint_name')\n"
  },
  {
    "path": "stacker/tests/blueprints/test_raw.py",
    "content": "\"\"\"Test module for blueprint-from-raw-template module.\"\"\"\nimport json\nimport unittest\n\nfrom mock import MagicMock\n\nfrom stacker.blueprints.raw import (\n    get_template_params, get_template_path, RawTemplateBlueprint\n)\nfrom stacker.variables import Variable\nfrom ..factories import mock_context\n\n\nRAW_JSON_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json'\nRAW_YAML_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.yaml'\nRAW_J2_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json.j2'\n\n\ndef test_get_template_path_local_file(tmpdir):\n    \"\"\"Verify get_template_path finding a file relative to CWD.\"\"\"\n\n    template_path = tmpdir.join('cfn_template.json')\n    template_path.ensure()\n\n    with tmpdir.as_cwd():\n        result = get_template_path('cfn_template.json')\n        assert template_path.samefile(result)\n\n\ndef test_get_template_path_invalid_file(tmpdir):\n    \"\"\"Verify get_template_path with an invalid filename.\"\"\"\n\n    with tmpdir.as_cwd():\n        assert get_template_path('cfn_template.json') is None\n\n\ndef test_get_template_path_file_in_syspath(tmpdir, monkeypatch):\n    \"\"\"Verify get_template_path with a file in sys.path.\n\n    This ensures templates are able to be retrieved from remote packages.\n\n    \"\"\"\n\n    template_path = tmpdir.join('cfn_template.json')\n    template_path.ensure()\n\n    monkeypatch.syspath_prepend(tmpdir)\n    result = get_template_path(template_path.basename)\n    assert template_path.samefile(result)\n\n\ndef test_get_template_params():\n    \"\"\"Verify get_template_params function operation.\"\"\"\n    template_dict = {\n        \"AWSTemplateFormatVersion\": \"2010-09-09\",\n        \"Description\": \"TestTemplate\",\n        \"Parameters\": {\n            \"Param1\": {\n                \"Type\": \"String\"\n            },\n            \"Param2\": {\n                \"Default\": \"default\",\n                \"Type\": \"CommaDelimitedList\"\n    
        }\n        },\n        \"Resources\": {}\n    }\n    template_params = {\n        \"Param1\": {\n            \"Type\": \"String\"\n        },\n        \"Param2\": {\n            \"Default\": \"default\",\n            \"Type\": \"CommaDelimitedList\"\n        }\n    }\n\n    assert get_template_params(template_dict) == template_params\n\n\nclass TestBlueprintRendering(unittest.TestCase):\n    \"\"\"Test class for blueprint rendering.\"\"\"\n\n    def test_to_json(self):\n        \"\"\"Verify to_json method operation.\"\"\"\n        expected_json = json.dumps(\n            {\n                \"AWSTemplateFormatVersion\": \"2010-09-09\",\n                \"Description\": \"TestTemplate\",\n                \"Parameters\": {\n                    \"Param1\": {\n                        \"Type\": \"String\"\n                    },\n                    \"Param2\": {\n                        \"Default\": \"default\",\n                        \"Type\": \"CommaDelimitedList\"\n                    }\n                },\n                \"Resources\": {\n                    \"Dummy\": {\n                        \"Type\": \"AWS::SNS::Topic\",\n                        \"Properties\": {\n                            \"DisplayName\": {\"Ref\": \"Param1\"}\n                        }\n                    }\n                },\n                \"Outputs\": {\n                    \"DummyId\": {\n                        \"Value\": \"dummy-1234\"\n                    }\n                }\n            },\n            sort_keys=True,\n            indent=4\n        )\n        self.assertEqual(\n            RawTemplateBlueprint(\n                name=\"test\",\n                context=mock_context(),\n                raw_template_path=RAW_JSON_TEMPLATE_PATH).to_json(),\n            expected_json\n        )\n\n    def test_j2_to_json(self):\n        \"\"\"Verify jinja2 template parsing.\"\"\"\n        expected_json = json.dumps(\n            {\n                
\"AWSTemplateFormatVersion\": \"2010-09-09\",\n                \"Description\": \"TestTemplate\",\n                \"Parameters\": {\n                    \"Param1\": {\n                        \"Type\": \"String\"\n                    },\n                    \"Param2\": {\n                        \"Default\": \"default\",\n                        \"Type\": \"CommaDelimitedList\"\n                    }\n                },\n                \"Resources\": {\n                    \"Dummy\": {\n                        \"Type\": \"AWS::CloudFormation::WaitConditionHandle\"\n                    }\n                },\n                \"Outputs\": {\n                    \"DummyId\": {\n                        \"Value\": \"dummy-bar-param1val-foo-1234\"\n                    }\n                }\n            },\n            sort_keys=True,\n            indent=4\n        )\n        blueprint = RawTemplateBlueprint(\n            name=\"stack1\",\n            context=mock_context(\n                extra_config_args={'stacks': [{'name': 'stack1',\n                                               'template_path': 'unused',\n                                               'variables': {\n                                                   'Param1': 'param1val',\n                                                   'bar': 'foo'}}]},\n                environment={'foo': 'bar'}),\n            raw_template_path=RAW_J2_TEMPLATE_PATH\n        )\n        blueprint.resolve_variables([Variable(\"Param1\", \"param1val\"),\n                                     Variable(\"bar\", \"foo\")])\n        self.assertEqual(\n            expected_json,\n            blueprint.to_json()\n        )\n\n\nclass TestVariables(unittest.TestCase):\n    \"\"\"Test class for blueprint variable methods.\"\"\"\n\n    def test_get_parameter_definitions_json(self):  # noqa pylint: disable=invalid-name\n        \"\"\"Verify get_parameter_definitions method with json raw template.\"\"\"\n        blueprint = 
RawTemplateBlueprint(\n            name=\"test\",\n            context=MagicMock(),\n            raw_template_path=RAW_JSON_TEMPLATE_PATH)\n        parameters = blueprint.get_parameter_definitions()\n        self.assertEqual(\n            parameters,\n            {\"Param1\": {\"Type\": \"String\"},\n             \"Param2\": {\"Default\": \"default\",\n                        \"Type\": \"CommaDelimitedList\"}})\n\n    def test_get_parameter_definitions_yaml(self):  # noqa pylint: disable=invalid-name\n        \"\"\"Verify get_parameter_definitions method with yaml raw template.\"\"\"\n        blueprint = RawTemplateBlueprint(\n            name=\"test\",\n            context=MagicMock(),\n            raw_template_path=RAW_YAML_TEMPLATE_PATH\n        )\n        parameters = blueprint.get_parameter_definitions()\n        self.assertEqual(\n            parameters,\n            {\"Param1\": {\"Type\": \"String\"},\n             \"Param2\": {\"Default\": \"default\",\n                        \"Type\": \"CommaDelimitedList\"}})\n\n    def test_get_required_parameter_definitions_json(self):  # noqa pylint: disable=invalid-name\n        \"\"\"Verify get_required_param... method with json raw template.\"\"\"\n        blueprint = RawTemplateBlueprint(\n            name=\"test\",\n            context=MagicMock(),\n            raw_template_path=RAW_JSON_TEMPLATE_PATH\n        )\n        self.assertEqual(\n            blueprint.get_required_parameter_definitions(),\n            {\"Param1\": {\"Type\": \"String\"}})\n\n    def test_get_required_parameter_definitions_yaml(self):  # noqa pylint: disable=invalid-name\n        \"\"\"Verify get_required_param... 
method with yaml raw template.\"\"\"\n        blueprint = RawTemplateBlueprint(\n            name=\"test\",\n            context=MagicMock(),\n            raw_template_path=RAW_YAML_TEMPLATE_PATH\n        )\n        self.assertEqual(\n            blueprint.get_required_parameter_definitions(),\n            {\"Param1\": {\"Type\": \"String\"}})\n"
  },
  {
    "path": "stacker/tests/blueprints/test_testutil.py",
    "content": "import unittest\n\nfrom troposphere import ecr\n\nfrom ...blueprints.testutil import BlueprintTestCase\nfrom ...blueprints.base import Blueprint\nfrom ...context import Context\nfrom ...variables import Variable\n\n\nclass Repositories(Blueprint):\n    \"\"\" Simple blueprint to test our test cases. \"\"\"\n    VARIABLES = {\n        \"Repositories\": {\n            \"type\": list,\n            \"description\": \"A list of repository names to create.\"\n        }\n    }\n\n    def create_template(self):\n        t = self.template\n        variables = self.get_variables()\n\n        for repo in variables[\"Repositories\"]:\n            t.add_resource(\n                ecr.Repository(\n                    \"%sRepository\" % repo,\n                    RepositoryName=repo,\n                )\n            )\n\n\nclass TestRepositories(BlueprintTestCase):\n    def test_create_template_passes(self):\n        ctx = Context({'namespace': 'test'})\n        blueprint = Repositories('test_repo', ctx)\n        blueprint.resolve_variables([\n            Variable('Repositories', [\"repo1\", \"repo2\"])\n        ])\n        blueprint.create_template()\n        self.assertRenderedBlueprint(blueprint)\n\n    def test_create_template_fails(self):\n        ctx = Context({'namespace': 'test'})\n        blueprint = Repositories('test_repo', ctx)\n        blueprint.resolve_variables([\n            Variable('Repositories', [\"repo1\", \"repo2\", \"repo3\"])\n        ])\n        blueprint.create_template()\n        with self.assertRaises(AssertionError):\n            self.assertRenderedBlueprint(blueprint)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "stacker/tests/conftest.py",
    "content": "\nimport logging\nimport os\n\nimport pytest\nimport py.path\n\nlogger = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef aws_credentials():\n    # Handle change in https://github.com/spulec/moto/issues/1924\n    # Ensure AWS SDK finds some (bogus) credentials in the environment and\n    # doesn't try to use other providers.\n    overrides = {\n        'AWS_ACCESS_KEY_ID': 'testing',\n        'AWS_SECRET_ACCESS_KEY': 'testing',\n        'AWS_DEFAULT_REGION': 'us-east-1'\n    }\n    saved_env = {}\n    for key, value in overrides.items():\n        logger.info('Overriding env var: {}={}'.format(key, value))\n        saved_env[key] = os.environ.get(key, None)\n        os.environ[key] = value\n\n    yield\n\n    for key, value in saved_env.items():\n        logger.info('Restoring saved env var: {}={}'.format(key, value))\n        if value is None:\n            del os.environ[key]\n        else:\n            os.environ[key] = value\n\n    saved_env.clear()\n\n\n@pytest.fixture(scope=\"package\")\ndef stacker_fixture_dir():\n    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n                        'fixtures')\n    return py.path.local(path)\n"
  },
  {
    "path": "stacker/tests/factories.py",
    "content": "from mock import MagicMock\n\nfrom stacker.context import Context\nfrom stacker.config import Config, Stack\nfrom stacker.lookups import Lookup\n\n\nclass MockThreadingEvent(object):\n    def wait(self, timeout=None):\n        return False\n\n\nclass MockProviderBuilder(object):\n    def __init__(self, provider, region=None):\n        self.provider = provider\n        self.region = region\n\n    def build(self, region=None, profile=None):\n        return self.provider\n\n\ndef mock_provider(**kwargs):\n    return MagicMock(**kwargs)\n\n\ndef mock_context(namespace=\"default\", extra_config_args=None, **kwargs):\n    config_args = {\"namespace\": namespace}\n    if extra_config_args:\n        config_args.update(extra_config_args)\n    config = Config(config_args)\n    if kwargs.get(\"environment\"):\n        return Context(\n            config=config,\n            **kwargs)\n    return Context(\n        config=config,\n        environment={},\n        **kwargs)\n\n\ndef generate_definition(base_name, stack_id, **overrides):\n    definition = {\n        \"name\": \"%s.%d\" % (base_name, stack_id),\n        \"class_path\": \"stacker.tests.fixtures.mock_blueprints.%s\" % (\n            base_name.upper()),\n        \"requires\": []\n    }\n    definition.update(overrides)\n    return Stack(definition)\n\n\ndef mock_lookup(lookup_input, lookup_type, raw=None):\n    if raw is None:\n        raw = \"%s %s\" % (lookup_type, lookup_input)\n    return Lookup(type=lookup_type, input=lookup_input, raw=raw)\n\n\nclass SessionStub(object):\n\n    \"\"\"Stubber class for boto3 sessions made with session_cache.get_session()\n\n    This is a helper class that should be used when trying to stub out\n    get_session() calls using the boto3.stubber.\n\n    Example Usage:\n\n        @mock.patch('stacker.lookups.handlers.myfile.get_session',\n                return_value=sessionStub(client))\n        def myfile_test(self, client_stub):\n            ...\n\n    
Attributes:\n        client_stub (:class:`boto3.session.Session`:): boto3 session stub\n\n    \"\"\"\n\n    def __init__(self, client_stub):\n        self.client_stub = client_stub\n\n    def client(self, region):\n        \"\"\"Returns the stubbed client object\n\n        Args:\n            region (str): So boto3 won't complain\n\n        Returns:\n            :class:`boto3.session.Session`: The stubbed boto3 session\n        \"\"\"\n        return self.client_stub\n"
  },
  {
    "path": "stacker/tests/fixtures/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/fixtures/basic.env",
    "content": "namespace: test.stacker\n"
  },
  {
    "path": "stacker/tests/fixtures/cfn_template.json",
    "content": "{\n    \"AWSTemplateFormatVersion\": \"2010-09-09\",\n    \"Description\": \"TestTemplate\",\n    \"Parameters\": {\n        \"Param1\": {\n            \"Type\": \"String\"\n        },\n        \"Param2\": {\n            \"Default\": \"default\",\n            \"Type\": \"CommaDelimitedList\"\n        }\n    },\n    \"Resources\": {\n      \"Dummy\": {\n        \"Type\": \"AWS::SNS::Topic\",\n        \"Properties\": {\n          \"DisplayName\": {\"Ref\" : \"Param1\"}\n        }\n      }\n    },\n    \"Outputs\": {\n      \"DummyId\": {\n        \"Value\": \"dummy-1234\"\n      }\n    }\n}\n"
  },
  {
    "path": "stacker/tests/fixtures/cfn_template.json.j2",
    "content": "{\n    \"AWSTemplateFormatVersion\": \"2010-09-09\",\n    \"Description\": \"TestTemplate\",\n    \"Parameters\": {\n        \"Param1\": {\n            \"Type\": \"String\"\n        },\n        \"Param2\": {\n            \"Default\": \"default\",\n            \"Type\": \"CommaDelimitedList\"\n        }\n    },\n    \"Resources\": {\n      \"Dummy\": {\n        \"Type\": \"AWS::CloudFormation::WaitConditionHandle\"\n      }\n    },\n    \"Outputs\": {\n      \"DummyId\": {\n        \"Value\": \"dummy-{{ context.environment.foo }}-{{ variables.Param1 }}-{{ variables.bar }}-1234\"\n      }\n    }\n}\n"
  },
  {
    "path": "stacker/tests/fixtures/cfn_template.yaml",
    "content": "AWSTemplateFormatVersion: \"2010-09-09\"\nDescription: TestTemplate\nParameters:\n  Param1:\n    Type: String\n  Param2:\n    Default: default\n    Type: CommaDelimitedList\nResources:\n  Bucket:\n    Type: AWS::S3::Bucket\n    Properties:\n      BucketName:\n        !Join\n          - \"-\"\n          - - !Ref \"AWS::StackName\"\n            - !Ref \"AWS::Region\"\n  Dummy:\n    Type: AWS::CloudFormation::WaitConditionHandle\nOutputs:\n  DummyId:\n    Value: dummy-1234\n"
  },
  {
    "path": "stacker/tests/fixtures/keypair/fingerprint",
    "content": "d7:50:1f:78:55:5f:22:c1:f6:88:c6:5d:82:4f:94:4f\n"
  },
  {
    "path": "stacker/tests/fixtures/keypair/id_rsa",
    "content": "-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn\nNhAAAAAwEAAQAAAQEA7rF34ExOHgT+dDYJUswkhBpyC+vnK+ptx+nGQDTkPj9aP1uAXbXA\nC97KK+Ihou0jniYKPJMHsjEK4a7eh2ihoK6JkYs9+y0MeGCAHAYuGXdNt5jv1e0XNgoYdf\nJloC0pgOp4Po9+4qeuOds8bb9IxwM/aSaJWygaSc22ZTzeOWQk5PXJNH0lR0ZelUUkj0HK\naouuV6UX/t+czTghgnNZgDjk5sOfUNmugN7fJi+6/dWjOaukDkJttfZXLRTPDux0SZw4Jo\nRqZ40cBNS8ipLVk24BWeEjVlNl6rrFDtO4yrkscz7plwXlPiRLcdCdbamcCZaRrdkftKje\n5ypz5dvocQAAA9DJ0TBmydEwZgAAAAdzc2gtcnNhAAABAQDusXfgTE4eBP50NglSzCSEGn\nIL6+cr6m3H6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37\nLQx4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzb\nZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r9\n1aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPu\nmXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hxAAAAAwEAAQAAAQAwMUSy1LUw+nElpYNc\nZDs7MNu17HtQMpTXuCt+6y7qIoBmKmNQiFGuE91d3tpLuvVmCOgoMsdrAtvflR741/dKKf\nM8n5B0FjReWZ2ECvtjyOK4HvjNiIEXOBKYPcim/ndSwARnHTHRMWnL5KfewLBA/jbfVBiH\nfyFPpWkeJ5v2mg3EDCkTCj7mBZwXYkX8uZ1IN6CZJ9kWNaPO3kloTlamgs6pd/5+OmMGWc\n/vhfJQppaJjW58y7D7zCpncHg3Yf0HZsgWRTGJO93TxuyzDlAXITVGwqcz7InTVQZS1XTx\n3FNmIpb0lDtVrKGxwvR/7gP6DpxMlKkzoCg3j1o8tHvBAAAAgQDuZCVAAqQFrY4ZH2TluP\nSFulXuTiT4mgQivAwI6ysMxjpX1IGBTgDvHXJ0xyW4LN7pCvg8hRAhsPlaNBX24nNfOGmn\nQMYp/qAZG5JP2vEJmDUKmEJ77Twwmk+k0zXfyZyfo7rgpF4c5W2EFnV7xiMtBTKbAj4HMn\nqGPYDPGpySTwAAAIEA+w72mMctM2yd9Sxyg5b7ZlhuNyKW1oHcEvLoEpTtru0f8gh7C3HT\nC0SiuTOth2xoHUWnbo4Yv5FV3gSoQ/rd1sWbkpEZMwbaPGsTA8bkCn2eItsjfrQx+6oY1U\nHgZDrkjbByB3KQiq+VioKsrUmgfT/UgBq2tSnHqcYB56Eqj0sAAACBAPNkMvCstNJGS4FN\nnSCGXghoYqKHivZN/IjWP33t/cr72lGp1yCY5S6FCn+JdNrojKYk2VXOSF5xc3fZllbr7W\nhmhXRr/csQkymXMDkJHnsdhpMeoEZm7wBjUx+hE1+QbNF63kZMe9sjm5y/YRu7W7H6ngme\nkb5FW97sspLYX8WzAAAAF2RhbmllbGt6YUBkYW5pZWwtcGMubGFuAQID\n-----END OPENSSH PRIVATE KEY-----\n"
  },
  {
    "path": "stacker/tests/fixtures/keypair/id_rsa.pub",
    "content": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q==\n"
  },
  {
    "path": "stacker/tests/fixtures/mock_blueprints.py",
    "content": "from troposphere import GetAtt, Output, Sub, Ref\nfrom troposphere import iam\n\nfrom awacs.aws import Policy, Statement, AWSPrincipal\nimport awacs\nimport awacs.s3\nimport awacs.cloudformation\nimport awacs.iam\nimport awacs.sts\nimport awacs.sns\n\nfrom troposphere.cloudformation import WaitCondition, WaitConditionHandle\n\nfrom stacker.blueprints.base import Blueprint\nfrom stacker.blueprints.variables.types import (\n    CFNCommaDelimitedList,\n    CFNNumber,\n    CFNString,\n    EC2KeyPairKeyName,\n    EC2SecurityGroupId,\n    EC2SubnetIdList,\n    EC2VPCId,\n)\n\n\nclass FunctionalTests(Blueprint):\n    \"\"\"This creates a stack with an IAM user and access key for running the\n    functional tests for stacker.\n    \"\"\"\n\n    VARIABLES = {\n        \"StackerNamespace\": {\n            \"type\": CFNString,\n            \"description\": \"The stacker namespace that the tests will use. \"\n                           \"Access to cloudformation will be restricted to \"\n                           \"only allow access to stacks with this prefix.\"},\n        \"StackerBucket\": {\n            \"type\": CFNString,\n            \"description\": \"The name of the bucket that the tests will use \"\n                           \"for uploading templates.\"}\n    }\n\n    def create_template(self):\n        t = self.template\n\n        bucket_arn = Sub(\"arn:aws:s3:::${StackerBucket}*\")\n        objects_arn = Sub(\"arn:aws:s3:::${StackerBucket}*/*\")\n        cloudformation_scope = Sub(\n            \"arn:aws:cloudformation:*:${AWS::AccountId}:\"\n            \"stack/${StackerNamespace}-*\")\n        sns_scope = Sub(\n            \"arn:aws:sns:*:${AWS::AccountId}:\"\n            \"${StackerNamespace}-*\")\n        changeset_scope = \"*\"\n\n        # This represents the precise IAM permissions that stacker itself\n        # needs.\n        stacker_policy = iam.Policy(\n            PolicyName=\"Stacker\",\n            PolicyDocument=Policy(\n             
   Statement=[\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[\"*\"],\n                        Action=[awacs.s3.ListAllMyBuckets]\n                    ),\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[bucket_arn],\n                        Action=[\n                            awacs.s3.ListBucket,\n                            awacs.s3.GetBucketLocation,\n                            awacs.s3.CreateBucket,\n                            awacs.s3.DeleteBucket,\n                        ]\n                    ),\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[bucket_arn],\n                        Action=[\n                            awacs.s3.GetObject,\n                            awacs.s3.GetObjectAcl,\n                            awacs.s3.PutObject,\n                            awacs.s3.PutObjectAcl,\n                        ]\n                    ),\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[objects_arn],\n                        Action=[\n                            awacs.s3.DeleteObject,\n                        ]\n                    ),\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[changeset_scope],\n                        Action=[\n                            awacs.cloudformation.DescribeChangeSet,\n                            awacs.cloudformation.ExecuteChangeSet,\n                            awacs.cloudformation.DeleteChangeSet,\n                        ]\n                    ),\n                    Statement(\n                        Effect=\"Deny\",\n                        Resource=[Ref(\"AWS::StackId\")],\n                        Action=[awacs.cloudformation.Action(\"*\")]\n                    ),\n                    Statement(\n                 
       Effect=\"Allow\",\n                        Resource=[cloudformation_scope],\n                        Action=[\n                            awacs.cloudformation.GetTemplate,\n                            awacs.cloudformation.CreateChangeSet,\n                            awacs.cloudformation.DeleteChangeSet,\n                            awacs.cloudformation.DeleteStack,\n                            awacs.cloudformation.CreateStack,\n                            awacs.cloudformation.UpdateStack,\n                            awacs.cloudformation.SetStackPolicy,\n                            awacs.cloudformation.DescribeStacks,\n                            awacs.cloudformation.DescribeStackEvents\n                        ]\n                    ),\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[sns_scope],\n                        Action=[\n                            awacs.sns.CreateTopic,\n                            awacs.sns.DeleteTopic,\n                            awacs.sns.GetTopicAttributes\n                        ]\n                    )\n\n                ]\n            )\n        )\n\n        principal = AWSPrincipal(Ref(\"AWS::AccountId\"))\n        role = t.add_resource(\n            iam.Role(\n                \"FunctionalTestRole\",\n                AssumeRolePolicyDocument=Policy(\n                    Statement=[\n                        Statement(\n                            Effect=\"Allow\",\n                            Action=[\n                                awacs.sts.AssumeRole],\n                            Principal=principal)]),\n                Policies=[\n                    stacker_policy]))\n\n        assumerole_policy = iam.Policy(\n            PolicyName=\"AssumeRole\",\n            PolicyDocument=Policy(\n                Statement=[\n                    Statement(\n                        Effect=\"Allow\",\n                        Resource=[GetAtt(role, \"Arn\")],\n         
               Action=[\n                            awacs.sts.AssumeRole])]))\n\n        user = t.add_resource(\n            iam.User(\n                \"FunctionalTestUser\",\n                Policies=[\n                    stacker_policy,\n                    assumerole_policy]))\n\n        key = t.add_resource(\n            iam.AccessKey(\n                \"FunctionalTestKey\",\n                Serial=1,\n                UserName=Ref(user)))\n\n        t.add_output(Output(\"User\", Value=Ref(user)))\n        t.add_output(Output(\"AccessKeyId\", Value=Ref(key)))\n        t.add_output(\n            Output(\n                \"SecretAccessKey\",\n                Value=GetAtt(\"FunctionalTestKey\", \"SecretAccessKey\")))\n        t.add_output(\n            Output(\n                \"FunctionalTestRole\",\n                Value=GetAtt(role, \"Arn\")))\n\n\nclass Dummy(Blueprint):\n    VARIABLES = {\n        \"StringVariable\": {\n            \"type\": str,\n            \"default\": \"\"}\n    }\n\n    def create_template(self):\n        self.template.add_resource(WaitConditionHandle(\"Dummy\"))\n        self.template.add_output(Output(\"DummyId\", Value=\"dummy-1234\"))\n        self.template.add_output(Output(\"Region\", Value=Ref(\"AWS::Region\")))\n\n\nclass Dummy2(Blueprint):\n    \"\"\"\n    This blueprint allows tests of only additional resources to occur.\n    Just swap out the Dummy class for Dummy2 on the same stack.\n    \"\"\"\n    VARIABLES = {\n        \"StringVariable\": {\n            \"type\": str,\n            \"default\": \"\"}\n    }\n\n    def create_template(self):\n        self.template.add_resource(WaitConditionHandle(\"Dummy\"))\n        self.template.add_output(Output(\"DummyId\", Value=\"dummy-1234\"))\n        self.template.add_resource(WaitConditionHandle(\"Dummy2\"))\n\n\nclass LongRunningDummy(Blueprint):\n    \"\"\"\n    Meant to be an attempt to create a cheap blueprint that takes a little bit\n    of time to create/rollback/destroy to 
avoid some of the race conditions\n    we've seen in some of our functional tests.\n    \"\"\"\n    VARIABLES = {\n        \"Count\": {\n            \"type\": int,\n            \"description\": \"The # of WaitConditonHandles to create.\",\n            \"default\": 1,\n        },\n        \"BreakLast\": {\n            \"type\": bool,\n            \"description\": \"Whether or not to break the last WaitConditon \"\n                           \"by creating an invalid WaitConditionHandle.\",\n            \"default\": True,\n        },\n        \"OutputValue\": {\n            \"type\": str,\n            \"description\": \"The value to put in an output to allow for \"\n                           \"updates.\",\n            \"default\": \"DefaultOutput\",\n        },\n    }\n\n    def create_template(self):\n        v = self.get_variables()\n        t = self.template\n        base_name = \"Dummy\"\n\n        for i in range(v[\"Count\"]):\n            name = \"%s%s\" % (base_name, i)\n            last_name = None\n            if i:\n                last_name = \"%s%s\" % (base_name, i - 1)\n            wch = WaitConditionHandle(name)\n            if last_name is not None:\n                wch.DependsOn = last_name\n            t.add_resource(wch)\n\n        self.add_output(\"OutputValue\", str(v[\"OutputValue\"]))\n        self.add_output(\"WCHCount\", str(v[\"Count\"]))\n\n        if v[\"BreakLast\"]:\n            t.add_resource(\n                WaitCondition(\n                    \"BrokenWaitCondition\",\n                    Handle=wch.Ref(),\n                    # Timeout is made deliberately large so CF rejects it\n                    Timeout=2 ** 32,\n                    Count=0\n                )\n            )\n\n\nclass Broken(Blueprint):\n    \"\"\"\n    This blueprint deliberately fails validation, so that it can be used to\n    test re-creation of a failed stack\n    \"\"\"\n    VARIABLES = {\n        \"StringVariable\": {\n            \"type\": str,\n           
 \"default\": \"\"}\n    }\n\n    def create_template(self):\n        t = self.template\n        t.add_resource(WaitConditionHandle(\"BrokenDummy\"))\n        t.add_resource(WaitCondition(\n            \"BrokenWaitCondition\",\n            Handle=Ref(\"BrokenDummy\"),\n            # Timeout is made deliberately large so CF rejects it\n            Timeout=2 ** 32,\n            Count=0))\n        t.add_output(Output(\"DummyId\", Value=\"dummy-1234\"))\n\n\nclass VPC(Blueprint):\n    VARIABLES = {\n        \"AZCount\": {\n            \"type\": int,\n            \"default\": 2,\n        },\n        \"PrivateSubnets\": {\n            \"type\": CFNCommaDelimitedList,\n            \"description\": \"Comma separated list of subnets to use for \"\n                           \"non-public hosts. NOTE: Must have as many subnets \"\n                           \"as AZCount\"},\n        \"PublicSubnets\": {\n            \"type\": CFNCommaDelimitedList,\n            \"description\": \"Comma separated list of subnets to use for \"\n                           \"public hosts. 
NOTE: Must have as many subnets \"\n                           \"as AZCount\"},\n        \"InstanceType\": {\n            \"type\": CFNString,\n            \"description\": \"NAT EC2 instance type.\",\n            \"default\": \"m3.medium\"},\n        \"BaseDomain\": {\n            \"type\": CFNString,\n            \"default\": \"\",\n            \"description\": \"Base domain for the stack.\"},\n        \"InternalDomain\": {\n            \"type\": CFNString,\n            \"default\": \"\",\n            \"description\": \"Internal domain name, if you have one.\"},\n        \"CidrBlock\": {\n            \"type\": CFNString,\n            \"description\": \"Base CIDR block for subnets.\",\n            \"default\": \"10.128.0.0/16\"},\n        \"ImageName\": {\n            \"type\": CFNString,\n            \"description\": \"The image name to use from the AMIMap (usually \"\n                           \"found in the config file.)\",\n            \"default\": \"NAT\"},\n        \"UseNatGateway\": {\n            \"type\": CFNString,\n            \"allowed_values\": [\"true\", \"false\"],\n            \"description\": \"If set to true, will configure a NAT Gateway\"\n                           \"instead of NAT instances.\",\n            \"default\": \"false\"},\n    }\n\n    def create_template(self):\n        self.template.add_resource(WaitConditionHandle(\"VPC\"))\n\n\nclass DiffTester(Blueprint):\n    VARIABLES = {\n        \"InstanceType\": {\n            \"type\": CFNString,\n            \"description\": \"NAT EC2 instance type.\",\n            \"default\": \"m3.medium\"},\n        \"WaitConditionCount\": {\n            \"type\": int,\n            \"description\": \"Number of WaitConditionHandle resources \"\n                           \"to add to the template\"}\n    }\n\n    def create_template(self):\n        for i in range(self.get_variables()[\"WaitConditionCount\"]):\n            self.template.add_resource(WaitConditionHandle(\"VPC%d\" % i))\n\n\nclass 
Bastion(Blueprint):\n    VARIABLES = {\n        \"VpcId\": {\"type\": EC2VPCId, \"description\": \"Vpc Id\"},\n        \"DefaultSG\": {\"type\": EC2SecurityGroupId,\n                      \"description\": \"Top level security group.\"},\n        \"PublicSubnets\": {\"type\": EC2SubnetIdList,\n                          \"description\": \"Subnets to deploy public \"\n                                         \"instances in.\"},\n        \"PrivateSubnets\": {\"type\": EC2SubnetIdList,\n                           \"description\": \"Subnets to deploy private \"\n                                          \"instances in.\"},\n        \"AvailabilityZones\": {\"type\": CFNCommaDelimitedList,\n                              \"description\": \"Availability Zones to deploy \"\n                                             \"instances in.\"},\n        \"InstanceType\": {\"type\": CFNString,\n                         \"description\": \"EC2 Instance Type\",\n                         \"default\": \"m3.medium\"},\n        \"MinSize\": {\"type\": CFNNumber,\n                    \"description\": \"Minimum # of instances.\",\n                    \"default\": \"1\"},\n        \"MaxSize\": {\"type\": CFNNumber,\n                    \"description\": \"Maximum # of instances.\",\n                    \"default\": \"5\"},\n        \"SshKeyName\": {\"type\": EC2KeyPairKeyName},\n        \"OfficeNetwork\": {\n            \"type\": CFNString,\n            \"description\": \"CIDR block allowed to connect to bastion hosts.\"},\n        \"ImageName\": {\n            \"type\": CFNString,\n            \"description\": \"The image name to use from the AMIMap (usually \"\n                           \"found in the config file.)\",\n            \"default\": \"bastion\"},\n    }\n\n    def create_template(self):\n        return\n\n\nclass PreOneOhBastion(Blueprint):\n    \"\"\"Used to ensure old blueprints won't be usable in 1.0\"\"\"\n    PARAMETERS = {\n        \"VpcId\": {\"type\": 
\"AWS::EC2::VPC::Id\", \"description\": \"Vpc Id\"},\n        \"DefaultSG\": {\"type\": \"AWS::EC2::SecurityGroup::Id\",\n                      \"description\": \"Top level security group.\"},\n        \"PublicSubnets\": {\"type\": \"List<AWS::EC2::Subnet::Id>\",\n                          \"description\": \"Subnets to deploy public \"\n                                         \"instances in.\"},\n        \"PrivateSubnets\": {\"type\": \"List<AWS::EC2::Subnet::Id>\",\n                           \"description\": \"Subnets to deploy private \"\n                                          \"instances in.\"},\n        \"AvailabilityZones\": {\"type\": \"CommaDelimitedList\",\n                              \"description\": \"Availability Zones to deploy \"\n                                             \"instances in.\"},\n        \"InstanceType\": {\"type\": \"String\",\n                         \"description\": \"EC2 Instance Type\",\n                         \"default\": \"m3.medium\"},\n        \"MinSize\": {\"type\": \"Number\",\n                    \"description\": \"Minimum # of instances.\",\n                    \"default\": \"1\"},\n        \"MaxSize\": {\"type\": \"Number\",\n                    \"description\": \"Maximum # of instances.\",\n                    \"default\": \"5\"},\n        \"SshKeyName\": {\"type\": \"AWS::EC2::KeyPair::KeyName\"},\n        \"OfficeNetwork\": {\n            \"type\": \"String\",\n            \"description\": \"CIDR block allowed to connect to bastion hosts.\"},\n        \"ImageName\": {\n            \"type\": \"String\",\n            \"description\": \"The image name to use from the AMIMap (usually \"\n                           \"found in the config file.)\",\n            \"default\": \"bastion\"},\n    }\n\n    def create_template(self):\n        return\n"
  },
  {
    "path": "stacker/tests/fixtures/mock_hooks.py",
    "content": "\n\ndef mock_hook(provider, context, **kwargs):\n    return {\"result\": kwargs[\"value\"]}\n"
  },
  {
    "path": "stacker/tests/fixtures/mock_lookups.py",
    "content": "TYPE_NAME = \"mock\"\n\n\ndef handler(value, **kwargs):\n    return \"mock\"\n"
  },
  {
    "path": "stacker/tests/fixtures/not-basic.env",
    "content": "namespace: test.stacker\nenvironment: test\n"
  },
  {
    "path": "stacker/tests/fixtures/parameter_resolution/template.yml",
    "content": "# used in functional test suites, to fix https://github.com/cloudtools/stacker/pull/615\nAWSTemplateFormatVersion: \"2010-09-09\"\n\nParameters:\n  NormalParam:\n    Type: String\n  SecretParam:\n    Type: String\n    Default: default-secret\n    NoEcho: true\n\nOutputs:\n  NormalParam:\n    Value: !Ref \"NormalParam\"\n  SecretParam:\n    Value: !Ref \"SecretParam\"\n\n\nResources:\n  WaitConditionHandle:\n    Type: \"AWS::CloudFormation::WaitConditionHandle\"\n"
  },
  {
    "path": "stacker/tests/fixtures/vpc-bastion-db-web-pre-1.0.yaml",
    "content": "# Hooks require a path.\n# If the build should stop when a hook fails, set required to true.\n# pre_build happens before the build\n# post_build happens after the build\npre_build:\n  - path: stacker.hooks.route53.create_domain\n    required: true\n    enabled: true\n    # Additional args can be passed as a dict of key/value pairs\n    # args:\n    #   BaseDomain: foo\n# post_build:\n\nmappings:\n  AmiMap:\n    us-east-1:\n      NAT: ami-ad227cc4\n      ubuntu1404: &ubuntu1404 ami-74e27e1c # Setting an anchor\n      bastion: *ubuntu1404 # Using the anchor above\n    us-west-2:\n      NAT: ami-290f4119\n      ubuntu1404west2: &ubuntu1404west2 ami-5189a661\n      bastion: *ubuntu1404west2\n\nvpc_parameters: &vpc_parameters\n  VpcId: vpc::VpcId # parameters with ::'s in them refer to <stack>::<Output>\n  DefaultSG: vpc::DefaultSG\n  PublicSubnets: vpc::PublicSubnets\n  PrivateSubnets: vpc::PrivateSubnets\n  AvailabilityZones: vpc::AvailabilityZones\n\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      InstanceType: m3.medium\n      SshKeyName: default\n      ImageName: NAT\n      # Only build 2 AZs, can be overridden with -p on the command line\n      # Note: If you want more than 4 AZs you should add more subnets below\n      #       Also you need at least 2 AZs in order to use the DB because\n      #       of the fact that the DB blueprint uses MultiAZ\n      AZCount: 2\n      # Enough subnets for 4 AZs\n      PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\n      PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\n      # Uncomment if you want an internal hosted zone for the VPC\n      # If provided, it will be added to the dns search path of the DHCP\n      # Options\n      #InternalDomain: internal\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Bastion\n    ## !! 
This should break, parameters not allowed in 1.0\n    parameters:\n      # Extends the parameters dict with the contents of the vpc_parameters\n      # anchor. Basically we're including all VPC Outputs in the parameters\n      # of the bastion stack. Note: Stacker figures out, automatically, which\n      # parameters the stack actually needs and only submits those to each\n      # stack. For example, most stacks are in the PrivateSubnets, but not\n      # the PublicSubnets, but stacker deals with it for you.\n      << : *vpc_parameters\n      InstanceType: m3.medium\n      OfficeNetwork: 203.0.113.0/24\n      MinSize: 2\n      MaxSize: 2\n      SshKeyName: default\n      ImageName: bastion\n"
  },
  {
    "path": "stacker/tests/fixtures/vpc-bastion-db-web.yaml",
    "content": "# Hooks require a path.\n# If the build should stop when a hook fails, set required to true.\n# pre_build happens before the build\n# post_build happens after the build\npre_build:\n  - path: stacker.hooks.route53.create_domain\n    required: true\n    enabled: true\n    # Additional args can be passed as a dict of key/value pairs\n    # args:\n    #   BaseDomain: foo\n# post_build:\n\nmappings:\n  AmiMap:\n    us-east-1:\n      NAT: ami-ad227cc4\n      ubuntu1404: &ubuntu1404 ami-74e27e1c # Setting an anchor\n      bastion: *ubuntu1404 # Using the anchor above\n    us-west-2:\n      NAT: ami-290f4119\n      ubuntu1404west2: &ubuntu1404west2 ami-5189a661\n      bastion: *ubuntu1404west2\n\nvpc_parameters: &vpc_parameters\n  VpcId: vpc::VpcId # parameters with ::'s in them refer to <stack>::<Output>\n  DefaultSG: vpc::DefaultSG\n  PublicSubnets: vpc::PublicSubnets\n  PrivateSubnets: vpc::PrivateSubnets\n  AvailabilityZones: vpc::AvailabilityZones\n\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      InstanceType: m3.medium\n      SshKeyName: default\n      ImageName: NAT\n      # Only build 2 AZs, can be overridden with -p on the command line\n      # Note: If you want more than 4 AZs you should add more subnets below\n      #       Also you need at least 2 AZs in order to use the DB because\n      #       of the fact that the DB blueprint uses MultiAZ\n      AZCount: 2\n      # Enough subnets for 4 AZs\n      PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\n      PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\n      # Uncomment if you want an internal hosted zone for the VPC\n      # If provided, it will be added to the dns search path of the DHCP\n      # Options\n      #InternalDomain: internal\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Bastion\n    variables:\n      # Extends the parameters dict with the 
contents of the vpc_parameters\n      # anchor. Basically we're including all VPC Outputs in the parameters\n      # of the bastion stack. Note: Stacker figures out, automatically, which\n      # parameters the stack actually needs and only submits those to each\n      # stack. For example, most stacks are in the PrivateSubnets, but not\n      # the PublicSubnets, but stacker deals with it for you.\n      << : *vpc_parameters\n      InstanceType: m3.medium\n      OfficeNetwork: 203.0.113.0/24\n      MinSize: 2\n      MaxSize: 2\n      SshKeyName: default\n      ImageName: bastion\n"
  },
  {
    "path": "stacker/tests/fixtures/vpc-custom-log-format-info.yaml",
    "content": "log_formats:\n  info: \"[%(asctime)s] ${environment} custom log format - %(message)s\"\n\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      InstanceType: m3.medium\n      SshKeyName: default\n      ImageName: NAT\n      # Only build 2 AZs, can be overridden with -p on the command line\n      # Note: If you want more than 4 AZs you should add more subnets below\n      #       Also you need at least 2 AZs in order to use the DB because\n      #       of the fact that the DB blueprint uses MultiAZ\n      AZCount: 2\n      # Enough subnets for 4 AZs\n      PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\n      PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\n"
  },
  {
    "path": "stacker/tests/hooks/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/hooks/test_aws_lambda.py",
    "content": "import os.path\nimport os\nimport mock\nimport random\nfrom io import BytesIO as StringIO\nfrom zipfile import ZipFile\n\nimport boto3\nimport botocore\nimport pytest\nfrom moto import mock_s3\nfrom troposphere.awslambda import Code\n\nfrom stacker.hooks.aws_lambda import (\n    ZIP_PERMS_MASK,\n    _calculate_hash,\n    select_bucket_region,\n    upload_lambda_functions,\n)\nfrom ..factories import mock_context, mock_provider\n\n\nREGION = \"us-east-1\"\n\n\n@pytest.fixture\ndef all_files(tmpdir):\n    files = (\n        'f1/f1.py',\n        'f1/f1.pyc',\n        'f1/__init__.py',\n        'f1/test/__init__.py',\n        'f1/test/f1.py',\n        'f1/test/f1.pyc',\n        'f1/test2/test.txt',\n        'f2/f2.js'\n    )\n\n    def create():\n        for file in files:\n            f = tmpdir.join(file)\n            f.write(b'', ensure=True)\n            yield f\n\n    return list(create())\n\n\n@pytest.fixture\ndef f1_files(tmpdir, all_files):\n    return [p for p in all_files if p.relto(tmpdir).startswith('f1')]\n\n\n@pytest.fixture\ndef f2_files(tmpdir, all_files):\n    return [p for p in all_files if p.relto(tmpdir).startswith('f2')]\n\n\n@pytest.fixture(scope='package')\ndef prebuilt_zip(stacker_fixture_dir):\n    path = stacker_fixture_dir.join('test.zip')\n    content = path.read_binary()\n    md5 = 'c6fb602d9bde5a522856adabe9949f63'\n    return dict(path=path, md5=md5, contents=content)\n\n\n@pytest.fixture(autouse=True)\ndef s3():\n    with mock_s3():\n        yield boto3.client('s3', region_name=REGION)\n\n\ndef assert_s3_zip_file_list(s3, bucket, key, files, root=None):\n    object_info = s3.get_object(Bucket=bucket, Key=key)\n    zip_data = StringIO(object_info['Body'].read())\n\n    expected_files = set()\n    for f in files:\n        rel_path = os.path.relpath(str(f), str(root)) if root else str(f)\n        expected_files.add(rel_path)\n\n    found_files = set()\n    with ZipFile(zip_data, 'r') as zip_file:\n        for zip_info in 
zip_file.infolist():\n            perms = (zip_info.external_attr & ZIP_PERMS_MASK) >> 16\n            assert perms in (0o755, 0o644)\n            found_files.add(zip_info.filename)\n\n    assert found_files == set(expected_files)\n\n\ndef assert_s3_zip_contents(s3, bucket, key, contents):\n    object_info = s3.get_object(Bucket=bucket, Key=key)\n    zip_data = object_info['Body'].read()\n\n    assert zip_data == contents\n\n\ndef assert_s3_bucket(s3, bucket, present=True):\n    try:\n        s3.head_bucket(Bucket=bucket)\n    except botocore.exceptions.ClientError as e:\n        if e.response['Error']['Code'] == '404':\n            if present:\n                pytest.fail('s3: bucket {} does not exist'.format(bucket))\n        else:\n            raise\n    else:\n        if not present:\n            pytest.fail('s3: bucket {} should not exist'.format(bucket))\n\n\n@pytest.fixture\ndef context():\n    return mock_context()\n\n\n@pytest.fixture\ndef provider():\n    return mock_provider(region=REGION)\n\n\n@pytest.fixture\ndef run_hook(context, provider):\n    def run(**kwargs):\n        return upload_lambda_functions(context=context, provider=provider,\n                                       **kwargs)\n\n    return run\n\n\ndef test_bucket_default(s3, context, run_hook):\n    result = run_hook(functions={})\n    assert result is not None\n\n    assert_s3_bucket(s3, context.bucket_name, present=True)\n\n\ndef test_bucket_custom(s3, context, run_hook):\n    result = run_hook(bucket='custom', functions={})\n    assert result is not None\n\n    assert_s3_bucket(s3, context.bucket_name, present=False)\n    assert_s3_bucket(s3, 'custom', present=True)\n\n\ndef test_prefix(tmpdir, s3, all_files, f1_files, run_hook):\n    root = tmpdir.join('f1')\n    results = run_hook(\n        prefix='cloudformation-custom-resources/',\n        functions={\n            'MyFunction': {\n                'path': str(root)\n            }\n        })\n    assert results is not None\n\n    
code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, f1_files, root=root)\n    assert code.S3Key.startswith(\n        'cloudformation-custom-resources/lambda-MyFunction-')\n\n\ndef test_prefix_missing(tmpdir, s3, all_files, f1_files, run_hook):\n    root = tmpdir.join('f1')\n    results = run_hook(\n        functions={\n            'MyFunction': {\n                'path': str(root)\n            }\n        }\n    )\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, f1_files,\n                            root=root)\n    assert code.S3Key.startswith('lambda-MyFunction-')\n\n\ndef test_path_missing(run_hook):\n    msg = \"missing required property 'path' in function 'MyFunction'\"\n    with pytest.raises(ValueError, match=msg):\n        run_hook(\n            functions={\n                'MyFunction': {\n                }\n            }\n        )\n\n\ndef test_path_non_zip_non_dir(tmpdir, all_files, run_hook):\n    root = tmpdir\n    msg = 'Path must be an existing ZIP file or directory'\n    with pytest.raises(ValueError, match=msg):\n        run_hook(\n            functions={\n                'MyFunction': {\n                    'path': str(root.join('test.txt'))\n                }\n            }\n        )\n\n\ndef test_path_relative(tmpdir, s3, run_hook):\n    root = tmpdir\n    root.join('test/test.py').write(b'', ensure=True)\n\n    get_config_directory = 'stacker.hooks.aws_lambda.get_config_directory'\n    with mock.patch(get_config_directory, return_value=str(root)):\n        results = run_hook(\n            functions={\n                'MyFunction': {\n                    'path': 'test'\n                }\n            }\n        )\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, 
code.S3Bucket, code.S3Key, ['test.py'])\n\n\ndef test_path_home_relative(tmpdir, s3, run_hook):\n    root = tmpdir\n    test_path = '~/test'\n\n    orig_expanduser = os.path.expanduser\n    tmpdir.join('test.py').write(b'')\n\n    def expanduser(path):\n        return str(root) if path == test_path else orig_expanduser(path)\n\n    with mock.patch('os.path.expanduser', side_effect=expanduser):\n        results = run_hook(\n            functions={\n                'MyFunction': {\n                    'path': test_path\n                }\n            }\n        )\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, ['test.py'])\n\n\ndef test_multiple_functions(tmpdir, s3, all_files, f1_files, f2_files,\n                            run_hook):\n    root1 = tmpdir.join('f1')\n    root2 = tmpdir.join('f2')\n    results = run_hook(\n        functions={\n            'MyFunction': {\n                'path': str(root1)\n            },\n            'OtherFunction': {\n                'path': str(root2)\n            }\n        }\n    )\n\n    assert results is not None\n\n    f1_code = results.get('MyFunction')\n    assert isinstance(f1_code, Code)\n    assert_s3_zip_file_list(s3, f1_code.S3Bucket, f1_code.S3Key, f1_files,\n                            root=root1)\n\n    f2_code = results.get('OtherFunction')\n    assert isinstance(f2_code, Code)\n    assert_s3_zip_file_list(s3, f2_code.S3Bucket, f2_code.S3Key, f2_files,\n                            root=root2)\n\n\ndef test_patterns_invalid(tmpdir, run_hook):\n    root = tmpdir\n\n    msg = (\"Invalid file patterns in key 'include': must be a string or \"\n           'list of strings')\n    with pytest.raises(ValueError, match=msg):\n        run_hook(\n            functions={\n                'MyFunction': {\n                    'path': str(root),\n                    'include': {'invalid': 'invalid'}\n           
     }\n            }\n        )\n\n\ndef test_patterns_include(tmpdir, s3, all_files, run_hook):\n    root = tmpdir.join('f1')\n    results = run_hook(\n        functions={\n            'MyFunction': {\n                'path': str(root),\n                'include': ['*.py', 'test2/']\n            }\n        }\n    )\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [\n        'f1.py',\n        '__init__.py',\n        'test/__init__.py',\n        'test/f1.py',\n        'test2/test.txt'\n    ])\n\n\ndef test_patterns_exclude(tmpdir, s3, all_files, run_hook):\n    root = tmpdir.join('f1')\n    results = run_hook(\n        functions={\n            'MyFunction': {\n                'path': str(root),\n                'exclude': ['*.pyc', 'test/']\n            }\n        }\n    )\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [\n        'f1.py',\n        '__init__.py',\n        'test2/test.txt'\n    ])\n\n\n@mock_s3\ndef test_patterns_include_exclude(tmpdir, s3, all_files, run_hook):\n    root = tmpdir.join('f1')\n    results = run_hook(functions={\n        'MyFunction': {\n            'path': str(root),\n            'include': '*.py',\n            'exclude': 'test/'\n        }\n    })\n\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [\n        'f1.py',\n        '__init__.py'\n    ])\n\n\ndef test_patterns_exclude_all(tmpdir, all_files, run_hook):\n    root = tmpdir.join('f1')\n\n    msg = ('Empty list of files for Lambda payload. 
Check your '\n           'include/exclude options for errors.')\n    with pytest.raises(RuntimeError, match=msg):\n        run_hook(\n            functions={\n                'MyFunction': {\n                    'path': str(root),\n                    'exclude': ['**']\n                }\n            }\n        )\n\n\ndef test_idempotence(tmpdir, s3, all_files, run_hook):\n    root = tmpdir.join('f1')\n\n    bucket_name = 'test'\n    functions = {\n        'MyFunction': {\n            'path': str(root)\n        }\n    }\n\n    s3.create_bucket(Bucket=bucket_name)\n\n    previous = None\n    for i in range(2):\n        results = run_hook(bucket=bucket_name, functions=functions)\n        assert results is not None\n\n        code = results.get('MyFunction')\n        assert isinstance(code, Code)\n\n        if not previous:\n            previous = code.S3Key\n            continue\n\n        assert previous == code.S3Key\n\n\ndef test_calculate_hash(tmpdir, all_files, f1_files, f2_files):\n    root = tmpdir\n\n    all_hash_1 = _calculate_hash(map(str, all_files), str(root))\n    all_hash_2 = _calculate_hash(map(str, all_files), str(root))\n    f1_hash = _calculate_hash(map(str, f1_files), str(root))\n    f2_hash = _calculate_hash(map(str, f2_files), str(root))\n\n    assert all_hash_1 == all_hash_2\n    assert f1_hash != all_hash_1\n    assert f2_hash != all_hash_1\n    assert f1_hash != f2_hash\n\n\ndef test_calculate_hash_diff_filename_same_contents(tmpdir, all_files):\n    root = tmpdir\n\n    files = all_files[:2]\n    tmpdir.join(files[0]).write('data', ensure=True)\n    tmpdir.join(files[1]).write('data', ensure=True)\n\n    hash1 = _calculate_hash([str(files[0])], str(root))\n    hash2 = _calculate_hash([str(files[1])], str(root))\n\n    assert hash1 != hash2\n\n\ndef test_calculate_hash_different_ordering(tmpdir, all_files):\n    root = tmpdir\n\n    all_files_diff_order = random.sample(all_files, k=len(all_files))\n    hash1 = _calculate_hash(map(str, 
all_files), str(root))\n    hash2 = _calculate_hash(map(str, all_files_diff_order), str(root))\n    assert hash1 == hash2\n\n\n@pytest.mark.parametrize(\n    'case',\n    [\n        dict(\n            custom_bucket=\"myBucket\",\n            hook_region=\"us-east-1\",\n            stacker_bucket_region=\"us-west-1\",\n            provider_region=\"eu-west-1\",\n            result=\"us-east-1\"\n        ),\n        dict(\n            custom_bucket=\"myBucket\",\n            hook_region=None,\n            stacker_bucket_region=\"us-west-1\",\n            provider_region=\"eu-west-1\",\n            result=\"eu-west-1\"),\n        dict(\n            custom_bucket=None,\n            hook_region=\"us-east-1\",\n            stacker_bucket_region=\"us-west-1\",\n            provider_region=\"eu-west-1\",\n            result=\"us-west-1\"),\n        dict(\n            custom_bucket=None,\n            hook_region=\"us-east-1\",\n            stacker_bucket_region=None,\n            provider_region=\"eu-west-1\",\n            result=\"eu-west-1\")\n    ]\n)\ndef test_select_bucket_region(case):\n    result = case.pop('result')\n    assert select_bucket_region(**case) == result\n\n\ndef test_follow_symlink_nonbool(run_hook):\n    msg = \"follow_symlinks option must be a boolean\"\n    with pytest.raises(ValueError, match=msg):\n        run_hook(\n            follow_symlinks=\"raiseValueError\",\n            functions={\n                'MyFunction': {\n                }\n            }\n        )\n\n\n@pytest.fixture\ndef linked_dir(tmpdir):\n    linked_dir = tmpdir.join('linked')\n    linked_dir.mksymlinkto(tmpdir.join('f1'))\n    return linked_dir\n\n\ndef test_follow_symlink_true(tmpdir, s3, all_files, f1_files, run_hook,\n                             linked_dir):\n    root = tmpdir\n    results = run_hook(\n        follow_symlinks=True,\n        functions={\n            'MyFunction': {\n                'path': str(root)\n            }\n        }\n    )\n    assert results is 
not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n\n    linked_files = [p for p in linked_dir.visit() if p.check(file=1)]\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key,\n                            all_files + linked_files, root=tmpdir)\n\n\ndef test_follow_symlink_false(tmpdir, s3, all_files, run_hook, linked_dir):\n    root = tmpdir\n    results = run_hook(\n        follow_symlinks=False,\n        functions={\n            'MyFunction': {\n                'path': str(root)\n            }\n        }\n    )\n    assert results is not None\n\n    code = results.get('MyFunction')\n    assert isinstance(code, Code)\n    assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, all_files,\n                            root=tmpdir)\n"
  },
  {
    "path": "stacker/tests/hooks/test_command.py",
    "content": "\nimport os\nimport unittest\nfrom subprocess import PIPE\n\nimport mock\n\nfrom stacker.context import Context\nfrom stacker.config import Config\nfrom stacker.hooks.command import run_command\n\nfrom ..factories import mock_provider\n\n\nclass MockProcess(object):\n    def __init__(self, returncode=0, stdout='', stderr=''):\n        self.returncode = returncode\n        self.stdout = stdout\n        self.stderr = stderr\n        self.stdin = None\n\n    def communicate(self, stdin):\n        self.stdin = stdin\n        return (self.stdout, self.stderr)\n\n    def wait(self):\n        return self.returncode\n\n    def kill(self):\n        return\n\n\nclass TestCommandHook(unittest.TestCase):\n    def setUp(self):\n        self.context = Context(\n            config=Config({'namespace': 'test', 'stacker_bucket': 'test'}))\n        self.provider = mock_provider(region=\"us-east-1\")\n\n        self.mock_process = MockProcess()\n        self.popen_mock = \\\n            mock.patch('stacker.hooks.command.Popen',\n                       return_value=self.mock_process).start()\n\n        self.devnull = mock.Mock()\n        self.devnull_mock = \\\n            mock.patch('stacker.hooks.command._devnull',\n                       return_value=self.devnull).start()\n\n    def tearDown(self):\n        self.devnull_mock.stop()\n        self.popen_mock.stop()\n\n    def run_hook(self, **kwargs):\n        real_kwargs = {\n            'context': self.context,\n            'provider': self.provider,\n        }\n        real_kwargs.update(kwargs)\n\n        return run_command(**real_kwargs)\n\n    def test_command_ok(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'])\n\n        self.assertEqual(\n            results, {'returncode': 0, 'stdout': None, 'stderr': None})\n        self.popen_mock.assert_called_once_with(\n            
['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None)\n\n    def test_command_fail(self):\n        self.mock_process.returncode = 1\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'])\n\n        self.assertEqual(results, None)\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None)\n\n    def test_command_ignore_status(self):\n        self.mock_process.returncode = 1\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'], ignore_status=True)\n\n        self.assertEqual(\n            results, {'returncode': 1, 'stdout': None, 'stderr': None})\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None)\n\n    def test_command_quiet(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'], quiet=True)\n        self.assertEqual(\n            results, {'returncode': 0, 'stdout': None, 'stderr': None})\n\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=self.devnull, stdout=self.devnull,\n            stderr=self.devnull, env=None)\n\n    def test_command_interactive(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'], interactive=True)\n        self.assertEqual(\n            results, {'returncode': 0, 'stdout': None, 'stderr': None})\n\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=None, stdout=None, stderr=None, env=None)\n\n    def test_command_input(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = None\n        
self.mock_process.stderr = None\n\n        results = self.run_hook(command=['foo'], stdin='hello world')\n        self.assertEqual(\n            results, {'returncode': 0, 'stdout': None, 'stderr': None})\n\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=PIPE, stdout=None, stderr=None, env=None)\n        self.assertEqual(self.mock_process.stdin, 'hello world')\n\n    def test_command_capture(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = 'hello'\n        self.mock_process.stderr = 'world'\n\n        results = self.run_hook(command=['foo'], capture=True)\n        self.assertEqual(\n            results, {'returncode': 0, 'stdout': 'hello', 'stderr': 'world'})\n\n        self.popen_mock.assert_called_once_with(\n            ['foo'], stdin=self.devnull, stdout=PIPE, stderr=PIPE, env=None)\n\n    def test_command_env(self):\n        self.mock_process.returncode = 0\n        self.mock_process.stdout = None\n        self.mock_process.stderr = None\n\n        with mock.patch.dict(os.environ, {'foo': 'bar'}, clear=True):\n            results = self.run_hook(command=['foo'], env={'hello': 'world'})\n\n            self.assertEqual(results, {'returncode': 0,\n                                       'stdout': None,\n                                       'stderr': None})\n            self.popen_mock.assert_called_once_with(\n                ['foo'], stdin=self.devnull, stdout=None, stderr=None,\n                env={'hello': 'world', 'foo': 'bar'})\n"
  },
  {
    "path": "stacker/tests/hooks/test_ecs.py",
    "content": "import unittest\n\nimport boto3\nfrom moto import mock_ecs\nfrom testfixtures import LogCapture\n\nfrom stacker.hooks.ecs import create_clusters\nfrom ..factories import (\n    mock_context,\n    mock_provider,\n)\n\nREGION = \"us-east-1\"\n\n\nclass TestECSHooks(unittest.TestCase):\n\n    def setUp(self):\n        self.provider = mock_provider(region=REGION)\n        self.context = mock_context(namespace=\"fake\")\n\n    def test_create_single_cluster(self):\n        with mock_ecs():\n            cluster = \"test-cluster\"\n            logger = \"stacker.hooks.ecs\"\n            client = boto3.client(\"ecs\", region_name=REGION)\n            response = client.list_clusters()\n\n            self.assertEqual(len(response[\"clusterArns\"]), 0)\n            with LogCapture(logger) as logs:\n                self.assertTrue(\n                    create_clusters(\n                        provider=self.provider,\n                        context=self.context,\n                        clusters=cluster,\n                    )\n                )\n\n                logs.check(\n                    (\n                        logger,\n                        \"DEBUG\",\n                        \"Creating ECS cluster: %s\" % cluster\n                    )\n                )\n\n            response = client.list_clusters()\n            self.assertEqual(len(response[\"clusterArns\"]), 1)\n\n    def test_create_multiple_clusters(self):\n        with mock_ecs():\n            clusters = (\"test-cluster0\", \"test-cluster1\")\n            logger = \"stacker.hooks.ecs\"\n            client = boto3.client(\"ecs\", region_name=REGION)\n            response = client.list_clusters()\n\n            self.assertEqual(len(response[\"clusterArns\"]), 0)\n            for cluster in clusters:\n                with LogCapture(logger) as logs:\n                    self.assertTrue(\n                        create_clusters(\n                            provider=self.provider,\n         
                   context=self.context,\n                            clusters=cluster,\n                        )\n                    )\n\n                    logs.check(\n                        (\n                            logger,\n                            \"DEBUG\",\n                            \"Creating ECS cluster: %s\" % cluster\n                        )\n                    )\n\n            response = client.list_clusters()\n            self.assertEqual(len(response[\"clusterArns\"]), 2)\n\n    def test_fail_create_cluster(self):\n        with mock_ecs():\n            logger = \"stacker.hooks.ecs\"\n            client = boto3.client(\"ecs\", region_name=REGION)\n            response = client.list_clusters()\n\n            self.assertEqual(len(response[\"clusterArns\"]), 0)\n            with LogCapture(logger) as logs:\n                create_clusters(\n                    provider=self.provider,\n                    context=self.context\n                )\n\n                logs.check(\n                    (\n                        logger,\n                        \"ERROR\",\n                        \"setup_clusters hook missing \\\"clusters\\\" argument\"\n                    )\n                )\n\n            response = client.list_clusters()\n            self.assertEqual(len(response[\"clusterArns\"]), 0)\n"
  },
  {
    "path": "stacker/tests/hooks/test_iam.py",
    "content": "import unittest\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom moto import mock_iam\n\nfrom stacker.hooks.iam import (\n    create_ecs_service_role,\n    _get_cert_arn_from_response,\n)\n\nfrom awacs.helpers.trust import get_ecs_assumerole_policy\n\nfrom ..factories import (\n    mock_context,\n    mock_provider,\n)\n\n\nREGION = \"us-east-1\"\n\n# No test for stacker.hooks.iam.ensure_server_cert_exists until\n# updated version of moto is imported\n# (https://github.com/spulec/moto/pull/679) merged\n\n\nclass TestIAMHooks(unittest.TestCase):\n\n    def setUp(self):\n        self.context = mock_context(namespace=\"fake\")\n        self.provider = mock_provider(region=REGION)\n\n    def test_get_cert_arn_from_response(self):\n        arn = \"fake-arn\"\n        # Creation response\n        response = {\n            \"ServerCertificateMetadata\": {\n                \"Arn\": arn\n            }\n        }\n\n        self.assertEqual(_get_cert_arn_from_response(response), arn)\n\n        # Existing cert response\n        response = {\"ServerCertificate\": response}\n        self.assertEqual(_get_cert_arn_from_response(response), arn)\n\n    def test_create_service_role(self):\n        role_name = \"ecsServiceRole\"\n        policy_name = \"AmazonEC2ContainerServiceRolePolicy\"\n        with mock_iam():\n            client = boto3.client(\"iam\", region_name=REGION)\n\n            with self.assertRaises(ClientError):\n                client.get_role(RoleName=role_name)\n\n            self.assertTrue(\n                create_ecs_service_role(\n                    context=self.context,\n                    provider=self.provider,\n                )\n            )\n\n            role = client.get_role(RoleName=role_name)\n\n            self.assertIn(\"Role\", role)\n            self.assertEqual(role_name, role[\"Role\"][\"RoleName\"])\n            client.get_role_policy(\n                RoleName=role_name,\n                
PolicyName=policy_name\n            )\n\n    def test_create_service_role_already_exists(self):\n        role_name = \"ecsServiceRole\"\n        policy_name = \"AmazonEC2ContainerServiceRolePolicy\"\n        with mock_iam():\n            client = boto3.client(\"iam\", region_name=REGION)\n            client.create_role(\n                RoleName=role_name,\n                AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()\n            )\n\n            self.assertTrue(\n                create_ecs_service_role(\n                    context=self.context,\n                    provider=self.provider,\n                )\n            )\n\n            role = client.get_role(RoleName=role_name)\n\n            self.assertIn(\"Role\", role)\n            self.assertEqual(role_name, role[\"Role\"][\"RoleName\"])\n            client.get_role_policy(\n                RoleName=role_name,\n                PolicyName=policy_name\n            )\n"
  },
  {
    "path": "stacker/tests/hooks/test_keypair.py",
    "content": "import sys\nfrom collections import namedtuple\nfrom contextlib import contextmanager\n\nimport mock\nimport pytest\n\nimport boto3\nfrom moto import mock_ec2, mock_ssm\n\nfrom stacker.hooks.keypair import ensure_keypair_exists\nfrom ..factories import mock_context, mock_provider\n\n\nREGION = \"us-east-1\"\nKEY_PAIR_NAME = \"FakeKey\"\n\nSSHKey = namedtuple('SSHKey', 'public_key private_key fingerprint')\n\n\n@pytest.fixture(scope=\"module\")\ndef ssh_key(stacker_fixture_dir):\n    base = stacker_fixture_dir.join('keypair')\n    return SSHKey(\n        private_key=base.join('id_rsa').read_binary(),\n        public_key=base.join('id_rsa.pub').read_binary(),\n        fingerprint=base.join('fingerprint').read_text('ascii').strip())\n\n\n@pytest.fixture\ndef provider():\n    return mock_provider(region=REGION)\n\n\n@pytest.fixture\ndef context():\n    return mock_context(namespace=\"fake\")\n\n\n@pytest.fixture(autouse=True)\ndef ec2(ssh_key):\n    # Force moto to generate a deterministic key pair on creation.\n    # Can be replaced by something more sensible when\n    # https://github.com/spulec/moto/pull/2108 is merged\n\n    key_pair = {'fingerprint': ssh_key.fingerprint,\n                'material': ssh_key.private_key.decode('ascii')}\n    with mock.patch('moto.ec2.models.random_key_pair', side_effect=[key_pair]):\n        with mock_ec2():\n            yield\n\n\n@pytest.fixture(autouse=True)\ndef ssm():\n    with mock_ssm():\n        yield\n\n\n@contextmanager\ndef mock_input(lines=(), isatty=True):\n    with mock.patch('stacker.hooks.keypair.get_raw_input',\n                    side_effect=lines) as m:\n        with mock.patch.object(sys.stdin, 'isatty', return_value=isatty):\n            yield m\n\n\ndef assert_key_present(hook_result, key_name, fingerprint):\n    assert hook_result['key_name'] == key_name\n    assert hook_result['fingerprint'] == fingerprint\n\n    ec2 = boto3.client('ec2')\n    response = 
ec2.describe_key_pairs(KeyNames=[key_name], DryRun=False)\n    key_pairs = response['KeyPairs']\n\n    assert len(key_pairs) == 1\n    assert key_pairs[0]['KeyName'] == key_name\n    assert key_pairs[0]['KeyFingerprint'] == fingerprint\n\n\ndef test_param_validation(provider, context):\n    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME,\n                                   ssm_parameter_name='test',\n                                   public_key_path='test')\n    assert result is False\n\n\ndef test_keypair_exists(provider, context):\n    ec2 = boto3.client('ec2')\n    keypair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n\n    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME)\n    expected = dict(\n        status='exists',\n        key_name=KEY_PAIR_NAME,\n        fingerprint=keypair['KeyFingerprint'])\n    assert result == expected\n\n\ndef test_import_file(tmpdir, provider, context, ssh_key):\n    pkey = tmpdir.join(\"id_rsa.pub\")\n    pkey.write(ssh_key.public_key)\n\n    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME,\n                                   public_key_path=str(pkey))\n    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)\n    assert result['status'] == 'imported'\n\n\ndef test_import_bad_key_data(tmpdir, provider, context):\n    pkey = tmpdir.join(\"id_rsa.pub\")\n    pkey.write('garbage')\n\n    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME,\n                                   public_key_path=str(pkey))\n    assert result is False\n\n\n@pytest.mark.parametrize('ssm_key_id', (None, 'my-key'))\ndef test_create_in_ssm(provider, context, ssh_key, ssm_key_id):\n    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME,\n                                   ssm_parameter_name='param',\n                                   ssm_key_id=ssm_key_id)\n\n    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)\n    assert 
result['status'] == 'created'\n\n    ssm = boto3.client('ssm')\n    param = ssm.get_parameter(Name='param', WithDecryption=True)['Parameter']\n    assert param['Value'] == ssh_key.private_key.decode('ascii')\n    assert param['Type'] == 'SecureString'\n\n    params = ssm.describe_parameters()['Parameters']\n    param_details = next(p for p in params if p['Name'] == 'param')\n    assert param_details['Description'] == \\\n        'SSH private key for KeyPair \"{}\" (generated by Stacker)'.format(\n            KEY_PAIR_NAME)\n    # The default ssm key id\n    ssm_key_id = ssm_key_id or \"alias/aws/ssm\"\n    assert param_details.get('KeyId') == ssm_key_id\n\n\ndef test_interactive_non_terminal_input(capsys, provider, context):\n    with mock_input(isatty=False) as input:\n        result = ensure_keypair_exists(provider, context,\n                                       keypair=KEY_PAIR_NAME)\n        input.assert_not_called()\n    assert result is False\n\n    output = capsys.readouterr()\n    assert len(output.out) == 0\n    assert len(output.err) == 0\n\n\ndef test_interactive_retry_cancel(provider, context):\n    lines = ['garbage', 'cancel']\n    with mock_input(lines) as input:\n        result = ensure_keypair_exists(\n            provider, context, keypair=KEY_PAIR_NAME)\n        assert input.call_count == 2\n\n    assert result is False\n\n\ndef test_interactive_import(tmpdir, provider, context, ssh_key):\n    key_file = tmpdir.join(\"id_rsa.pub\")\n    key_file.write(ssh_key.public_key)\n\n    lines = ['import', str(key_file)]\n    with mock_input(lines):\n        result = ensure_keypair_exists(\n            provider, context, keypair=KEY_PAIR_NAME)\n\n    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)\n    assert result['status'] == 'imported'\n\n\ndef test_interactive_create(tmpdir, provider, context, ssh_key):\n    key_dir = tmpdir.join('keys')\n    key_dir.ensure_dir()\n    key_file = key_dir.join('{}.pem'.format(KEY_PAIR_NAME))\n\n    
lines = ['create', str(key_dir)]\n    with mock_input(lines):\n        result = ensure_keypair_exists(\n            provider, context, keypair=KEY_PAIR_NAME)\n\n    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)\n    assert result['status'] == 'created'\n    assert key_file.samefile(result['file_path'])\n    assert key_file.read_binary() == ssh_key.private_key\n\n\ndef test_interactive_create_bad_dir(tmpdir, provider, context):\n    key_dir = tmpdir.join('missing')\n\n    lines = ['create', str(key_dir)]\n    with mock_input(lines):\n        result = ensure_keypair_exists(\n            provider, context, keypair=KEY_PAIR_NAME)\n\n    assert result is False\n\n\ndef test_interactive_create_existing_file(tmpdir, provider, context):\n    key_dir = tmpdir.join('keys')\n    key_dir.ensure_dir()\n    key_file = key_dir.join('{}.pem'.format(KEY_PAIR_NAME))\n    key_file.ensure()\n\n    lines = ['create', str(key_dir)]\n    with mock_input(lines):\n        result = ensure_keypair_exists(\n            provider, context, keypair=KEY_PAIR_NAME)\n\n    assert result is False\n"
  },
  {
    "path": "stacker/tests/lookups/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/lookups/handlers/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/lookups/handlers/test_ami.py",
    "content": "import unittest\nimport mock\nfrom botocore.stub import Stubber\nfrom stacker.lookups.handlers.ami import AmiLookup, ImageNotFound\nimport boto3\nfrom stacker.tests.factories import SessionStub, mock_provider\n\nREGION = \"us-east-1\"\n\n\nclass TestAMILookup(unittest.TestCase):\n    client = boto3.client(\"ec2\", region_name=REGION)\n\n    def setUp(self):\n        self.stubber = Stubber(self.client)\n        self.provider = mock_provider(region=REGION)\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_single_image(self, mock_client):\n        image_id = \"ami-fffccc111\"\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": [\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-13T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": image_id,\n                        \"Name\": \"Fake Image 1\",\n                        \"VirtualizationType\": \"hvm\",\n                    }\n                ]\n            }\n        )\n\n        with self.stubber:\n            value = AmiLookup.handle(\n                value=\"owners:self name_regex:Fake\\sImage\\s\\d\",\n                provider=self.provider\n            )\n            self.assertEqual(value, image_id)\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_with_region(self, mock_client):\n        image_id = \"ami-fffccc111\"\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": [\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n     
                   \"CreationDate\": \"2011-02-13T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": image_id,\n                        \"Name\": \"Fake Image 1\",\n                        \"VirtualizationType\": \"hvm\",\n                    }\n                ]\n            }\n        )\n\n        with self.stubber:\n            value = AmiLookup.handle(\n                value=\"us-west-1@owners:self name_regex:Fake\\sImage\\s\\d\",\n                provider=self.provider\n            )\n            self.assertEqual(value, image_id)\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_multiple_images(self, mock_client):\n        image_id = \"ami-fffccc111\"\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": [\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-13T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": \"ami-fffccc110\",\n                        \"Name\": \"Fake Image 1\",\n                        \"VirtualizationType\": \"hvm\",\n                    },\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-14T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": image_id,\n                        \"Name\": \"Fake Image 2\",\n                        \"VirtualizationType\": \"hvm\",\n                    },\n                ]\n            }\n        )\n\n        with self.stubber:\n            value = AmiLookup.handle(\n                value=\"owners:self 
name_regex:Fake\\sImage\\s\\d\",\n                provider=self.provider\n            )\n            self.assertEqual(value, image_id)\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_multiple_images_name_match(self, mock_client):\n        image_id = \"ami-fffccc111\"\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": [\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-13T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": \"ami-fffccc110\",\n                        \"Name\": \"Fa---ke Image 1\",\n                        \"VirtualizationType\": \"hvm\",\n                    },\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-14T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": image_id,\n                        \"Name\": \"Fake Image 2\",\n                        \"VirtualizationType\": \"hvm\",\n                    },\n                ]\n            }\n        )\n\n        with self.stubber:\n            value = AmiLookup.handle(\n                value=\"owners:self name_regex:Fake\\sImage\\s\\d\",\n                provider=self.provider\n            )\n            self.assertEqual(value, image_id)\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_no_matching_images(self, mock_client):\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": []\n            }\n        )\n\n        with 
self.stubber:\n            with self.assertRaises(ImageNotFound):\n                AmiLookup.handle(\n                    value=\"owners:self name_regex:Fake\\sImage\\s\\d\",\n                    provider=self.provider\n                )\n\n    @mock.patch(\"stacker.lookups.handlers.ami.get_session\",\n                return_value=SessionStub(client))\n    def test_basic_lookup_no_matching_images_from_name(self, mock_client):\n        image_id = \"ami-fffccc111\"\n        self.stubber.add_response(\n            \"describe_images\",\n            {\n                \"Images\": [\n                    {\n                        \"OwnerId\": \"897883143566\",\n                        \"Architecture\": \"x86_64\",\n                        \"CreationDate\": \"2011-02-13T01:17:44.000Z\",\n                        \"State\": \"available\",\n                        \"ImageId\": image_id,\n                        \"Name\": \"Fake Image 1\",\n                        \"VirtualizationType\": \"hvm\",\n                    }\n                ]\n            }\n        )\n\n        with self.stubber:\n            with self.assertRaises(ImageNotFound):\n                AmiLookup.handle(\n                    value=\"owners:self name_regex:MyImage\\s\\d\",\n                    provider=self.provider\n                )\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_default.py",
    "content": "from mock import MagicMock\nimport unittest\n\nfrom stacker.context import Context\nfrom stacker.lookups.handlers.default import DefaultLookup\n\n\nclass TestDefaultLookup(unittest.TestCase):\n\n    def setUp(self):\n        self.provider = MagicMock()\n        self.context = Context(\n            environment={\n                'namespace': 'test',\n                'env_var': 'val_in_env'}\n        )\n\n    def test_env_var_present(self):\n        lookup_val = \"env_var::fallback\"\n        value = DefaultLookup.handle(lookup_val,\n                                     provider=self.provider,\n                                     context=self.context)\n        assert value == 'val_in_env'\n\n    def test_env_var_missing(self):\n        lookup_val = \"bad_env_var::fallback\"\n        value = DefaultLookup.handle(lookup_val,\n                                     provider=self.provider,\n                                     context=self.context)\n        assert value == 'fallback'\n\n    def test_invalid_value(self):\n        value = \"env_var:fallback\"\n        with self.assertRaises(ValueError):\n            DefaultLookup.handle(value)\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_dynamodb.py",
    "content": "import unittest\nimport mock\nfrom botocore.stub import Stubber\nfrom stacker.lookups.handlers.dynamodb import DynamodbLookup\nimport boto3\nfrom stacker.tests.factories import SessionStub\n\n\nclass TestDynamoDBHandler(unittest.TestCase):\n    client = boto3.client('dynamodb', region_name='us-east-1')\n\n    def setUp(self):\n        self.stubber = Stubber(self.client)\n        self.get_parameters_response = {'Item': {'TestMap': {'M': {\n            'String1': {'S': 'StringVal1'},\n            'List1': {'L': [\n                {'S': 'ListVal1'},\n                {'S': 'ListVal2'}]},\n            'Number1': {'N': '12345'}, }}}}\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'TestTable',\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,String1'\n        }\n        base_lookup_key = 'TestTable@TestKey:TestVal.TestMap[M].String1'\n        base_lookup_key_valid = 'StringVal1'\n        self.stubber.add_response('get_item',\n                                  self.get_parameters_response,\n                                  expected_params)\n        with self.stubber:\n            value = DynamodbLookup.handle(base_lookup_key)\n            self.assertEqual(value, base_lookup_key_valid)\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_number_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'TestTable',\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,Number1'\n        }\n        base_lookup_key = 'TestTable@TestKey:TestVal.' 
\\\n            'TestMap[M].Number1[N]'\n        base_lookup_key_valid = 12345\n        self.stubber.add_response('get_item',\n                                  self.get_parameters_response,\n                                  expected_params)\n        with self.stubber:\n            value = DynamodbLookup.handle(base_lookup_key)\n            self.assertEqual(value, base_lookup_key_valid)\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_list_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'TestTable',\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,List1'\n        }\n        base_lookup_key = 'TestTable@TestKey:TestVal.' \\\n            'TestMap[M].List1[L]'\n        base_lookup_key_valid = ['ListVal1', 'ListVal2']\n        self.stubber.add_response('get_item',\n                                  self.get_parameters_response,\n                                  expected_params)\n        with self.stubber:\n            value = DynamodbLookup.handle(base_lookup_key)\n            self.assertEqual(value, base_lookup_key_valid)\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_empty_table_handler(self, mock_client):\n        expected_params = {\n            'TableName': '',\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,String1'\n        }\n        base_lookup_key = '@TestKey:TestVal.TestMap[M].String1'\n        self.stubber.add_response('get_item',\n                                  self.get_parameters_response,\n                                  expected_params)\n        with self.stubber:\n            try:\n                DynamodbLookup.handle(base_lookup_key)\n            
except ValueError as e:\n                self.assertEqual(\n                    'Please make sure to include a dynamodb table name',\n                    str(e))\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_missing_table_handler(self, mock_client):\n        expected_params = {\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,String1'\n        }\n        base_lookup_key = 'TestKey:TestVal.TestMap[M].String1'\n        self.stubber.add_response('get_item',\n                                  self.get_parameters_response,\n                                  expected_params)\n        with self.stubber:\n            try:\n                DynamodbLookup.handle(base_lookup_key)\n            except ValueError as e:\n                self.assertEqual(\n                    'Please make sure to include a tablename',\n                    str(e))\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_invalid_table_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'FakeTable',\n            'Key': {\n                'TestKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,String1'\n        }\n        base_lookup_key = 'FakeTable@TestKey:TestVal.TestMap[M].String1'\n        service_error_code = 'ResourceNotFoundException'\n        self.stubber.add_client_error('get_item',\n                                      service_error_code=service_error_code,\n                                      expected_params=expected_params)\n        with self.stubber:\n            try:\n                DynamodbLookup.handle(base_lookup_key)\n            except ValueError as e:\n                self.assertEqual(\n                    'Cannot find the dynamodb table: 
FakeTable',\n                    str(e))\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_invalid_partition_key_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'TestTable',\n            'Key': {\n                'FakeKey': {'S': 'TestVal'}\n            },\n            'ProjectionExpression': 'TestVal,TestMap,String1'\n        }\n        base_lookup_key = 'TestTable@FakeKey:TestVal.TestMap[M].String1'\n        service_error_code = 'ValidationException'\n        self.stubber.add_client_error('get_item',\n                                      service_error_code=service_error_code,\n                                      expected_params=expected_params)\n\n        with self.stubber:\n            try:\n                DynamodbLookup.handle(base_lookup_key)\n            except ValueError as e:\n                self.assertEqual(\n                    'No dynamodb record matched the partition key: FakeKey',\n                    str(e))\n\n    @mock.patch('stacker.lookups.handlers.dynamodb.get_session',\n                return_value=SessionStub(client))\n    def test_dynamodb_invalid_partition_val_handler(self, mock_client):\n        expected_params = {\n            'TableName': 'TestTable',\n            'Key': {\n                'TestKey': {'S': 'FakeVal'}\n            },\n            'ProjectionExpression': 'FakeVal,TestMap,String1'\n        }\n        empty_response = {'ResponseMetadata': {}}\n        base_lookup_key = 'TestTable@TestKey:FakeVal.TestMap[M].String1'\n        self.stubber.add_response('get_item',\n                                  empty_response,\n                                  expected_params)\n        with self.stubber:\n            try:\n                DynamodbLookup.handle(base_lookup_key)\n            except ValueError as e:\n                self.assertEqual(\n                    'The dynamodb record could not be found 
using '\n                    'the following key: {\\'S\\': \\'FakeVal\\'}',\n                    str(e))\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_envvar.py",
    "content": "import unittest\nfrom stacker.lookups.handlers.envvar import EnvvarLookup\nimport os\n\n\nclass TestEnvVarHandler(unittest.TestCase):\n\n    def setUp(self):\n        self.testkey = 'STACKER_ENVVAR_TESTCASE'\n        self.invalidtestkey = 'STACKER_INVALID_ENVVAR_TESTCASE'\n        self.testval = 'TestVal'\n        os.environ[self.testkey] = self.testval\n\n    def test_valid_envvar(self):\n        value = EnvvarLookup.handle(self.testkey)\n        self.assertEqual(value, self.testval)\n\n    def test_invalid_envvar(self):\n        with self.assertRaises(ValueError):\n            EnvvarLookup.handle(self.invalidtestkey)\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_file.py",
    "content": "# encoding: utf-8\n\n\nimport unittest\nimport mock\nimport base64\nimport yaml\nimport json\nfrom troposphere import Base64, GenericHelperFn, Join\n\nfrom stacker.lookups.handlers.file import (json_codec, FileLookup,\n                                           parameterized_codec, yaml_codec)\n\n\ndef to_template_dict(obj):\n    \"\"\"Extract the CFN template dict of an object for test comparisons\"\"\"\n\n    if hasattr(obj, 'to_dict') and callable(obj.to_dict):\n        return obj.to_dict()\n    elif isinstance(obj, dict):\n        return dict((key, to_template_dict(value))\n                    for (key, value) in obj.items())\n    elif isinstance(obj, (list, tuple)):\n        return type(obj)(to_template_dict(item) for item in obj)\n    else:\n        return obj\n\n\nclass TestFileTranslator(unittest.TestCase):\n    @staticmethod\n    def assertTemplateEqual(left, right):\n        \"\"\"\n        Assert that two codec results are equivalent\n\n        Convert both sides to their template representations, since Troposphere\n        objects are not natively comparable\n        \"\"\"\n        return to_template_dict(left) == to_template_dict(right)\n\n    def test_parameterized_codec_b64(self):\n        expected = Base64(\n            Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here'])\n        )\n\n        out = parameterized_codec(u'Test {{Interpolation}} Here', True)\n        self.assertEqual(Base64, out.__class__)\n        self.assertTemplateEqual(expected, out)\n\n    def test_parameterized_codec_plain(self):\n        expected = Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here'])\n\n        out = parameterized_codec(u'Test {{Interpolation}} Here', False)\n        self.assertEqual(GenericHelperFn, out.__class__)\n        self.assertTemplateEqual(expected, out)\n\n    def test_parameterized_codec_plain_no_interpolation(self):\n        expected = u'Test Without Interpolation Here'\n\n        out = parameterized_codec(u'Test 
Without Interpolation Here', False)\n        self.assertEqual(GenericHelperFn, out.__class__)\n        self.assertTemplateEqual(expected, out)\n\n    def test_yaml_codec_raw(self):\n        structured = {\n            u'Test': [1, None, u'unicode ✓', {u'some': u'obj'}]\n        }\n        # Note: must use safe_dump, since regular dump adds !python/unicode\n        # tags, which we don't want, or we can't be sure we're correctly\n        # loading string as unicode.\n        raw = yaml.safe_dump(structured)\n\n        out = yaml_codec(raw, parameterized=False)\n        self.assertEqual(structured, out)\n\n    def test_yaml_codec_parameterized(self):\n        processed = {\n            u'Test': Join(u'', [u'Test ', {u'Ref': u'Interpolation'},\n                          u' Here'])\n        }\n        structured = {\n            u'Test': u'Test {{Interpolation}} Here'\n        }\n        raw = yaml.safe_dump(structured)\n\n        out = yaml_codec(raw, parameterized=True)\n        self.assertTemplateEqual(processed, out)\n\n    def test_json_codec_raw(self):\n        structured = {\n            u'Test': [1, None, u'str', {u'some': u'obj'}]\n        }\n        raw = json.dumps(structured)\n\n        out = json_codec(raw, parameterized=False)\n        self.assertEqual(structured, out)\n\n    def test_json_codec_parameterized(self):\n        processed = {\n            u'Test': Join(u'', [u'Test ', {u'Ref': u'Interpolation'},\n                                u' Here'])\n        }\n        structured = {\n            u'Test': u'Test {{Interpolation}} Here'\n        }\n        raw = json.dumps(structured)\n\n        out = json_codec(raw, parameterized=True)\n        self.assertTemplateEqual(processed, out)\n\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path',\n                return_value='')\n    def test_file_loaded(self, content_mock):\n        FileLookup.handle(u'plain:file://tmp/test')\n        content_mock.assert_called_with(u'file://tmp/test')\n\n   
 @mock.patch('stacker.lookups.handlers.file.read_value_from_path',\n                return_value=u'Hello, world')\n    def test_handler_plain(self, _):\n        out = FileLookup.handle(u'plain:file://tmp/test')\n        self.assertEqual(u'Hello, world', out)\n\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_b64(self, content_mock):\n        plain = u'Hello, world'\n        encoded = base64.b64encode(plain.encode('utf8')).decode('utf-8')\n\n        content_mock.return_value = plain\n        out = FileLookup.handle(u'base64:file://tmp/test')\n        self.assertEqual(encoded, out)\n\n    @mock.patch('stacker.lookups.handlers.file.parameterized_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_parameterized(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'parameterized:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value, False)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.parameterized_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_parameterized_b64(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'parameterized-b64:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value, True)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.yaml_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_yaml(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'yaml:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value,\n                    
                       parameterized=False)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.yaml_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_yaml_parameterized(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'yaml-parameterized:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value,\n                                           parameterized=True)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.json_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_json(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'json:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value,\n                                           parameterized=False)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.json_codec')\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_handler_json_parameterized(self, content_mock, codec_mock):\n        result = mock.Mock()\n        codec_mock.return_value = result\n\n        out = FileLookup.handle(u'json-parameterized:file://tmp/test')\n        codec_mock.assert_called_once_with(content_mock.return_value,\n                                           parameterized=True)\n\n        self.assertEqual(result, out)\n\n    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')\n    def test_unknown_codec(self, _):\n        with self.assertRaises(KeyError):\n            FileLookup.handle(u'bad:file://tmp/test')\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_hook_data.py",
    "content": "\"\"\"Tests for the hook_data lookup handler.\"\"\"\nimport unittest\n\nfrom stacker.context import Context\nfrom stacker.lookups.handlers.hook_data import HookDataLookup\n\n\nclass TestHookDataLookup(unittest.TestCase):\n    \"\"\"Verify HookDataLookup resolves values stored via set_hook_data.\"\"\"\n\n    def setUp(self):\n        self.ctx = Context({\"namespace\": \"test-ns\"})\n        self.ctx.set_hook_data(\"fake_hook\", {\"result\": \"good\"})\n\n    def test_valid_hook_data(self):\n        # A \"<hook>::<key>\" lookup returns the stored value.\n        result = HookDataLookup.handle(\"fake_hook::result\", context=self.ctx)\n        self.assertEqual(\"good\", result)\n\n    def test_invalid_hook_data(self):\n        # Unknown keys within known hook data raise KeyError.\n        with self.assertRaises(KeyError):\n            HookDataLookup.handle(\"fake_hook::bad_key\", context=self.ctx)\n\n    def test_bad_value_hook_data(self):\n        # A lookup missing the \"::\" separator is malformed.\n        with self.assertRaises(ValueError):\n            HookDataLookup.handle(\"fake_hook\", context=self.ctx)\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_output.py",
    "content": "\"\"\"Tests for the output lookup handler.\"\"\"\nimport unittest\n\nfrom mock import MagicMock\n\nfrom stacker.stack import Stack\nfrom stacker.lookups.handlers.output import OutputLookup\nfrom ...factories import generate_definition\n\n\nclass TestOutputHandler(unittest.TestCase):\n    \"\"\"Verify OutputLookup resolves \"<stack>::<output>\" references.\"\"\"\n\n    def setUp(self):\n        self.context = MagicMock()\n\n    def test_output_handler(self):\n        stack = Stack(\n            definition=generate_definition(\"vpc\", 1),\n            context=self.context)\n        stack.set_outputs({\"SomeOutput\": \"Test Output\"})\n        self.context.get_stack.return_value = stack\n\n        resolved = OutputLookup.handle(\"stack-name::SomeOutput\",\n                                       context=self.context)\n\n        self.assertEqual(\"Test Output\", resolved)\n        # The stack should be fetched from the context exactly once, by name.\n        self.assertEqual(1, self.context.get_stack.call_count)\n        call_args = self.context.get_stack.call_args\n        self.assertEqual(\"stack-name\", call_args[0][0])\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_rxref.py",
    "content": "\"\"\"Tests for the rxref lookup handler.\"\"\"\nimport unittest\n\nfrom mock import MagicMock\n\nfrom stacker.lookups.handlers.rxref import RxrefLookup\nfrom ....context import Context\nfrom ....config import Config\n\n\nclass TestRxrefHandler(unittest.TestCase):\n    \"\"\"Verify RxrefLookup prefixes the namespace onto the stack name.\"\"\"\n\n    def setUp(self):\n        self.provider = MagicMock()\n        self.context = Context(config=Config({\"namespace\": \"ns\"}))\n\n    def test_rxref_handler(self):\n        self.provider.get_output.return_value = \"Test Output\"\n\n        resolved = RxrefLookup.handle(\"fully-qualified-stack-name::SomeOutput\",\n                                      provider=self.provider,\n                                      context=self.context)\n\n        self.assertEqual(\"Test Output\", resolved)\n        # The provider must be queried with the namespaced stack name.\n        call_args = self.provider.get_output.call_args\n        self.assertEqual(\"ns-fully-qualified-stack-name\", call_args[0][0])\n        self.assertEqual(\"SomeOutput\", call_args[0][1])\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_split.py",
    "content": "\"\"\"Tests for the split lookup: \"<delimiter>::<value>\".\"\"\"\nimport unittest\n\nfrom stacker.lookups.handlers.split import SplitLookup\n\n\nclass TestSplitLookup(unittest.TestCase):\n    def test_single_character_split(self):\n        # A single-character delimiter splits into individual elements.\n        # unittest assertions (not bare asserts, which are stripped under\n        # `python -O` and report nothing useful) match the rest of the suite.\n        value = \",::a,b,c\"\n        expected = [\"a\", \"b\", \"c\"]\n        self.assertEqual(SplitLookup.handle(value), expected)\n\n    def test_multi_character_split(self):\n        # Multi-character delimiters are honored verbatim; only the first\n        # \"::\" separates delimiter from payload.\n        value = \",,::a,,b,c\"\n        expected = [\"a\", \"b,c\"]\n        self.assertEqual(SplitLookup.handle(value), expected)\n\n    def test_invalid_value_split(self):\n        # A lookup missing the \"::\" separator is malformed.\n        value = \",:a,b,c\"\n        with self.assertRaises(ValueError):\n            SplitLookup.handle(value)\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_ssmstore.py",
    "content": "\"\"\"Tests for the ssmstore lookup handler.\"\"\"\nimport unittest\nimport mock\nfrom botocore.stub import Stubber\nfrom stacker.lookups.handlers.ssmstore import SsmstoreLookup\nimport boto3\nfrom stacker.tests.factories import SessionStub\n\n\nclass TestSSMStoreHandler(unittest.TestCase):\n    client = boto3.client('ssm', region_name='us-east-1')\n\n    def setUp(self):\n        self.stubber = Stubber(self.client)\n        self.get_parameters_response = {\n            'Parameters': [\n                {\n                    'Name': 'ssmkey',\n                    'Type': 'String',\n                    'Value': 'ssmvalue'\n                }\n            ],\n            'InvalidParameters': [\n                'invalidssmparam'\n            ]\n        }\n        self.invalid_get_parameters_response = {\n            'InvalidParameters': [\n                'ssmkey'\n            ]\n        }\n        self.expected_params = {\n            'Names': ['ssmkey'],\n            'WithDecryption': True\n        }\n        self.ssmkey = \"ssmkey\"\n        self.ssmvalue = \"ssmvalue\"\n\n    @mock.patch('stacker.lookups.handlers.ssmstore.get_session',\n                return_value=SessionStub(client))\n    def test_ssmstore_handler(self, mock_client):\n        self.stubber.add_response('get_parameters',\n                                  self.get_parameters_response,\n                                  self.expected_params)\n        with self.stubber:\n            value = SsmstoreLookup.handle(self.ssmkey)\n            self.assertEqual(value, self.ssmvalue)\n            self.assertIsInstance(value, str)\n\n    @mock.patch('stacker.lookups.handlers.ssmstore.get_session',\n                return_value=SessionStub(client))\n    def test_ssmstore_invalid_value_handler(self, mock_client):\n        self.stubber.add_response('get_parameters',\n                                  self.invalid_get_parameters_response,\n                                  self.expected_params)\n        with self.stubber:\n            # assertRaises fails the test when no ValueError is raised; the\n            # previous try/except pattern passed silently in that case.\n            with self.assertRaises(ValueError):\n                SsmstoreLookup.handle(self.ssmkey)\n\n    @mock.patch('stacker.lookups.handlers.ssmstore.get_session',\n                return_value=SessionStub(client))\n    def test_ssmstore_handler_with_region(self, mock_client):\n        self.stubber.add_response('get_parameters',\n                                  self.get_parameters_response,\n                                  self.expected_params)\n        region = \"us-east-1\"\n        temp_value = \"%s@%s\" % (region, self.ssmkey)\n        with self.stubber:\n            value = SsmstoreLookup.handle(temp_value)\n            self.assertEqual(value, self.ssmvalue)\n"
  },
  {
    "path": "stacker/tests/lookups/handlers/test_xref.py",
    "content": "\"\"\"Tests for the xref lookup handler.\"\"\"\nimport unittest\n\nfrom mock import MagicMock\n\nfrom stacker.lookups.handlers.xref import XrefLookup\n\n\nclass TestXrefHandler(unittest.TestCase):\n    \"\"\"Verify XrefLookup uses the stack name verbatim (no namespacing).\"\"\"\n\n    def setUp(self):\n        self.provider = MagicMock()\n        self.context = MagicMock()\n\n    def test_xref_handler(self):\n        self.provider.get_output.return_value = \"Test Output\"\n\n        resolved = XrefLookup.handle(\"fully-qualified-stack-name::SomeOutput\",\n                                     provider=self.provider,\n                                     context=self.context)\n\n        self.assertEqual(\"Test Output\", resolved)\n        # Unlike rxref, the context is never asked to qualify the name.\n        self.assertEqual(0, self.context.get_fqn.call_count)\n        call_args = self.provider.get_output.call_args\n        self.assertEqual(\"fully-qualified-stack-name\", call_args[0][0])\n        self.assertEqual(\"SomeOutput\", call_args[0][1])\n"
  },
  {
    "path": "stacker/tests/lookups/test_registry.py",
    "content": "\"\"\"Tests for the lookup handler registry.\"\"\"\nimport unittest\n\nfrom mock import MagicMock\n\nfrom stacker.exceptions import (\n    UnknownLookupType,\n    FailedVariableLookup,\n)\n\nfrom stacker.lookups.registry import LOOKUP_HANDLERS\n\nfrom stacker.variables import Variable, VariableValueLookup\n\nfrom ..factories import (\n    mock_context,\n    mock_provider,\n)\n\n\nclass TestRegistry(unittest.TestCase):\n    def setUp(self):\n        self.ctx = mock_context()\n        self.provider = mock_provider()\n\n    def test_autoloaded_lookup_handlers(self):\n        handlers = [\n            \"output\", \"xref\", \"kms\", \"ssmstore\", \"envvar\", \"rxref\", \"ami\",\n            \"file\", \"split\", \"default\", \"hook_data\", \"dynamodb\",\n        ]\n        for handler in handlers:\n            # assertIn checks membership directly and reports the missing\n            # handler, replacing the try/except KeyError +\n            # assertTrue(False, ...) pattern.\n            self.assertIn(\n                handler, LOOKUP_HANDLERS,\n                \"Lookup handler: '{}' was not registered\".format(handler),\n            )\n\n    def test_resolve_lookups_string_unknown_lookup(self):\n        with self.assertRaises(UnknownLookupType):\n            Variable(\"MyVar\", \"${bad_lookup foo}\")\n\n    def test_resolve_lookups_list_unknown_lookup(self):\n        with self.assertRaises(UnknownLookupType):\n            Variable(\n                \"MyVar\", [\n                    \"${bad_lookup foo}\", \"random string\",\n                ]\n            )\n\n    def resolve_lookups_with_output_handler_raise_valueerror(self, variable):\n        \"\"\"Mock output handler to throw ValueError, then run resolve_lookups\n        on the given variable.\n        \"\"\"\n        mock_handler = MagicMock(side_effect=ValueError(\"Error\"))\n\n        # find the only lookup in the variable\n        for value in variable._value:\n            if isinstance(value, VariableValueLookup):\n                value.handler = mock_handler\n\n        with self.assertRaises(FailedVariableLookup) as cm:\n            variable.resolve(self.ctx, self.provider)\n\n        self.assertIsInstance(cm.exception.error, ValueError)\n\n    def test_resolve_lookups_string_failed_variable_lookup(self):\n        variable = Variable(\"MyVar\", \"${output foo::bar}\")\n        self.resolve_lookups_with_output_handler_raise_valueerror(variable)\n\n    def test_resolve_lookups_list_failed_variable_lookup(self):\n        variable = Variable(\n            \"MyVar\", [\n                \"random string\", \"${output foo::bar}\", \"random string\",\n            ]\n        )\n        self.resolve_lookups_with_output_handler_raise_valueerror(variable)\n"
  },
  {
    "path": "stacker/tests/providers/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/providers/aws/__init__.py",
    "content": ""
  },
  {
    "path": "stacker/tests/providers/aws/test_default.py",
    "content": "import copy\nfrom datetime import datetime\nimport os.path\nimport random\nimport string\nimport threading\nimport unittest\n\nfrom mock import patch, MagicMock\nfrom botocore.stub import Stubber\nfrom botocore.exceptions import ClientError, UnStubbedResponseError\nimport boto3\n\nfrom stacker.actions.diff import DictValue\n\nfrom stacker.providers.base import Template\nfrom stacker.session_cache import get_session\n\nfrom stacker.providers.aws import default\n\nfrom stacker.providers.aws.default import (\n    DEFAULT_CAPABILITIES,\n    MAX_TAIL_RETRIES,\n    Provider,\n    requires_replacement,\n    ask_for_approval,\n    wait_till_change_set_complete,\n    create_change_set,\n    summarize_params_diff,\n    generate_cloudformation_args,\n    output_full_changeset\n)\n\nfrom stacker import exceptions\n\nfrom stacker.stack import Stack\n\n\ndef random_string(length=12):\n    \"\"\" Returns a random string of variable length.\n\n    Args:\n        length (int): The # of characters to use in the random string.\n\n    Returns:\n        str: The random string.\n    \"\"\"\n\n    return ''.join(\n        [random.choice(string.ascii_letters) for _ in range(length)])\n\n\ndef generate_describe_stacks_stack(stack_name,\n                                   creation_time=None,\n                                   stack_status=\"CREATE_COMPLETE\",\n                                   tags=None):\n    tags = tags or []\n    return {\n        \"StackName\": stack_name,\n        \"StackId\": stack_name,\n        \"CreationTime\": creation_time or datetime(2015, 1, 1),\n        \"StackStatus\": stack_status,\n        \"Tags\": tags\n    }\n\n\ndef generate_get_template(file_name='cfn_template.json',\n                          stages_available=['Original']):\n    fixture_dir = os.path.join(os.path.dirname(__file__), '../../fixtures')\n    with open(os.path.join(fixture_dir, file_name), 'r') as f:\n        return {\n            \"StagesAvailable\": stages_available,\n  
          \"TemplateBody\": f.read()\n        }\n\n\ndef generate_stack_object(stack_name, outputs=None):\n    mock_stack = MagicMock(['name', 'fqn', 'blueprint'])\n    if not outputs:\n        outputs = {\n            \"FakeOutput\": {\n                \"Value\": {\"Ref\": \"FakeResource\"}\n            }\n        }\n    mock_stack.name = stack_name\n    mock_stack.fqn = stack_name\n    mock_stack.blueprint = MagicMock(['get_output_definitions'])\n    mock_stack.blueprint.get_output_definitions = MagicMock(\n        return_value=outputs\n    )\n    return mock_stack\n\n\ndef generate_resource_change(replacement=True):\n    resource_change = {\n        \"Action\": \"Modify\",\n        \"Details\": [],\n        \"LogicalResourceId\": \"Fake\",\n        \"PhysicalResourceId\": \"arn:aws:fake\",\n        \"Replacement\": \"True\" if replacement else \"False\",\n        \"ResourceType\": \"AWS::Fake\",\n        \"Scope\": [\"Properties\"],\n    }\n    return {\n        \"ResourceChange\": resource_change,\n        \"Type\": \"Resource\",\n    }\n\n\ndef generate_change_set_response(status, execution_status=\"AVAILABLE\",\n                                 changes=[], status_reason=\"FAKE\"):\n    return {\n        \"ChangeSetName\": \"string\",\n        \"ChangeSetId\": \"string\",\n        \"StackId\": \"string\",\n        \"StackName\": \"string\",\n        \"Description\": \"string\",\n        \"Parameters\": [\n            {\n                \"ParameterKey\": \"string\",\n                \"ParameterValue\": \"string\",\n                \"UsePreviousValue\": False\n            },\n        ],\n        \"CreationTime\": datetime(2015, 1, 1),\n        \"ExecutionStatus\": execution_status,\n        \"Status\": status,\n        \"StatusReason\": status_reason,\n        \"NotificationARNs\": [\n            \"string\",\n        ],\n        \"Capabilities\": [\n            \"CAPABILITY_NAMED_IAM\",\n            \"CAPABILITY_AUTO_EXPAND\"\n        ],\n        \"Tags\": [\n   
         {\n                \"Key\": \"string\",\n                \"Value\": \"string\"\n            },\n        ],\n        \"Changes\": changes,\n        \"NextToken\": \"string\"\n    }\n\n\ndef generate_change(action=\"Modify\", resource_type=\"EC2::Instance\",\n                    replacement=\"False\", requires_recreation=\"Never\"):\n    \"\"\" Generate a minimal change for a changeset \"\"\"\n    return {\n        \"Type\": \"Resource\",\n        \"ResourceChange\": {\n            \"Action\": action,\n            \"LogicalResourceId\": random_string(),\n            \"PhysicalResourceId\": random_string(),\n            \"ResourceType\": resource_type,\n            \"Replacement\": replacement,\n            \"Scope\": [\"Properties\"],\n            \"Details\": [\n                {\n                    \"Target\": {\n                        \"Attribute\": \"Properties\",\n                        \"Name\": random_string(),\n                        \"RequiresRecreation\": requires_recreation\n                    },\n                    \"Evaluation\": \"Static\",\n                    \"ChangeSource\": \"ResourceReference\",\n                    \"CausingEntity\": random_string()\n                },\n            ]\n        }\n    }\n\n\nclass TestMethods(unittest.TestCase):\n    def setUp(self):\n        self.cfn = boto3.client(\"cloudformation\")\n        self.stubber = Stubber(self.cfn)\n\n    def test_requires_replacement(self):\n        changeset = [\n            generate_resource_change(),\n            generate_resource_change(replacement=False),\n            generate_resource_change(),\n        ]\n        replacement = requires_replacement(changeset)\n        self.assertEqual(len(replacement), 2)\n        for resource in replacement:\n            self.assertEqual(resource[\"ResourceChange\"][\"Replacement\"], \"True\")\n\n    def test_summarize_params_diff(self):\n        unmodified_param = DictValue(\"ParamA\", \"new-param-value\",\n                       
              \"new-param-value\")\n        modified_param = DictValue(\"ParamB\", \"param-b-old-value\",\n                                   \"param-b-new-value-delta\")\n        added_param = DictValue(\"ParamC\", None, \"param-c-new-value\")\n        removed_param = DictValue(\"ParamD\", \"param-d-old-value\", None)\n\n        params_diff = [\n            unmodified_param,\n            modified_param,\n            added_param,\n            removed_param,\n        ]\n        self.assertEqual(summarize_params_diff([]), \"\")\n        self.assertEqual(summarize_params_diff(params_diff), '\\n'.join([\n            \"Parameters Added: ParamC\",\n            \"Parameters Removed: ParamD\",\n            \"Parameters Modified: ParamB\\n\",\n        ]))\n\n        only_modified_params_diff = [modified_param]\n        self.assertEqual(summarize_params_diff(only_modified_params_diff),\n                         \"Parameters Modified: ParamB\\n\")\n\n        only_added_params_diff = [added_param]\n        self.assertEqual(summarize_params_diff(only_added_params_diff),\n                         \"Parameters Added: ParamC\\n\")\n\n        only_removed_params_diff = [removed_param]\n        self.assertEqual(summarize_params_diff(only_removed_params_diff),\n                         \"Parameters Removed: ParamD\\n\")\n\n    def test_ask_for_approval(self):\n        get_input_path = \"stacker.ui.get_raw_input\"\n        with patch(get_input_path, return_value=\"y\"):\n            self.assertIsNone(ask_for_approval([], [], None))\n\n        for v in (\"n\", \"N\", \"x\", \"\\n\"):\n            with patch(get_input_path, return_value=v):\n                with self.assertRaises(exceptions.CancelExecution):\n                    ask_for_approval([], [])\n\n        with patch(get_input_path, side_effect=[\"v\", \"n\"]) as mock_get_input:\n            with patch(\n                \"stacker.providers.aws.default.output_full_changeset\"\n            ) as mock_full_changeset:\n               
 with self.assertRaises(exceptions.CancelExecution):\n                    ask_for_approval([], [], True)\n                self.assertEqual(mock_full_changeset.call_count, 1)\n            self.assertEqual(mock_get_input.call_count, 2)\n\n    def test_ask_for_approval_with_params_diff(self):\n        get_input_path = \"stacker.ui.get_raw_input\"\n        params_diff = [\n            DictValue('ParamA', None, 'new-param-value'),\n            DictValue('ParamB', 'param-b-old-value', 'param-b-new-value-delta')\n        ]\n        with patch(get_input_path, return_value=\"y\"):\n            self.assertIsNone(ask_for_approval([], params_diff, None))\n\n        for v in (\"n\", \"N\", \"x\", \"\\n\"):\n            with patch(get_input_path, return_value=v):\n                with self.assertRaises(exceptions.CancelExecution):\n                    ask_for_approval([], params_diff)\n\n        with patch(get_input_path, side_effect=[\"v\", \"n\"]) as mock_get_input:\n            with patch(\n                \"stacker.providers.aws.default.output_full_changeset\"\n            ) as mock_full_changeset:\n                with self.assertRaises(exceptions.CancelExecution):\n                    ask_for_approval([], params_diff, True)\n                self.assertEqual(mock_full_changeset.call_count, 1)\n            self.assertEqual(mock_get_input.call_count, 2)\n\n    @patch(\"stacker.providers.aws.default.format_params_diff\")\n    @patch('stacker.providers.aws.default.yaml.safe_dump')\n    def test_output_full_changeset(self, mock_safe_dump, patched_format):\n        get_input_path = \"stacker.ui.get_raw_input\"\n\n        safe_dump_counter = 0\n\n        for v in ['y', 'v', 'Y', 'V']:\n            with patch(get_input_path, return_value=v) as prompt:\n                self.assertIsNone(output_full_changeset(full_changeset=[],\n                                                        params_diff=[],\n                                                        fqn=None))\n                
self.assertEqual(prompt.call_count, 1)\n                safe_dump_counter += 1\n                self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)\n                self.assertEqual(patched_format.call_count, 0)\n\n        for v in ['n', 'N']:\n            with patch(get_input_path, return_value=v) as prompt:\n                output_full_changeset(full_changeset=[], params_diff=[],\n                                      answer=None, fqn=None)\n                self.assertEqual(prompt.call_count, 1)\n                self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)\n                self.assertEqual(patched_format.call_count, 0)\n\n        with self.assertRaises(exceptions.CancelExecution):\n            output_full_changeset(full_changeset=[], params_diff=[],\n                                  answer='x', fqn=None)\n\n        output_full_changeset(full_changeset=[], params_diff=['mock'],\n                              answer='y', fqn=None)\n        safe_dump_counter += 1\n        self.assertEqual(mock_safe_dump.call_count, safe_dump_counter)\n        self.assertEqual(patched_format.call_count, 1)\n\n    def test_wait_till_change_set_complete_success(self):\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\"CREATE_COMPLETE\")\n        )\n        with self.stubber:\n            wait_till_change_set_complete(self.cfn, \"FAKEID\")\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\"FAILED\")\n        )\n        with self.stubber:\n            wait_till_change_set_complete(self.cfn, \"FAKEID\")\n\n    def test_wait_till_change_set_complete_failed(self):\n        # Need 2 responses for try_count\n        for i in range(2):\n            self.stubber.add_response(\n                \"describe_change_set\",\n                generate_change_set_response(\"CREATE_PENDING\")\n            )\n        with self.stubber:\n       
     with self.assertRaises(exceptions.ChangesetDidNotStabilize):\n                wait_till_change_set_complete(self.cfn, \"FAKEID\", try_count=2,\n                                              sleep_time=.1)\n\n    def test_create_change_set_stack_did_not_change(self):\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                \"FAILED\", status_reason=\"Stack didn't contain changes.\"\n            )\n        )\n\n        self.stubber.add_response(\n            \"delete_change_set\",\n            {},\n            expected_params={\"ChangeSetName\": \"CHANGESETID\"}\n        )\n\n        with self.stubber:\n            with self.assertRaises(exceptions.StackDidNotChange):\n                create_change_set(\n                    cfn_client=self.cfn, fqn=\"my-fake-stack\",\n                    template=Template(url=\"http://fake.template.url.com/\"),\n                    parameters=[], tags=[]\n                )\n\n    def test_create_change_set_unhandled_failed_status(self):\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                \"FAILED\", status_reason=\"Some random bad thing.\"\n            )\n        )\n\n        with self.stubber:\n            with self.assertRaises(exceptions.UnhandledChangeSetStatus):\n                create_change_set(\n                    cfn_client=self.cfn, fqn=\"my-fake-stack\",\n                    template=Template(url=\"http://fake.template.url.com/\"),\n                    parameters=[], tags=[]\n                )\n\n    def test_create_change_set_bad_execution_status(self):\n        
self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"UNAVAILABLE\",\n            )\n        )\n\n        with self.stubber:\n            with self.assertRaises(exceptions.UnableToExecuteChangeSet):\n                create_change_set(\n                    cfn_client=self.cfn, fqn=\"my-fake-stack\",\n                    template=Template(url=\"http://fake.template.url.com/\"),\n                    parameters=[], tags=[]\n                )\n\n    def test_generate_cloudformation_args(self):\n        stack_name = \"mystack\"\n        template_url = \"http://fake.s3url.com/blah.json\"\n        template_body = '{\"fake_body\": \"woot\"}'\n        std_args = {\n            \"stack_name\": stack_name,\n            \"parameters\": [],\n            \"tags\": [],\n            \"template\": Template(url=template_url)\n        }\n        std_return = {\n            \"StackName\": stack_name,\n            \"Parameters\": [],\n            \"Tags\": [],\n            \"Capabilities\": DEFAULT_CAPABILITIES,\n            \"TemplateURL\": template_url,\n        }\n        result = generate_cloudformation_args(**std_args)\n        self.assertEqual(result, std_return)\n\n        result = generate_cloudformation_args(service_role=\"FakeRole\",\n                                              **std_args)\n        service_role_result = copy.deepcopy(std_return)\n        service_role_result[\"RoleARN\"] = \"FakeRole\"\n        self.assertEqual(result, service_role_result)\n\n        result = generate_cloudformation_args(change_set_name=\"MyChanges\",\n                                              **std_args)\n        change_set_result = copy.deepcopy(std_return)\n        change_set_result[\"ChangeSetName\"] = \"MyChanges\"\n        
self.assertEqual(result, change_set_result)\n\n        # Check stack policy\n        stack_policy = Template(body=\"{}\")\n        result = generate_cloudformation_args(stack_policy=stack_policy,\n                                              **std_args)\n        stack_policy_result = copy.deepcopy(std_return)\n        stack_policy_result[\"StackPolicyBody\"] = \"{}\"\n        self.assertEqual(result, stack_policy_result)\n\n        # If not TemplateURL is provided, use TemplateBody\n        std_args[\"template\"] = Template(body=template_body)\n        template_body_result = copy.deepcopy(std_return)\n        del(template_body_result[\"TemplateURL\"])\n        template_body_result[\"TemplateBody\"] = template_body\n        result = generate_cloudformation_args(**std_args)\n        self.assertEqual(result, template_body_result)\n\n    def test_generate_cloudformation_args_with_notification_arns(self):\n        stack_name = \"mystack\"\n        template_url = \"http://fake.s3url.com/blah.json\"\n        std_args = {\n            \"stack_name\": stack_name,\n            \"parameters\": [],\n            \"tags\": [],\n            \"template\": Template(url=template_url),\n            \"notification_arns\": [\n                \"arn:aws:sns:us-east-1:1234567890:test-cf-deploy-notify-sns-topic-CfDeployNotify\" # noqa\n            ]\n        }\n        std_return = {\n            \"StackName\": stack_name,\n            \"Parameters\": [],\n            \"Tags\": [],\n            \"Capabilities\": DEFAULT_CAPABILITIES,\n            \"TemplateURL\": template_url,\n            \"NotificationARNs\": [\n                \"arn:aws:sns:us-east-1:1234567890:test-cf-deploy-notify-sns-topic-CfDeployNotify\" # noqa\n            ]\n        }\n        result = generate_cloudformation_args(**std_args)\n        self.assertEqual(result, std_return)\n\n\nclass TestProviderDefaultMode(unittest.TestCase):\n    def setUp(self):\n        region = \"us-east-1\"\n        self.session = 
get_session(region=region)\n        self.provider = Provider(\n            self.session, region=region, recreate_failed=False)\n        self.stubber = Stubber(self.provider.cloudformation)\n\n    def test_get_stack_stack_does_not_exist(self):\n        stack_name = \"MockStack\"\n        self.stubber.add_client_error(\n            \"describe_stacks\",\n            service_error_code=\"ValidationError\",\n            service_message=\"Stack with id %s does not exist\" % stack_name,\n            expected_params={\"StackName\": stack_name}\n        )\n\n        with self.assertRaises(exceptions.StackDoesNotExist):\n            with self.stubber:\n                self.provider.get_stack(stack_name)\n\n    def test_get_stack_stack_exists(self):\n        stack_name = \"MockStack\"\n        stack_response = {\n            \"Stacks\": [generate_describe_stacks_stack(stack_name)]\n        }\n        self.stubber.add_response(\n            \"describe_stacks\",\n            stack_response,\n            expected_params={\"StackName\": stack_name}\n        )\n\n        with self.stubber:\n            response = self.provider.get_stack(stack_name)\n\n        self.assertEqual(response[\"StackName\"], stack_name)\n\n    def test_select_update_method(self):\n        for i in [[{'force_interactive': True,\n                    'force_change_set': False},\n                   self.provider.interactive_update_stack],\n                  [{'force_interactive': False,\n                    'force_change_set': False},\n                   self.provider.default_update_stack],\n                  [{'force_interactive': False,\n                    'force_change_set': True},\n                   self.provider.noninteractive_changeset_update],\n                  [{'force_interactive': True,\n                    'force_change_set': True},\n                   self.provider.interactive_update_stack]]:\n            self.assertEquals(\n                self.provider.select_update_method(**i[0]),\n          
      i[1]\n            )\n\n    def test_prepare_stack_for_update_completed(self):\n        stack_name = \"MockStack\"\n        stack = generate_describe_stacks_stack(\n            stack_name, stack_status=\"UPDATE_COMPLETE\")\n\n        with self.stubber:\n            self.assertTrue(\n                self.provider.prepare_stack_for_update(stack, []))\n\n    def test_prepare_stack_for_update_in_progress(self):\n        stack_name = \"MockStack\"\n        stack = generate_describe_stacks_stack(\n            stack_name, stack_status=\"UPDATE_IN_PROGRESS\")\n\n        with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:\n            with self.stubber:\n                self.provider.prepare_stack_for_update(stack, [])\n\n            self.assertIn('in-progress', str(raised.exception))\n\n    def test_prepare_stack_for_update_non_recreatable(self):\n        stack_name = \"MockStack\"\n        stack = generate_describe_stacks_stack(\n            stack_name, stack_status=\"REVIEW_IN_PROGRESS\")\n\n        with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:\n            with self.stubber:\n                self.provider.prepare_stack_for_update(stack, [])\n\n        self.assertIn('Unsupported state', str(raised.exception))\n\n    def test_prepare_stack_for_update_disallowed(self):\n        stack_name = \"MockStack\"\n        stack = generate_describe_stacks_stack(\n            stack_name, stack_status=\"ROLLBACK_COMPLETE\")\n\n        with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:\n            with self.stubber:\n                self.provider.prepare_stack_for_update(stack, [])\n\n        self.assertIn('re-creation is disabled', str(raised.exception))\n        # Ensure we point out to the user how to enable re-creation\n        self.assertIn('--recreate-failed', str(raised.exception))\n\n    def test_prepare_stack_for_update_bad_tags(self):\n        stack_name = \"MockStack\"\n        stack = 
generate_describe_stacks_stack(\n            stack_name, stack_status=\"ROLLBACK_COMPLETE\")\n\n        self.provider.recreate_failed = True\n\n        with self.assertRaises(exceptions.StackUpdateBadStatus) as raised:\n            with self.stubber:\n                self.provider.prepare_stack_for_update(\n                    stack,\n                    tags=[{'Key': 'stacker_namespace', 'Value': 'test'}])\n\n        self.assertIn('tags differ', str(raised.exception).lower())\n\n    def test_prepare_stack_for_update_recreate(self):\n        stack_name = \"MockStack\"\n        stack = generate_describe_stacks_stack(\n            stack_name, stack_status=\"ROLLBACK_COMPLETE\")\n\n        self.stubber.add_response(\n            \"delete_stack\",\n            {},\n            expected_params={\"StackName\": stack_name}\n        )\n\n        self.provider.recreate_failed = True\n\n        with self.stubber:\n            self.assertFalse(\n                self.provider.prepare_stack_for_update(stack, []))\n\n    def test_noninteractive_changeset_update_no_stack_policy(self):\n        stack_name = \"MockStack\"\n\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n\n        self.stubber.add_response(\"execute_change_set\", {})\n\n        with self.stubber:\n            self.provider.noninteractive_changeset_update(\n                fqn=stack_name,\n                template=Template(url=\"http://fake.template.url.com/\"),\n                old_parameters=[],\n                parameters=[], stack_policy=None, tags=[],\n            )\n\n    def 
test_noninteractive_changeset_update_with_stack_policy(self):\n        stack_name = \"MockStack\"\n\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n\n        self.stubber.add_response(\"set_stack_policy\", {})\n\n        self.stubber.add_response(\"execute_change_set\", {})\n\n        with self.stubber:\n            self.provider.noninteractive_changeset_update(\n                fqn=stack_name,\n                template=Template(url=\"http://fake.template.url.com/\"),\n                old_parameters=[],\n                parameters=[], stack_policy=Template(body=\"{}\"), tags=[],\n            )\n\n    @patch('stacker.providers.aws.default.output_full_changeset')\n    def test_get_stack_changes_update(self, mock_output_full_cs):\n        stack_name = \"MockStack\"\n        mock_stack = generate_stack_object(stack_name)\n\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(stack_name)]}\n        )\n        self.stubber.add_response(\n            'get_template',\n            generate_get_template('cfn_template.yaml')\n        )\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': stack_name}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n        
self.stubber.add_response(\"delete_change_set\", {})\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(stack_name)]}\n        )\n\n        with self.stubber:\n            result = self.provider.get_stack_changes(\n                stack=mock_stack, template=Template(\n                    url=\"http://fake.template.url.com/\"\n                ), parameters=[], tags=[])\n\n        mock_output_full_cs.assert_called_with(full_changeset=changes,\n                                               params_diff=[],\n                                               fqn=stack_name,\n                                               answer='y')\n        expected_outputs = {\n            'FakeOutput': '<inferred-change: MockStack.FakeOutput={}>'.format(\n                str({\"Ref\": \"FakeResource\"})\n            )\n        }\n        self.assertEqual(self.provider.get_outputs(stack_name),\n                         expected_outputs)\n        self.assertEqual(result, expected_outputs)\n\n    @patch('stacker.providers.aws.default.output_full_changeset')\n    def test_get_stack_changes_create(self, mock_output_full_cs):\n        stack_name = \"MockStack\"\n        mock_stack = generate_stack_object(stack_name)\n\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(\n                stack_name, stack_status='REVIEW_IN_PROGRESS'\n            )]}\n        )\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': stack_name}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n        
self.stubber.add_response(\"delete_change_set\", {})\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(\n                stack_name, stack_status='REVIEW_IN_PROGRESS'\n            )]}\n        )\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(\n                stack_name, stack_status='REVIEW_IN_PROGRESS'\n            )]}\n        )\n\n        self.stubber.add_response(\"delete_stack\", {})\n\n        with self.stubber:\n            self.provider.get_stack_changes(\n                stack=mock_stack, template=Template(\n                    url=\"http://fake.template.url.com/\"\n                ), parameters=[], tags=[])\n\n        mock_output_full_cs.assert_called_with(full_changeset=changes,\n                                               params_diff=[],\n                                               fqn=stack_name,\n                                               answer='y')\n\n    def test_tail_stack_retry_on_missing_stack(self):\n        stack_name = \"SlowToCreateStack\"\n        stack = MagicMock(spec=Stack)\n        stack.fqn = \"my-namespace-{}\".format(stack_name)\n\n        default.TAIL_RETRY_SLEEP = .01\n\n        # Ensure the stack never appears before we run out of retries\n        for i in range(MAX_TAIL_RETRIES + 5):\n            self.stubber.add_client_error(\n                \"describe_stack_events\",\n                service_error_code=\"ValidationError\",\n                service_message=\"Stack [{}] does not exist\".format(stack_name),\n                http_status_code=400,\n                response_meta={\"attempt\": i + 1},\n            )\n\n        with self.stubber:\n            try:\n                self.provider.tail_stack(stack, threading.Event())\n            except ClientError as exc:\n                self.assertEqual(\n                    exc.response[\"ResponseMetadata\"][\"attempt\"],\n  
                  MAX_TAIL_RETRIES\n                )\n\n    def test_tail_stack_retry_on_missing_stack_eventual_success(self):\n        stack_name = \"SlowToCreateStack\"\n        stack = MagicMock(spec=Stack)\n        stack.fqn = \"my-namespace-{}\".format(stack_name)\n\n        default.TAIL_RETRY_SLEEP = .01\n        default.GET_EVENTS_SLEEP = .01\n\n        rcvd_events = []\n\n        def mock_log_func(e):\n            rcvd_events.append(e)\n\n        def valid_event_response(stack, event_id):\n            return {\n                \"StackEvents\": [\n                    {\n                        \"StackId\": stack.fqn + \"12345\",\n                        \"EventId\": event_id,\n                        \"StackName\": stack.fqn,\n                        \"Timestamp\": datetime.now()\n                    },\n                ]\n            }\n\n        # Ensure the stack never appears before we run out of retries\n        for i in range(3):\n            self.stubber.add_client_error(\n                \"describe_stack_events\",\n                service_error_code=\"ValidationError\",\n                service_message=\"Stack [{}] does not exist\".format(stack_name),\n                http_status_code=400,\n                response_meta={\"attempt\": i + 1},\n            )\n\n        self.stubber.add_response(\n            \"describe_stack_events\",\n            valid_event_response(stack, \"InitialEvents\")\n        )\n\n        self.stubber.add_response(\n            \"describe_stack_events\",\n            valid_event_response(stack, \"Event1\")\n        )\n\n        with self.stubber:\n            try:\n                self.provider.tail_stack(stack, threading.Event(),\n                                         log_func=mock_log_func)\n            except UnStubbedResponseError:\n                # Eventually we run out of responses - could not happen in\n                # regular execution\n                # normally this would just be dealt with when the threads 
were\n                # shutdown, but doing so here is a little difficult because\n                # we can't control the `tail_stack` loop\n                pass\n\n        self.assertEqual(rcvd_events[0][\"EventId\"], \"Event1\")\n\n\nclass TestProviderInteractiveMode(unittest.TestCase):\n    def setUp(self):\n        region = \"us-east-1\"\n        self.session = get_session(region=region)\n        self.provider = Provider(\n            self.session, interactive=True, recreate_failed=True)\n        self.stubber = Stubber(self.provider.cloudformation)\n\n    def test_successful_init(self):\n        replacements = True\n        p = Provider(self.session, interactive=True,\n                     replacements_only=replacements)\n        self.assertEqual(p.replacements_only, replacements)\n\n    @patch(\"stacker.providers.aws.default.ask_for_approval\")\n    def test_update_stack_execute_success_no_stack_policy(self,\n                                                          patched_approval):\n        stack_name = \"my-fake-stack\"\n\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n\n        self.stubber.add_response(\"execute_change_set\", {})\n\n        with self.stubber:\n            self.provider.update_stack(\n                fqn=stack_name,\n                template=Template(url=\"http://fake.template.url.com/\"),\n                old_parameters=[],\n                parameters=[], tags=[]\n            )\n\n        patched_approval.assert_called_with(full_changeset=changes,\n                                            params_diff=[],\n                        
                    include_verbose=True,\n                                            fqn=stack_name)\n\n        self.assertEqual(patched_approval.call_count, 1)\n\n    @patch(\"stacker.providers.aws.default.ask_for_approval\")\n    def test_update_stack_execute_success_with_stack_policy(self,\n                                                            patched_approval):\n        stack_name = \"my-fake-stack\"\n\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': 'STACKID'}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n\n        self.stubber.add_response(\"set_stack_policy\", {})\n\n        self.stubber.add_response(\"execute_change_set\", {})\n\n        with self.stubber:\n            self.provider.update_stack(\n                fqn=stack_name,\n                template=Template(url=\"http://fake.template.url.com/\"),\n                old_parameters=[],\n                parameters=[], tags=[],\n                stack_policy=Template(body=\"{}\"),\n            )\n\n        patched_approval.assert_called_with(full_changeset=changes,\n                                            params_diff=[],\n                                            include_verbose=True,\n                                            fqn=stack_name)\n\n        self.assertEqual(patched_approval.call_count, 1)\n\n    def test_select_update_method(self):\n        for i in [[{'force_interactive': False,\n                    'force_change_set': False},\n                   self.provider.interactive_update_stack],\n                  [{'force_interactive': True,\n                    'force_change_set': False},\n                   
self.provider.interactive_update_stack],\n                  [{'force_interactive': False,\n                    'force_change_set': True},\n                   self.provider.interactive_update_stack],\n                  [{'force_interactive': True,\n                    'force_change_set': True},\n                   self.provider.interactive_update_stack]]:\n            self.assertEquals(\n                self.provider.select_update_method(**i[0]),\n                i[1]\n            )\n\n    @patch('stacker.providers.aws.default.output_full_changeset')\n    @patch('stacker.providers.aws.default.output_summary')\n    def test_get_stack_changes_interactive(self, mock_output_summary,\n                                           mock_output_full_cs):\n        stack_name = \"MockStack\"\n        mock_stack = generate_stack_object(stack_name)\n\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(stack_name)]}\n        )\n        self.stubber.add_response(\n            'get_template',\n            generate_get_template('cfn_template.yaml')\n        )\n        self.stubber.add_response(\n            \"create_change_set\",\n            {'Id': 'CHANGESETID', 'StackId': stack_name}\n        )\n        changes = []\n        changes.append(generate_change())\n\n        self.stubber.add_response(\n            \"describe_change_set\",\n            generate_change_set_response(\n                status=\"CREATE_COMPLETE\", execution_status=\"AVAILABLE\",\n                changes=changes,\n            )\n        )\n        self.stubber.add_response(\"delete_change_set\", {})\n        self.stubber.add_response(\n            'describe_stacks',\n            {'Stacks': [generate_describe_stacks_stack(stack_name)]}\n        )\n\n        with self.stubber:\n            self.provider.get_stack_changes(\n                stack=mock_stack, template=Template(\n                    url=\"http://fake.template.url.com/\"\n         
       ), parameters=[], tags=[])\n\n        mock_output_summary.assert_called_with(stack_name, 'changes',\n                                               changes, [],\n                                               replacements_only=False)\n        mock_output_full_cs.assert_called_with(full_changeset=changes,\n                                               params_diff=[],\n                                               fqn=stack_name)\n"
  },
  {
    "path": "stacker/tests/test_config.py",
    "content": "import sys\nimport unittest\nimport yaml\n\nfrom stacker.config import (\n    render_parse_load,\n    load,\n    render,\n    parse,\n    dump,\n    process_remote_sources\n)\nfrom stacker.config import Config, Stack\nfrom stacker.environment import (\n    parse_environment,\n    parse_yaml_environment\n)\nfrom stacker import exceptions\nfrom stacker.lookups.registry import LOOKUP_HANDLERS\n\nfrom yaml.constructor import ConstructorError\n\nconfig = \"\"\"a: $a\nb: $b\nc: $c\"\"\"\n\n\nclass TestConfig(unittest.TestCase):\n    def test_render_missing_env(self):\n        env = {\"a\": \"A\"}\n        with self.assertRaises(exceptions.MissingEnvironment) as expected:\n            render(config, env)\n        self.assertEqual(expected.exception.key, \"b\")\n\n    def test_render_no_variable_config(self):\n        c = render(\"namespace: prod\", {})\n        self.assertEqual(\"namespace: prod\", c)\n\n    def test_render_valid_env_substitution(self):\n        c = render(\"namespace: $namespace\", {\"namespace\": \"prod\"})\n        self.assertEqual(\"namespace: prod\", c)\n\n    def test_render_blank_env_values(self):\n        conf = \"\"\"namespace: ${namespace}\"\"\"\n        e = parse_environment(\"\"\"namespace:\"\"\")\n        c = render(conf, e)\n        self.assertEqual(\"namespace: \", c)\n        e = parse_environment(\"\"\"namespace: !!str\"\"\")\n        c = render(conf, e)\n        self.assertEqual(\"namespace: !!str\", c)\n\n    def test_render_yaml(self):\n        conf = \"\"\"\n            namespace: ${namespace}\n            list_var: ${env_list}\n            dict_var: ${env_dict}\n            str_var: ${env_str}\n            nested_list:\n              - ${list_1}\n              - ${dict_1}\n              - ${str_1}\n            nested_dict:\n              a: ${list_1}\n              b: ${dict_1}\n              c: ${str_1}\n            empty: ${empty_string}\n            substr: prefix-${str_1}-suffix\n            multiple: 
${str_1}-${str_2}\n            dont_match_this: ${output something}\n        \"\"\"\n        env = \"\"\"\n            namespace: test\n            env_list: &listAnchor\n              - a\n              - b\n              - c\n            env_dict: &dictAnchor\n              a: 1\n              b: 2\n              c: 3\n            env_str: Hello World!\n            list_1: *listAnchor\n            dict_1: *dictAnchor\n            str_1: another str\n            str_2: hello\n            empty_string: \"\"\n        \"\"\"\n        e = parse_yaml_environment(env)\n        c = render(conf, e)\n\n        # Parse the YAML again, so that we can check structure\n        pc = yaml.safe_load(c)\n\n        exp_dict = {'a': 1, 'b': 2, 'c': 3}\n        exp_list = ['a', 'b', 'c']\n\n        self.assertEquals(pc['namespace'], 'test')\n        self.assertEquals(pc['list_var'], exp_list)\n        self.assertEquals(pc['dict_var'], exp_dict)\n        self.assertEquals(pc['str_var'], 'Hello World!')\n        self.assertEquals(pc['nested_list'][0], exp_list)\n        self.assertEquals(pc['nested_list'][1], exp_dict)\n        self.assertEquals(pc['nested_list'][2], 'another str')\n        self.assertEquals(pc['nested_dict']['a'], exp_list)\n        self.assertEquals(pc['nested_dict']['b'], exp_dict)\n        self.assertEquals(pc['nested_dict']['c'], 'another str')\n        self.assertEquals(pc['empty'], '')\n        self.assertEquals(pc['substr'], 'prefix-another str-suffix')\n        self.assertEquals(pc['multiple'], 'another str-hello')\n        self.assertEquals(pc['dont_match_this'], '${output something}')\n\n    def test_render_yaml_errors(self):\n        # We shouldn't be able to substitute an object into a string\n        conf = \"something: prefix-${var_name}\"\n        env = \"\"\"\n        var_name:\n            foo: bar\n        \"\"\"\n        e = parse_yaml_environment(env)\n        with self.assertRaises(exceptions.WrongEnvironmentType):\n            render(conf, e)\n\n 
       # Missing keys need to raise errors too\n        conf = \"something: ${variable}\"\n        env = \"some_other_variable: 5\"\n        e = parse_yaml_environment(env)\n        with self.assertRaises(exceptions.MissingEnvironment):\n            render(conf, e)\n\n    def test_config_validate_missing_stack_source(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                {\n                    \"name\": \"bastion\"}]})\n        with self.assertRaises(exceptions.InvalidConfig) as ex:\n            config.validate()\n\n        stack_errors = ex.exception.errors['stacks'][0]\n        self.assertEquals(\n            stack_errors['template_path'][0].__str__(),\n            \"class_path or template_path is required.\")\n        self.assertEquals(\n            stack_errors['class_path'][0].__str__(),\n            \"class_path or template_path is required.\")\n\n    def test_config_validate_missing_stack_source_when_locked(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                {\n                    \"name\": \"bastion\",\n                    \"locked\": True}]})\n        config.validate()\n\n    def test_config_validate_stack_class_and_template_paths(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                {\n                    \"name\": \"bastion\",\n                    \"class_path\": \"foo\",\n                    \"template_path\": \"bar\"}]})\n        with self.assertRaises(exceptions.InvalidConfig) as ex:\n            config.validate()\n\n        stack_errors = ex.exception.errors['stacks'][0]\n        self.assertEquals(\n            stack_errors['template_path'][0].__str__(),\n            \"class_path cannot be present when template_path is provided.\")\n        self.assertEquals(\n            stack_errors['class_path'][0].__str__(),\n            \"template_path cannot be present when 
class_path is provided.\")\n\n    def test_config_validate_missing_name(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                {\n                    \"class_path\": \"blueprints.Bastion\"}]})\n        with self.assertRaises(exceptions.InvalidConfig) as ex:\n            config.validate()\n\n        error = ex.exception.errors['stacks'][0]['name'].errors[0]\n        self.assertEquals(\n            error.__str__(),\n            \"This field is required.\")\n\n    def test_config_validate_duplicate_stack_names(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                {\n                    \"name\": \"bastion\",\n                    \"class_path\": \"blueprints.Bastion\"},\n                {\n                    \"name\": \"bastion\",\n                    \"class_path\": \"blueprints.BastionV2\"}]})\n        with self.assertRaises(exceptions.InvalidConfig) as ex:\n            config.validate()\n\n        error = ex.exception.errors['stacks'][0]\n        self.assertEquals(\n            error.__str__(),\n            \"Duplicate stack bastion found at index 0.\")\n\n    def test_dump_unicode(self):\n        config = Config()\n        config.namespace = \"test\"\n        self.assertEquals(dump(config), b\"\"\"namespace: test\nstacks: []\n\"\"\")\n\n        config = Config({\"namespace\": \"test\"})\n        # Ensure that we're producing standard yaml, that doesn't include\n        # python specific objects.\n        self.assertNotEquals(\n            dump(config), b\"namespace: !!python/unicode 'test'\\n\")\n        self.assertEquals(dump(config), b\"\"\"namespace: test\nstacks: []\n\"\"\")\n\n    def test_parse_tags(self):\n        config = parse(\"\"\"\n        namespace: prod\n        tags:\n          \"a:b\": \"c\"\n          \"hello\": 1\n          simple_tag: simple value\n        \"\"\")\n        self.assertEquals(config.tags, {\n            
\"a:b\": \"c\",\n            \"hello\": \"1\",\n            \"simple_tag\": \"simple value\"})\n\n    def test_parse_with_arbitrary_anchors(self):\n        config = parse(\"\"\"\n        namespace: prod\n        common_variables: &common_variables\n          Foo: bar\n        stacks:\n        - name: vpc\n          class_path: blueprints.VPC\n          variables:\n            << : *common_variables\n        \"\"\")\n\n        stack = config.stacks[0]\n        self.assertEquals(stack.variables, {\"Foo\": \"bar\"})\n\n    def test_parse_with_deprecated_parameters(self):\n        config = parse(\"\"\"\n        namespace: prod\n        stacks:\n        - name: vpc\n          class_path: blueprints.VPC\n          parameters:\n            Foo: bar\n        \"\"\")\n        with self.assertRaises(exceptions.InvalidConfig) as ex:\n            config.validate()\n\n        error = ex.exception.errors['stacks'][0]['parameters'][0]\n        self.assertEquals(\n            error.__str__(),\n            \"DEPRECATION: Stack definition vpc contains deprecated \"\n            \"'parameters', rather than 'variables'. You are required to update\"\n            \" your config. 
See https://stacker.readthedocs.io/en/latest/c\"\n            \"onfig.html#variables for additional information.\")\n\n    def test_config_build(self):\n        vpc = Stack({\"name\": \"vpc\", \"class_path\": \"blueprints.VPC\"})\n        config = Config({\"namespace\": \"prod\", \"stacks\": [vpc]})\n        self.assertEquals(config.namespace, \"prod\")\n        self.assertEquals(config.stacks[0].name, \"vpc\")\n        self.assertEquals(config[\"namespace\"], \"prod\")\n        config.validate()\n\n    def test_parse(self):\n        config_with_lists = \"\"\"\n        namespace: prod\n        stacker_bucket: stacker-prod\n        pre_build:\n          - path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        post_build:\n          - path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        pre_destroy:\n          - path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        post_destroy:\n          - path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        package_sources:\n          s3:\n            - bucket: acmecorpbucket\n              key: public/acmecorp-blueprints-v1.zip\n            - bucket: examplecorpbucket\n              key: public/examplecorp-blueprints-v2.tar.gz\n              requester_pays: true\n            - bucket: anotherexamplebucket\n              key: example-blueprints-v3.tar.gz\n              use_latest: false\n              paths:\n                - foo\n              configs:\n                - foo/config.yml\n          git:\n            - uri: git@github.com:acmecorp/stacker_blueprints.git\n            - uri: 
git@github.com:remind101/stacker_blueprints.git\n              tag: 1.0.0\n              paths:\n                - stacker_blueprints\n            - uri: git@github.com:contoso/webapp.git\n              branch: staging\n            - uri: git@github.com:contoso/foo.git\n              commit: 12345678\n              paths:\n                - bar\n              configs:\n                - bar/moreconfig.yml\n        tags:\n          environment: production\n        stacks:\n        - name: vpc\n          class_path: blueprints.VPC\n          variables:\n            PrivateSubnets:\n            - 10.0.0.0/24\n        - name: bastion\n          class_path: blueprints.Bastion\n          requires: ['vpc']\n          variables:\n            VpcId: ${output vpc::VpcId}\n        \"\"\"\n        config_with_dicts = \"\"\"\n        namespace: prod\n        stacker_bucket: stacker-prod\n        pre_build:\n          prebuild_createdomain:\n            path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        post_build:\n          postbuild_createdomain:\n            path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        pre_destroy:\n          predestroy_createdomain:\n            path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        post_destroy:\n          postdestroy_createdomain:\n            path: stacker.hooks.route53.create_domain\n            required: true\n            enabled: true\n            args:\n              domain: mydomain.com\n        package_sources:\n          s3:\n            - bucket: acmecorpbucket\n              key: public/acmecorp-blueprints-v1.zip\n            - bucket: examplecorpbucket\n              key: 
public/examplecorp-blueprints-v2.tar.gz\n              requester_pays: true\n            - bucket: anotherexamplebucket\n              key: example-blueprints-v3.tar.gz\n              use_latest: false\n              paths:\n                - foo\n              configs:\n                - foo/config.yml\n          git:\n            - uri: git@github.com:acmecorp/stacker_blueprints.git\n            - uri: git@github.com:remind101/stacker_blueprints.git\n              tag: 1.0.0\n              paths:\n                - stacker_blueprints\n            - uri: git@github.com:contoso/webapp.git\n              branch: staging\n            - uri: git@github.com:contoso/foo.git\n              commit: 12345678\n              paths:\n                - bar\n              configs:\n                - bar/moreconfig.yml\n        tags:\n          environment: production\n        stacks:\n          vpc:\n            class_path: blueprints.VPC\n            variables:\n              PrivateSubnets:\n              - 10.0.0.0/24\n          bastion:\n            class_path: blueprints.Bastion\n            requires: ['vpc']\n            variables:\n              VpcId: ${output vpc::VpcId}\n        \"\"\"\n\n        for raw_config in [config_with_lists, config_with_dicts]:\n            config = parse(raw_config)\n\n            config.validate()\n\n            self.assertEqual(config.namespace, \"prod\")\n            self.assertEqual(config.stacker_bucket, \"stacker-prod\")\n\n            for hooks in [config.pre_build, config.post_build,\n                          config.pre_destroy, config.post_destroy]:\n                self.assertEqual(\n                    hooks[0].path, \"stacker.hooks.route53.create_domain\")\n                self.assertEqual(\n                    hooks[0].required, True)\n                self.assertEqual(\n                    hooks[0].args, {\"domain\": \"mydomain.com\"})\n\n            self.assertEqual(\n                config.package_sources.s3[0].bucket,\n      
          \"acmecorpbucket\")\n            self.assertEqual(\n                config.package_sources.s3[0].key,\n                \"public/acmecorp-blueprints-v1.zip\")\n            self.assertEqual(\n                config.package_sources.s3[1].bucket,\n                \"examplecorpbucket\")\n            self.assertEqual(\n                config.package_sources.s3[1].key,\n                \"public/examplecorp-blueprints-v2.tar.gz\")\n            self.assertEqual(\n                config.package_sources.s3[1].requester_pays,\n                True)\n            self.assertEqual(\n                config.package_sources.s3[2].use_latest,\n                False)\n\n            self.assertEqual(\n                config.package_sources.git[0].uri,\n                \"git@github.com:acmecorp/stacker_blueprints.git\")\n            self.assertEqual(\n                config.package_sources.git[1].uri,\n                \"git@github.com:remind101/stacker_blueprints.git\")\n            self.assertEqual(\n                config.package_sources.git[1].tag,\n                \"1.0.0\")\n            self.assertEqual(\n                config.package_sources.git[1].paths,\n                [\"stacker_blueprints\"])\n            self.assertEqual(\n                config.package_sources.git[2].branch,\n                \"staging\")\n\n            self.assertEqual(config.tags, {\"environment\": \"production\"})\n\n            self.assertEqual(len(config.stacks), 2)\n\n            vpc_index = next(\n                i for (i, d) in enumerate(config.stacks) if d.name == \"vpc\"\n            )\n            vpc = config.stacks[vpc_index]\n            self.assertEqual(vpc.name, \"vpc\")\n            self.assertEqual(vpc.class_path, \"blueprints.VPC\")\n            self.assertEqual(vpc.requires, None)\n            self.assertEqual(vpc.variables,\n                             {\"PrivateSubnets\": [\"10.0.0.0/24\"]})\n\n            bastion_index = next(\n                i for (i, d) in 
enumerate(config.stacks) if d.name == \"bastion\"\n            )\n            bastion = config.stacks[bastion_index]\n            self.assertEqual(bastion.name, \"bastion\")\n            self.assertEqual(bastion.class_path, \"blueprints.Bastion\")\n            self.assertEqual(bastion.requires, [\"vpc\"])\n            self.assertEqual(bastion.variables,\n                             {\"VpcId\": \"${output vpc::VpcId}\"})\n\n    def test_dump_complex(self):\n        config = Config({\n            \"namespace\": \"prod\",\n            \"stacks\": [\n                Stack({\n                    \"name\": \"vpc\",\n                    \"class_path\": \"blueprints.VPC\"}),\n                Stack({\n                    \"name\": \"bastion\",\n                    \"class_path\": \"blueprints.Bastion\",\n                    \"requires\": [\"vpc\"]})]})\n\n        self.assertEqual(dump(config), b\"\"\"namespace: prod\nstacks:\n- class_path: blueprints.VPC\n  enabled: true\n  locked: false\n  name: vpc\n  protected: false\n- class_path: blueprints.Bastion\n  enabled: true\n  locked: false\n  name: bastion\n  protected: false\n  requires:\n  - vpc\n\"\"\")\n\n    def test_load_register_custom_lookups(self):\n        config = Config({\n            \"lookups\": {\n                \"custom\": \"importlib.import_module\"}})\n        load(config)\n        self.assertTrue(callable(LOOKUP_HANDLERS[\"custom\"]))\n\n    def test_load_adds_sys_path(self):\n        config = Config({\"sys_path\": \"/foo/bar\"})\n        load(config)\n        self.assertIn(\"/foo/bar\", sys.path)\n\n    def test_process_empty_remote_sources(self):\n        config = \"\"\"\n        namespace: prod\n        stacks:\n          - name: vpc\n            class_path: blueprints.VPC\n        \"\"\"\n        self.assertEqual(config, process_remote_sources(config))\n\n    def test_lookup_with_sys_path(self):\n        config = Config({\n            \"sys_path\": \"stacker/tests\",\n            \"lookups\": {\n       
         \"custom\": \"fixtures.mock_lookups.handler\"}})\n        load(config)\n        self.assertTrue(callable(LOOKUP_HANDLERS[\"custom\"]))\n\n    def test_render_parse_load_namespace_fallback(self):\n        conf = \"\"\"\n        stacks:\n        - name: vpc\n          class_path: blueprints.VPC\n        \"\"\"\n        config = render_parse_load(\n            conf, environment={\"namespace\": \"prod\"}, validate=False)\n        config.validate()\n        self.assertEquals(config.namespace, \"prod\")\n\n    def test_allow_most_keys_to_be_duplicates_for_overrides(self):\n        yaml_config = \"\"\"\n        namespace: prod\n        stacks:\n          - name: vpc\n            class_path: blueprints.VPC\n            variables:\n              CIDR: 192.168.1.0/24\n              CIDR: 192.168.2.0/24\n        \"\"\"\n        doc = parse(yaml_config)\n        self.assertEqual(\n            doc[\"stacks\"][0][\"variables\"][\"CIDR\"], \"192.168.2.0/24\"\n        )\n        yaml_config = \"\"\"\n        default_variables: &default_variables\n          CIDR: 192.168.1.0/24\n        namespace: prod\n        stacks:\n          - name: vpc\n            class_path: blueprints.VPC\n            variables:\n              << : *default_variables\n              CIDR: 192.168.2.0/24\n        \"\"\"\n        doc = parse(yaml_config)\n        self.assertEqual(\n            doc[\"stacks\"][0][\"variables\"][\"CIDR\"], \"192.168.2.0/24\"\n        )\n\n    def test_raise_constructor_error_on_keyword_duplicate_key(self):\n        \"\"\"Some keys should never have a duplicate sibling. 
For example we\n        treat `class_path` as a special \"keyword\" and disallow dupes.\"\"\"\n        yaml_config = \"\"\"\n        namespace: prod\n        stacks:\n          - name: vpc\n            class_path: blueprints.VPC\n            class_path: blueprints.Fake\n        \"\"\"\n        with self.assertRaises(ConstructorError):\n            parse(yaml_config)\n\n    def test_raise_construct_error_on_duplicate_stack_name_dict(self):\n        \"\"\"Some mappings should never have a duplicate children. For example we\n        treat `stacks` as a special mapping and disallow dupe children keys.\"\"\"\n        yaml_config = \"\"\"\n        namespace: prod\n        stacks:\n          my_vpc:\n            class_path: blueprints.VPC1\n          my_vpc:\n            class_path: blueprints.VPC2\n        \"\"\"\n        with self.assertRaises(ConstructorError):\n            parse(yaml_config)\n\n    def test_parse_invalid_inner_keys(self):\n        yaml_config = \"\"\"\n        namespace: prod\n        stacks:\n        - name: vpc\n          class_path: blueprints.VPC\n          garbage: yes\n          variables:\n            Foo: bar\n        \"\"\"\n\n        with self.assertRaises(exceptions.InvalidConfig):\n            parse(yaml_config)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "stacker/tests/test_context.py",
    "content": "import unittest\n\nfrom stacker.context import Context, get_fqn\nfrom stacker.config import load, Config\nfrom stacker.hooks.utils import handle_hooks\n\n\nclass TestContext(unittest.TestCase):\n\n    def setUp(self):\n        self.config = Config({\n            \"namespace\": \"namespace\",\n            \"stacks\": [\n                {\"name\": \"stack1\"}, {\"name\": \"stack2\"}]})\n\n    def test_context_optional_keys_set(self):\n        context = Context(\n            config=Config({}),\n            stack_names=[\"stack\"],\n        )\n        self.assertEqual(context.mappings, {})\n        self.assertEqual(context.stack_names, [\"stack\"])\n\n    def test_context_get_stacks(self):\n        context = Context(config=self.config)\n        self.assertEqual(len(context.get_stacks()), 2)\n\n    def test_context_get_stacks_dict_use_fqn(self):\n        context = Context(config=self.config)\n        stacks_dict = context.get_stacks_dict()\n        stack_names = sorted(stacks_dict.keys())\n        self.assertEqual(stack_names[0], \"namespace-stack1\")\n        self.assertEqual(stack_names[1], \"namespace-stack2\")\n\n    def test_context_get_fqn(self):\n        context = Context(config=self.config)\n        fqn = context.get_fqn()\n        self.assertEqual(fqn, \"namespace\")\n\n    def test_context_get_fqn_replace_dot(self):\n        context = Context(config=Config({\"namespace\": \"my.namespace\"}))\n        fqn = context.get_fqn()\n        self.assertEqual(fqn, \"my-namespace\")\n\n    def test_context_get_fqn_empty_namespace(self):\n        context = Context(config=Config({\"namespace\": \"\"}))\n        fqn = context.get_fqn(\"vpc\")\n        self.assertEqual(fqn, \"vpc\")\n        self.assertEqual(context.tags, {})\n\n    def test_context_namespace(self):\n        context = Context(config=Config({\"namespace\": \"namespace\"}))\n        self.assertEqual(context.namespace, \"namespace\")\n\n    def test_context_get_fqn_stack_name(self):\n        
context = Context(config=self.config)\n        fqn = context.get_fqn(\"stack1\")\n        self.assertEqual(fqn, \"namespace-stack1\")\n\n    def test_context_default_bucket_name(self):\n        context = Context(config=Config({\"namespace\": \"test\"}))\n        self.assertEqual(context.bucket_name, \"stacker-test\")\n\n    def test_context_bucket_name_is_overriden_but_is_none(self):\n        config = Config({\"namespace\": \"test\", \"stacker_bucket\": \"\"})\n        context = Context(config=config)\n        self.assertEqual(context.bucket_name, None)\n\n        config = Config({\"namespace\": \"test\", \"stacker_bucket\": None})\n        context = Context(config=config)\n        self.assertEqual(context.bucket_name, \"stacker-test\")\n\n    def test_context_bucket_name_is_overriden(self):\n        config = Config({\"namespace\": \"test\", \"stacker_bucket\": \"bucket123\"})\n        context = Context(config=config)\n        self.assertEqual(context.bucket_name, \"bucket123\")\n\n    def test_context_default_bucket_no_namespace(self):\n        context = Context(config=Config({\"namespace\": \"\"}))\n        self.assertEqual(context.bucket_name, None)\n\n        context = Context(config=Config({\"namespace\": None}))\n        self.assertEqual(context.bucket_name, None)\n\n        context = Context(\n            config=Config({\"namespace\": None, \"stacker_bucket\": \"\"}))\n        self.assertEqual(context.bucket_name, None)\n\n    def test_context_namespace_delimiter_is_overriden_and_not_none(self):\n        config = Config({\"namespace\": \"namespace\", \"namespace_delimiter\": \"_\"})\n        context = Context(config=config)\n        fqn = context.get_fqn(\"stack1\")\n        self.assertEqual(fqn, \"namespace_stack1\")\n\n    def test_context_namespace_delimiter_is_overriden_and_is_empty(self):\n        config = Config({\"namespace\": \"namespace\", \"namespace_delimiter\": \"\"})\n        context = Context(config=config)\n        fqn = 
context.get_fqn(\"stack1\")\n        self.assertEqual(fqn, \"namespacestack1\")\n\n    def test_context_tags_with_empty_map(self):\n        config = Config({\"namespace\": \"test\", \"tags\": {}})\n        context = Context(config=config)\n        self.assertEqual(context.tags, {})\n\n    def test_context_no_tags_specified(self):\n        config = Config({\"namespace\": \"test\"})\n        context = Context(config=config)\n        self.assertEqual(context.tags, {\"stacker_namespace\": \"test\"})\n\n    def test_hook_with_sys_path(self):\n        config = Config({\n            \"namespace\": \"test\",\n            \"sys_path\": \"stacker/tests\",\n            \"pre_build\": [\n                {\n                    \"data_key\": \"myHook\",\n                    \"path\": \"fixtures.mock_hooks.mock_hook\",\n                    \"required\": True,\n                    \"args\": {\n                        \"value\": \"mockResult\"}}]})\n        load(config)\n        context = Context(config=config)\n        stage = \"pre_build\"\n        handle_hooks(stage, context.config[stage], \"mock-region-1\", context)\n        self.assertEqual(\"mockResult\", context.hook_data[\"myHook\"][\"result\"])\n\n\nclass TestFunctions(unittest.TestCase):\n    \"\"\" Test the module level functions \"\"\"\n    def test_get_fqn_redundant_base(self):\n        base = \"woot\"\n        name = \"woot-blah\"\n        self.assertEqual(get_fqn(base, '-', name), name)\n        self.assertEqual(get_fqn(base, '', name), name)\n        self.assertEqual(get_fqn(base, '_', name), \"woot_woot-blah\")\n\n    def test_get_fqn_only_base(self):\n        base = \"woot\"\n        self.assertEqual(get_fqn(base, '-'), base)\n        self.assertEqual(get_fqn(base, ''), base)\n        self.assertEqual(get_fqn(base, '_'), base)\n\n    def test_get_fqn_full(self):\n        base = \"woot\"\n        name = \"blah\"\n        self.assertEqual(get_fqn(base, '-', name), \"%s-%s\" % (base, name))\n        
self.assertEqual(get_fqn(base, '', name), \"%s%s\" % (base, name))\n        self.assertEqual(get_fqn(base, '_', name), \"%s_%s\" % (base, name))\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "stacker/tests/test_dag.py",
    "content": "\"\"\" Tests on the DAG implementation \"\"\"\nimport threading\n\nimport pytest\n\nfrom stacker.dag import (\n    DAG,\n    DAGValidationError,\n    ThreadedWalker,\n    UnlimitedSemaphore\n)\n\n\n@pytest.fixture\ndef empty_dag():\n    return DAG()\n\n\n@pytest.fixture\ndef basic_dag():\n    dag = DAG()\n    dag.from_dict({'a': ['b', 'c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n    return dag\n\n\ndef test_add_node(empty_dag):\n    dag = empty_dag\n\n    dag.add_node('a')\n    assert dag.graph == {'a': set()}\n\n\ndef test_transpose(basic_dag):\n    dag = basic_dag\n\n    transposed = dag.transpose()\n    assert transposed.graph == {'d': set(['c', 'b']),\n                                'c': set(['a']),\n                                'b': set(['a']),\n                                'a': set([])}\n\n\ndef test_add_edge(empty_dag):\n    dag = empty_dag\n\n    dag.add_node('a')\n    dag.add_node('b')\n    dag.add_edge('a', 'b')\n    assert dag.graph == {'a': set('b'), 'b': set()}\n\n\ndef test_from_dict(empty_dag):\n    dag = empty_dag\n\n    dag.from_dict({'a': ['b', 'c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n    assert dag.graph == {'a': set(['b', 'c']),\n                         'b': set('d'),\n                         'c': set('d'),\n                         'd': set()}\n\n\ndef test_reset_graph(empty_dag):\n    dag = empty_dag\n\n    dag.add_node('a')\n    assert dag.graph == {'a': set()}\n    dag.reset_graph()\n    assert dag.graph == {}\n\n\ndef test_walk(empty_dag):\n    dag = empty_dag\n\n    # b and c should be executed at the same time.\n    dag.from_dict({'a': ['b', 'c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n\n    nodes = []\n\n    def walk_func(n):\n        nodes.append(n)\n        return True\n\n    dag.walk(walk_func)\n    assert nodes == ['d', 'c', 'b', 'a'] or 
nodes == ['d', 'b', 'c', 'a']\n\n\ndef test_ind_nodes(basic_dag):\n    dag = basic_dag\n    assert dag.ind_nodes() == ['a']\n\n\ndef test_topological_sort(empty_dag):\n    dag = empty_dag\n    dag.from_dict({'a': [],\n                   'b': ['a'],\n                   'c': ['b']})\n    assert dag.topological_sort() == ['c', 'b', 'a']\n\n\ndef test_successful_validation(basic_dag):\n    dag = basic_dag\n    assert dag.validate()[0] == True  # noqa: E712\n\n\ndef test_failed_validation(empty_dag):\n    dag = empty_dag\n\n    with pytest.raises(DAGValidationError):\n        dag.from_dict({'a': ['b'],\n                       'b': ['a']})\n\n\ndef test_downstream(basic_dag):\n    dag = basic_dag\n    assert set(dag.downstream('a')) == set(['b', 'c'])\n\n\ndef test_all_downstreams(basic_dag):\n    dag = basic_dag\n\n    assert dag.all_downstreams('a') == ['b', 'c', 'd']\n    assert dag.all_downstreams('b') == ['d']\n    assert dag.all_downstreams('d') == []\n\n\ndef test_all_downstreams_pass_graph(empty_dag):\n    dag = empty_dag\n    dag.from_dict({'a': ['c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n    assert dag.all_downstreams('a') == ['c', 'd']\n    assert dag.all_downstreams('b') == ['d']\n    assert dag.all_downstreams('d') == []\n\n\ndef test_predecessors(basic_dag):\n    dag = basic_dag\n\n    assert set(dag.predecessors('a')) == set([])\n    assert set(dag.predecessors('b')) == set(['a'])\n    assert set(dag.predecessors('c')) == set(['a'])\n    assert set(dag.predecessors('d')) == set(['b', 'c'])\n\n\ndef test_filter(basic_dag):\n    dag = basic_dag\n\n    dag2 = dag.filter(['b', 'c'])\n    assert dag2.graph == {'b': set('d'),\n                          'c': set('d'),\n                          'd': set()}\n\n\ndef test_all_leaves(basic_dag):\n    dag = basic_dag\n\n    assert dag.all_leaves() == ['d']\n\n\ndef test_size(basic_dag):\n    dag = basic_dag\n\n    assert dag.size() == 4\n    
dag.delete_node('a')\n    assert dag.size() == 3\n\n\ndef test_transitive_reduction_no_reduction(empty_dag):\n    dag = empty_dag\n    dag.from_dict({'a': ['b', 'c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n    dag.transitive_reduction()\n    assert dag.graph == {'a': set(['b', 'c']),\n                         'b': set('d'),\n                         'c': set('d'),\n                         'd': set()}\n\n\ndef test_transitive_reduction(empty_dag):\n    dag = empty_dag\n    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-G.svg\n    dag.from_dict({'a': ['b', 'c', 'd', 'e'],\n                   'b': ['d'],\n                   'c': ['d', 'e'],\n                   'd': ['e'],\n                   'e': []})\n    dag.transitive_reduction()\n    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-Gprime.svg\n    assert dag.graph == {'a': set(['b', 'c']),\n                         'b': set('d'),\n                         'c': set('d'),\n                         'd': set('e'),\n                         'e': set()}\n\n\ndef test_transitive_deep_reduction(empty_dag):\n    dag = empty_dag\n    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-G.svg\n    dag.from_dict({\n        'a': ['b', 'd'],\n        'b': ['c'],\n        'c': ['d'],\n        'd': [],\n    })\n    dag.transitive_reduction()\n    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-Gprime.svg\n    assert dag.graph == {'a': set('b'),\n                         'b': set('c'),\n                         'c': set('d'),\n                         'd': set()}\n\n\ndef test_threaded_walker(empty_dag):\n    dag = empty_dag\n\n    walker = ThreadedWalker(UnlimitedSemaphore())\n\n    # b and c should be executed at the same time.\n    dag.from_dict({'a': ['b', 'c'],\n                   'b': ['d'],\n                   'c': ['d'],\n                   'd': []})\n\n    lock = threading.Lock()  # 
Protects nodes from concurrent access\n    nodes = []\n\n    def walk_func(n):\n        lock.acquire()\n        nodes.append(n)\n        lock.release()\n        return True\n\n    walker.walk(dag, walk_func)\n    assert nodes == ['d', 'c', 'b', 'a'] or nodes == ['d', 'b', 'c', 'a']\n"
  },
  {
    "path": "stacker/tests/test_environment.py",
    "content": "import unittest\n\nfrom stacker.environment import (\n    DictWithSourceType,\n    parse_environment\n)\n\ntest_env = \"\"\"key1: value1\n# some: comment\n\n # here: about\n\n# key2\nkey2: value2\n\n# another comment here\nkey3: some:complex::value\n\n\n# one more here as well\nkey4: :otherValue:\nkey5: <another>@value\n\"\"\"\n\ntest_error_env = \"\"\"key1: valu1\nerror\n\"\"\"\n\n\nclass TestEnvironment(unittest.TestCase):\n\n    def test_simple_key_value_parsing(self):\n        parsed_env = parse_environment(test_env)\n        self.assertTrue(isinstance(parsed_env, DictWithSourceType))\n        self.assertEqual(parsed_env[\"key1\"], \"value1\")\n        self.assertEqual(parsed_env[\"key2\"], \"value2\")\n        self.assertEqual(parsed_env[\"key3\"], \"some:complex::value\")\n        self.assertEqual(parsed_env[\"key4\"], \":otherValue:\")\n        self.assertEqual(parsed_env[\"key5\"], \"<another>@value\")\n        self.assertEqual(len(parsed_env), 5)\n\n    def test_simple_key_value_parsing_exception(self):\n        with self.assertRaises(ValueError):\n            parse_environment(test_error_env)\n\n    def test_blank_value(self):\n        e = \"\"\"key1:\"\"\"\n        parsed = parse_environment(e)\n        self.assertEqual(parsed[\"key1\"], \"\")\n"
  },
  {
    "path": "stacker/tests/test_lookups.py",
    "content": "import unittest\n\nfrom stacker.lookups import extract_lookups, extract_lookups_from_string\n\n\nclass TestLookupExtraction(unittest.TestCase):\n\n    def test_no_lookups(self):\n        lookups = extract_lookups(\"value\")\n        self.assertEqual(lookups, set())\n\n    def test_single_lookup_string(self):\n        lookups = extract_lookups(\"${output fakeStack::FakeOutput}\")\n        self.assertEqual(len(lookups), 1)\n\n    def test_multiple_lookups_string(self):\n        lookups = extract_lookups(\n            \"url://${output fakeStack::FakeOutput}@\"\n            \"${output fakeStack::FakeOutput2}\"\n        )\n        self.assertEqual(len(lookups), 2)\n        self.assertEqual(list(lookups)[0].type, \"output\")\n\n    def test_lookups_list(self):\n        lookups = extract_lookups([\n            \"something\",\n            \"${output fakeStack::FakeOutput}\"\n        ])\n        self.assertEqual(len(lookups), 1)\n\n    def test_lookups_dict(self):\n        lookups = extract_lookups({\n            \"something\": \"${output fakeStack::FakeOutput}\",\n            \"other\": \"value\",\n        })\n        self.assertEqual(len(lookups), 1)\n\n    def test_lookups_mixed(self):\n        lookups = extract_lookups({\n            \"something\": \"${output fakeStack::FakeOutput}\",\n            \"list\": [\"value\", \"${output fakeStack::FakeOutput2}\"],\n            \"dict\": {\n                \"other\": \"value\",\n                \"another\": \"${output fakeStack::FakeOutput3}\",\n            },\n        })\n        self.assertEqual(len(lookups), 3)\n\n    def test_nested_lookups_string(self):\n        lookups = extract_lookups(\n            \"${noop ${output stack::Output},${output stack::Output2}}\"\n        )\n        self.assertEqual(len(lookups), 2)\n\n    def test_comma_delimited(self):\n        lookups = extract_lookups(\"${noop val1,val2}\")\n        self.assertEqual(len(lookups), 1)\n\n    def test_kms_lookup(self):\n        lookups = 
extract_lookups(\"${kms CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/}\")  # NOQA\n        self.assertEqual(len(lookups), 1)\n        lookup = list(lookups)[0]\n        self.assertEqual(lookup.type, \"kms\")\n        self.assertEqual(lookup.input, \"CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/\")  # NOQA\n\n    def test_kms_lookup_with_equals(self):\n        lookups = extract_lookups(\"${kms us-east-1@AQECAHjLp186mZ+mgXTQSytth/ibiIdwBm8CZAzZNSaSkSRqswAAAG4wbAYJKoZIhvcNAQcGoF8wXQIBADBYBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDLNmhGU6fe4vp175MAIBEIAr+8tUpi7SDzOZm+FFyYvWXhs4hEEyaazIn2dP8a+yHzZYDSVYGRpfUz34bQ==}\")  # NOQA\n        self.assertEqual(len(lookups), 1)\n        lookup = list(lookups)[0]\n        self.assertEqual(lookup.type, \"kms\")\n        self.assertEqual(lookup.input, \"us-east-1@AQECAHjLp186mZ+mgXTQSytth/ibiIdwBm8CZAzZNSaSkSRqswAAAG4wbAYJKoZIhvcNAQcGoF8wXQIBADBYBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDLNmhGU6fe4vp175MAIBEIAr+8tUpi7SDzOZm+FFyYvWXhs4hEEyaazIn2dP8a+yHzZYDSVYGRpfUz34bQ==\")  # NOQA\n\n    def test_kms_lookup_with_region(self):\n        lookups = extract_lookups(\"${kms us-west-2@CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/}\")  # NOQA\n        self.assertEqual(len(lookups), 1)\n        lookup = list(lookups)[0]\n        self.assertEqual(lookup.type, \"kms\")\n        
self.assertEqual(lookup.input, \"us-west-2@CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/\")  # NOQA\n\n    def test_kms_file_lookup(self):\n        lookups = extract_lookups(\"${kms file://path/to/some/file.txt}\")\n        self.assertEqual(len(lookups), 1)\n        lookup = list(lookups)[0]\n        self.assertEqual(lookup.type, \"kms\")\n        self.assertEqual(lookup.input, \"file://path/to/some/file.txt\")\n\n    def test_valid_extract_lookups_from_string(self):\n        _type = \"output\"\n        _input = \"vpc::PublicSubnets\"\n        value = \"${%s %s}\" % (_type, _input)\n        lookups = extract_lookups_from_string(value)\n        lookup = lookups.pop()\n        assert lookup.type == _type\n        assert lookup.input == _input\n        assert lookup.raw == \"%s %s\" % (_type, _input)\n"
  },
  {
    "path": "stacker/tests/test_parse_user_data.py",
    "content": "import unittest\n\nimport yaml\n\nfrom ..tokenize_userdata import cf_tokenize\n\n\nclass TestCfTokenize(unittest.TestCase):\n    def test_tokenize(self):\n        user_data = [\n            \"field0\",\n            \"Ref(\\\"SshKey\\\")\",\n            \"field1\",\n            \"Fn::GetAtt(\\\"Blah\\\", \\\"Woot\\\")\"\n        ]\n        ud = yaml.dump(user_data)\n        parts = cf_tokenize(ud)\n        self.assertIsInstance(parts[1], dict)\n        self.assertIsInstance(parts[3], dict)\n        self.assertEqual(parts[1][\"Ref\"], \"SshKey\")\n        self.assertEqual(parts[3][\"Fn::GetAtt\"], [\"Blah\", \"Woot\"])\n        self.assertEqual(len(parts), 5)\n"
  },
  {
    "path": "stacker/tests/test_plan.py",
    "content": "import os\nimport shutil\nimport tempfile\n\nimport unittest\nimport mock\n\nfrom stacker.context import Context, Config\nfrom stacker.dag import walk\nfrom stacker.util import stack_template_key_name\nfrom stacker.lookups.registry import (\n    register_lookup_handler,\n    unregister_lookup_handler,\n)\nfrom stacker.plan import (\n    Step,\n    build_plan,\n    build_graph,\n)\nfrom stacker.exceptions import (\n    CancelExecution,\n    GraphError,\n    PlanFailed,\n)\nfrom stacker.status import (\n    SUBMITTED,\n    COMPLETE,\n    SKIPPED,\n    FAILED,\n)\nfrom stacker.stack import Stack\n\nfrom .factories import generate_definition\n\ncount = 0\n\n\nclass TestStep(unittest.TestCase):\n\n    def setUp(self):\n        stack = mock.MagicMock()\n        stack.name = \"stack\"\n        stack.fqn = \"namespace-stack\"\n        self.step = Step(stack=stack, fn=None)\n\n    def test_status(self):\n        self.assertFalse(self.step.submitted)\n        self.assertFalse(self.step.completed)\n\n        self.step.submit()\n        self.assertEqual(self.step.status, SUBMITTED)\n        self.assertTrue(self.step.submitted)\n        self.assertFalse(self.step.completed)\n\n        self.step.complete()\n        self.assertEqual(self.step.status, COMPLETE)\n        self.assertNotEqual(self.step.status, SUBMITTED)\n        self.assertTrue(self.step.submitted)\n        self.assertTrue(self.step.completed)\n\n        self.assertNotEqual(self.step.status, True)\n        self.assertNotEqual(self.step.status, False)\n        self.assertNotEqual(self.step.status, 'banana')\n\n\nclass TestPlan(unittest.TestCase):\n\n    def setUp(self):\n        self.count = 0\n        self.config = Config({\"namespace\": \"namespace\"})\n        self.context = Context(config=self.config)\n        register_lookup_handler(\"noop\", lambda **kwargs: \"test\")\n\n    def tearDown(self):\n        unregister_lookup_handler(\"noop\")\n\n    def test_plan(self):\n        vpc = Stack(\n       
     definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            context=self.context)\n\n        graph = build_graph([\n            Step(vpc, fn=None), Step(bastion, fn=None)])\n        plan = build_plan(description=\"Test\", graph=graph)\n\n        self.assertEqual(plan.graph.to_dict(), {\n            'bastion.1': set(['vpc.1']),\n            'vpc.1': set([])})\n\n    def test_execute_plan(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            return COMPLETE\n\n        graph = build_graph([Step(vpc, fn), Step(bastion, fn)])\n        plan = build_plan(\n            description=\"Test\", graph=graph)\n        plan.execute(walk)\n\n        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])\n\n    def test_execute_plan_locked(self):\n        # Locked stacks still need to have their requires evaluated when\n        # they're being created.\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            locked=True,\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            return COMPLETE\n\n        graph = build_graph([Step(vpc, fn), Step(bastion, fn)])\n        plan = build_plan(\n            description=\"Test\", graph=graph)\n        plan.execute(walk)\n\n        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])\n\n    def 
test_execute_plan_filtered(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        db = Stack(\n            definition=generate_definition('db', 1, requires=[vpc.name]),\n            context=self.context)\n        app = Stack(\n            definition=generate_definition('app', 1, requires=[db.name]),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            return COMPLETE\n\n        graph = build_graph([\n            Step(vpc, fn), Step(db, fn), Step(app, fn)])\n        plan = build_plan(\n            description=\"Test\",\n            graph=graph,\n            targets=['db.1'])\n        plan.execute(walk)\n\n        self.assertEquals(calls, [\n            'namespace-vpc.1', 'namespace-db.1'])\n\n    def test_execute_plan_exception(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            if stack.name == vpc_step.name:\n                raise ValueError('Boom')\n            return COMPLETE\n\n        vpc_step = Step(vpc, fn)\n        bastion_step = Step(bastion, fn)\n\n        graph = build_graph([vpc_step, bastion_step])\n        plan = build_plan(description=\"Test\", graph=graph)\n\n        with self.assertRaises(PlanFailed):\n            plan.execute(walk)\n\n        self.assertEquals(calls, ['namespace-vpc.1'])\n        self.assertEquals(vpc_step.status, FAILED)\n\n    def test_execute_plan_skipped(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 
1, requires=[vpc.name]),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            if stack.fqn == vpc_step.name:\n                return SKIPPED\n            return COMPLETE\n\n        vpc_step = Step(vpc, fn)\n        bastion_step = Step(bastion, fn)\n\n        graph = build_graph([vpc_step, bastion_step])\n        plan = build_plan(description=\"Test\", graph=graph)\n        plan.execute(walk)\n\n        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])\n\n    def test_execute_plan_failed(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            context=self.context)\n        db = Stack(\n            definition=generate_definition('db', 1),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            calls.append(stack.fqn)\n            if stack.name == vpc_step.name:\n                return FAILED\n            return COMPLETE\n\n        vpc_step = Step(vpc, fn)\n        bastion_step = Step(bastion, fn)\n        db_step = Step(db, fn)\n\n        graph = build_graph([\n            vpc_step, bastion_step, db_step])\n        plan = build_plan(description=\"Test\", graph=graph)\n        with self.assertRaises(PlanFailed):\n            plan.execute(walk)\n\n        calls.sort()\n\n        self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1'])\n\n    def test_execute_plan_cancelled(self):\n        vpc = Stack(\n            definition=generate_definition('vpc', 1),\n            context=self.context)\n        bastion = Stack(\n            definition=generate_definition('bastion', 1, requires=[vpc.name]),\n            context=self.context)\n\n        calls = []\n\n        def fn(stack, status=None):\n            
calls.append(stack.fqn)\n            if stack.fqn == vpc_step.name:\n                raise CancelExecution\n            return COMPLETE\n\n        vpc_step = Step(vpc, fn)\n        bastion_step = Step(bastion, fn)\n\n        graph = build_graph([vpc_step, bastion_step])\n        plan = build_plan(description=\"Test\", graph=graph)\n        plan.execute(walk)\n\n        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])\n\n    def test_build_graph_missing_dependency(self):\n        bastion = Stack(\n            definition=generate_definition(\n                'bastion', 1, requires=['vpc.1']),\n            context=self.context)\n\n        with self.assertRaises(GraphError) as expected:\n            build_graph([Step(bastion, None)])\n        message_starts = (\n            \"Error detected when adding 'vpc.1' \"\n            \"as a dependency of 'bastion.1':\"\n        )\n        message_contains = \"dependent node vpc.1 does not exist\"\n        self.assertTrue(str(expected.exception).startswith(message_starts))\n        self.assertTrue(message_contains in str(expected.exception))\n\n    def test_build_graph_cyclic_dependencies(self):\n        vpc = Stack(\n            definition=generate_definition(\n                'vpc', 1),\n            context=self.context)\n        db = Stack(\n            definition=generate_definition(\n                'db', 1, requires=['app.1']),\n            context=self.context)\n        app = Stack(\n            definition=generate_definition(\n                'app', 1, requires=['db.1']),\n            context=self.context)\n\n        with self.assertRaises(GraphError) as expected:\n            build_graph([Step(vpc, None), Step(db, None), Step(app, None)])\n        message = (\"Error detected when adding 'db.1' \"\n                   \"as a dependency of 'app.1': graph is \"\n                   \"not acyclic\")\n        self.assertEqual(str(expected.exception), message)\n\n    def test_dump(self, *args):\n        
requires = None\n        steps = []\n\n        for i in range(5):\n            overrides = {\n                \"variables\": {\n                    \"PublicSubnets\": \"1\",\n                    \"SshKeyName\": \"1\",\n                    \"PrivateSubnets\": \"1\",\n                    \"Random\": \"${noop something}\",\n                },\n                \"requires\": requires,\n            }\n\n            stack = Stack(\n                definition=generate_definition('vpc', i, **overrides),\n                context=self.context)\n            requires = [stack.name]\n\n            steps += [Step(stack, None)]\n\n        graph = build_graph(steps)\n        plan = build_plan(description=\"Test\", graph=graph)\n\n        tmp_dir = tempfile.mkdtemp()\n        try:\n            plan.dump(tmp_dir, context=self.context)\n\n            for step in plan.steps:\n                template_path = os.path.join(\n                    tmp_dir,\n                    stack_template_key_name(step.stack.blueprint))\n                self.assertTrue(os.path.isfile(template_path))\n        finally:\n            shutil.rmtree(tmp_dir)\n"
  },
  {
    "path": "stacker/tests/test_stack.py",
    "content": "from mock import MagicMock\nimport unittest\n\nfrom stacker.lookups import register_lookup_handler\nfrom stacker.context import Context\nfrom stacker.config import Config\nfrom stacker.stack import Stack\nfrom .factories import generate_definition\n\n\nclass TestStack(unittest.TestCase):\n\n    def setUp(self):\n        self.sd = {\"name\": \"test\"}\n        self.config = Config({\"namespace\": \"namespace\"})\n        self.context = Context(config=self.config)\n        self.stack = Stack(\n            definition=generate_definition(\"vpc\", 1),\n            context=self.context,\n        )\n        register_lookup_handler(\"noop\", lambda **kwargs: \"test\")\n\n    def test_stack_requires(self):\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1,\n            variables={\n                \"Var1\": \"${noop fakeStack3::FakeOutput}\",\n                \"Var2\": (\n                    \"some.template.value:${output fakeStack2::FakeOutput}:\"\n                    \"${output fakeStack::FakeOutput}\"\n                ),\n                \"Var3\": \"${output fakeStack::FakeOutput},\"\n                        \"${output fakeStack2::FakeOutput}\",\n            },\n            requires=[\"fakeStack\"],\n        )\n        stack = Stack(definition=definition, context=self.context)\n        self.assertEqual(len(stack.requires), 2)\n        self.assertIn(\n            \"fakeStack\",\n            stack.requires,\n        )\n        self.assertIn(\n            \"fakeStack2\",\n            stack.requires,\n        )\n\n    def test_stack_requires_circular_ref(self):\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1,\n            variables={\n                \"Var1\": \"${output vpc.1::FakeOutput}\",\n            },\n        )\n        stack = Stack(definition=definition, context=self.context)\n        with self.assertRaises(ValueError):\n            stack.requires\n\n 
   def test_stack_cfn_parameters(self):\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1,\n            variables={\n                \"Param1\": \"${output fakeStack::FakeOutput}\",\n            },\n        )\n        stack = Stack(definition=definition, context=self.context)\n        stack._blueprint = MagicMock()\n        stack._blueprint.get_parameter_values.return_value = {\n            \"Param2\": \"Some Resolved Value\",\n        }\n        self.assertEqual(len(stack.parameter_values), 1)\n        param = stack.parameter_values[\"Param2\"]\n        self.assertEqual(param, \"Some Resolved Value\")\n\n    def test_stack_tags_default(self):\n        self.config.tags = {\"environment\": \"prod\"}\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1\n        )\n        stack = Stack(definition=definition, context=self.context)\n        self.assertEquals(stack.tags, {\"environment\": \"prod\"})\n\n    def test_stack_tags_override(self):\n        self.config.tags = {\"environment\": \"prod\"}\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1,\n            tags={\"environment\": \"stage\"}\n        )\n        stack = Stack(definition=definition, context=self.context)\n        self.assertEquals(stack.tags, {\"environment\": \"stage\"})\n\n    def test_stack_tags_extra(self):\n        self.config.tags = {\"environment\": \"prod\"}\n        definition = generate_definition(\n            base_name=\"vpc\",\n            stack_id=1,\n            tags={\"app\": \"graph\"}\n        )\n        stack = Stack(definition=definition, context=self.context)\n        self.assertEquals(stack.tags, {\"environment\": \"prod\", \"app\": \"graph\"})\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "stacker/tests/test_stacker.py",
    "content": "import unittest\n\nfrom stacker.commands import Stacker\nfrom stacker.exceptions import InvalidConfig\n\n\nclass TestStacker(unittest.TestCase):\n\n    def test_stacker_build_parse_args(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-r\", \"us-west-2\",\n             \"-e\", \"namespace=test.override\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web.yaml\"]\n        )\n        self.assertEqual(args.region, \"us-west-2\")\n        self.assertFalse(args.outline)\n        # verify namespace was modified\n        self.assertEqual(args.environment[\"namespace\"], \"test.override\")\n\n    def test_stacker_build_parse_args_region_from_env(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-e\", \"namespace=test.override\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web.yaml\"]\n        )\n        self.assertEqual(args.region, None)\n\n    def test_stacker_build_context_passed_to_blueprint(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-r\", \"us-west-2\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web.yaml\"]\n        )\n        stacker.configure(args)\n        stacks_dict = args.context.get_stacks_dict()\n        blueprint = stacks_dict[args.context.get_fqn(\"bastion\")].blueprint\n        self.assertTrue(hasattr(blueprint, \"context\"))\n        blueprint.render_template()\n        # verify that the bastion blueprint only contains blueprint variables,\n        # not BaseDomain, AZCount or CidrBlock. 
Any variables that get passed\n        # in from the command line shouldn't be resolved at the blueprint level\n        self.assertNotIn(\"BaseDomain\", blueprint.template.parameters)\n        self.assertNotIn(\"AZCount\", blueprint.template.parameters)\n        self.assertNotIn(\"CidrBlock\", blueprint.template.parameters)\n\n    def test_stacker_blueprint_property_access_does_not_reset_blueprint(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-r\", \"us-west-2\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web.yaml\"]\n        )\n        stacker.configure(args)\n        stacks_dict = args.context.get_stacks_dict()\n        bastion_stack = stacks_dict[args.context.get_fqn(\"bastion\")]\n        bastion_stack.blueprint.render_template()\n        self.assertIn(\"DefaultSG\", bastion_stack.blueprint.template.parameters)\n\n    def test_stacker_build_context_stack_names_specified(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-r\", \"us-west-2\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web.yaml\",\n             \"--stacks\", \"vpc\",\n             \"--stacks\", \"bastion\"]\n        )\n        stacker.configure(args)\n        stacks = args.context.get_stacks()\n        self.assertEqual(len(stacks), 2)\n\n    def test_stacker_build_fail_when_parameters_in_stack_def(self):\n        stacker = Stacker()\n        args = stacker.parse_args(\n            [\"build\",\n             \"-r\", \"us-west-2\",\n             \"stacker/tests/fixtures/basic.env\",\n             \"stacker/tests/fixtures/vpc-bastion-db-web-pre-1.0.yaml\"]\n        )\n        with self.assertRaises(InvalidConfig):\n            stacker.configure(args)\n\n    def test_stacker_build_custom_info_log_format(self):\n        stacker = Stacker()\n        args 
= stacker.parse_args(\n            [\n                \"build\", \"-r\", \"us-west-2\",\n                \"stacker/tests/fixtures/not-basic.env\",\n                \"stacker/tests/fixtures/vpc-custom-log-format-info.yaml\"\n            ]\n        )\n        stacker.configure(args)\n        self.assertEqual(\n            stacker.config.log_formats[\"info\"],\n            '[%(asctime)s] test custom log format - %(message)s'\n        )\n        self.assertIsNone(\n            stacker.config.log_formats.get(\"color\")\n        )\n        self.assertIsNone(\n            stacker.config.log_formats.get(\"debug\")\n        )\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "stacker/tests/test_util.py",
    "content": "\nimport unittest\n\nimport string\nimport os\nimport queue\n\nimport mock\n\nimport boto3\n\nfrom stacker.config import Hook, GitPackageSource\nfrom stacker.util import (\n    cf_safe_name,\n    load_object_from_string,\n    camel_to_snake,\n    merge_map,\n    yaml_to_ordered_dict,\n    get_client_region,\n    get_s3_endpoint,\n    s3_bucket_location_constraint,\n    parse_cloudformation_template,\n    Extractor,\n    TarExtractor,\n    TarGzipExtractor,\n    ZipExtractor,\n    SourceProcessor\n)\n\nfrom stacker.hooks.utils import handle_hooks\n\nfrom .factories import (\n    mock_context,\n    mock_provider,\n)\n\nregions = [\"us-east-1\", \"cn-north-1\", \"ap-northeast-1\", \"eu-west-1\",\n           \"ap-southeast-1\", \"ap-southeast-2\", \"us-west-2\", \"us-gov-west-1\",\n           \"us-west-1\", \"eu-central-1\", \"sa-east-1\"]\n\n\ndef mock_create_cache_directories(self, **kwargs):\n    # Don't actually need the directories created in testing\n    return 1\n\n\nclass TestUtil(unittest.TestCase):\n\n    def test_cf_safe_name(self):\n        tests = (\n            (\"abc-def\", \"AbcDef\"),\n            (\"GhI\", \"GhI\"),\n            (\"jKlm.noP\", \"JKlmNoP\")\n        )\n        for t in tests:\n            self.assertEqual(cf_safe_name(t[0]), t[1])\n\n    def test_load_object_from_string(self):\n        tests = (\n            (\"string.Template\", string.Template),\n            (\"os.path.basename\", os.path.basename),\n            (\"string.ascii_letters\", string.ascii_letters)\n        )\n        for t in tests:\n            self.assertIs(load_object_from_string(t[0]), t[1])\n\n    def test_camel_to_snake(self):\n        tests = (\n            (\"TestTemplate\", \"test_template\"),\n            (\"testTemplate\", \"test_template\"),\n            (\"test_Template\", \"test__template\"),\n            (\"testtemplate\", \"testtemplate\"),\n        )\n        for t in tests:\n            self.assertEqual(camel_to_snake(t[0]), t[1])\n\n    
def test_merge_map(self):\n        tests = [\n            # 2 lists of stacks defined\n            [{'stacks': [{'stack1': {'variables': {'a': 'b'}}}]},\n             {'stacks': [{'stack2': {'variables': {'c': 'd'}}}]},\n             {'stacks': [\n                 {'stack1': {\n                     'variables': {\n                         'a': 'b'}}},\n                 {'stack2': {\n                     'variables': {\n                         'c': 'd'}}}]}],\n            # A list of stacks combined with a higher precedence dict of stacks\n            [{'stacks': [{'stack1': {'variables': {'a': 'b'}}}]},\n             {'stacks': {'stack2': {'variables': {'c': 'd'}}}},\n             {'stacks': {'stack2': {'variables': {'c': 'd'}}}}],\n            # 2 dicts of stacks with non-overlapping variables merged\n            [{'stacks': {'stack1': {'variables': {'a': 'b'}}}},\n             {'stacks': {'stack1': {'variables': {'c': 'd'}}}},\n             {'stacks': {\n                 'stack1': {\n                     'variables': {\n                         'a': 'b',\n                         'c': 'd'}}}}],\n            # 2 dicts of stacks with overlapping variables merged\n            [{'stacks': {'stack1': {'variables': {'a': 'b'}}}},\n             {'stacks': {'stack1': {'variables': {'a': 'c'}}}},\n             {'stacks': {'stack1': {'variables': {'a': 'c'}}}}],\n        ]\n        for t in tests:\n            self.assertEqual(merge_map(t[0], t[1]), t[2])\n\n    def test_yaml_to_ordered_dict(self):\n        raw_config = \"\"\"\n        pre_build:\n          hook2:\n            path: foo.bar\n          hook1:\n            path: foo1.bar1\n        \"\"\"\n        config = yaml_to_ordered_dict(raw_config)\n        self.assertEqual(list(config['pre_build'].keys())[0], 'hook2')\n        self.assertEqual(config['pre_build']['hook2']['path'], 'foo.bar')\n\n    def test_get_client_region(self):\n        regions = [\"us-east-1\", \"us-west-1\", \"eu-west-1\", \"sa-east-1\"]\n      
  for region in regions:\n            client = boto3.client(\"s3\", region_name=region)\n            self.assertEqual(get_client_region(client), region)\n\n    def test_get_s3_endpoint(self):\n        endpoint_url = \"https://example.com\"\n        client = boto3.client(\"s3\", region_name=\"us-east-1\",\n                              endpoint_url=endpoint_url)\n        self.assertEqual(get_s3_endpoint(client), endpoint_url)\n\n    def test_s3_bucket_location_constraint(self):\n        tests = (\n            (\"us-east-1\", \"\"),\n            (\"us-west-1\", \"us-west-1\")\n        )\n        for region, result in tests:\n            self.assertEqual(\n                s3_bucket_location_constraint(region),\n                result\n            )\n\n    def test_parse_cloudformation_template(self):\n        template = \"\"\"AWSTemplateFormatVersion: \"2010-09-09\"\nParameters:\n  Param1:\n    Type: String\nResources:\n  Bucket:\n    Type: AWS::S3::Bucket\n    Properties:\n      BucketName:\n        !Join\n          - \"-\"\n          - - !Ref \"AWS::StackName\"\n            - !Ref \"AWS::Region\"\nOutputs:\n  DummyId:\n    Value: dummy-1234\"\"\"\n        parsed_template = {\n            'AWSTemplateFormatVersion': '2010-09-09',\n            'Outputs': {'DummyId': {'Value': 'dummy-1234'}},\n            'Parameters': {'Param1': {'Type': 'String'}},\n            'Resources': {\n                'Bucket': {'Type': 'AWS::S3::Bucket',\n                           'Properties': {\n                               'BucketName': {\n                                   u'Fn::Join': [\n                                       '-',\n                                       [{u'Ref': u'AWS::StackName'},\n                                        {u'Ref': u'AWS::Region'}]\n                                   ]\n                               }\n                           }}\n            }\n        }\n        self.assertEqual(\n            parse_cloudformation_template(template),\n            
parsed_template\n        )\n\n    def test_extractors(self):\n        self.assertEqual(Extractor('test.zip').archive, 'test.zip')\n        self.assertEqual(TarExtractor().extension(), '.tar')\n        self.assertEqual(TarGzipExtractor().extension(), '.tar.gz')\n        self.assertEqual(ZipExtractor().extension(), '.zip')\n        for i in [TarExtractor(), ZipExtractor(), ZipExtractor()]:\n            i.set_archive('/tmp/foo')\n            self.assertEqual(i.archive.endswith(i.extension()), True)\n\n    def test_SourceProcessor_helpers(self):\n        with mock.patch.object(SourceProcessor,\n                               'create_cache_directories',\n                               new=mock_create_cache_directories):\n            sp = SourceProcessor(sources={})\n\n            self.assertEqual(\n                sp.sanitize_git_path('git@github.com:foo/bar.git'),\n                'git_github.com_foo_bar'\n            )\n            self.assertEqual(\n                sp.sanitize_uri_path('http://example.com/foo/bar.gz@1'),\n                'http___example.com_foo_bar.gz_1'\n            )\n            self.assertEqual(\n                sp.sanitize_git_path('git@github.com:foo/bar.git', 'v1'),\n                'git_github.com_foo_bar-v1'\n            )\n\n            for i in [GitPackageSource({'branch': 'foo'}), {'branch': 'foo'}]:\n                self.assertEqual(\n                    sp.determine_git_ls_remote_ref(i),\n                    'refs/heads/foo'\n                )\n            for i in [{'uri': 'git@foo'}, {'tag': 'foo'}, {'commit': '1234'}]:\n                self.assertEqual(\n                    sp.determine_git_ls_remote_ref(GitPackageSource(i)),\n                    'HEAD'\n                )\n                self.assertEqual(\n                    sp.determine_git_ls_remote_ref(i),\n                    'HEAD'\n                )\n\n            self.assertEqual(\n                sp.git_ls_remote('https://github.com/remind101/stacker.git',\n                 
                'refs/heads/release-1.0'),\n                b'857b4834980e582874d70feef77bb064b60762d1'\n            )\n\n            bad_configs = [{'uri': 'x',\n                            'commit': '1234',\n                            'tag': 'v1',\n                            'branch': 'x'},\n                           {'uri': 'x', 'commit': '1234', 'tag': 'v1'},\n                           {'uri': 'x', 'commit': '1234', 'branch': 'x'},\n                           {'uri': 'x', 'tag': 'v1', 'branch': 'x'},\n                           {'uri': 'x', 'commit': '1234', 'branch': 'x'}]\n            for i in bad_configs:\n                with self.assertRaises(ImportError):\n                    sp.determine_git_ref(GitPackageSource(i))\n                with self.assertRaises(ImportError):\n                    sp.determine_git_ref(i)\n\n            self.assertEqual(\n                sp.determine_git_ref(\n                    GitPackageSource({'uri': 'https://github.com/remind101/'\n                                             'stacker.git',\n                                      'branch': 'release-1.0'})),\n                '857b4834980e582874d70feef77bb064b60762d1'\n            )\n            self.assertEqual(\n                sp.determine_git_ref(\n                    GitPackageSource({'uri': 'git@foo', 'commit': '1234'})),\n                '1234'\n            )\n            self.assertEqual(\n                sp.determine_git_ref({'uri': 'git@foo', 'commit': '1234'}),\n                '1234'\n            )\n            self.assertEqual(\n                sp.determine_git_ref(\n                    GitPackageSource({'uri': 'git@foo', 'tag': 'v1.0.0'})),\n                'v1.0.0'\n            )\n            self.assertEqual(\n                sp.determine_git_ref({'uri': 'git@foo', 'tag': 'v1.0.0'}),\n                'v1.0.0'\n            )\n\n\nhook_queue = queue.Queue()\n\n\ndef mock_hook(*args, **kwargs):\n    hook_queue.put(kwargs)\n    return True\n\n\ndef 
fail_hook(*args, **kwargs):\n    return None\n\n\ndef exception_hook(*args, **kwargs):\n    raise Exception\n\n\ndef context_hook(*args, **kwargs):\n    return \"context\" in kwargs\n\n\ndef result_hook(*args, **kwargs):\n    return {\"foo\": \"bar\"}\n\n\nclass TestHooks(unittest.TestCase):\n\n    def setUp(self):\n        self.context = mock_context(namespace=\"namespace\")\n        self.provider = mock_provider(region=\"us-east-1\")\n\n    def test_empty_hook_stage(self):\n        hooks = []\n        handle_hooks(\"fake\", hooks, self.provider, self.context)\n        self.assertTrue(hook_queue.empty())\n\n    def test_missing_required_hook(self):\n        hooks = [Hook({\"path\": \"not.a.real.path\", \"required\": True})]\n        with self.assertRaises(ImportError):\n            handle_hooks(\"missing\", hooks, self.provider, self.context)\n\n    def test_missing_required_hook_method(self):\n        hooks = [{\"path\": \"stacker.hooks.blah\", \"required\": True}]\n        with self.assertRaises(AttributeError):\n            handle_hooks(\"missing\", hooks, self.provider, self.context)\n\n    def test_missing_non_required_hook_method(self):\n        hooks = [Hook({\"path\": \"stacker.hooks.blah\", \"required\": False})]\n        handle_hooks(\"missing\", hooks, self.provider, self.context)\n        self.assertTrue(hook_queue.empty())\n\n    def test_default_required_hook(self):\n        hooks = [Hook({\"path\": \"stacker.hooks.blah\"})]\n        with self.assertRaises(AttributeError):\n            handle_hooks(\"missing\", hooks, self.provider, self.context)\n\n    def test_valid_hook(self):\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.mock_hook\",\n                  \"required\": True})]\n        handle_hooks(\"missing\", hooks, self.provider, self.context)\n        good = hook_queue.get_nowait()\n        self.assertEqual(good[\"provider\"].region, \"us-east-1\")\n        with self.assertRaises(queue.Empty):\n            
hook_queue.get_nowait()\n\n    def test_valid_enabled_hook(self):\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.mock_hook\",\n                  \"required\": True, \"enabled\": True})]\n        handle_hooks(\"missing\", hooks, self.provider, self.context)\n        good = hook_queue.get_nowait()\n        self.assertEqual(good[\"provider\"].region, \"us-east-1\")\n        with self.assertRaises(queue.Empty):\n            hook_queue.get_nowait()\n\n    def test_valid_enabled_false_hook(self):\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.mock_hook\",\n                  \"required\": True, \"enabled\": False})]\n        handle_hooks(\"missing\", hooks, self.provider, self.context)\n        self.assertTrue(hook_queue.empty())\n\n    def test_context_provided_to_hook(self):\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.context_hook\",\n                  \"required\": True})]\n        handle_hooks(\"missing\", hooks, \"us-east-1\", self.context)\n\n    def test_hook_failure(self):\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.fail_hook\",\n                  \"required\": True})]\n        with self.assertRaises(SystemExit):\n            handle_hooks(\"fail\", hooks, self.provider, self.context)\n        hooks = [{\"path\": \"stacker.tests.test_util.exception_hook\",\n                  \"required\": True}]\n        with self.assertRaises(Exception):\n            handle_hooks(\"fail\", hooks, self.provider, self.context)\n        hooks = [\n            Hook({\"path\": \"stacker.tests.test_util.exception_hook\",\n                  \"required\": False})]\n        # Should pass\n        handle_hooks(\"ignore_exception\", hooks, self.provider, self.context)\n\n    def test_return_data_hook(self):\n        hooks = [\n            Hook({\n                \"path\": \"stacker.tests.test_util.result_hook\",\n                \"data_key\": \"my_hook_results\"\n            
}),\n            # Shouldn't return data\n            Hook({\n                \"path\": \"stacker.tests.test_util.context_hook\"\n            })\n        ]\n        handle_hooks(\"result\", hooks, \"us-east-1\", self.context)\n\n        self.assertEqual(\n            self.context.hook_data[\"my_hook_results\"][\"foo\"],\n            \"bar\"\n        )\n        # Verify only the first hook resulted in stored data\n        self.assertEqual(\n            list(self.context.hook_data.keys()), [\"my_hook_results\"]\n        )\n\n    def test_return_data_hook_duplicate_key(self):\n        hooks = [\n            Hook({\n                \"path\": \"stacker.tests.test_util.result_hook\",\n                \"data_key\": \"my_hook_results\"\n            }),\n            Hook({\n                \"path\": \"stacker.tests.test_util.result_hook\",\n                \"data_key\": \"my_hook_results\"\n            })\n        ]\n\n        with self.assertRaises(KeyError):\n            handle_hooks(\"result\", hooks, \"us-east-1\", self.context)\n\n\nclass TestException1(Exception):\n    pass\n\n\nclass TestException2(Exception):\n    pass\n\n\nclass TestExceptionRetries(unittest.TestCase):\n    def setUp(self):\n        self.counter = 0\n\n    def _works_immediately(self, a, b, x=None, y=None):\n        self.counter += 1\n        return [a, b, x, y]\n\n    def _works_second_attempt(self, a, b, x=None, y=None):\n        self.counter += 1\n        if self.counter == 2:\n            return [a, b, x, y]\n        raise Exception(\"Broke.\")\n\n    def _second_raises_exception2(self, a, b, x=None, y=None):\n        self.counter += 1\n        if self.counter == 2:\n            return [a, b, x, y]\n        raise TestException2(\"Broke.\")\n\n    def _throws_exception2(self, a, b, x=None, y=None):\n        self.counter += 1\n        raise TestException2(\"Broke.\")\n"
  },
  {
    "path": "stacker/tests/test_variables.py",
    "content": "\nimport unittest\n\nfrom mock import MagicMock\n\nfrom troposphere import s3\nfrom stacker.blueprints.variables.types import TroposphereType\nfrom stacker.variables import Variable\nfrom stacker.lookups import register_lookup_handler\nfrom stacker.stack import Stack\n\n\nfrom .factories import generate_definition\n\n\nclass TestVariables(unittest.TestCase):\n\n    def setUp(self):\n        self.provider = MagicMock()\n        self.context = MagicMock()\n\n    def test_variable_replace_no_lookups(self):\n        var = Variable(\"Param1\", \"2\")\n        self.assertEqual(var.value, \"2\")\n\n    def test_variable_replace_simple_lookup(self):\n        var = Variable(\"Param1\", \"${output fakeStack::FakeOutput}\")\n        var._value._resolve(\"resolved\")\n        self.assertEqual(var.value, \"resolved\")\n\n    def test_variable_resolve_simple_lookup(self):\n        stack = Stack(\n            definition=generate_definition(\"vpc\", 1),\n            context=self.context)\n        stack.set_outputs({\n            \"FakeOutput\": \"resolved\",\n            \"FakeOutput2\": \"resolved2\",\n        })\n\n        self.context.get_stack.return_value = stack\n\n        var = Variable(\"Param1\", \"${output fakeStack::FakeOutput}\")\n        var.resolve(self.context, self.provider)\n        self.assertTrue(var.resolved)\n        self.assertEqual(var.value, \"resolved\")\n\n    def test_variable_resolve_default_lookup_empty(self):\n        var = Variable(\"Param1\", \"${default fakeStack::}\")\n        var.resolve(self.context, self.provider)\n        self.assertTrue(var.resolved)\n        self.assertEqual(var.value, \"\")\n\n    def test_variable_replace_multiple_lookups_string(self):\n        var = Variable(\n            \"Param1\",\n            \"url://\"  # 0\n            \"${output fakeStack::FakeOutput}\"  # 1\n            \"@\"  # 2\n            \"${output fakeStack::FakeOutput2}\",  # 3\n        )\n        var._value[1]._resolve(\"resolved\")\n      
  var._value[3]._resolve(\"resolved2\")\n        self.assertEqual(var.value, \"url://resolved@resolved2\")\n\n    def test_variable_resolve_multiple_lookups_string(self):\n        var = Variable(\n            \"Param1\",\n            \"url://${output fakeStack::FakeOutput}@\"\n            \"${output fakeStack::FakeOutput2}\",\n        )\n\n        stack = Stack(\n            definition=generate_definition(\"vpc\", 1),\n            context=self.context)\n        stack.set_outputs({\n            \"FakeOutput\": \"resolved\",\n            \"FakeOutput2\": \"resolved2\",\n        })\n\n        self.context.get_stack.return_value = stack\n        var.resolve(self.context, self.provider)\n        self.assertTrue(var.resolved)\n        self.assertEqual(var.value, \"url://resolved@resolved2\")\n\n    def test_variable_replace_no_lookups_list(self):\n        var = Variable(\"Param1\", [\"something\", \"here\"])\n        self.assertEqual(var.value, [\"something\", \"here\"])\n\n    def test_variable_replace_lookups_list(self):\n        value = [\"something\",  # 0\n                 \"${output fakeStack::FakeOutput}\",  # 1\n                 \"${output fakeStack::FakeOutput2}\"  # 2\n                 ]\n        var = Variable(\"Param1\", value)\n\n        var._value[1]._resolve(\"resolved\")\n        var._value[2]._resolve(\"resolved2\")\n        self.assertEqual(var.value, [\"something\", \"resolved\", \"resolved2\"])\n\n    def test_variable_replace_lookups_dict(self):\n        value = {\n            \"something\": \"${output fakeStack::FakeOutput}\",\n            \"other\": \"${output fakeStack::FakeOutput2}\",\n        }\n        var = Variable(\"Param1\", value)\n        var._value[\"something\"]._resolve(\"resolved\")\n        var._value[\"other\"]._resolve(\"resolved2\")\n        self.assertEqual(var.value, {\"something\": \"resolved\", \"other\":\n                                     \"resolved2\"})\n\n    def test_variable_replace_lookups_mixed(self):\n        value 
= {\n            \"something\": [\n                \"${output fakeStack::FakeOutput}\",\n                \"other\",\n            ],\n            \"here\": {\n                \"other\": \"${output fakeStack::FakeOutput2}\",\n                \"same\": \"${output fakeStack::FakeOutput}\",\n                \"mixed\": \"something:${output fakeStack::FakeOutput3}\",\n            },\n        }\n        var = Variable(\"Param1\", value)\n        var._value[\"something\"][0]._resolve(\"resolved\")\n        var._value[\"here\"][\"other\"]._resolve(\"resolved2\")\n        var._value[\"here\"][\"same\"]._resolve(\"resolved\")\n        var._value[\"here\"][\"mixed\"][1]._resolve(\"resolved3\")\n        self.assertEqual(var.value, {\n            \"something\": [\n                \"resolved\",\n                \"other\",\n            ],\n            \"here\": {\n                \"other\": \"resolved2\",\n                \"same\": \"resolved\",\n                \"mixed\": \"something:resolved3\",\n            },\n        })\n\n    def test_variable_resolve_nested_lookup(self):\n        stack = Stack(\n            definition=generate_definition(\"vpc\", 1),\n            context=self.context)\n        stack.set_outputs({\n            \"FakeOutput\": \"resolved\",\n            \"FakeOutput2\": \"resolved2\",\n        })\n\n        def mock_handler(value, context, provider, **kwargs):\n            return \"looked up: {}\".format(value)\n\n        register_lookup_handler(\"lookup\", mock_handler)\n        self.context.get_stack.return_value = stack\n        var = Variable(\n            \"Param1\",\n            \"${lookup ${lookup ${output fakeStack::FakeOutput}}}\",\n        )\n        var.resolve(self.context, self.provider)\n        self.assertTrue(var.resolved)\n        self.assertEqual(var.value, \"looked up: looked up: resolved\")\n\n    def test_troposphere_type_no_from_dict(self):\n        with self.assertRaises(ValueError):\n            TroposphereType(object)\n\n        with 
self.assertRaises(ValueError):\n            TroposphereType(object, many=True)\n\n    def test_troposphere_type_create(self):\n        troposphere_type = TroposphereType(s3.Bucket)\n        created = troposphere_type.create(\n            {\"MyBucket\": {\"BucketName\": \"test-bucket\"}})\n        self.assertTrue(isinstance(created, s3.Bucket))\n        self.assertTrue(created.properties[\"BucketName\"], \"test-bucket\")\n\n    def test_troposphere_type_create_multiple(self):\n        troposphere_type = TroposphereType(s3.Bucket, many=True)\n        created = troposphere_type.create({\n            \"FirstBucket\": {\"BucketName\": \"test-bucket\"},\n            \"SecondBucket\": {\"BucketName\": \"other-test-bucket\"},\n        })\n        self.assertTrue(isinstance(created, list))\n"
  },
  {
    "path": "stacker/tokenize_userdata.py",
    "content": "import re\n\nfrom troposphere import Ref, GetAtt\n\n\nHELPERS = {\n    \"Ref\": Ref,\n    \"Fn::GetAtt\": GetAtt\n}\n\nsplit_string = \"(\" + \"|\".join([r\"%s\\([^)]+\\)\" % h for h in HELPERS]) + \")\"\nreplace_string = \\\n    r\"(?P<helper>%s)\\((?P<args>['\\\"]?[^)]+['\\\"]?)+\\)\" % '|'.join(HELPERS)\n\nsplit_re = re.compile(split_string)\nreplace_re = re.compile(replace_string)\n\n\ndef cf_tokenize(s):\n    \"\"\" Parses UserData for Cloudformation helper functions.\n\n    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html\n    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-cloudformation.html#scenario-userdata-base64\n\n    It breaks apart the given string at each recognized function (see HELPERS)\n    and instantiates the helper function objects in place of those.\n\n    Returns a list of parts as a result. Useful when used with Join() and\n    Base64() CloudFormation functions to produce user data.\n\n    ie: Base64(Join('', cf_tokenize(userdata_string)))\n    \"\"\"\n    t = []\n    parts = split_re.split(s)\n    for part in parts:\n        cf_func = replace_re.search(part)\n        if cf_func:\n            args = [a.strip(\"'\\\" \") for a in cf_func.group(\"args\").split(\",\")]\n            t.append(HELPERS[cf_func.group(\"helper\")](*args).data)\n        else:\n            t.append(part)\n    return t\n"
  },
  {
    "path": "stacker/ui.py",
    "content": "import threading\nimport logging\nfrom getpass import getpass\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_raw_input(message):\n    \"\"\" Just a wrapper for raw_input for testing purposes. \"\"\"\n    return input(message)\n\n\nclass UI(object):\n    \"\"\" This class is used internally by stacker to perform I/O with the\n    terminal in a multithreaded environment. It ensures that two threads don't\n    write over each other while asking a user for input (e.g. in interactive\n    mode).\n    \"\"\"\n\n    def __init__(self):\n        self._lock = threading.RLock()\n\n    def lock(self, *args, **kwargs):\n        \"\"\"Obtains an exclusive lock on the UI for the currently executing\n        thread.\"\"\"\n        return self._lock.acquire()\n\n    def unlock(self, *args, **kwargs):\n        return self._lock.release()\n\n    def info(self, *args, **kwargs):\n        \"\"\"Logs the line of the current thread owns the underlying lock, or\n        blocks.\"\"\"\n        self.lock()\n        try:\n            return logger.info(*args, **kwargs)\n        finally:\n            self.unlock()\n\n    def ask(self, message):\n        \"\"\"This wraps the built-in raw_input function to ensure that only 1\n        thread is asking for input from the user at a give time. Any process\n        that tries to log output to the terminal will block while the user is\n        being prompted.\"\"\"\n        self.lock()\n        try:\n            return get_raw_input(message)\n        finally:\n            self.unlock()\n\n    def getpass(self, *args):\n        \"\"\"Wraps getpass to lock the UI.\"\"\"\n        try:\n            self.lock()\n            return getpass(*args)\n        finally:\n            self.unlock()\n\n\n# Global UI object for other modules to use.\nui = UI()\n"
  },
  {
    "path": "stacker/util.py",
    "content": "import copy\nimport uuid\nimport importlib\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom collections import OrderedDict\n\nimport botocore.client\nimport botocore.exceptions\nimport dateutil\nimport yaml\nfrom yaml.constructor import ConstructorError\nfrom yaml.nodes import MappingNode\n\nfrom .awscli_yamlhelper import yaml_parse\nfrom stacker.session_cache import get_session\n\nlogger = logging.getLogger(__name__)\n\n\ndef camel_to_snake(name):\n    \"\"\"Converts CamelCase to snake_case.\n\n    Args:\n        name (string): The name to convert from CamelCase to snake_case.\n\n    Returns:\n        string: Converted string.\n    \"\"\"\n    s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n    return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n\ndef convert_class_name(kls):\n    \"\"\"Gets a string that represents a given class.\n\n    Args:\n        kls (class): The class being analyzed for its name.\n\n    Returns:\n        string: The name of the given kls.\n    \"\"\"\n    return camel_to_snake(kls.__name__)\n\n\ndef parse_zone_id(full_zone_id):\n    \"\"\"Parses the returned hosted zone id and returns only the ID itself.\"\"\"\n    return full_zone_id.split(\"/\")[2]\n\n\ndef get_hosted_zone_by_name(client, zone_name):\n    \"\"\"Get the zone id of an existing zone by name.\n\n    Args:\n        client (:class:`botocore.client.Route53`): The connection used to\n            interact with Route53's API.\n        zone_name (string): The name of the DNS hosted zone to create.\n\n    Returns:\n        string: The Id of the Hosted Zone.\n    \"\"\"\n    p = client.get_paginator(\"list_hosted_zones\")\n\n    for i in p.paginate():\n        for zone in i[\"HostedZones\"]:\n            if zone[\"Name\"] == zone_name:\n                return parse_zone_id(zone[\"Id\"])\n    return None\n\n\ndef get_or_create_hosted_zone(client, 
zone_name):\n    \"\"\"Get the Id of an existing zone, or create it.\n\n    Args:\n        client (:class:`botocore.client.Route53`): The connection used to\n            interact with Route53's API.\n        zone_name (string): The name of the DNS hosted zone to create.\n\n    Returns:\n        string: The Id of the Hosted Zone.\n    \"\"\"\n    zone_id = get_hosted_zone_by_name(client, zone_name)\n    if zone_id:\n        return zone_id\n\n    logger.debug(\"Zone %s does not exist, creating.\", zone_name)\n\n    reference = uuid.uuid4().hex\n\n    response = client.create_hosted_zone(Name=zone_name,\n                                         CallerReference=reference)\n\n    return parse_zone_id(response[\"HostedZone\"][\"Id\"])\n\n\nclass SOARecordText(object):\n    \"\"\"Represents the actual body of an SOARecord. \"\"\"\n    def __init__(self, record_text):\n        (self.nameserver, self.contact, self.serial, self.refresh,\n            self.retry, self.expire, self.min_ttl) = record_text.split()\n\n    def __str__(self):\n        return \"%s %s %s %s %s %s %s\" % (\n            self.nameserver, self.contact, self.serial, self.refresh,\n            self.retry, self.expire, self.min_ttl\n        )\n\n\nclass SOARecord(object):\n    \"\"\"Represents an SOA record. 
\"\"\"\n    def __init__(self, record):\n        self.name = record[\"Name\"]\n        self.text = SOARecordText(record[\"ResourceRecords\"][0][\"Value\"])\n        self.ttl = record[\"TTL\"]\n\n\ndef get_soa_record(client, zone_id, zone_name):\n    \"\"\"Gets the SOA record for zone_name from zone_id.\n\n    Args:\n        client (:class:`botocore.client.Route53`): The connection used to\n            interact with Route53's API.\n        zone_id (string): The AWS Route53 zone id of the hosted zone to query.\n        zone_name (string): The name of the DNS hosted zone to create.\n\n    Returns:\n        :class:`stacker.util.SOARecord`: An object representing the parsed SOA\n            record returned from AWS Route53.\n    \"\"\"\n\n    response = client.list_resource_record_sets(HostedZoneId=zone_id,\n                                                StartRecordName=zone_name,\n                                                StartRecordType=\"SOA\",\n                                                MaxItems=\"1\")\n    return SOARecord(response[\"ResourceRecordSets\"][0])\n\n\ndef create_route53_zone(client, zone_name):\n    \"\"\"Creates the given zone_name if it doesn't already exists.\n\n    Also sets the SOA negative caching TTL to something short (300 seconds).\n\n    Args:\n        client (:class:`botocore.client.Route53`): The connection used to\n            interact with Route53's API.\n        zone_name (string): The name of the DNS hosted zone to create.\n\n    Returns:\n        string: The zone id returned from AWS for the existing, or newly\n            created zone.\n    \"\"\"\n    if not zone_name.endswith(\".\"):\n        zone_name += \".\"\n    zone_id = get_or_create_hosted_zone(client, zone_name)\n    old_soa = get_soa_record(client, zone_id, zone_name)\n\n    # If the negative cache value is already 300, don't update it.\n    if old_soa.text.min_ttl == \"300\":\n        return zone_id\n\n    new_soa = copy.deepcopy(old_soa)\n    
logger.debug(\"Updating negative caching value on zone %s to 300.\",\n                 zone_name)\n    new_soa.text.min_ttl = \"300\"\n    client.change_resource_record_sets(\n        HostedZoneId=zone_id,\n        ChangeBatch={\n            \"Comment\": \"Update SOA min_ttl to 300.\",\n            \"Changes\": [\n                {\n                    \"Action\": \"UPSERT\",\n                    \"ResourceRecordSet\": {\n                        \"Name\": zone_name,\n                        \"Type\": \"SOA\",\n                        \"TTL\": old_soa.ttl,\n                        \"ResourceRecords\": [\n                            {\n                                \"Value\": str(new_soa.text)\n                            }\n                        ]\n                    }\n                },\n            ]\n        }\n    )\n    return zone_id\n\n\ndef load_object_from_string(fqcn):\n    \"\"\"Converts \".\" delimited strings to a python object.\n\n    Given a \".\" delimited string representing the full path to an object\n    (function, class, variable) inside a module, return that object.  Example:\n\n    load_object_from_string(\"os.path.basename\")\n    load_object_from_string(\"logging.Logger\")\n    load_object_from_string(\"LocalClassName\")\n    \"\"\"\n    module_path = \"__main__\"\n    object_name = fqcn\n    if \".\" in fqcn:\n        module_path, object_name = fqcn.rsplit(\".\", 1)\n        importlib.import_module(module_path)\n    return getattr(sys.modules[module_path], object_name)\n\n\ndef merge_map(a, b):\n    \"\"\"Recursively merge elements of argument b into argument a.\n\n    Primarly used for merging two dictionaries together, where dict b takes\n    precedence over dict a. 
If 2 lists are provided, they are concatenated.\n    \"\"\"\n    if isinstance(a, list) and isinstance(b, list):\n        return a + b\n\n    if not isinstance(a, dict) or not isinstance(b, dict):\n        return b\n\n    for key in b:\n        a[key] = merge_map(a[key], b[key]) if key in a else b[key]\n    return a\n\n\ndef yaml_to_ordered_dict(stream, loader=yaml.SafeLoader):\n    \"\"\"Provides yaml.load alternative with preserved dictionary order.\n\n    Args:\n        stream (string): YAML string to load.\n        loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe\n            load.\n\n    Returns:\n        OrderedDict: Parsed YAML.\n    \"\"\"\n    class OrderedUniqueLoader(loader):\n        \"\"\"\n        Subclasses the given pyYAML `loader` class.\n\n        Validates all sibling keys to insure no duplicates.\n\n        Returns an OrderedDict instead of a Dict.\n        \"\"\"\n\n        # keys which require no duplicate siblings.\n        NO_DUPE_SIBLINGS = [\"stacks\", \"class_path\"]\n        # keys which require no duplicate children keys.\n        NO_DUPE_CHILDREN = [\"stacks\"]\n\n        def _error_mapping_on_dupe(self, node, node_name):\n            \"\"\"check mapping node for dupe children keys.\"\"\"\n            if isinstance(node, MappingNode):\n                mapping = {}\n                for n in node.value:\n                    a = n[0]\n                    b = mapping.get(a.value, None)\n                    if b:\n                        msg = \"{} mapping cannot have duplicate keys {} {}\"\n                        raise ConstructorError(\n                            msg.format(node_name, b.start_mark, a.start_mark)\n                        )\n                    mapping[a.value] = a\n\n        def _validate_mapping(self, node, deep=False):\n            if not isinstance(node, MappingNode):\n                raise ConstructorError(\n                    None, None,\n                    \"expected a mapping node, but found 
%s\" % node.id,\n                    node.start_mark)\n            mapping = OrderedDict()\n            for key_node, value_node in node.value:\n                key = self.construct_object(key_node, deep=deep)\n                try:\n                    hash(key)\n                except TypeError as exc:\n                    raise ConstructorError(\n                        \"while constructing a mapping\", node.start_mark,\n                        \"found unhashable key (%s)\" % exc, key_node.start_mark\n                    )\n                # prevent duplicate sibling keys for certain \"keywords\".\n                if key in mapping and key in self.NO_DUPE_SIBLINGS:\n                    msg = \"{} key cannot have duplicate siblings {} {}\"\n                    raise ConstructorError(\n                        msg.format(key, node.start_mark, key_node.start_mark)\n                    )\n                if key in self.NO_DUPE_CHILDREN:\n                    # prevent duplicate children keys for this mapping.\n                    self._error_mapping_on_dupe(value_node, key_node.value)\n                value = self.construct_object(value_node, deep=deep)\n                mapping[key] = value\n            return mapping\n\n        def construct_mapping(self, node, deep=False):\n            \"\"\"Override parent method to use OrderedDict.\"\"\"\n            if isinstance(node, MappingNode):\n                self.flatten_mapping(node)\n            return self._validate_mapping(node, deep=deep)\n\n        def construct_yaml_map(self, node):\n            data = OrderedDict()\n            yield data\n            value = self.construct_mapping(node)\n            data.update(value)\n\n    OrderedUniqueLoader.add_constructor(\n        u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map,\n    )\n    return yaml.load(stream, OrderedUniqueLoader)\n\n\ndef uppercase_first_letter(s):\n    \"\"\"Return string \"s\" with first character upper case.\"\"\"\n    return 
s[0].upper() + s[1:]\n\n\ndef cf_safe_name(name):\n    \"\"\"Converts a name to a safe string for a Cloudformation resource.\n\n    Given a string, returns a name that is safe for use as a CloudFormation\n    Resource. (ie: Only alphanumeric characters)\n    \"\"\"\n    alphanumeric = r\"[a-zA-Z0-9]+\"\n    parts = re.findall(alphanumeric, name)\n    return \"\".join([uppercase_first_letter(part) for part in parts])\n\n\ndef get_config_directory():\n    \"\"\"Return the directory the config file is located in.\n\n    This enables us to use relative paths in config values.\n\n    \"\"\"\n    # avoid circular import\n    from .commands.stacker import Stacker\n    command = Stacker()\n    namespace = command.parse_args()\n    return os.path.dirname(namespace.config.name)\n\n\ndef read_value_from_path(value):\n    \"\"\"Enables translators to read values from files.\n\n    The value can be referred to with the `file://` prefix. ie:\n\n        conf_key: ${kms file://kms_value.txt}\n\n    \"\"\"\n    if value.startswith('file://'):\n        path = value.split('file://', 1)[1]\n        config_directory = get_config_directory()\n        relative_path = os.path.join(config_directory, path)\n        with open(relative_path) as read_file:\n            value = read_file.read()\n    return value\n\n\ndef get_client_region(client):\n    \"\"\"Gets the region from a :class:`boto3.client.Client` object.\n\n    Args:\n        client (:class:`boto3.client.Client`): The client to get the region\n            from.\n\n    Returns:\n        string: AWS region string.\n    \"\"\"\n\n    return client._client_config.region_name\n\n\ndef get_s3_endpoint(client):\n    \"\"\"Gets the s3 endpoint for the given :class:`boto3.client.Client` object.\n\n    Args:\n        client (:class:`boto3.client.Client`): The client to get the endpoint\n            from.\n\n    Returns:\n        string: The AWS endpoint for the client.\n    \"\"\"\n\n    return client._endpoint.host\n\n\ndef 
s3_bucket_location_constraint(region):\n    \"\"\"Returns the appropriate LocationConstraint info for a new S3 bucket.\n\n    When creating a bucket in a region OTHER than us-east-1, you need to\n    specify a LocationConstraint inside the CreateBucketConfiguration argument.\n    This function helps you determine the right value given a given client.\n\n    Args:\n        region (str): The region where the bucket will be created in.\n\n    Returns:\n        string: The string to use with the given client for creating a bucket.\n    \"\"\"\n    if region == \"us-east-1\":\n        return \"\"\n    return region\n\n\ndef ensure_s3_bucket(s3_client, bucket_name, bucket_region):\n    \"\"\"Ensure an s3 bucket exists, if it does not then create it.\n\n    Args:\n        s3_client (:class:`botocore.client.Client`): An s3 client used to\n            verify and create the bucket.\n        bucket_name (str): The bucket being checked/created.\n        bucket_region (str, optional): The region to create the bucket in. If\n            not provided, will be determined by s3_client's region.\n    \"\"\"\n    try:\n        s3_client.head_bucket(Bucket=bucket_name)\n    except botocore.exceptions.ClientError as e:\n        if e.response['Error']['Message'] == \"Not Found\":\n            logger.debug(\"Creating bucket %s.\", bucket_name)\n            create_args = {\"Bucket\": bucket_name}\n            location_constraint = s3_bucket_location_constraint(\n                bucket_region\n            )\n            if location_constraint:\n                create_args[\"CreateBucketConfiguration\"] = {\n                    \"LocationConstraint\": location_constraint\n                }\n            s3_client.create_bucket(**create_args)\n        elif e.response['Error']['Message'] == \"Forbidden\":\n            logger.exception(\"Access denied for bucket %s.  
Did \" +\n                             \"you remember to use a globally unique name?\",\n                             bucket_name)\n            raise\n        else:\n            logger.exception(\"Error creating bucket %s. Error %s\",\n                             bucket_name, e.response)\n            raise\n\n\ndef parse_cloudformation_template(template):\n    \"\"\"Parse CFN template string.\n\n    Leverages the vendored aws-cli yamlhelper to handle JSON or YAML templates.\n\n    Args:\n        template (str): The template body.\n    \"\"\"\n    return yaml_parse(template)\n\n\nclass Extractor(object):\n    \"\"\"Base class for extractors.\"\"\"\n\n    def __init__(self, archive=None):\n        \"\"\"\n        Create extractor object with the archive path.\n\n        Args:\n            archive (string): Archive path\n        \"\"\"\n        self.archive = archive\n\n    def set_archive(self, dir_name):\n        \"\"\"\n        Update archive filename to match directory name & extension.\n\n        Args:\n            dir_name (string): Archive directory name\n        \"\"\"\n        self.archive = dir_name + self.extension()\n\n    @staticmethod\n    def extension():\n        \"\"\"Serve as placeholder; override this in subclasses.\"\"\"\n        return ''\n\n\nclass TarExtractor(Extractor):\n    \"\"\"Extracts tar archives.\"\"\"\n\n    def extract(self, destination):\n        \"\"\"Extract the archive.\"\"\"\n        with tarfile.open(self.archive, 'r:') as tar:\n            tar.extractall(path=destination)\n\n    @staticmethod\n    def extension():\n        \"\"\"Return archive extension.\"\"\"\n        return '.tar'\n\n\nclass TarGzipExtractor(Extractor):\n    \"\"\"Extracts compressed tar archives.\"\"\"\n\n    def extract(self, destination):\n        \"\"\"Extract the archive.\"\"\"\n        with tarfile.open(self.archive, 'r:gz') as tar:\n            tar.extractall(path=destination)\n\n    @staticmethod\n    def extension():\n        \"\"\"Return archive 
extension.\"\"\"\n        return '.tar.gz'\n\n\nclass ZipExtractor(Extractor):\n    \"\"\"Extracts zip archives.\"\"\"\n\n    def extract(self, destination):\n        \"\"\"Extract the archive.\"\"\"\n        with zipfile.ZipFile(self.archive, 'r') as zip_ref:\n            zip_ref.extractall(destination)\n\n    @staticmethod\n    def extension():\n        \"\"\"Return archive extension.\"\"\"\n        return '.zip'\n\n\nclass SourceProcessor(object):\n    \"\"\"Makes remote python package sources available in current environment.\"\"\"\n\n    ISO8601_FORMAT = '%Y%m%dT%H%M%SZ'\n\n    def __init__(self, sources, stacker_cache_dir=None):\n        \"\"\"\n        Process a config's defined package sources.\n\n        Args:\n            sources (dict): Package sources from Stacker config dictionary\n            stacker_cache_dir (string): Path where remote sources will be\n                cached.\n        \"\"\"\n        if not stacker_cache_dir:\n            stacker_cache_dir = os.path.expanduser(\"~/.stacker\")\n        package_cache_dir = os.path.join(stacker_cache_dir, 'packages')\n        self.stacker_cache_dir = stacker_cache_dir\n        self.package_cache_dir = package_cache_dir\n        self.sources = sources\n        self.configs_to_merge = []\n        self.create_cache_directories()\n\n    def create_cache_directories(self):\n        \"\"\"Ensure that SourceProcessor cache directories exist.\"\"\"\n        if not os.path.isdir(self.package_cache_dir):\n            if not os.path.isdir(self.stacker_cache_dir):\n                os.mkdir(self.stacker_cache_dir)\n            os.mkdir(self.package_cache_dir)\n\n    def get_package_sources(self):\n        \"\"\"Make remote python packages available for local use.\"\"\"\n        # Checkout local modules\n        for config in self.sources.get('local', []):\n            self.fetch_local_package(config=config)\n        # Checkout S3 repositories specified in config\n        for config in self.sources.get('s3', []):\n  
          self.fetch_s3_package(config=config)\n        # Checkout git repositories specified in config\n        for config in self.sources.get('git', []):\n            self.fetch_git_package(config=config)\n\n    def fetch_local_package(self, config):\n        \"\"\"Make a local path available to current stacker config.\n\n        Args:\n            config (dict): 'local' path config dictionary\n\n        \"\"\"\n        # Update sys.path & merge in remote configs (if necessary)\n        self.update_paths_and_config(config=config,\n                                     pkg_dir_name=config['source'],\n                                     pkg_cache_dir=os.getcwd())\n\n    def fetch_s3_package(self, config):\n        \"\"\"Make a remote S3 archive available for local use.\n\n        Args:\n            config (dict): git config dictionary\n\n        \"\"\"\n        extractor_map = {'.tar.gz': TarGzipExtractor,\n                         '.tar': TarExtractor,\n                         '.zip': ZipExtractor}\n        extractor = None\n        for suffix, klass in extractor_map.items():\n            if config['key'].endswith(suffix):\n                extractor = klass()\n                logger.debug(\"Using extractor %s for S3 object \\\"%s\\\" in \"\n                             \"bucket %s.\",\n                             klass.__name__,\n                             config['key'],\n                             config['bucket'])\n                dir_name = self.sanitize_uri_path(\n                    \"s3-%s-%s\" % (config['bucket'],\n                                  config['key'][:-len(suffix)])\n                )\n                break\n\n        if extractor is None:\n            raise ValueError(\n                \"Archive type could not be determined for S3 object \\\"%s\\\" \"\n                \"in bucket %s.\" % (config['key'], config['bucket'])\n            )\n\n        session = get_session(region=None)\n        extra_s3_args = {}\n        if 
config.get('requester_pays', False):\n            extra_s3_args['RequestPayer'] = 'requester'\n\n        # We can skip downloading the archive if it's already been cached\n        if config.get('use_latest', True):\n            try:\n                # LastModified should always be returned in UTC, but it doesn't\n                # hurt to explicitly convert it to UTC again just in case\n                modified_date = session.client('s3').head_object(\n                    Bucket=config['bucket'],\n                    Key=config['key'],\n                    **extra_s3_args\n                )['LastModified'].astimezone(dateutil.tz.tzutc())\n            except botocore.exceptions.ClientError as client_error:\n                logger.error(\"Error checking modified date of \"\n                             \"s3://%s/%s : %s\",\n                             config['bucket'],\n                             config['key'],\n                             client_error)\n                sys.exit(1)\n            dir_name += \"-%s\" % modified_date.strftime(self.ISO8601_FORMAT)\n        cached_dir_path = os.path.join(self.package_cache_dir, dir_name)\n        if not os.path.isdir(cached_dir_path):\n            logger.debug(\"Remote package s3://%s/%s does not appear to have \"\n                         \"been previously downloaded - starting download and \"\n                         \"extraction to %s\",\n                         config['bucket'],\n                         config['key'],\n                         cached_dir_path)\n            tmp_dir = tempfile.mkdtemp(prefix='stacker')\n            tmp_package_path = os.path.join(tmp_dir, dir_name)\n            try:\n                extractor.set_archive(os.path.join(tmp_dir, dir_name))\n                logger.debug(\"Starting remote package download from S3 to %s \"\n                             \"with extra S3 options \\\"%s\\\"\",\n                             extractor.archive,\n                             
str(extra_s3_args))\n                session.resource('s3').Bucket(config['bucket']).download_file(\n                    config['key'],\n                    extractor.archive,\n                    ExtraArgs=extra_s3_args\n                )\n                logger.debug(\"Download complete; extracting downloaded \"\n                             \"package to %s\",\n                             tmp_package_path)\n                extractor.extract(tmp_package_path)\n                logger.debug(\"Moving extracted package directory %s to the \"\n                             \"Stacker cache at %s\",\n                             dir_name,\n                             self.package_cache_dir)\n                shutil.move(tmp_package_path, self.package_cache_dir)\n            finally:\n                shutil.rmtree(tmp_dir)\n        else:\n            logger.debug(\"Remote package s3://%s/%s appears to have \"\n                         \"been previously downloaded to %s -- bypassing \"\n                         \"download\",\n                         config['bucket'],\n                         config['key'],\n                         cached_dir_path)\n\n        # Update sys.path & merge in remote configs (if necessary)\n        self.update_paths_and_config(config=config,\n                                     pkg_dir_name=dir_name)\n\n    def fetch_git_package(self, config):\n        \"\"\"Make a remote git repository available for local use.\n\n        Args:\n            config (dict): git config dictionary\n\n        \"\"\"\n        # only loading git here when needed to avoid load errors on systems\n        # without git installed\n        from git import Repo\n\n        ref = self.determine_git_ref(config)\n        dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref)\n        cached_dir_path = os.path.join(self.package_cache_dir, dir_name)\n\n        # We can skip cloning the repo if it's already been cached\n        if not os.path.isdir(cached_dir_path):\n     
       logger.debug(\"Remote repo %s does not appear to have been \"\n                         \"previously downloaded - starting clone to %s\",\n                         config['uri'],\n                         cached_dir_path)\n            tmp_dir = tempfile.mkdtemp(prefix='stacker')\n            try:\n                tmp_repo_path = os.path.join(tmp_dir, dir_name)\n                with Repo.clone_from(config['uri'], tmp_repo_path) as repo:\n                    repo.head.reference = ref\n                    repo.head.reset(index=True, working_tree=True)\n                shutil.move(tmp_repo_path, self.package_cache_dir)\n            finally:\n                shutil.rmtree(tmp_dir)\n        else:\n            logger.debug(\"Remote repo %s appears to have been previously \"\n                         \"cloned to %s -- bypassing download\",\n                         config['uri'],\n                         cached_dir_path)\n\n        # Update sys.path & merge in remote configs (if necessary)\n        self.update_paths_and_config(config=config,\n                                     pkg_dir_name=dir_name)\n\n    def update_paths_and_config(self, config, pkg_dir_name,\n                                pkg_cache_dir=None):\n        \"\"\"Handle remote source defined sys.paths & configs.\n\n        Args:\n            config (dict): git config dictionary\n            pkg_dir_name (string): directory name of the stacker archive\n            pkg_cache_dir (string): fully qualified path to stacker cache\n                                    cache directory\n\n        \"\"\"\n        if pkg_cache_dir is None:\n            pkg_cache_dir = self.package_cache_dir\n        cached_dir_path = os.path.join(pkg_cache_dir, pkg_dir_name)\n\n        # Add the appropriate directory (or directories) to sys.path\n        if config.get('paths'):\n            for path in config['paths']:\n                path_to_append = os.path.join(cached_dir_path,\n                                            
  path)\n                logger.debug(\"Appending \\\"%s\\\" to python sys.path\",\n                             path_to_append)\n                sys.path.append(path_to_append)\n        else:\n            sys.path.append(cached_dir_path)\n\n        # If the configuration defines a set of remote config yamls to\n        # include, add them to the list for merging\n        if config.get('configs'):\n            for config_filename in config['configs']:\n                self.configs_to_merge.append(os.path.join(cached_dir_path,\n                                                          config_filename))\n\n    def git_ls_remote(self, uri, ref):\n        \"\"\"Determine the latest commit id for a given ref.\n\n        Args:\n            uri (string): git URI\n            ref (string): git ref\n\n        Returns:\n            str: A commit id\n\n        \"\"\"\n        logger.debug(\"Invoking git to retrieve commit id for repo %s...\", uri)\n        lsremote_output = subprocess.check_output(['git',\n                                                   'ls-remote',\n                                                   uri,\n                                                   ref])\n        if b\"\\t\" in lsremote_output:\n            commit_id = lsremote_output.split(b\"\\t\")[0]\n            logger.debug(\"Matching commit id found: %s\", commit_id)\n            return commit_id\n        else:\n            raise ValueError(\"Ref \\\"%s\\\" not found for repo %s.\" % (ref, uri))\n\n    def determine_git_ls_remote_ref(self, config):\n        \"\"\"Determine the ref to be used with the \"git ls-remote\" command.\n\n        Args:\n            config (:class:`stacker.config.GitPackageSource`): git config\n                dictionary; 'branch' key is optional\n\n        Returns:\n            str: A branch reference or \"HEAD\"\n\n        \"\"\"\n        if config.get('branch'):\n            ref = \"refs/heads/%s\" % config['branch']\n        else:\n            ref = \"HEAD\"\n\n    
    return ref\n\n    def determine_git_ref(self, config):\n        \"\"\"Determine the ref to be used for 'git checkout'.\n\n        Args:\n            config (dict): git config dictionary\n\n        Returns:\n            str: A commit id or tag name\n\n        \"\"\"\n        # First ensure redundant config keys aren't specified (which could\n        # cause confusion as to which take precedence)\n        ref_config_keys = 0\n        for i in ['commit', 'tag', 'branch']:\n            if config.get(i):\n                ref_config_keys += 1\n        if ref_config_keys > 1:\n            raise ImportError(\"Fetching remote git sources failed: \"\n                              \"conflicting revisions (e.g. 'commit', 'tag', \"\n                              \"'branch') specified for a package source\")\n\n        # Now check for a specific point in time referenced and return it if\n        # present\n        if config.get('commit'):\n            ref = config['commit']\n        elif config.get('tag'):\n            ref = config['tag']\n        else:\n            # Since a specific commit/tag point in time has not been specified,\n            # check the remote repo for the commit id to use\n            ref = self.git_ls_remote(\n                config['uri'],\n                self.determine_git_ls_remote_ref(config)\n            )\n        if sys.version_info[0] > 2 and isinstance(ref, bytes):\n            return ref.decode()\n        return ref\n\n    def sanitize_uri_path(self, uri):\n        \"\"\"Take a URI and converts it to a directory safe path.\n\n        Args:\n            uri (string): URI (e.g. 
http://example.com/cats)\n\n        Returns:\n            str: Directory name for the supplied uri\n\n        \"\"\"\n        for i in ['@', '/', ':']:\n            uri = uri.replace(i, '_')\n        return uri\n\n    def sanitize_git_path(self, uri, ref=None):\n        \"\"\"Take a git URI and ref and converts it to a directory safe path.\n\n        Args:\n            uri (string): git URI\n                          (e.g. git@github.com:foo/bar.git)\n            ref (string): optional git ref to be appended to the path\n\n        Returns:\n            str: Directory name for the supplied uri\n\n        \"\"\"\n        if uri.endswith('.git'):\n            dir_name = uri[:-4]  # drop .git\n        else:\n            dir_name = uri\n        dir_name = self.sanitize_uri_path(dir_name)\n        if ref is not None:\n            dir_name += \"-%s\" % ref\n        return dir_name\n\n\ndef stack_template_key_name(blueprint):\n    \"\"\"Given a blueprint, produce an appropriate key name.\n\n    Args:\n        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint\n            object to create the key from.\n\n    Returns:\n        string: Key name resulting from blueprint.\n    \"\"\"\n    name = blueprint.name\n    return \"stack_templates/%s/%s-%s.json\" % (blueprint.context.get_fqn(name),\n                                              name,\n                                              blueprint.version)\n"
  },
  {
    "path": "stacker/variables.py",
    "content": "\nimport re\n\nfrom past.builtins import basestring\nfrom string import Template\n\nfrom .exceptions import InvalidLookupCombination, UnresolvedVariable, \\\n    UnknownLookupType, FailedVariableLookup, FailedLookup, \\\n    UnresolvedVariableValue, InvalidLookupConcatenation\nfrom .lookups.registry import LOOKUP_HANDLERS\n\n\nclass LookupTemplate(Template):\n\n    \"\"\"A custom string template we use to replace lookup values\"\"\"\n    idpattern = r'[_a-z][^\\$\\{\\}]*'\n\n\ndef resolve_variables(variables, context, provider):\n    \"\"\"Given a list of variables, resolve all of them.\n\n    Args:\n        variables (list of :class:`stacker.variables.Variable`): list of\n            variables\n        context (:class:`stacker.context.Context`): stacker context\n        provider (:class:`stacker.provider.base.BaseProvider`): subclass of the\n            base provider\n\n    \"\"\"\n    for variable in variables:\n        variable.resolve(context, provider)\n\n\nclass Variable(object):\n    \"\"\"Represents a variable passed to a stack.\n\n    Args:\n        name (str): Name of the variable\n        value (any): Initial value of the variable from the config (str, list,\n                     dict)\n    \"\"\"\n\n    def __init__(self, name, value):\n        self.name = name\n        self._raw_value = value\n        self._value = VariableValue.parse(value)\n\n    @property\n    def value(self):\n        \"\"\"Return the current value of the Variable.\n        \"\"\"\n        try:\n            return self._value.value()\n        except UnresolvedVariableValue:\n            raise UnresolvedVariable(\"<unknown>\", self)\n        except InvalidLookupConcatenation as e:\n            raise InvalidLookupCombination(e.lookup, e.lookups, self)\n\n    @property\n    def resolved(self):\n        \"\"\"Boolean for whether the Variable has been resolved.\n\n        Variables only need to be resolved if they contain lookups.\n        \"\"\"\n        return 
self._value.resolved()\n\n    def resolve(self, context, provider):\n        \"\"\"Recursively resolve any lookups with the Variable.\n\n        Args:\n            context (:class:`stacker.context.Context`): Current context for\n                building the stack\n            provider (:class:`stacker.provider.base.BaseProvider`): subclass of\n                the base provider\n\n        \"\"\"\n        try:\n            self._value.resolve(context, provider)\n        except FailedLookup as e:\n            raise FailedVariableLookup(self.name, e.lookup, e.error)\n\n    def dependencies(self):\n        \"\"\"\n        Returns:\n            Set[str]: Stack names that this variable depends on\n        \"\"\"\n        return self._value.dependencies()\n\n\nclass VariableValue(object):\n    \"\"\"\n    Abstract Syntax Tree base object to parse the value for a variable\n    \"\"\"\n    def value(self):\n        return NotImplementedError()\n\n    def __iter__(self):\n        return NotImplementedError()\n\n    def resolved(self):\n        \"\"\"\n        Returns:\n            bool: Whether value() will not raise an error\n        \"\"\"\n        return NotImplementedError()\n\n    def resolve(self, context, provider):\n        pass\n\n    def dependencies(self):\n        return set()\n\n    def simplified(self):\n        \"\"\"\n        Return a simplified version of the Value.\n        This can be used to e.g. 
concatenate two literals in to one literal, or\n        to flatten nested Concatenations\n\n        Returns:\n            VariableValue\n        \"\"\"\n        return self\n\n    @classmethod\n    def parse(cls, input_object):\n        if isinstance(input_object, list):\n            return VariableValueList.parse(input_object)\n        elif isinstance(input_object, dict):\n            return VariableValueDict.parse(input_object)\n        elif not isinstance(input_object, basestring):\n            return VariableValueLiteral(input_object)\n        # else:  # str\n\n        tokens = VariableValueConcatenation([\n            VariableValueLiteral(t)\n            for t in re.split(r'(\\$\\{|\\}|\\s+)', input_object)\n        ])\n\n        opener = '${'\n        closer = '}'\n\n        while True:\n            last_open = None\n            next_close = None\n            for i, t in enumerate(tokens):\n                if not isinstance(t, VariableValueLiteral):\n                    continue\n\n                if t.value() == opener:\n                    last_open = i\n                    next_close = None\n                if last_open is not None and \\\n                        t.value() == closer and \\\n                        next_close is None:\n                    next_close = i\n\n            if next_close is not None:\n                lookup_data = VariableValueConcatenation(\n                    tokens[(last_open + len(opener) + 1):next_close]\n                )\n                lookup = VariableValueLookup(\n                    lookup_name=tokens[last_open + 1],\n                    lookup_data=lookup_data,\n                )\n                tokens[last_open:(next_close + 1)] = [lookup]\n            else:\n                break\n\n        tokens = tokens.simplified()\n\n        return tokens\n\n\nclass VariableValueLiteral(VariableValue):\n    def __init__(self, value):\n        self._value = value\n\n    def value(self):\n        return self._value\n\n    def 
__iter__(self):\n        yield self\n\n    def resolved(self):\n        return True\n\n    def __repr__(self):\n        return \"Literal<{}>\".format(repr(self._value))\n\n\nclass VariableValueList(VariableValue, list):\n    @classmethod\n    def parse(cls, input_object):\n        acc = [\n            VariableValue.parse(obj)\n            for obj in input_object\n        ]\n        return cls(acc)\n\n    def value(self):\n        return [\n            item.value()\n            for item in self\n        ]\n\n    def resolved(self):\n        accumulator = True\n        for item in self:\n            accumulator = accumulator and item.resolved()\n        return accumulator\n\n    def __repr__(self):\n        return \"List[{}]\".format(', '.join([repr(value) for value in self]))\n\n    def __iter__(self):\n        return list.__iter__(self)\n\n    def resolve(self, context, provider):\n        for item in self:\n            item.resolve(context, provider)\n\n    def dependencies(self):\n        deps = set()\n        for item in self:\n            deps.update(item.dependencies())\n        return deps\n\n    def simplified(self):\n        return [\n            item.simplified()\n            for item in self\n        ]\n\n\nclass VariableValueDict(VariableValue, dict):\n    @classmethod\n    def parse(cls, input_object):\n        acc = {\n            k: VariableValue.parse(v)\n            for k, v in input_object.items()\n        }\n        return cls(acc)\n\n    def value(self):\n        return {\n            k: v.value()\n            for k, v in self.items()\n        }\n\n    def resolved(self):\n        accumulator = True\n        for item in self.values():\n            accumulator = accumulator and item.resolved()\n        return accumulator\n\n    def __repr__(self):\n        return \"Dict[{}]\".format(', '.join([\n            \"{}={}\".format(k, repr(v)) for k, v in self.items()\n        ]))\n\n    def __iter__(self):\n        return dict.__iter__(self)\n\n    def 
resolve(self, context, provider):\n        for item in self.values():\n            item.resolve(context, provider)\n\n    def dependencies(self):\n        deps = set()\n        for item in self.values():\n            deps.update(item.dependencies())\n        return deps\n\n    def simplified(self):\n        return {\n            k: v.simplified()\n            for k, v in self.items()\n        }\n\n\nclass VariableValueConcatenation(VariableValue, list):\n    def value(self):\n        if len(self) == 1:\n            return self[0].value()\n\n        values = []\n        for value in self:\n            resolved_value = value.value()\n            if not isinstance(resolved_value, basestring):\n                raise InvalidLookupConcatenation(value, self)\n            values.append(resolved_value)\n        return ''.join(values)\n\n    def __iter__(self):\n        return list.__iter__(self)\n\n    def resolved(self):\n        accumulator = True\n        for item in self:\n            accumulator = accumulator and item.resolved()\n        return accumulator\n\n    def __repr__(self):\n        return \"Concat[{}]\".format(', '.join([repr(value) for value in self]))\n\n    def resolve(self, context, provider):\n        for value in self:\n            value.resolve(context, provider)\n\n    def dependencies(self):\n        deps = set()\n        for item in self:\n            deps.update(item.dependencies())\n        return deps\n\n    def simplified(self):\n        concat = []\n        for item in self:\n            if isinstance(item, VariableValueLiteral) and \\\n                    item.value() == '':\n                pass\n\n            elif isinstance(item, VariableValueLiteral) and \\\n                    len(concat) > 0 and \\\n                    isinstance(concat[-1], VariableValueLiteral):\n                # Join the literals together\n                concat[-1] = VariableValueLiteral(\n                    concat[-1].value() + item.value()\n                )\n\n  
          elif isinstance(item, VariableValueConcatenation):\n                # Flatten concatenations\n                concat.extend(item.simplified())\n\n            else:\n                concat.append(item.simplified())\n\n        if len(concat) == 0:\n            return VariableValueLiteral('')\n        elif len(concat) == 1:\n            return concat[0]\n        else:\n            return VariableValueConcatenation(concat)\n\n\nclass VariableValueLookup(VariableValue):\n    def __init__(self, lookup_name, lookup_data, handler=None):\n        \"\"\"\n        Args:\n            lookup_name (basestring): Name of the invoked lookup\n            lookup_data (VariableValue): Data portion of the lookup\n        \"\"\"\n        self._resolved = False\n        self._value = None\n\n        self.lookup_name = lookup_name\n\n        if isinstance(lookup_data, basestring):\n            lookup_data = VariableValueLiteral(lookup_data)\n        self.lookup_data = lookup_data\n\n        if handler is None:\n            lookup_name_resolved = lookup_name.value()\n            try:\n                handler = LOOKUP_HANDLERS[lookup_name_resolved]\n            except KeyError:\n                raise UnknownLookupType(lookup_name_resolved)\n        self.handler = handler\n\n    def resolve(self, context, provider):\n        self.lookup_data.resolve(context, provider)\n        try:\n            if type(self.handler) == type:\n                # Handler is a new-style handler\n                result = self.handler.handle(\n                    value=self.lookup_data.value(),\n                    context=context,\n                    provider=provider\n                )\n            else:\n                result = self.handler(\n                    value=self.lookup_data.value(),\n                    context=context,\n                    provider=provider\n                )\n            self._resolve(result)\n        except Exception as e:\n            raise FailedLookup(self, e)\n\n    
def _resolve(self, value):\n        self._value = value\n        self._resolved = True\n\n    def dependencies(self):\n        if type(self.handler) == type:\n            return self.handler.dependencies(self.lookup_data)\n        else:\n            return set()\n\n    def value(self):\n        if self._resolved:\n            return self._value\n        else:\n            raise UnresolvedVariableValue(self)\n\n    def __iter__(self):\n        yield self\n\n    def resolved(self):\n        return self._resolved\n\n    def __repr__(self):\n        if self._resolved:\n            return \"Lookup<{r} ({t} {d})>\".format(\n                r=self._value,\n                t=self.lookup_name,\n                d=repr(self.lookup_data),\n            )\n        else:\n            return \"Lookup<{t} {d}>\".format(\n                t=self.lookup_name,\n                d=repr(self.lookup_data),\n            )\n\n    def __str__(self):\n        return \"${{{type} {data}}}\".format(\n            type=self.lookup_name.value(),\n            data=self.lookup_data.value(),\n        )\n\n    def simplified(self):\n        return VariableValueLookup(\n            lookup_name=self.lookup_name,\n            lookup_data=self.lookup_data.simplified(),\n        )\n"
  },
  {
    "path": "test-requirements.in",
    "content": "pytest~=6.0\npytest-cov~=2.6\nmock~=2.0\nmoto[awslambda,ec2]~=3.0.0\ntestfixtures~=6.18.3\nflake8\npep8-naming"
  },
  {
    "path": "tests/Makefile",
    "content": "permissions:\n\t./stacker.yaml.sh | stacker build -\n\ntest: permissions\n\t$(eval AWS_ACCESS_KEY_ID := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/AccessKeyId/ {print $$3}'))\n\t$(eval AWS_SECRET_ACCESS_KEY := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/SecretAccessKey/ {print $$3}'))\n\t$(eval STACKER_ROLE := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/FunctionalTestRole/ {print $$3}'))\n\t@STACKER_ROLE=$(STACKER_ROLE) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) ./run_test_suite.sh ${TESTS}\n"
  },
  {
    "path": "tests/README.md",
    "content": "This directory contains the functional testing suite for stacker. It exercises all of stacker against a real AWS account. Make sure you have the AWS credentials loaded into your environment when you run these steps.\n\n## Setup\n\n1. First, ensure that you're inside a virtualenv:\n\n  ```console\n  $ source venv/bin/activate\n  ```\n\n2. Set a stacker namespace & the AWS region for the test suite to use:\n\n  ```console\n  $ export STACKER_NAMESPACE=my-stacker-test-namespace\n  $ export AWS_DEFAULT_REGION=us-east-1\n  ```\n\n3. Ensure that bats is installed:\n\n  ```console\n  # On MacOS if brew is installed\n  $ brew install bats-core\n  ```\n\n4. Setup functional test environment & run tests:\n\n  ```console\n  # To run all the tests\n  $ make -C tests test\n  # To run specific tests (ie: tests 1, 2 and 3)\n  $ TESTS=\"1 2 3\" make -C tests test\n  ```\n"
  },
  {
    "path": "tests/cleanup_functional_test_buckets.sh",
    "content": "#!/usr/bin/env bash\n\nif [ -z \"$AWS_ACCESS_KEY_ID\" ]\nthen\n    echo \"AWS_ACCESS_KEY_ID not set, skipping bucket cleanup.\"\n    exit 0\nfi\n\nsudo pip install awscli\n\nALL_BUT_LAST_6_BUCKETS=$(aws s3 ls | grep stacker-cloudtools-functional-tests- | sort -r | tail -n +7 | awk '{print $3}')\n\nfor bucket in ${ALL_BUT_LAST_6_BUCKETS}\ndo\n    echo \"## Deleting bucket: 's3://$bucket'\"\n    aws --region us-east-1 s3 rm --recursive s3://$bucket/\n    aws --region us-east-1 s3 rb s3://$bucket\ndone\n"
  },
  {
    "path": "tests/fixtures/blueprints/test_repo.json",
    "content": "{\n    \"Resources\": {\n        \"repo1Repository\": {\n            \"Properties\": {\n                \"RepositoryName\": \"repo1\"\n            },\n            \"Type\": \"AWS::ECR::Repository\"\n        },\n        \"repo2Repository\": {\n            \"Properties\": {\n                \"RepositoryName\": \"repo2\"\n            },\n            \"Type\": \"AWS::ECR::Repository\"\n        }\n    }\n}"
  },
  {
    "path": "tests/fixtures/stack_policies/default.json",
    "content": "{\n  \"Statement\" : [\n    {\n      \"Effect\" : \"Allow\",\n      \"Action\" : \"Update:*\",\n      \"Principal\": \"*\",\n      \"Resource\" : \"*\"\n    }  \n  ]\n}\n"
  },
  {
    "path": "tests/fixtures/stack_policies/none.json",
    "content": "{\n  \"Statement\" : [\n    {\n      \"Effect\" : \"Deny\",\n      \"Action\" : \"Update:*\",\n      \"Principal\": \"*\",\n      \"Resource\" : \"*\"\n    }  \n  ]\n}\n"
  },
  {
    "path": "tests/run_test_suite.sh",
    "content": "#!/bin/sh\n\nTEST_ARGS=$*\n\nif [ -z \"$TEST_ARGS\" ]\nthen\n    _TESTS=\"test_suite\"\nelse\n    for T in ${TEST_ARGS}\n    do\n        _TESTS=\"${_TESTS} test_suite/$(printf %02d ${T})_*\"\n    done\nfi\n\necho \"bats ${_TESTS}\"\n\nbats ${_TESTS}\n"
  },
  {
    "path": "tests/stacker.yaml.sh",
    "content": "#!/bin/bash\n\ncat - <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacker_bucket: '' # No need to upload to S3\nstacks:\n  - name: stackerFunctionalTests\n    class_path: stacker.tests.fixtures.mock_blueprints.FunctionalTests\n    variables:\n      StackerBucket: stacker-${STACKER_NAMESPACE}\n      StackerNamespace: ${STACKER_NAMESPACE}\nEOF\n"
  },
  {
    "path": "tests/test_helper.bash",
    "content": "#!/usr/bin/env bash\n\n# To make the tests run faster, we don't wait between calls to DescribeStacks\n# to check on the status of Create/Update.\nexport STACKER_STACK_POLL_TIME=2\n\nif [ -z \"$STACKER_NAMESPACE\" ]; then\n  >&2 echo \"To run these tests, you must set a STACKER_NAMESPACE environment variable\"\n  exit 1\nfi\n\nif [ -z \"$STACKER_ROLE\" ]; then\n  >&2 echo \"To run these tests, you must set a STACKER_ROLE environment variable\"\n  exit 1\nfi\n\n# Setup a base .aws/config that can be use to test stack configurations that\n# require stacker to assume a role.\nexport AWS_CONFIG_DIR=$(mktemp -d)\nexport AWS_CONFIG_FILE=\"$AWS_CONFIG_DIR/config\"\n\ncat <<EOF > \"$AWS_CONFIG_FILE\"\n[default]\nregion = us-east-1\n\n[profile stacker]\nregion = us-east-1\nrole_arn = ${STACKER_ROLE}\ncredential_source = Environment\nEOF\n\n# Simple wrapper around the builtin bash `test` command.\nassert() {\n  builtin test \"$@\"\n}\n\n# Checks that the given line is in $output.\nassert_has_line() {\n  echo \"$output\" | grep \"$@\" 1>/dev/null\n}\n\n# This helper wraps \"stacker\" with bats' \"run\" and also outputs debug\n# information. If you need to execute the stacker binary _without_ calling\n# \"run\", you can use \"command stacker\".\nstacker() {\n  # Sleep between runs of stacker to try and avoid rate limiting issues.\n  sleep 2\n  echo \"$ stacker $@\"\n  run command stacker \"$@\"\n  echo \"$output\"\n  echo\n}\n\n# A helper to tag a test as requiring access to AWS. If no credentials are set,\n# then the tests will be skipped.\nneeds_aws() {\n  if [ -z \"$AWS_ACCESS_KEY_ID\" ]; then\n    skip \"aws credentials not set\"\n  fi\n}\n"
  },
  {
    "path": "tests/test_suite/01_stacker_build_no_config.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - no config\" {\n  stacker build\n  assert ! \"$status\" -eq 0\n  assert_has_line -E \"too few arguments|the following arguments are required: config\"\n}\n"
  },
  {
    "path": "tests/test_suite/02_stacker_build_empty_config.bats",
    "content": "#!/usr/bin/env bats\n#\nload ../test_helper\n\n@test \"stacker build - empty config\" {\n  stacker build <(echo \"\")\n  assert ! \"$status\" -eq 0\n  assert_has_line 'stacker.exceptions.InvalidConfig:'\n}\n"
  },
  {
    "path": "tests/test_suite/03_stacker_build-config_with_no_stacks.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - config with no stacks\" {\n  needs_aws\n\n  stacker build - <<EOF\nnamespace: ${STACKER_NAMESPACE}\nEOF\n  assert \"$status\" -eq 0\n  assert_has_line 'WARNING: No stacks detected (error in config?)'\n}\n"
  },
  {
    "path": "tests/test_suite/04_stacker_build-config_with_no_namespace.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - config with no namespace\" {\n  stacker build - <<EOF\nstacker_bucket: stacker-${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\nEOF\n  assert ! \"$status\" -eq 0\n  assert_has_line \"This field is required\"\n}\n"
  },
  {
    "path": "tests/test_suite/05_stacker_build-missing_environment_key.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - missing environment key\" {\n  environment() {\n    cat <<EOF\nvpc_private_subnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\nEOF\n  }\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      PublicSubnets: \\${vpc_public_subnets}\n      PrivateSubnets: \\${vpc_private_subnets\nEOF\n  }\n\n  # Create the new stacks.\n  stacker build <(environment) <(config)\n  assert ! \"$status\" -eq 0\n  assert_has_line \"stacker.exceptions.MissingEnvironment: Environment missing key vpc_public_subnets.\"\n}\n"
  },
  {
    "path": "tests/test_suite/06_stacker_build-duplicate_stacks.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - duplicate stacks\" {\n  stacker build - <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  assert ! \"$status\" -eq 0\n  assert_has_line \"Duplicate stack vpc found\"\n}\n"
  },
  {
    "path": "tests/test_suite/07_stacker_graph-json_format.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker graph - json format\" {\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n    variables:\n      StringVariable: \\${output vpc::DummyId}\n  - name: bastion2\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\n  - name: app1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n    variables:\n      StringVariable: \\${output bastion1::DummyId}\n  - name: app2\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output bastion2::DummyId}\nEOF\n  }\n\n  # Print the graph\n  stacker graph -f json <(config)\n  assert \"$status\" -eq 0\n  assert $(echo \"$output\" | grep -v \"Using default\" | python -c \"import sys, json; data = json.loads(sys.stdin.read()); print(data['steps']['vpc']['deps'] == [] and data['steps']['bastion1']['deps'] == ['vpc'] and data['steps']['app2']['deps'] == ['bastion2'])\") = 'True'\n}\n"
  },
  {
    "path": "tests/test_suite/08_stacker_graph-dot_format.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker graph - dot format\" {\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n    variables:\n      StringVariable: \\${output vpc::DummyId}\n  - name: bastion2\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\n  - name: app1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n    variables:\n      StringVariable: \\${output bastion1::DummyId}\n  - name: app2\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output bastion2::DummyId}\nEOF\n  }\n\n  # Print the graph\n  stacker graph -f dot <(config)\n  assert \"$status\" -eq 0\n  assert_has_line '\"bastion1\" -> \"vpc\";'\n  assert_has_line '\"bastion2\" -> \"vpc\";'\n  assert_has_line '\"app1\" -> \"bastion1\";'\n  assert_has_line '\"app2\" -> \"bastion2\";'\n  assert $(echo \"$output\" | grep -A 2 vpc | tail -n 2 | grep -c vpc) = '0'\n}\n"
  },
  {
    "path": "tests/test_suite/09_stacker_build-missing_variable.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - missing variable\" {\n  needs_aws\n\n  stacker build - <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\nEOF\n  assert ! \"$status\" -eq 0\n  assert_has_line -E 'MissingVariable: Variable \"(PublicSubnets|PrivateSubnets)\" in blueprint \"vpc\" is missing'\n  assert_has_line -E 'vpc: failed \\(Variable \"(PublicSubnets|PrivateSubnets)\" in blueprint \"vpc\" is missing\\)'\n}\n"
  },
  {
    "path": "tests/test_suite/10_stacker_build-simple_build.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - simple build\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    stack_policy_path: ${PWD}/fixtures/stack_policies/default.json\n    variables:\n      PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\n      PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n\n  # Perform a noop update to the stacks, in interactive mode.\n  stacker build -i <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"vpc: skipped (nochange)\"\n\n  # Cleanup\n  stacker destroy --force <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: submitted (submitted for destruction)\"\n  assert_has_line \"vpc: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/11_stacker_info-simple_info.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker info - simple info\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n\n  stacker info <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Outputs for stacks: ${STACKER_NAMESPACE}\"\n  assert_has_line \"vpc:\"\n  assert_has_line \"DummyId: dummy-1234\"\n}\n"
  },
  {
    "path": "tests/test_suite/12_stacker_build-simple_build_with_output_lookups.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - simple build with output lookups\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n\n  for stack in vpc bastion; do\n    assert_has_line \"${stack}: submitted (creating new stack)\"\n    assert_has_line \"${stack}: complete (creating new stack)\"\n  done\n}\n"
  },
  {
    "path": "tests/test_suite/13_stacker_build-simple_build_with_environment.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - simple build with environment\" {\n  needs_aws\n\n  environment() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nvpc_public_subnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\nvpc_private_subnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\nEOF\n  }\n\n  config() {\n    cat <<EOF\nnamespace: \\${namespace}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      PublicSubnets: \\${vpc_public_subnets}\n      PrivateSubnets: \\${vpc_private_subnets}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(environment) <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(environment) <(config)\n  assert \"$status\" -eq 0\n}\n"
  },
  {
    "path": "tests/test_suite/14_stacker_build-interactive_with_skipped_update.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - interactive with skipped update\" {\n  needs_aws\n\n  config1() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n    requires: [vpc]\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config1)\n  }\n\n  # Create the new stacks.\n  stacker build <(config1)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n  assert_has_line \"bastion: submitted (creating new stack)\"\n  assert_has_line \"bastion: complete (creating new stack)\"\n\n  # Attempt an update to all stacks, but skip the vpc update.\n  stacker build -i <(config2) <<< $'n\\ny\\n'\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: skipped (canceled execution)\"\n  assert_has_line \"bastion: submitted (updating existing stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/15_stacker_build-no_namespace.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - no namespace\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: \"\"\nstacks:\n  - name: vpc\n    stack_name: ${STACKER_NAMESPACE}-vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n}\n"
  },
  {
    "path": "tests/test_suite/16_stacker_build-overridden_environment_key_with_-e.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - overridden environment key with -e\" {\n  needs_aws\n\n  environment() {\n    cat <<EOF\nnamespace: stacker\nEOF\n  }\n\n  config() {\n    cat <<EOF\nnamespace: \\${namespace}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.VPC\n    variables:\n      PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24\n      PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22\nEOF\n  }\n\n  teardown() {\n    stacker destroy -e namespace=$STACKER_NAMESPACE --force <(environment) <(config)\n  }\n\n  # Create the new stacks.\n  stacker build -e namespace=$STACKER_NAMESPACE <(environment) <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: submitted (creating new stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/17_stacker_build-dump.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - dump\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n\n  stacker build -d \"$TMP\" <(config)\n  assert \"$status\" -eq 0\n}\n"
  },
  {
    "path": "tests/test_suite/18_stacker_diff-simple_diff_with_output_lookups.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker diff - simple diff with output lookups\" {\n  needs_aws\n\n  config1() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.DiffTester\n    variables:\n      InstanceType: m3.large\n      WaitConditionCount: 1\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.DiffTester\n    variables:\n      InstanceType: m3.xlarge\n      WaitConditionCount: 2\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config1)\n  }\n\n  # Create the new stacks.\n  stacker build <(config1)\n  assert \"$status\" -eq 0\n\n  stacker diff <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"\\-InstanceType = m3.large\"\n  assert_has_line \"+InstanceType = m3.xlarge\"\n  assert_has_line \"LogicalResourceId: VPC1\"\n  assert_has_line \"ResourceType: AWS::CloudFormation::WaitConditionHandle\"\n}\n"
  },
  {
    "path": "tests/test_suite/19_stacker_build-replacements-only_test_with_additional_resource_no_keyerror.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - replacements-only test with additional resource, no keyerror\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: add-resource-test-with-replacements-only\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: add-resource-test-with-replacements-only\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"add-resource-test-with-replacements-only: submitted (creating new stack)\"\n  assert_has_line \"add-resource-test-with-replacements-only: complete (creating new stack)\"\n\n  # Perform an additional resource addition in replacements-only mode, should not crash.  This is testing issue #463.\n  stacker build -i --replacements-only <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"add-resource-test-with-replacements-only: complete (updating existing stack)\"\n\n  # Cleanup\n  stacker destroy --force <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"add-resource-test-with-replacements-only: submitted (submitted for destruction)\"\n  assert_has_line \"add-resource-test-with-replacements-only: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/20_stacker_build-locked_stacks.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - locked stacks\" {\n  needs_aws\n\n  config1() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    locked: true\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config2)\n  }\n\n  stacker build <(config2)\n  assert \"$status\" -eq 1\n  assert_has_line \"AttributeError: Stack does not have a defined class or template path.\"\n\n  # Create the new stacks.\n  stacker build <(config1)\n  assert \"$status\" -eq 0\n\n  stacker build <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: skipped (locked)\"\n  assert_has_line \"bastion: submitted (creating new stack)\"\n  assert_has_line \"bastion: complete (creating new stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/21_stacker_build-default_mode_without_&_with_protected_stack.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - default mode, without & with protected stack\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: mystack\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    protected: ${PROTECTED}\n\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: mystack\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy2\n  \nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # First create the stack\n  stacker build --interactive <(PROTECTED=\"false\" config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"mystack: submitted (creating new stack)\"\n  assert_has_line \"mystack: complete (creating new stack)\"\n\n  # Perform an additional resource addition in interactive mode, non-protected stack\n  stacker build --interactive <(config2) < <(echo \"y\")\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"mystack: submitted (updating existing stack)\"\n  assert_has_line \"mystack: complete (updating existing stack)\"\n  assert_has_line \"Add Dummy2\"\n\n  # Perform another update, this time without interactive, but with a protected stack\n  stacker build <(PROTECTED=\"true\" config) < <(echo \"y\")\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"mystack: submitted (updating existing stack)\"\n  assert_has_line \"mystack: complete (updating existing stack)\"\n  assert_has_line \"Remove Dummy2\"\n\n  # Cleanup\n  stacker destroy --force <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"mystack: submitted (submitted for destruction)\"\n  assert_has_line \"mystack: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/22_stacker_build-recreate_failed_stack_non-interactive_mode.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - recreate failed stack, non-interactive mode\" {\n  needs_aws\n\n  bad_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: recreate-failed\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: true\n\nEOF\n  }\n\n  good_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: recreate-failed\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: false\n      OutputValue: GoodOutput\n\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(good_config)\n  }\n\n  stacker destroy --force <(good_config)\n\n  # Create the initial stack. This must fail.\n  stacker build -v <(bad_config)\n  assert \"$status\" -eq 1\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"recreate-failed: submitted (creating new stack)\"\n  assert_has_line \"recreate-failed: failed (rolled back new stack)\"\n\n  # Updating the stack should prompt to re-create it.\n  stacker build --recreate-failed <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"recreate-failed: submitted (destroying stack for re-creation)\"\n  assert_has_line \"recreate-failed: submitted (creating new stack)\"\n  assert_has_line \"recreate-failed: complete (creating new stack)\"\n\n  # Confirm the stack is really updated\n  stacker build <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"recreate-failed: skipped (nochange)\"\n\n  # Cleanup\n  stacker destroy --force <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"recreate-failed: submitted (submitted for destruction)\"\n  assert_has_line \"recreate-failed: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/23_stacker_build-recreate_failed_stack_interactive_mode.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - recreate failed stack, interactive mode\" {\n  needs_aws\n\n  bad_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: recreate-failed-interactive\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: true\n\nEOF\n  }\n\n  good_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: recreate-failed-interactive\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: false\n      OutputValue: GoodOutput\n\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(good_config)\n  }\n\n  stacker destroy --force <(good_config)\n\n  # Create the initial stack. This must fail.\n  stacker build -v <(bad_config)\n  assert \"$status\" -eq 1\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"recreate-failed-interactive: submitted (creating new stack)\"\n  assert_has_line \"recreate-failed-interactive: failed (rolled back new stack)\"\n\n  # Updating the stack should prompt to re-create it.\n  stacker build -i <(good_config) <<< $'y\\n'\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"recreate-failed-interactive: submitted (destroying stack for re-creation)\"\n  assert_has_line \"recreate-failed-interactive: submitted (creating new stack)\"\n  assert_has_line \"recreate-failed-interactive: complete (creating new stack)\"\n\n  # Confirm the stack is really updated\n  stacker build -i <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"recreate-failed-interactive: skipped (nochange)\"\n\n  # Cleanup\n  stacker destroy --force <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"recreate-failed-interactive: submitted (submitted for destruction)\"\n  assert_has_line \"recreate-failed-interactive: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/24_stacker_build-handle_rollbacks_during_updates.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - handle rollbacks during updates\" {\n  needs_aws\n\n  bad_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: update-rollback\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: true\n\nEOF\n  }\n\n  good_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: update-rollback\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: false\n\nEOF\n  }\n\n  good_config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: update-rollback\n    class_path: stacker.tests.fixtures.mock_blueprints.LongRunningDummy\n    variables:\n      Count: 10\n      BreakLast: false\n      OutputValue: UpdateFoo\n\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(good_config)\n  }\n\n  stacker destroy --force <(good_config)\n\n  # Create the initial stack\n  stacker build -v <(good_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"update-rollback: submitted (creating new stack)\"\n  assert_has_line \"update-rollback: complete (creating new stack)\"\n\n  # Do a bad update and watch the rollback\n  stacker build -v <(bad_config)\n  assert \"$status\" -eq 1\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"update-rollback: submitted (updating existing stack)\"\n  assert_has_line \"update-rollback: failed (rolled back update)\"\n\n  # Do a good update so we know we've correctly waited for rollback\n  stacker build -v <(good_config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"update-rollback: submitted (updating existing stack)\"\n  assert_has_line \"update-rollback: complete (updating existing stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/25_stacker_build-handle_rollbacks_in_dependent_stacks.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - handle rollbacks in dependent stacks\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: dependent-rollback-parent\n    class_path: stacker.tests.fixtures.mock_blueprints.Broken\n\n  - name: dependent-rollback-child\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    requires: [dependent-rollback-parent]\n\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  stacker destroy --force <(config)\n\n  # Verify both stacks fail during creation\n  stacker build -v <(config)\n  assert \"$status\" -eq 1\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"dependent-rollback-parent: submitted (creating new stack)\"\n  assert_has_line \"dependent-rollback-parent: submitted (rolling back new stack)\"\n  assert_has_line \"dependent-rollback-parent: failed (rolled back new stack)\"\n  assert_has_line \"dependent-rollback-child: failed (dependency has failed)\"\n  assert_has_line \"The following steps failed: dependent-rollback-parent, dependent-rollback-child\"\n}\n"
  },
  {
    "path": "tests/test_suite/26_stacker_build-raw_template.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - raw template\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/cfn_template.json\n    variables:\n      Param1: foobar\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n\n  # Perform a noop update to the stacks, in interactive mode.\n  stacker build -i <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using interactive AWS provider mode\"\n  assert_has_line \"vpc: skipped (nochange)\"\n\n  # Cleanup\n  stacker destroy --force <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: submitted (submitted for destruction)\"\n  assert_has_line \"vpc: complete (stack destroyed)\"\n}\n"
  },
  {
    "path": "tests/test_suite/27_stacker_diff-raw_template.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker diff - raw template\" {\n  needs_aws\n\n  config1() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/cfn_template.json\n    variables:\n      Param1: foobar\nEOF\n  }\n\n  config2() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/cfn_template.json\n    variables:\n      Param1: newbar\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config1)\n  }\n\n  # Create the new stacks.\n  stacker build <(config1)\n  assert \"$status\" -eq 0\n\n  stacker diff <(config2)\n  assert \"$status\" -eq 0\n  assert_has_line \"\\-Param1 = foobar\"\n  assert_has_line \"+Param1 = newbar\"\n}\n"
  },
  {
    "path": "tests/test_suite/28_stacker_build-raw_template_parameter_resolution.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - raw template parameter resolution\" {\n  needs_aws\n\n  echo \"PWD: $PWD\"\n\n  SECRET_VALUE=\"foo-secret\"\n  DEFAULT_SECRET_VALUE=\"default-secret\"\n\n  NORMAL_VALUE=\"foo\"\n  CHANGED_NORMAL_VALUE=\"foo-changed\"\n\n  initial_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/parameter_resolution/template.yml\n    variables:\n      NormalParam: ${NORMAL_VALUE}\n      SecretParam: ${SECRET_VALUE}\nEOF\n  }\n\n  # Remove the value for SecretParam - should use the existing value if a stack\n  # exists, if not should use the default\n  no_secret_value_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/parameter_resolution/template.yml\n    variables:\n      NormalParam: ${NORMAL_VALUE}\nEOF\n  }\n\n  # Remove the value for SecretParam - should use the existing value if a stack\n  # exists, if not should use the default\n  no_secret_value_change_normal_value_config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    template_path: ../stacker/tests/fixtures/parameter_resolution/template.yml\n    variables:\n      NormalParam: ${CHANGED_NORMAL_VALUE}\nEOF\n  }\n\n\n  teardown() {\n    stacker destroy --force <(initial_config)\n  }\n\n  # Create the new stacks.\n  stacker build <(initial_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: complete (creating new stack)\"\n\n  # Update without providing secret value, should use existing value, so\n  # no change\n  stacker build <(no_secret_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: skipped (nochange)\"\n\n  # Update without providing secret value, should use existing value, but\n  # update the normal value - so should update\n  stacker build <(no_secret_value_change_normal_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: complete (updating existing stack)\"\n\n  # Check that the normal value changed\n  stacker info <(no_secret_value_change_normal_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"NormalParam: ${CHANGED_NORMAL_VALUE}\"\n\n  # Check that we used the previous value\n  stacker info <(no_secret_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"SecretParam: ${SECRET_VALUE}\"\n\n  # Cleanup\n  stacker destroy --force <(initial_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: complete (stack destroyed)\"\n\n  # Create the new stacks but with no secret parameter, should use the default\n  stacker build <(no_secret_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc: complete (creating new stack)\"\n\n  # Check that we used the default value\n  stacker info <(no_secret_value_config)\n  assert \"$status\" -eq 0\n  assert_has_line \"SecretParam: ${DEFAULT_SECRET_VALUE}\"\n}\n"
  },
  {
    "path": "tests/test_suite/29_stacker_build-no_parallelism.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - no parallelism\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: vpc2\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build -j 1 <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"vpc1: submitted (creating new stack)\"\n  assert_has_line \"vpc1: complete (creating new stack)\"\n  assert_has_line \"vpc2: submitted (creating new stack)\"\n  assert_has_line \"vpc2: complete (creating new stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/30_stacker_build-tailing.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - tailing\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    requires: [vpc]\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build --tail <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"Tailing stack: ${STACKER_NAMESPACE}-vpc\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n  assert_has_line \"Tailing stack: ${STACKER_NAMESPACE}-bastion\"\n  assert_has_line \"bastion: submitted (creating new stack)\"\n  assert_has_line \"bastion: complete (creating new stack)\"\n\n  stacker destroy --force --tail <(config)\n  assert \"$status\" -eq 0\n}\n"
  },
  {
    "path": "tests/test_suite/31_stacker_build-override_stack_name.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - override stack name\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    stack_name: vpcx\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: bastion\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output vpc::DummyId}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n  assert_has_line \"bastion: submitted (creating new stack)\"\n  assert_has_line \"bastion: complete (creating new stack)\"\n}\n"
  },
  {
    "path": "tests/test_suite/32_stacker_build-multi_region.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - multi region\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: west/vpc\n    region: us-west-1\n    stack_name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: east/vpc\n    region: us-east-1\n    stack_name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n  - name: app\n    region: us-east-1\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\n    variables:\n      StringVariable: \\${output west/vpc::DummyId}\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n  assert_has_line \"app: submitted (creating new stack)\"\n  assert_has_line \"app: complete (creating new stack)\"\n\n  config_simple() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: west/vpc\n    region: us-west-1\n    stack_name: vpc\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  # Assert that the vpc stack was built in us-west-1\n  stacker info <(config_simple)\n  assert_has_line \"Region: us-west-1\"\n}\n"
  },
  {
    "path": "tests/test_suite/33_stacker_build-profiles.bats",
    "content": "#!/usr/bin/env bats\n\nload ../test_helper\n\n@test \"stacker build - profiles\" {\n  needs_aws\n\n  config() {\n    cat <<EOF\nnamespace: ${STACKER_NAMESPACE}\nstacks:\n  - name: vpc\n    profile: stacker\n    class_path: stacker.tests.fixtures.mock_blueprints.Dummy\nEOF\n  }\n\n  teardown() {\n    stacker destroy --force <(config)\n  }\n\n  # Create the new stacks.\n  stacker build <(config)\n  assert \"$status\" -eq 0\n  assert_has_line \"Using default AWS provider mode\"\n  assert_has_line \"vpc: submitted (creating new stack)\"\n  assert_has_line \"vpc: complete (creating new stack)\"\n}\n"
  }
]