Repository: remind101/stacker Branch: master Commit: b357f83596e0 Files: 226 Total size: 802.1 KB Directory structure: gitextract_put1k2j_/ ├── .circleci/ │ └── config.yml ├── .dockerignore ├── .gitignore ├── AUTHORS.rst ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.rst ├── RELEASE.md ├── codecov.yml ├── conf/ │ └── README.rst ├── docs/ │ ├── .gitignore │ ├── Makefile │ ├── api/ │ │ ├── modules.rst │ │ ├── stacker.actions.rst │ │ ├── stacker.blueprints.rst │ │ ├── stacker.blueprints.variables.rst │ │ ├── stacker.commands.rst │ │ ├── stacker.commands.stacker.rst │ │ ├── stacker.config.rst │ │ ├── stacker.config.translators.rst │ │ ├── stacker.hooks.rst │ │ ├── stacker.logger.rst │ │ ├── stacker.lookups.handlers.rst │ │ ├── stacker.lookups.rst │ │ ├── stacker.providers.aws.rst │ │ ├── stacker.providers.rst │ │ └── stacker.rst │ ├── blueprints.rst │ ├── commands.rst │ ├── conf.py │ ├── config.rst │ ├── environments.rst │ ├── index.rst │ ├── lookups.rst │ ├── organizations_using_stacker.rst │ ├── templates.rst │ ├── terminology.rst │ └── translators.rst ├── examples/ │ └── cross-account/ │ ├── .aws/ │ │ └── config │ ├── README.md │ ├── stacker.yaml │ └── templates/ │ ├── stacker-bucket.yaml │ └── stacker-role.yaml ├── requirements.in ├── scripts/ │ ├── compare_env │ ├── docker-stacker │ ├── stacker │ └── stacker.cmd ├── setup.cfg ├── setup.py ├── stacker/ │ ├── __init__.py │ ├── actions/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── build.py │ │ ├── destroy.py │ │ ├── diff.py │ │ ├── graph.py │ │ └── info.py │ ├── awscli_yamlhelper.py │ ├── blueprints/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── raw.py │ │ ├── testutil.py │ │ └── variables/ │ │ ├── __init__.py │ │ └── types.py │ ├── commands/ │ │ ├── __init__.py │ │ └── stacker/ │ │ ├── __init__.py │ │ ├── base.py │ │ ├── build.py │ │ ├── destroy.py │ │ ├── diff.py │ │ ├── graph.py │ │ └── info.py │ ├── config/ │ │ ├── __init__.py │ │ └── translators/ │ │ 
├── __init__.py │ │ └── kms.py │ ├── context.py │ ├── dag/ │ │ └── __init__.py │ ├── environment.py │ ├── exceptions.py │ ├── hooks/ │ │ ├── __init__.py │ │ ├── aws_lambda.py │ │ ├── command.py │ │ ├── ecs.py │ │ ├── iam.py │ │ ├── keypair.py │ │ ├── route53.py │ │ └── utils.py │ ├── logger/ │ │ └── __init__.py │ ├── lookups/ │ │ ├── __init__.py │ │ ├── handlers/ │ │ │ ├── __init__.py │ │ │ ├── ami.py │ │ │ ├── default.py │ │ │ ├── dynamodb.py │ │ │ ├── envvar.py │ │ │ ├── file.py │ │ │ ├── hook_data.py │ │ │ ├── kms.py │ │ │ ├── output.py │ │ │ ├── rxref.py │ │ │ ├── split.py │ │ │ ├── ssmstore.py │ │ │ └── xref.py │ │ └── registry.py │ ├── plan.py │ ├── providers/ │ │ ├── __init__.py │ │ ├── aws/ │ │ │ ├── __init__.py │ │ │ └── default.py │ │ └── base.py │ ├── session_cache.py │ ├── stack.py │ ├── status.py │ ├── target.py │ ├── tests/ │ │ ├── __init__.py │ │ ├── actions/ │ │ │ ├── __init__.py │ │ │ ├── test_base.py │ │ │ ├── test_build.py │ │ │ ├── test_destroy.py │ │ │ └── test_diff.py │ │ ├── blueprints/ │ │ │ ├── __init__.py │ │ │ ├── test_base.py │ │ │ ├── test_raw.py │ │ │ └── test_testutil.py │ │ ├── conftest.py │ │ ├── factories.py │ │ ├── fixtures/ │ │ │ ├── __init__.py │ │ │ ├── basic.env │ │ │ ├── cfn_template.json │ │ │ ├── cfn_template.json.j2 │ │ │ ├── cfn_template.yaml │ │ │ ├── keypair/ │ │ │ │ ├── fingerprint │ │ │ │ ├── id_rsa │ │ │ │ └── id_rsa.pub │ │ │ ├── mock_blueprints.py │ │ │ ├── mock_hooks.py │ │ │ ├── mock_lookups.py │ │ │ ├── not-basic.env │ │ │ ├── parameter_resolution/ │ │ │ │ └── template.yml │ │ │ ├── vpc-bastion-db-web-pre-1.0.yaml │ │ │ ├── vpc-bastion-db-web.yaml │ │ │ └── vpc-custom-log-format-info.yaml │ │ ├── hooks/ │ │ │ ├── __init__.py │ │ │ ├── test_aws_lambda.py │ │ │ ├── test_command.py │ │ │ ├── test_ecs.py │ │ │ ├── test_iam.py │ │ │ └── test_keypair.py │ │ ├── lookups/ │ │ │ ├── __init__.py │ │ │ ├── handlers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_ami.py │ │ │ │ ├── test_default.py │ │ │ │ ├── test_dynamodb.py │ │ 
│ │ ├── test_envvar.py │ │ │ │ ├── test_file.py │ │ │ │ ├── test_hook_data.py │ │ │ │ ├── test_output.py │ │ │ │ ├── test_rxref.py │ │ │ │ ├── test_split.py │ │ │ │ ├── test_ssmstore.py │ │ │ │ └── test_xref.py │ │ │ └── test_registry.py │ │ ├── providers/ │ │ │ ├── __init__.py │ │ │ └── aws/ │ │ │ ├── __init__.py │ │ │ └── test_default.py │ │ ├── test_config.py │ │ ├── test_context.py │ │ ├── test_dag.py │ │ ├── test_environment.py │ │ ├── test_lookups.py │ │ ├── test_parse_user_data.py │ │ ├── test_plan.py │ │ ├── test_stack.py │ │ ├── test_stacker.py │ │ ├── test_util.py │ │ └── test_variables.py │ ├── tokenize_userdata.py │ ├── ui.py │ ├── util.py │ └── variables.py ├── test-requirements.in └── tests/ ├── Makefile ├── README.md ├── cleanup_functional_test_buckets.sh ├── fixtures/ │ ├── blueprints/ │ │ └── test_repo.json │ └── stack_policies/ │ ├── default.json │ └── none.json ├── run_test_suite.sh ├── stacker.yaml.sh ├── test_helper.bash └── test_suite/ ├── 01_stacker_build_no_config.bats ├── 02_stacker_build_empty_config.bats ├── 03_stacker_build-config_with_no_stacks.bats ├── 04_stacker_build-config_with_no_namespace.bats ├── 05_stacker_build-missing_environment_key.bats ├── 06_stacker_build-duplicate_stacks.bats ├── 07_stacker_graph-json_format.bats ├── 08_stacker_graph-dot_format.bats ├── 09_stacker_build-missing_variable.bats ├── 10_stacker_build-simple_build.bats ├── 11_stacker_info-simple_info.bats ├── 12_stacker_build-simple_build_with_output_lookups.bats ├── 13_stacker_build-simple_build_with_environment.bats ├── 14_stacker_build-interactive_with_skipped_update.bats ├── 15_stacker_build-no_namespace.bats ├── 16_stacker_build-overriden_environment_key_with_-e.bats ├── 17_stacker_build-dump.bats ├── 18_stacker_diff-simple_diff_with_output_lookups.bats ├── 19_stacker_build-replacements-only_test_with_additional_resource_no_keyerror.bats ├── 20_stacker_build-locked_stacks.bats ├── 21_stacker_build-default_mode_without_&_with_protected_stack.bats ├── 
22_stacker_build-recreate_failed_stack_non-interactive_mode.bats ├── 23_stacker_build-recreate_failed_stack_interactive_mode.bats ├── 24_stacker_build-handle_rollbacks_during_updates.bats ├── 25_stacker_build-handle_rollbacks_in_dependent_stacks.bats ├── 26_stacker_build-raw_template.bats ├── 27_stacker_diff-raw_template.bats ├── 28_stacker_build-raw_template_parameter_resolution.bats ├── 29_stacker_build-no_parallelism.bats ├── 30_stacker_build-tailing.bats ├── 31_stacker_build-override_stack_name.bats ├── 32_stacker_build-multi_region.bats └── 33_stacker_build-profiles.bats ================================================ FILE CONTENTS ================================================ ================================================ FILE: .circleci/config.yml ================================================ version: 2 workflows: version: 2 test-all: jobs: - lint - unit-test-37: requires: - lint - functional-test-37: requires: - unit-test-37 - unit-test-38: requires: - lint - functional-test-38: requires: - unit-test-38 - functional-test-37 - unit-test-39: requires: - lint - functional-test-39: requires: - unit-test-39 - functional-test-38 - unit-test-310: requires: - lint - functional-test-310: requires: - unit-test-310 - functional-test-39 - cleanup-functional-buckets: requires: - functional-test-37 - functional-test-38 - functional-test-39 - functional-test-310 jobs: lint: docker: - image: circleci/python:3.7 steps: - checkout - run: sudo pip install flake8 codecov pep8-naming - run: sudo python setup.py install - run: flake8 --version - run: sudo make lint unit-test-37: docker: - image: circleci/python:3.7 steps: &unit_test_steps - checkout - run: sudo python setup.py install - run: sudo make test-unit unit-test-38: docker: - image: circleci/python:3.8 steps: *unit_test_steps unit-test-39: docker: - image: circleci/python:3.9 steps: *unit_test_steps unit-test-310: docker: - image: circleci/python:3.10 steps: *unit_test_steps functional-test-37: docker: - image: 
circleci/python:3.7 steps: &functional_test_steps - checkout - run: command: | git clone https://github.com/bats-core/bats-core.git cd bats-core git checkout v1.0.2 sudo ./install.sh /usr/local bats --version - run: sudo python setup.py install - run: command: | export TERM=xterm export AWS_DEFAULT_REGION=us-east-1 export STACKER_NAMESPACE=cloudtools-functional-tests-$CIRCLE_BUILD_NUM export STACKER_ROLE=arn:aws:iam::459170252436:role/cloudtools-functional-tests-sta-FunctionalTestRole-1M9HFJ9VQVMFX sudo -E make test-functional functional-test-38: docker: - image: circleci/python:3.8 steps: *functional_test_steps functional-test-39: docker: - image: circleci/python:3.9 steps: *functional_test_steps functional-test-310: docker: - image: circleci/python:3.10 steps: *functional_test_steps cleanup-functional-buckets: docker: - image: circleci/python:3.7 steps: - checkout - run: command: | tests/cleanup_functional_test_buckets.sh ================================================ FILE: .dockerignore ================================================ Dockerfile ================================================ FILE: .gitignore ================================================ # Compiled source # ################### *.com *.class *.dll *.exe *.o *.so # Packages # ############ # it's better to unpack these files and commit the raw source # git has its own built in compression methods *.7z *.dmg *.gz *.iso *.jar *.rar *.tar *.zip # Logs and databases # ###################### *.log *.sql *.sqlite # OS generated files # ###################### .DS_Store* ehthumbs.db Icon? 
Thumbs.db # Vagrant .vagrant Vagrantfile # Editor crap *.sw* *~ .idea *.iml # Byte-compiled python *.pyc # Package directory build/ # Build object file directory objdir/ dist/ *.egg-info .eggs/ *.egg # Coverage artifacts .coverage htmlcov # Ignore development conf/env files dev.yaml dev.env tests/fixtures/blueprints/*-result FakeKey.pem vm_setup.sh ================================================ FILE: AUTHORS.rst ================================================ Authors ======= Stacker was designed and developed by the OpsEng team at `Remind, Inc.`_ Current Maintainers ------------------- - `Michael Barrett`_ - `Eric Holmes`_ - `Ignacio Nin`_ - `Russell Ballestrini`_ Alumni ------ - `Michael Hahn`_ - `Tom Taubkin`_ Thanks ------ Stacker wouldn't be where it is today without the open source community that has formed around it. Thank you to everyone who has contributed, and special thanks to the following folks who have contributed great features and bug requests, as well as given guidance in stacker's development: - `Adam McElwee`_ - `Daniel Miranda`_ - `Troy Ready`_ - `Garison Draper`_ - `Mariusz`_ - `Tolga Tarhan`_ .. _`Remind, Inc.`: https://www.remind.com/ .. _`Michael Barrett`: https://github.com/phobologic .. _`Eric Holmes`: https://github.com/ejholmes .. _`Ignacio Nin`: https://github.com/Lowercases .. _`Russell Ballestrini`: https://github.com/russellballestrini .. _`Michael Hahn`: https://github.com/mhahn .. _`Tom Taubkin`: https://github.com/ttaub .. _`Adam McElwee`: https://github.com/acmcelwee .. _`Daniel Miranda`: https://github.com/danielkza .. _`Troy Ready`: https://github.com/troyready .. _`Garison Draper`: https://github.com/GarisonLotus .. _`Mariusz`: https://github.com/discobean .. 
_`Tolga Tarhan`: https://github.com/ttarhan ================================================ FILE: CHANGELOG.md ================================================ ## Upcoming release ## 1.7.2 (2020-11-09) - address breaking moto change to awslambda [GH-763] - Added Python version validation before update kms decrypt output [GH-765] ## 1.7.1 (2020-08-17) - Fixing AMI lookup Key error on 'Name' - hooks: lambda: allow uploading pre-built payloads [GH-564] - Ensure that base64 lookup codec encodes the bytes object as a string [GH-742] - Use CloudFormation Change Sets for `stacker diff` - Locked stacks still have requirements [GH-746] - change diff to use CFN change sets instead of comparing template dicts [GH-744] - Add YAML environment file support [GH-740] - fix `stack.set_outputs` not being called by diff if stack did not change [GH-754] - Fix python 2.7/3.5 dependency issue - add cf notification arns [GH-756] ## 1.7.0 (2019-04-07) - Additional ECS unit tests [GH-696] - Keypair unit tests [GH-700] - Jinja2 templates in plain cloudformation templates [GH-701] - Custom log output formats [GH-705] - Python 3.7 unit tests in CircleCI [GH-711] - Upload blueprint templates with bucket-owner-full-control ACL [GH-713] - Change test runner from nose to py.test [GH-714] - support for importing a local public key file with the keypair hook [GH-715] - support for storing private keys in SSM parameter store with the keypair hook [GH-715] ## 1.6.0 (2019-01-21) - New lookup format/syntax, making it more generic [GH-665] - Allow lowercase y/Y when prompted [GH-674] - Local package sources [GH-677] - Add `in_progress` option to stack config [GH-678] - Use default ACL for uploaded lambda code [GH-682] - Display rollback reason after error [GH-687] - ssm parameter types [GH-692] ## 1.5.0 (2018-10-14) The big feature in this release is the introduction of "targets" which act as sort of "virtual nodes" in the graph. It provides a nice way to logically group stacks. 
- Add support for "targets" [GH-572] - Fix non-interactive changeset updates w/ stack policies [GH-657] - Fix interactive_update_stack calls with empty string parameters [GH-658] - Fix KMS unicode lookup in python 2 [GH-659] - Locked stacks have no dependencies [GH-661] - Set default profile earlier [GH-662] - Get rid of recursion for tail retries and extend retry/timeout [GH-663] ## 1.4.1 (2018-08-28) This is a minor bugfix release for 1.4.0, no major feature updates. As of this release python 3.5+ support is no longer considered experimental, and should be stable. Special thanks to @troyready for this release, I think most of these PRs were his :) - allow raw cfn templates to be loaded from remote package\_sources [GH-638] - Add missing config keys to s3 package source model [GH-642] - Account for UsePreviousValue parameters in diff [GH-644] - fix file lookup documented and actual return types [GH-646] - Creates a memoized provider builder for AWS [GH-648] - update git ref to explicitly return string (fix py3 bytes error) [GH-649] - Lock botocore/boto to versions that work with moto [GH-651] ## 1.4.0 (2018-08-05) - YAML & JSON codecs for `file` lookup [GH-537] - Arbitrary `command` hook [GH-565] - Fix datetime is not JSON serializable error [GH-591] - Run dump and outline actions offline [GH-594] - Helper Makefile for functional tests [GH-597] - Python3 support!!! 
[GH-600] - YAML blueprint testing framework [GH-606] - new `add_output` helper on Blueprint [GH-611] - Include lookup contents when lookups fail [GH-614] - Fix issue with using previous value for parameters [GH-615] - Stricter config parsing - only allow unrecognized config variables at the top-level [GH-623] - Documentation for the `default` lookup [GH-636] - Allow configs without stacks [GH-640] ## 1.3.0 (2018-05-03) - Support for provisioning stacks in multiple accounts and regions has been added [GH-553], [GH-551] - Added a `--profile` flag, which can be used to set the global default profile that stacker will use (similar to `AWS_PROFILE`) [GH-563] - `class_path`/`template_path` are no longer required when a stack is `locked` [GH-557] - Support for setting stack policies on stacks has been added [GH-570] ## 1.2.0 (2018-03-01) The biggest change in this release has to do with how we build the graph of dependencies between stacks. This is now a true DAG. As well, to speed up performance we now walk the graph in a threaded mode, allowing true parallelism and speeding up "wide" stack graphs considerably. - assertRenderedBlueprint always dumps current results [GH-528] - The `--stacks` flag now automatically builds dependencies of the given stack [GH-523] - an unnecessary DescribeStacks network call was removed [GH-529] - support stack json/yaml templates [GH-530] - `stacker {build,destroy}` now executes stacks in parallel. Parallelism can be controlled with a `-j` flag. 
[GH-531] - logging output has been simplified and no longer uses ANSI escape sequences to clear the screen [GH-532] - logging output is now colorized in `--interactive` mode if the terminal has a TTY [GH-532] - removed the upper bound on the boto3 dependency [GH-542] ## 1.2.0rc2 (2018-02-27) - Fix parameter handling for diffs [GH-540] - Fix an issue where SIGTERM/SIGINT weren't handled immediately [GH-543] - Log a line when SIGINT/SIGTERM are handled [GH-543] - Log failed steps at the end of plan execution [GH-543] - Remove upper bound on boto3 dependency [GH-542] ## 1.2.0rc1 (2018-02-15) The biggest change in this release has to do with how we build the graph of dependencies between stacks. This is now a true DAG. As well, to speed up performance we now walk the graph in a threaded mode, allowing true parallelism and speeding up "wide" stack graphs considerably. - assertRenderedBlueprint always dumps current results [GH-528] - stacker now builds a DAG internally [GH-523] - The `--stacks` flag now automatically builds dependencies of the given stack [GH-523] - an unnecessary DescribeStacks network call was removed [GH-529] - support stack json/yaml templates [GH-530] - `stacker {build,destroy}` now executes stacks in parallel. Parallelism can be controlled with a `-j` flag. [GH-531] - logging output has been simplified and no longer uses ANSI escape sequences to clear the screen [GH-532] - logging output is now colorized in `--interactive` mode if the terminal has a TTY [GH-532] ## 1.1.4 (2018-01-26) - Add `blueprint.to_json` for standalone rendering [GH-459] - Add global config for troposphere template indent [GH-505] - Add serverless transform/CREATE changeset types [GH-517] ## 1.1.3 (2017-12-23) Bugfix release- primarily to deal with a bug that's been around since the introduction of interactive mode/changesets. The bug primarily deals with the fact that we weren't deleting Changesets that were not submitted. 
This didn't affect anyone for the longest time, but recently people have started to hit limits on the # of changesets in an account. The current thinking is that the limits weren't enforced before, and only recently has been enforced. - Add S3 remote package sources [GH-487] - Make blueprint dump always create intermediate directories [GH-499] - Allow duplicate keys for most config mappings except `stacks` [GH-507] - Remove un-submitted changesets [GH-513] ## 1.1.2 (2017-11-01) This is a minor update to help deal with some of the issues between `stacker` and `stacker_blueprints` both having dependencies on `troposphere`. It loosens the dependencies, allowing stacker to work with any reasonably new version of troposphere (anything greater than `1.9.0`). `stacker_blueprints` will likely require newer versions of troposphere, as new types are introduced to the blueprints, but it's unlikely we'll change the `troposphere` version string for stacker, since it relies on only the most basic parts of the `troposphere` API. ## 1.1.1 (2017-10-11) This release is mostly about updating the dependencies for stacker to newer versions, since that was missed in the last release. 
## 1.1.0 (2017-10-08) - `--max-zones` removed from CLI [GH-427] - Ami lookup: add region specification [GH-433] - DynamoDB Lookup [GH-434] - Environment file is optional now [GH-436] - New functional test suite [GH-439] - Structure config object using Schematics [GH-443] - S3 endpoint fallback [GH-445] - Stack specific tags [GH-450] - Allow disabling of stacker bucket (direct CF updates) [GH-451] - Uniform deprecation warnings [GH-452] - Remote configuration support [GH-458] - TroposphereType updates [GH-462] - Fix replacements-only issue [GH-464] - testutil enhancements to blueprint testing [GH-467] - Removal of Interactive Provider (now combined w/ default provider) [GH-469] - protected stacks [GH-472] - MUCH Better handling of stack rollbacks & recreations [GH-473] - follow\_symlinks argument for aws lambda hook [GH-474] - Enable service\_role for cloudformation operations [GH-476] - Allow setting stack description from config [GH-477] - Move S3 templates into sub-directories [GH-478] ## 1.0.4 (2017-07-07) - Fix issue w/ tail being required (but not existing) on diff/info/etc [GH-429] ## 1.0.3 (2017-07-06) There was some reworking on how regions are handled, specifically around s3 and where the buckets for both stacker and the awslambda lookup are created. Now the stacker bucket will default to being created in the region where the stacks are being created (ie: from the `--region` argument). If you want to have the bucket be in a different region you now can set the `stacker_bucket_region` top level config value. For the awslambda hook, you also have the option of using `bucket_region` as an argument, provided you are using a custom `bucket` for the hook. If you are not using a custom bucket, then it will use the logic used above. 
- add ami lookup [GH-360] - Add support for Property objects in TroposphereType variables [GH-379] - Add debugging statements to sys.path appending [GH-385] - Catch undefined variable value [GH-388] - Exponential backoff waiting for AWS changeset to stabilize [GH-389] - Add parameter changes to diff output [GH-394] - Add CODE\_OF\_CONDUCT.md [GH-399] - Add a hint for forbidden bucket access [GH-401] - Fix issues w/ "none" as variable values [GH-405] - Remove extra '/' in blueprint tests [GH-409] - Fix dump provider interaction with lookups [GH-410] - Add ssmstore lookup docs [GH-411] - Fix issue w/ s3 buckets in different regions [GH-413, GH-417] - Disable loop logger when --tail is provided [GH-414] - Add envvar lookup [GH-418] ## 1.0.2 (2017-05-10) - fix lambda hook determinism [GH-372] - give lambda hook ability to upload to a prefix [GH-376] - fix bad argument for approval in interactive provider [GH-381] ## 1.0.1 (2017-04-24) - rxref lookup [GH-328] - Cleaned up raise statement in blueprints [GH-348] - Fix missing default provider for build\_parameters [GH-353] - Setup codecov [GH-354] - Added blueprint testing harness [GH-362] - context hook\_data lookup [GH-366] ## 1.0.0 (2017-03-04) This is a major release with the main change being the removal of the old Parameters logic in favor of Blueprint Variables and Lookups. 
- Add support for resolving variables when calling `dump`[GH-231] - Remove old Parameters code [GH-232] - Pass Context & Provider to hooks [GH-233] - Fix Issue w/ Dump [GH-241] - Support `allowed_values` within variable definitions [GH-245] - Fix filehandler lookups with pseudo parameters [GH-247] - keypair hook update to match route53 update [GH-248] - Add support for `TroposphereType` [GH-249] - Allow = in lookup contents [GH-251] - Add troposphere types [GH-257] - change capabilities to CAPABILITY\_NAMED\_IAM [GH-262] - Disable transformation of variables [GH-266] - Support destroying a subset of stacks [GH-278] - Update all hooks to use advanced results [GH-285] - Use sys\_path for hooks and lookups [GH-286] - Remove last of botocore connections [GH-287] - Remove --var flag [GH-289] - Avoid dictionary sharing pollution [GH-293] - Change aws\_lambda hook handler to use proper parameters [GH-297] - New `split` lookup handler [GH-302] - add parse\_user\_data [GH-306] - Add credential caching [GH-307] - Require explicit call to `output` lookup [GH-310] - Convert booleans to strings for CFNTypes [GH-311] - Add ssmstore as a lookup type [GH-314] - Added region to the ssm store test client [GH-316] - Add default lookup [GH-317] - Clean up errors from variables [GH-319] ## 0.8.6 (2017-01-26) - Support destroying subset of stacks [GH-278] - Update all hooks to use advanced results [GH-285] - Use sys\_path for hooks and lookups [GH-286] - Remove last of botocore conns [GH-287] - Avoid dictionary sharing pollution [GH-293] ## 0.8.5 (2016-11-28) - Allow `=` in lookup input [GH-251] - Add hook for uploading AWS Lambda functions [GH-252] - Upgrade hard coded capabilities to include named IAM [GH-262] - Allow hooks to return results that can be looked up later [GH-270] ## 0.8.4 (2016-11-01) - Fix an issue w/ boto3 version string not working with older setuptools ## 0.8.3 (2016-10-31) - pass context to hooks as a kwarg [GH-234] - Fix file handler lookups w/ pseudo parameters 
[GH-239] - Allow use of later boto3 [GH-253] ## 0.8.1 (2016-09-22) Minor update to remove dependencies on stacker\_blueprints for tests, since it resulted in a circular dependency. This is just a fix to get tests running again, and results in no change in functionality. ## 0.8.0 (2016-09-22) This is a big release which introduces the new concepts of Blueprint Variables and Lookups. A lot of folks contributed to this release - in both code, and just testing of the new features. Thanks to: @kylev, @oliviervg1, @datadotworld, @acmcelwee, @troyready, @danielkza, and @ttarhan Special thanks to @mhahn who did the bulk of the heavy lifting in this release, and the work towards 1.0! - Add docs on config, environments & translators [GH-157] - locked output changed to debug [GH-159] - Multi-output parameter doc [GH-160] - Remove spaces from multi-item parameters [GH-161] - Remove blueprints & configs in favor of stacker\_blueprints [GH-163] - Clean up plan/status split [GH-165] - Allow s3 server side encryption [GH-167] - Support configurable namespace delimiter [GH-169] - Support tags as a new top-level keyword [GH-171] - Update to boto3 [GH-174] - Interactive AWS Provider [GH-178] - Add config option for appending to sys.path [GH-179] - More condensed output [GH-182] - File loading lookup [GH-185] - Handle stacks without parameters [GH-193] - Implement blueprint variables & lookups [GH-194] - Fix traceback on interactive provider when adding resources [GH-198] - kms lookup [GH-200] - Compatible release version dependencies [GH-201] - add xref lookup [GH-202] - Update docstrings for consistency [GH-204] - Add support for CFN Parameter types in Blueprint Variables [GH-206] - Deal w/ multiprocessing library sharing ssl connections [GH-208] - Fix issues with slashes inside variable lookups [GH-213] - Custom validators for blueprint variables [GH-218] ## 0.6.3 (2016-05-24) - add `stacker dump` subcommand for testing stack/blueprints [GH-156] ## 0.6.2 (2016-05-17) - Allow users 
to override name of bucket to store templates [GH-145] - Add support for passing environment variables on the cli via --env [GH-148] - Cleanup output on non-verbose runs [GH-153] - Added `compare_env` command, for easier comparing of environment files [GH-155] ## 0.6.1 (2016-02-11) - Add support for the 'stacker diff' command [GH-133] - Python boolean parameters automatically converted to strings for CloudFormation [GH-136] - No longer require mappings in config [GH-140] - Skipped steps now include a reason [GH-141] ## 0.6.0 (2016-01-07) - Support tailing cloudformation event stream when building/destroying stacks [GH-90] - More customizable ASG userdata & options [GH-100] - Deprecate 'blueprints' in favor of 'stacker\_blueprints' package [GH-125] - Add KMS based encryption translator [GH-126] - Fix typo in ASG customization [GH-127] - Allow file:// prefix with KMS encryption translator [GH-128] - No longer require a confirmation if the user passes the `--force` flag when destroying [GH-131] ## 0.5.4 (2015-12-03) - Fix memory leak issue (GH-111) [GH-114] - Add enabled flag to stacks [GH-115] - Add support for List parameters [GH-117] - Add eu-west-1 support for empire [GH-116] - Move get\_fqn to a function, add tests [GH-119] - Add new postgres versions (9.4.4, 9.4.5) [GH-121] - Handle blank parameter values [GH-120] ## 0.5.3 (2015-11-03) - Add --version [GH-91] - Simplify environment file to key: value, rather than YAML [GH-94] - Ensure certificate exists hook [GH-94] - Ensure keypair exists hook [GH-99] - Custom field constructors & vault encryption [GH-95] - DBSnapshotIdentifier to RDS blueprints [GH-105] - Empire ECS Agent telemetry support fixes, use new Empire AMI [GH-107] - Remove stack tags [GH-110] ## 0.5.2 (2015-09-10) - Add Dockerfile/image [GH-87] - Clean up environment docs [GH-88] - Make StorageType configurable in RDS v2 [GH-92] ## 0.5.1 (2015-09-08) - Add info subcommand [GH-73] - Move namespace into environment [GH-72] - Simplified basecommand 
[GH-74] - Documentation updates [GH-75, GH-77, GH-78] - aws\_helper removal [GH-79] - Move VPC to use LOCAL\_PARAMETERS [GH-81] - Lower default AZ count to 2 [GH-82] - Allow use of all parameter properties [GH-83] - Parameter gathering in method [GH-84] - NoEcho on sensitive parameters in blueprints [GH-85] - Version 2 RDS Blueprints [GH-86] ## 0.5.0 (2015-08-13) - stacker subcommands [GH-35] - Added Empire production stacks [GH-43] - Major change in internal code layout & added testing - added destroy subcommand [GH-59] - Local Blueprint Parameters [GH-61] - Lockable stacks [GH-62] - Deal with Cloudformation API throttling [GH-64] - Clarify Remind's usage of stacker in README [GH-70] ## 0.4.1 (2015-07-23) - Stack Specific Parameters [GH-32] - Random fixes & cleanup [GH-34] - Handle skipped rollbacks [GH-36] - Internal zone detection [GH-39] - Internal hostname conditional [GH-40] - Empire production stacks [GH-43] ## 0.4.0 (2015-05-13) - Optional internal DNS Zone on vpc blueprint [GH-29] - Add environment concept [GH-27] - Optional internal zone cname for rds databases [GH-30] ## 0.3.0 (2015-05-05) - remove auto-subnet splitting in vpc stack (GH-25) - create bucket in correct region (GH-17, GH-23) - asg sets optionally sets up ELB w/ (optional) SSL - Remove DNS core requirement, add plugin/hook system (GH-26) ## 0.2.2 (2015-03-31) - Allow AWS to generate the DBInstanceIdentifier ## 0.2.1 (2015-03-31) - Bah, typo in version string, fixing ## 0.2.0 (2015-03-31) - New taxonomy (GH-18) - better setup.py (GH-16) - thanks mhahn - Use existing parameters (GH-20) - Able to work on subset of stacks (GH-14) - Config cleanup (GH-9) ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a 
harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at cloudtools-maintainers@groups.google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Contributions are welcome, and they are greatly appreciated! You can contribute in many ways: ## Types of Contributions ### Report Bugs Report bugs at https://github.com/cloudtools/stacker/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. ### Fix Bugs Look through the GitHub issues for bugs. Anything tagged with "bug" is open to whoever wants to implement it. ### Implement Features Look through the GitHub issues for features. Anything tagged with "feature" is open to whoever wants to implement it. 
### Write Documentation stacker could always use more documentation, whether as part of the official stacker docs, in docstrings, or even on the web in blog posts, articles, and such. Note: We use Google style docstrings (http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example\_google.html) ### Submit Feedback The best way to send feedback is to file an issue at https://github.com/cloudtools/stacker/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) ## Get Started! Ready to contribute? Here's how to set up `stacker` for local development. 1. Fork the `stacker` repo on GitHub. 2. Clone your fork locally: ```console $ git clone git@github.com:your_name_here/stacker.git ``` 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development: ```console $ mkvirtualenv stacker $ cd stacker/ $ python setup.py develop ``` 4. Create a branch for local development: ```console $ git checkout -b name-of-your-bugfix-or-feature ``` Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox: ```console $ make test ``` To get flake8 just pip install it into your virtualenv. 6. Commit your changes and push your branch to GitHub: ```console $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature ``` 7. Submit a pull request through the GitHub website. For information about the functional testing suite, see [tests/README.md](./tests). ## Pull Request Guidelines Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. 
If the pull request adds functionality, the docs should be updated. (See `Write Documentation` above for guidelines) 3. The pull request should work for Python 2.7 and for PyPy. Check https://circleci.com/gh/cloudtools/stacker and make sure that the tests pass for all supported Python versions. 4. Please update the `Upcoming/Master` section of the [CHANGELOG](./CHANGELOG.md) with a small bullet point about the change. ================================================ FILE: Dockerfile ================================================ FROM python:2.7.10 MAINTAINER Mike Barrett COPY scripts/docker-stacker /bin/docker-stacker RUN mkdir -p /stacks && pip install --upgrade pip setuptools WORKDIR /stacks COPY . /tmp/stacker RUN pip install --upgrade pip RUN pip install --upgrade setuptools RUN cd /tmp/stacker && python setup.py install && rm -rf /tmp/stacker ENTRYPOINT ["docker-stacker"] CMD ["-h"] ================================================ FILE: LICENSE ================================================ Copyright (c) 2015, Remind101, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: Makefile ================================================ .PHONY: build lint test-unit test-functional test build: docker build -t remind101/stacker . lint: flake8 --ignore E402,W503,W504,W605,N818 --exclude stacker/tests/ stacker flake8 --ignore E402,N802,W605,N818 stacker/tests # ignore setUp naming test-unit: clean python setup.py test test-unit3: clean python3 setup.py test clean: rm -rf .egg stacker.egg-info test-functional: cd tests && bats test_suite # General testing target for most development. test: lint test-unit test-unit3 apidocs: sphinx-apidoc --force -o docs/api stacker ================================================ FILE: README.rst ================================================ ======= stacker ======= .. image:: https://readthedocs.org/projects/stacker/badge/?version=latest :target: http://stacker.readthedocs.org/en/latest/ .. image:: https://circleci.com/gh/cloudtools/stacker.svg?style=shield :target: https://circleci.com/gh/cloudtools/stacker .. image:: https://empire-slack.herokuapp.com/badge.svg :target: https://empire-slack.herokuapp.com .. image:: https://badge.fury.io/py/stacker.svg :target: https://badge.fury.io/py/stacker .. image:: https://landscape.io/github/cloudtools/stacker/master/landscape.svg?style=flat :target: https://landscape.io/github/cloudtools/stacker/master :alt: Code Health .. 
image:: https://codecov.io/gh/cloudtools/stacker/branch/master/graph/badge.svg :target: https://codecov.io/gh/cloudtools/stacker :alt: codecov For full documentation, please see the readthedocs_ site. `Click here to join the Slack team`_ for stacker, and then join the #stacker channel! About ===== stacker is a tool and library used to create & update multiple CloudFormation stacks. It was originally written at Remind_ and released to the open source community. stacker Blueprints are written in troposphere_, though the purpose of most templates is to keep them as generic as possible and then use configuration to modify them. At Remind we use stacker to manage all of our Cloudformation stacks - both in development, staging, and production without any major issues. Requirements ============ * Python 3.7+ Stacker Command =============== The ``stacker`` command has sub-commands, similar to git. Here are some examples: ``build``: handles taking your stack config and then launching or updating stacks as necessary. ``destroy``: tears down your stacks ``diff``: compares your currently deployed stack templates to your config files ``info``: prints information about your currently deployed stacks We document these sub-commands in full along with others, in the documentation. Getting Started =============== ``stacker_cookiecutter``: https://github.com/cloudtools/stacker_cookiecutter We recommend creating your base `stacker` project using ``stacker_cookiecutter``. This tool will install all the needed dependencies and created the project directory structure and files. The resulting files are well documented with comments to explain their purpose and examples on how to extend. ``stacker_blueprints``: https://github.com/cloudtools/stacker_blueprints This repository holds working examples of ``stacker`` blueprints. Each blueprint works in isolation and may be referenced, extended, or copied into your project files. 
The blueprints are written in Python and use the troposphere_ library. ``stacker reference documentation``: We document all functionality and features of stacker in our extensive reference documentation located at readthedocs_. ``AWS OSS Blog``: https://aws.amazon.com/blogs/opensource/using-aws-codepipeline-and-open-source-tools-for-at-scale-infrastructure-deployment/ The AWS OSS Blog has a getting started guide using stacker with AWS CodePipeline. Docker ====== Stack can also be executed from Docker. Use this method to run stacker if you want to avoid setting up a python environment:: docker run -it -v `pwd`:/stacks remind101/stacker build ... .. _Remind: http://www.remind.com/ .. _troposphere: https://github.com/cloudtools/troposphere .. _string.Template: https://docs.python.org/2/library/string.html#template-strings .. _readthedocs: http://stacker.readthedocs.io/en/latest/ .. _`Click here to join the Slack team`: https://empire-slack.herokuapp.com ================================================ FILE: RELEASE.md ================================================ # Steps to release a new version ## Preparing for the release - Check out a branch named for the version: `git checkout -b release-1.1.1` - Change version in setup.py and stacker/\_\_init\_\_.py - Update CHANGELOG.md with changes made since last release (see below for helpful command) - add changed files: `git add setup.py stacker/\_\_init\_\_.py CHANGELOG.md` - Commit changes: `git commit -m "Release 1.1.1"` - Create a signed tag: `git tag --sign -m "Release 1.1.1" 1.1.1` - Push branch up to git: `git push -u origin release-1.1.1` - Open a PR for the release, ensure that tests pass ## Releasing - Push tag: `git push --tags` - Merge PR into master, checkout master locally: `git checkout master; git pull` - Create PyPI release: `python setup.py sdist upload --sign` - Update github release page: https://github.com/cloudtools/stacker/releases - use the contents of the latest CHANGELOG entry for the body. 
# Helper to create CHANGELOG entries git log --reverse --pretty=format:"%s" | tail -100 | sed 's/^/- /' ================================================ FILE: codecov.yml ================================================ comment: false ================================================ FILE: conf/README.rst ================================================ Please check out the stacker_blueprints_ repo for example configs and blueprints. .. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints ================================================ FILE: docs/.gitignore ================================================ _build ================================================ FILE: docs/Makefile ================================================ # Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = python -m sphinx PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " serve to run a webserver in the html dir (0.0.0.0:8000)" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." serve: cd $(BUILDDIR)/html/ && python -m SimpleHTTPServer dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/stacker.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/stacker.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/stacker" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/stacker" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." 
latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." 
coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." ================================================ FILE: docs/api/modules.rst ================================================ stacker ======= .. toctree:: :maxdepth: 4 stacker ================================================ FILE: docs/api/stacker.actions.rst ================================================ stacker\.actions package ======================== Submodules ---------- stacker\.actions\.base module ----------------------------- .. automodule:: stacker.actions.base :members: :undoc-members: :show-inheritance: stacker\.actions\.build module ------------------------------ .. automodule:: stacker.actions.build :members: :undoc-members: :show-inheritance: stacker\.actions\.destroy module -------------------------------- .. automodule:: stacker.actions.destroy :members: :undoc-members: :show-inheritance: stacker\.actions\.diff module ----------------------------- .. automodule:: stacker.actions.diff :members: :undoc-members: :show-inheritance: stacker\.actions\.info module ----------------------------- .. automodule:: stacker.actions.info :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.actions :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.blueprints.rst ================================================ stacker\.blueprints package =========================== Subpackages ----------- .. 
toctree:: stacker.blueprints.variables Submodules ---------- stacker\.blueprints\.base module -------------------------------- .. automodule:: stacker.blueprints.base :members: :undoc-members: :show-inheritance: stacker\.blueprints\.testutil module ------------------------------------ .. automodule:: stacker.blueprints.testutil :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.blueprints :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.blueprints.variables.rst ================================================ stacker\.blueprints\.variables package ====================================== Submodules ---------- stacker\.blueprints\.variables\.types module -------------------------------------------- .. automodule:: stacker.blueprints.variables.types :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.blueprints.variables :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.commands.rst ================================================ stacker\.commands package ========================= Subpackages ----------- .. toctree:: stacker.commands.stacker Module contents --------------- .. automodule:: stacker.commands :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.commands.stacker.rst ================================================ stacker\.commands\.stacker package ================================== Submodules ---------- stacker\.commands\.stacker\.base module --------------------------------------- .. automodule:: stacker.commands.stacker.base :members: :undoc-members: :show-inheritance: stacker\.commands\.stacker\.build module ---------------------------------------- .. 
automodule:: stacker.commands.stacker.build :members: :undoc-members: :show-inheritance: stacker\.commands\.stacker\.destroy module ------------------------------------------ .. automodule:: stacker.commands.stacker.destroy :members: :undoc-members: :show-inheritance: stacker\.commands\.stacker\.diff module --------------------------------------- .. automodule:: stacker.commands.stacker.diff :members: :undoc-members: :show-inheritance: stacker\.commands\.stacker\.info module --------------------------------------- .. automodule:: stacker.commands.stacker.info :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.commands.stacker :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.config.rst ================================================ stacker\.config package ======================= Subpackages ----------- .. toctree:: stacker.config.translators Module contents --------------- .. automodule:: stacker.config :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.config.translators.rst ================================================ stacker\.config\.translators package ==================================== Submodules ---------- stacker\.config\.translators\.kms module ---------------------------------------- .. automodule:: stacker.config.translators.kms :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.config.translators :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.hooks.rst ================================================ stacker\.hooks package ====================== Submodules ---------- stacker\.hooks\.aws\_lambda module ---------------------------------- .. 
automodule:: stacker.hooks.aws_lambda :members: :undoc-members: :show-inheritance: stacker\.hooks\.ecs module -------------------------- .. automodule:: stacker.hooks.ecs :members: :undoc-members: :show-inheritance: stacker\.hooks\.iam module -------------------------- .. automodule:: stacker.hooks.iam :members: :undoc-members: :show-inheritance: stacker\.hooks\.keypair module ------------------------------ .. automodule:: stacker.hooks.keypair :members: :undoc-members: :show-inheritance: stacker\.hooks\.route53 module ------------------------------ .. automodule:: stacker.hooks.route53 :members: :undoc-members: :show-inheritance: stacker\.hooks\.utils module ---------------------------- .. automodule:: stacker.hooks.utils :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.hooks :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.logger.rst ================================================ stacker\.logger package ======================= Submodules ---------- stacker\.logger\.formatter module --------------------------------- .. automodule:: stacker.logger.formatter :members: :undoc-members: :show-inheritance: stacker\.logger\.handler module ------------------------------- .. automodule:: stacker.logger.handler :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.logger :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.lookups.handlers.rst ================================================ stacker\.lookups\.handlers package ================================== Submodules ---------- stacker\.lookups\.handlers\.ami module -------------------------------------- .. automodule:: stacker.lookups.handlers.ami :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.default module ------------------------------------------ .. 
automodule:: stacker.lookups.handlers.default :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.dynamodb module ------------------------------------------- .. automodule:: stacker.lookups.handlers.dynamodb :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.envvar module ----------------------------------------- .. automodule:: stacker.lookups.handlers.envvar :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.file module --------------------------------------- .. automodule:: stacker.lookups.handlers.file :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.hook\_data module --------------------------------------------- .. automodule:: stacker.lookups.handlers.hook_data :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.kms module -------------------------------------- .. automodule:: stacker.lookups.handlers.kms :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.output module ----------------------------------------- .. automodule:: stacker.lookups.handlers.output :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.rxref module ---------------------------------------- .. automodule:: stacker.lookups.handlers.rxref :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.split module ---------------------------------------- .. automodule:: stacker.lookups.handlers.split :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.ssmstore module ------------------------------------------- .. automodule:: stacker.lookups.handlers.ssmstore :members: :undoc-members: :show-inheritance: stacker\.lookups\.handlers\.xref module --------------------------------------- .. automodule:: stacker.lookups.handlers.xref :members: :undoc-members: :show-inheritance: Module contents --------------- .. 
automodule:: stacker.lookups.handlers :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.lookups.rst ================================================ stacker\.lookups package ======================== Subpackages ----------- .. toctree:: stacker.lookups.handlers Submodules ---------- stacker\.lookups\.registry module --------------------------------- .. automodule:: stacker.lookups.registry :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.lookups :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.providers.aws.rst ================================================ stacker\.providers\.aws package =============================== Submodules ---------- stacker\.providers\.aws\.default module --------------------------------------- .. automodule:: stacker.providers.aws.default :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.providers.aws :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.providers.rst ================================================ stacker\.providers package ========================== Subpackages ----------- .. toctree:: stacker.providers.aws Submodules ---------- stacker\.providers\.base module ------------------------------- .. automodule:: stacker.providers.base :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker.providers :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/api/stacker.rst ================================================ stacker package =============== Subpackages ----------- .. 
toctree:: stacker.actions stacker.blueprints stacker.commands stacker.config stacker.hooks stacker.logger stacker.lookups stacker.providers stacker.tests Submodules ---------- stacker\.context module ----------------------- .. automodule:: stacker.context :members: :undoc-members: :show-inheritance: stacker\.environment module --------------------------- .. automodule:: stacker.environment :members: :undoc-members: :show-inheritance: stacker\.exceptions module -------------------------- .. automodule:: stacker.exceptions :members: :undoc-members: :show-inheritance: stacker\.plan module -------------------- .. automodule:: stacker.plan :members: :undoc-members: :show-inheritance: stacker\.session\_cache module ------------------------------ .. automodule:: stacker.session_cache :members: :undoc-members: :show-inheritance: stacker\.stack module --------------------- .. automodule:: stacker.stack :members: :undoc-members: :show-inheritance: stacker\.status module ---------------------- .. automodule:: stacker.status :members: :undoc-members: :show-inheritance: stacker\.tokenize\_userdata module ---------------------------------- .. automodule:: stacker.tokenize_userdata :members: :undoc-members: :show-inheritance: stacker\.util module -------------------- .. automodule:: stacker.util :members: :undoc-members: :show-inheritance: stacker\.variables module ------------------------- .. automodule:: stacker.variables :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: stacker :members: :undoc-members: :show-inheritance: ================================================ FILE: docs/blueprints.rst ================================================ ========== Blueprints ========== Blueprints are python classes that dynamically build CloudFormation templates. Where you would specify a raw Cloudformation template in a stack using the ``template_path`` key, you instead specify a blueprint python file using the ``class_path`` key. 
Traditionally blueprints are built using troposphere_, but that is not absolutely necessary. You are encouraged to check out the library of publicly shared Blueprints in the stacker_blueprints_ package. Making your own should be easy, and you can take a lot of examples from stacker_blueprints_. In the end, all that is required is that the Blueprint is a subclass of *stacker.blueprints.base* and it have the following methods: .. code-block:: python # Initializes the blueprint def __init__(self, name, context, mappings=None): # Updates self.template to create the actual template def create_template(self): # Returns a tuple: (version, rendered_template) def render_template(self): Variables ========= A Blueprint can define a ``VARIABLES`` property that defines the variables it accepts from the `Config Variables `_. ``VARIABLES`` should be a dictionary of ``: ``. The variable definition should be a dictionary which supports the following optional keys: **type:** The type for the variable value. This can either be a native python type or one of the `Variable Types`_. **default:** The default value that should be used for the variable if none is provided in the config. **description:** A string that describes the purpose of the variable. **validator:** An optional function that can do custom validation of the variable. A validator function should take a single argument, the value being validated, and should return the value if validation is successful. If there is an issue validating the value, an exception (``ValueError``, ``TypeError``, etc) should be raised by the function. **no_echo:** Only valid for variables whose type subclasses ``CFNType``. Whether to mask the parameter value whenever anyone makes a call that describes the stack. If you set the value to true, the parameter value is masked with asterisks (*****). **allowed_values:** Only valid for variables whose type subclasses ``CFNType``. The set of values that should be allowed for the CloudFormation Parameter. 
**allowed_pattern:** Only valid for variables whose type subclasses ``CFNType``. A regular expression that represents the patterns you want to allow for the CloudFormation Parameter. **max_length:** Only valid for variables whose type subclasses ``CFNType``. The maximum length of the value for the CloudFormation Parameter. **min_length:** Only valid for variables whose type subclasses ``CFNType``. The minimum length of the value for the CloudFormation Parameter. **max_value:** Only valid for variables whose type subclasses ``CFNType``. The max value for the CloudFormation Parameter. **min_value:** Only valid for variables whose type subclasses ``CFNType``. The min value for the CloudFormation Parameter. **constraint_description:** Only valid for variables whose type subclasses ``CFNType``. A string that explains the constraint when the constraint is violated for the CloudFormation Parameter. Variable Types ============== Any native python type can be specified as the ``type`` for a variable. You can also use the following custom types: TroposphereType --------------- The ``TroposphereType`` can be used to generate resources for use in the blueprint directly from user-specified configuration. Which case applies depends on what ``type`` was chosen, and how it would be normally used in the blueprint (and CloudFormation in general). Resource Types ^^^^^^^^^^^^^^ When ``type`` is a `Resource Type`_, the value specified by the user in the configuration file must be a dictionary, but with two possible structures. When ``many`` is disabled, the top-level dictionary keys correspond to parameters of the ``type`` constructor. The key-value pairs will be used directly, and one object will be created and stored in the variable. When ``many`` is enabled, the top-level dictionary *keys* are resource titles, and the corresponding *values* are themselves dictionaries, to be used as parameters for creating each of multiple ``type`` objects. 
A list of those objects will be stored in the variable. Property Types ^^^^^^^^^^^^^^ When ``type`` is a `Property Type`_ the value specified by the user in the configuration file must be a dictionary or a list of dictionaries. When ``many`` is disabled, the top-level dictionary keys correspond to parameters of the ``type`` constructor. The key-value pairs will be used directly, and one object will be created and stored in the variable. When ``many`` is enabled, a list of dictionaries is expected. For each element, one corresponding call will be made to the ``type`` constructor, and all the objects produced will be stored (also as a list) in the variable. Optional variables ^^^^^^^^^^^^^^^^^^ In either case, when ``optional`` is enabled, the variable may have no value assigned, or be explicitly assigned a null value. When that happens the variable's final value will be ``None``. Example ^^^^^^^ Below is an annotated example: .. code-block:: python from stacker.blueprints.base import Blueprint from stacker.blueprints.variables.types import TroposphereType from troposphere import s3, sns class Buckets(Blueprint): VARIABLES = { # Specify that Buckets will be a list of s3.Bucket types. # This means the config should a dictionary of dictionaries # which will be converted into troposphere buckets. "Buckets": { "type": TroposphereType(s3.Bucket, many=True), "description": "S3 Buckets to create.", }, # Specify that only a single bucket can be passed. "SingleBucket": { "type": TroposphereType(s3.Bucket), "description": "A single S3 bucket", }, # Specify that Subscriptions will be a list of sns.Subscription types. # Note: sns.Subscription is the property type, not the standalone # sns.SubscriptionResource. "Subscriptions": { "type": TroposphereType(sns.Subscription, many=True), "description": "Multiple SNS subscription designations" }, # Specify that only a single subscription can be passed, and that it # is made optional. 
"SingleOptionalSubscription": { "type": TroposphereType(sns.Subscription, optional=True), "description": "A single, optional SNS subscription designation" } } def create_template(self): t = self.template variables = self.get_variables() # The Troposphere s3 buckets have already been created when we access variables["Buckets"], we just need to add them as resources to the template. [t.add_resource(bucket) for bucket in variables["Buckets"]] # Add the single bucket to the template. You can use `Ref(single_bucket)` to pass CloudFormation references to the bucket just as you would with any other Troposphere type. single_bucket = variables["SingleBucket"] t.add_resource(single_bucket) subscriptions = variables["Subscriptions"] optional_subscription = variables["SingleOptionalSubscription"] # Handle it in some special way... if optional_subscription is not None: subscriptions.append(optional_subscription) t.add_resource(sns.Topic( TopicName="one-test", Subscriptions=)) t.add_resource(sns.Topic( TopicName="another-test", Subscriptions=subscriptions)) A sample config for the above: .. code-block:: yaml stacks: - name: buckets class_path: path.to.above.Buckets variables: Buckets: # resource name (title) that will be added to CloudFormation. FirstBucket: # name of the s3 bucket BucketName: my-first-bucket SecondBucket: BucketName: my-second-bucket SingleBucket: # resource name (title) that will be added to CloudFormation. MySingleBucket: BucketName: my-single-bucket Subscriptions: - Endpoint: one-lambda Protocol: lambda - Endpoint: another-lambda Protocol: lambda # The following could be ommited entirely SingleOptionalSubscription: Endpoint: a-third-lambda Protocol: lambda CFNType ------- The ``CFNType`` can be used to signal that a variable should be submitted to CloudFormation as a Parameter instead of only available to the Blueprint when rendering. This is useful if you want to leverage AWS- Specific Parameter types (e.g. 
``List<AWS::EC2::AvailabilityZone::Name>``) or Systems Manager Parameter Store values (e.g. ``AWS::SSM::Parameter::Value<String>``).
Stacker provides access to both the fully qualified stack name matching what’s shown in the CloudFormation console, in addition to the stacks short name you have set in your YAML config. Referencing Fully Qualified Stack name -------------------------------------- The fully qualified name is a combination of the Stacker namespace + the short name (what you set as `name` in your YAML config file). If your stacker namespace is `StackerIsCool` and the stacks short name is `myAwesomeEC2Instance`, the fully qualified name would be: ``StackerIsCool-myAwesomeEC2Instance`` To use this in your blueprint, you can get the name from context. The ``self.context.get_fqn(self.name)`` Referencing the Stack short name -------------------------------- The Stack short name is the name you specified for the stack within your YAML config. It does not include the namespace. If your stacker namespace is `StackerIsCool` and the stacks short name is `myAwesomeEC2Instance`, the short name would be: ``myAwesomeEC2Instance`` To use this in your blueprint, you can get the name from self.name: ``self.name`` Example ^^^^^^^ Below is an annotated example creating a security group: .. code-block:: python # we are importing Ref to allow for CFN References in the EC2 resource. 
Tags # will be used to set the Name tag from troposphere import Ref, ec2, Tags from stacker.blueprints.base import Blueprint # CFNString is imported to allow for stand alone stack use from stacker.blueprints.variables.types import CFNString class SampleBlueprint(Blueprint): # VpcId set here to allow for blueprint to be reused VARIABLES = { "VpcId": { "type": CFNString, "description": "The VPC to create the Security group in", } } def create_template(self): template = self.template # Assigning the variables to a variable variables = self.get_variables() # now adding a SecurityGroup resource named `SecurityGroup` to the CFN template template.add_resource( ec2.SecurityGroup( "SecurityGroup", # Refering the VpcId set as the varible VpcId=variables['VpcId'].ref, # Setting the group description as the fully qualified name GroupDescription=self.context.get_fqn(self.name), # setting the Name tag to be the stack short name Tags=Tags( Name=self.name ) ) ) Testing Blueprints ================== When writing your own blueprints its useful to write tests for them in order to make sure they behave the way you expect they would, especially if there is any complex logic inside. To this end, a sub-class of the `unittest.TestCase` class has been provided: `stacker.blueprints.testutil.BlueprintTestCase`. You use it like the regular TestCase class, but it comes with an addition assertion: `assertRenderedBlueprint`. This assertion takes a Blueprint object and renders it, then compares it to an expected output, usually in `tests/fixtures/blueprints`. Examples of using the `BlueprintTestCase` class can be found in the stacker_blueprints repo. For example, see the tests used to test the `Route53 DNSRecords Blueprint`_ and the accompanying `output results`_: Yaml (stacker) format tests --------------------------- In order to wrap the `BlueprintTestCase` tests in a format similar to stacker's stack format, the `YamlDirTestGenerator` class is provided. 
When subclassed in a directory, it will search for yaml files in that directory with certain structure and execute a test case for it. As an example: .. code-block:: yaml --- namespace: test stacks: - name: test_stack class_path: stacker_blueprints.s3.Buckets variables: var1: val1 When run from tests, this will create a template fixture file called test_stack.json containing the output from the `stacker_blueprints.s3.Buckets` template. Examples of using the `YamlDirTestGenerator` class can be found in the stacker_blueprints repo. For example, see the tests used to test the `s3.Buckets`_ class and the accompanying `fixture`_. These are generated from a `subclass of YamlDirTestGenerator`_. .. _troposphere: https://github.com/cloudtools/troposphere .. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints .. _Route53 DNSRecords Blueprint: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/test_route53.py .. _output results: https://github.com/cloudtools/stacker_blueprints/tree/master/tests/fixtures/blueprints .. _Resource Type: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html .. _Property Type: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-product-property-reference.html .. _s3.Buckets: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/test_s3.yaml .. _fixture: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/fixtures/blueprints/s3_static_website.json .. _subclass of YamlDirTestGenerator: https://github.com/cloudtools/stacker_blueprints/blob/master/tests/__init__.py ================================================ FILE: docs/commands.rst ================================================ ======== Commands ======== Build ----- Build is used to create/update the stacks provided in the config file. 
It automatically figures out any dependencies between stacks, and creates them in parallel safely (if a stack depends on another stack, it will wait for that stack to be finished before updating/creating). It also provides the *--dump* flag for testing out blueprints before pushing them up into CloudFormation. Even then, some errors might only be noticed after first submitting a stack, at which point it can no longer be updated by Stacker. When that situation is detected in interactive mode, you will be prompted to delete and re-create the stack, so that you don't need to do it manually in the AWS console. If that behavior is also desired in non-interactive mode, enable the *--recreate-failed* flag. :: # stacker build -h usage: stacker build [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i] [--replacements-only] [--recreate-failed] [-o] [--force STACKNAME] [--stacks STACKNAME] [-t] [-d DUMP] [environment] config Launches or updates CloudFormation stacks based on the given config. Stacker is smart enough to figure out if anything (the template or parameters) have changed for a given stack. If nothing has changed, stacker will correctly skip executing anything against the stack. positional arguments: environment Path to a simple `key: value` pair environment file. The values in the environment file can be used in the stack config as if it were a string.Template type: https://docs.python.org/2/library/string.html #template-strings. config The config file where stack configuration is located. Must be in yaml format. If `-` is provided, then the config will be read from stdin. optional arguments: -h, --help show this help message and exit -e ENV=VALUE, --env ENV=VALUE Adds environment key/value pairs from the command line. Overrides your environment file settings. Can be specified more than once. -r REGION, --region REGION The AWS region to launch in. -v, --verbose Increase output verbosity. May be specified up to twice. -i, --interactive Enable interactive mode. 
If specified, this will use the AWS interactive provider, which leverages Cloudformation Change Sets to display changes before running cloudformation templates. You'll be asked if you want to execute each change set. If you only want to authorize replacements, run with "--replacements- only" as well. --replacements-only If interactive mode is enabled, stacker will only prompt to authorize replacements. --recreate-failed Destroy and re-create stacks that are stuck in a failed state from an initial deployment when updating. -o, --outline Print an outline of what steps will be taken to build the stacks --force STACKNAME If a stackname is provided to --force, it will be updated, even if it is locked in the config. --stacks STACKNAME Only work on the stacks given. Can be specified more than once. If not specified then stacker will work on all stacks in the config file. -t, --tail Tail the CloudFormation logs while working with stacks -d DUMP, --dump DUMP Dump the rendered Cloudformation templates to a directory Destroy ------- Destroy handles the tearing down of CloudFormation stacks defined in the config file. It figures out any dependencies that may exist, and destroys the stacks in the correct order (in parallel if all dependent stacks have already been destroyed). :: # stacker destroy -h usage: stacker destroy [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i] [--replacements-only] [-f] [--stacks STACKNAME] [-t] environment config Destroys CloudFormation stacks based on the given config. Stacker will determine the order in which stacks should be destroyed based on any manual requirements they specify or output values they rely on from other stacks. positional arguments: environment Path to a simple `key: value` pair environment file. The values in the environment file can be used in the stack config as if it were a string.Template type: https://docs.python.org/2/library/string.html #template-strings. Must define at least a "namespace". 
config The config file where stack configuration is located. Must be in yaml format. If `-` is provided, then the config will be read from stdin. optional arguments: -h, --help show this help message and exit -e ENV=VALUE, --env ENV=VALUE Adds environment key/value pairs from the command line. Overrides your environment file settings. Can be specified more than once. -r REGION, --region REGION The AWS region to launch in. -v, --verbose Increase output verbosity. May be specified up to twice. -i, --interactive Enable interactive mode. If specified, this will use the AWS interactive provider, which leverages Cloudformation Change Sets to display changes before running cloudformation templates. You'll be asked if you want to execute each change set. If you only want to authorize replacements, run with "--replacements- only" as well. --replacements-only If interactive mode is enabled, stacker will only prompt to authorize replacements. -f, --force Whether or not you want to go through with destroying the stacks --stacks STACKNAME Only work on the stacks given. Can be specified more than once. If not specified then stacker will work on all stacks in the config file. -t, --tail Tail the CloudFormation logs while working with stacks Info ---- Info displays information on the CloudFormation stacks based on the given config. :: # stacker info -h usage: stacker info [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i] [--replacements-only] [--stacks STACKNAME] environment config Gets information on the CloudFormation stacks based on the given config. positional arguments: environment Path to a simple `key: value` pair environment file. The values in the environment file can be used in the stack config as if it were a string.Template type: https://docs.python.org/2/library/string.html #template-strings. Must define at least a "namespace". config The config file where stack configuration is located. Must be in yaml format. If `-` is provided, then the config will be read from stdin. 
optional arguments: -h, --help show this help message and exit -e ENV=VALUE, --env ENV=VALUE Adds environment key/value pairs from the command line. Overrides your environment file settings. Can be specified more than once. -r REGION, --region REGION The AWS region to launch in. -v, --verbose Increase output verbosity. May be specified up to twice. -i, --interactive Enable interactive mode. If specified, this will use the AWS interactive provider, which leverages Cloudformation Change Sets to display changes before running cloudformation templates. You'll be asked if you want to execute each change set. If you only want to authorize replacements, run with "--replacements- only" as well. --replacements-only If interactive mode is enabled, stacker will only prompt to authorize replacements. --stacks STACKNAME Only work on the stacks given. Can be specified more than once. If not specified then stacker will work on all stacks in the config file. Diff ---- Diff creates a CloudFormation Change Set for each stack and displays the resulting changes. This works for stacks that already exist and new stacks. For stacks that are dependent on outputs from other stacks in the same file, stacker will infer that an update was made to the "parent" stack and invalidate outputs from resources that were changed and replace their value with ````. This is done to illustrate the potential blast radius of a change and assist in tracking down why subsequent stacks could change. This inference is not perfect but takes a "best effort" approach to showing potential change between stacks that rely on each others outputs. :: # stacker diff -h usage: stacker diff [-h] [-e ENV=VALUE] [-r REGION] [-v] [-i] [--replacements-only] [--force STACKNAME] [--stacks STACKNAME] environment config Diffs the config against the currently running CloudFormation stacks Sometimes small changes can have big impacts. Run "stacker diff" before "stacker build" to detect bad things(tm) from happening in advance! 
positional arguments: environment Path to a simple `key: value` pair environment file. The values in the environment file can be used in the stack config as if it were a string.Template type: https://docs.python.org/2/library/string.html #template-strings. Must define at least a "namespace". config The config file where stack configuration is located. Must be in yaml format. If `-` is provided, then the config will be read from stdin. optional arguments: -h, --help show this help message and exit -e ENV=VALUE, --env ENV=VALUE Adds environment key/value pairs from the command line. Overrides your environment file settings. Can be specified more than once. -r REGION, --region REGION The AWS region to launch in. -v, --verbose Increase output verbosity. May be specified up to twice. -i, --interactive Enable interactive mode. If specified, this will use the AWS interactive provider, which leverages Cloudformation Change Sets to display changes before running cloudformation templates. You'll be asked if you want to execute each change set. If you only want to authorize replacements, run with "--replacements- only" as well. --replacements-only If interactive mode is enabled, stacker will only prompt to authorize replacements. --force STACKNAME If a stackname is provided to --force, it will be diffed, even if it is locked in the config. --stacks STACKNAME Only work on the stacks given. Can be specified more than once. If not specified then stacker will work on all stacks in the config file. ================================================ FILE: docs/conf.py ================================================ # -*- coding: utf-8 -*- # # stacker documentation build configuration file, created by # sphinx-quickstart on Fri Aug 14 09:59:29 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. 
# # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) import stacker # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'stacker' copyright = u'2015, Michael Barrett' author = u'Michael Barrett' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = stacker.__version__ # The full version, including alpha/beta/rc tags. release = stacker.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #html_theme = 'sphinx_rtd_theme' html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "A Cloudformation Stack Manager", "github_button": True, "github_user": "cloudtools", "github_repo": "stacker", "github_banner": True, } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'stackerdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'stacker.tex', u'stacker Documentation', u'Michael Barrett', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. 
#latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'stacker', u'stacker Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'stacker', u'stacker Documentation', author, 'stacker', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False ================================================ FILE: docs/config.rst ================================================ ============= Configuration ============= stacker makes use of a YAML formatted config file to define the different CloudFormation stacks that make up a given environment. The configuration file has a loose definition, with only a few top-level keywords. Other than those keywords, you can define your own top-level keys to make use of other YAML features like `anchors & references`_ to avoid duplicating config. 
(See `YAML anchors & references`_ for details) Top Level Keywords ================== Namespace --------- You can provide a **namespace** to create all stacks within. The namespace will be used as a prefix for the name of any stack that stacker creates, and makes it unnecessary to specify the fully qualified name of the stack in output lookups. In addition, this value will be used to create an S3 bucket that stacker will use to upload and store all CloudFormation templates. In general, this is paired with the concept of `Environments `_ to create a namespace per environment:: namespace: ${namespace} Namespace Delimiter ------------------- By default, stacker will use '-' as a delimiter between your namespace and the declared stack name to build the actual CloudFormation stack name that gets created. Since child resources of your stacks will, by default, use a portion of your stack name in the auto-generated resource names, the first characters of your fully-qualified stack name potentially convey valuable information to someone glancing at resource names. If you prefer to not use a delimiter, you can pass the **namespace_delimiter** top level key word in the config as an empty string. See the `CloudFormation API Reference`_ for allowed stack name characters. .. _`CloudFormation API Reference`: http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html S3 Bucket --------- Stacker, by default, pushes your CloudFormation templates into an S3 bucket and points CloudFormation at the template in that bucket when launching or updating your stacks. By default it uses a bucket named **stacker-${namespace}**, where the namespace is the namespace provided in the config. If you want to change this, provide the **stacker_bucket** top level key word in the config. The bucket will be created in the same region that the stacks will be launched in.
If you want to change this, or if you already have an existing bucket in a different region, you can set the **stacker_bucket_region** to the region where you want to create the bucket. **S3 Bucket location prior to 1.0.4:** There was a "bug" early on in stacker that created the s3 bucket in us-east-1, no matter what you specified as your --region. An issue came up leading us to believe this shouldn't be the expected behavior, so we fixed the behavior. If you executed a stacker build prior to V 1.0.4, your bucket for templates would already exist in us-east-1, requiring you to specify the **stacker_bucket_region** top level keyword. .. note:: Deprecation of fallback to legacy template bucket. We will first try the region you defined using the top level keyword under **stacker_bucket_region**, or what was specified in the --region flag. If that fails, we fall back to the us-east-1 region. The fallback to us-east-1 will be removed in a future release resulting in the following botocore exception to be thrown: ``TemplateURL must reference a valid S3 object to which you have access.`` To avoid this issue, specify the stacker_bucket_region top level keyword as described above. You can specify this keyword now to remove the deprecation warning. If you want stacker to upload templates directly to CloudFormation, instead of first uploading to S3, you can set **stacker_bucket** to an empty string. However, note that template size is greatly limited when uploading directly. See the `CloudFormation Limits Reference`_. .. _`CloudFormation Limits Reference`: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html Module Paths ------------ When setting the ``classpath`` for blueprints/hooks, it is sometimes desirable to load modules from outside the default ``sys.path`` (e.g., to include modules inside the same repo as config files). Adding a path (e.g.
``./``) to the **sys_path** top level key word will allow modules from that path location to be used. Service Role ------------ By default stacker doesn't specify a service role when executing changes to CloudFormation stacks. If you would prefer that it do so, you can set **service_role** to be the ARN of the service that stacker should use when executing CloudFormation changes. This is the equivalent of setting ``RoleARN`` on a call to the following CloudFormation api calls: ``CreateStack``, ``UpdateStack``, ``CreateChangeSet``. See the AWS documentation for `AWS CloudFormation Service Roles`_. .. _`AWS CloudFormation Service Roles`: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html?icmpid=docs_cfn_console Remote Packages --------------- The **package_sources** top level keyword can be used to define remote sources for blueprints (e.g., retrieving ``stacker_blueprints`` on github at tag ``v1.0.2``). The only required key for a git repository config is ``uri``, but ``branch``, ``tag``, & ``commit`` can also be specified:: package_sources: git: - uri: git@github.com:acmecorp/stacker_blueprints.git - uri: git@github.com:remind101/stacker_blueprints.git tag: 1.0.0 paths: - stacker_blueprints - uri: git@github.com:contoso/webapp.git branch: staging - uri: git@github.com:contoso/foo.git commit: 12345678 If no specific commit or tag is specified for a repo, the remote repository will be checked for newer commits on every execution of Stacker. 
For ``.tar.gz`` & ``zip`` archives on s3, specify a ``bucket`` & ``key``:: package_sources: s3: - bucket: mystackers3bucket key: archives/blueprints-v1.zip paths: - stacker_blueprints - bucket: anothers3bucket key: public/public-blueprints-v2.tar.gz requester_pays: true - bucket: yetanothers3bucket key: sallys-blueprints-v1.tar.gz # use_latest defaults to true - will update local copy if the # last modified date on S3 changes use_latest: false Local directories can also be specified:: package_sources: local: - source: ../vpc Use the ``paths`` option when subdirectories of the repo/archive/directory should be added to Stacker's ``sys.path``. Cloned repos/archives will be cached between builds; the cache location defaults to ~/.stacker but can be manually specified via the **stacker_cache_dir** top level keyword. Remote Configs ~~~~~~~~~~~~~~ Configuration yamls from remote configs can also be used by specifying a list of ``configs`` in the repo to use:: package_sources: git: - uri: git@github.com:acmecorp/stacker_blueprints.git configs: - vpc.yaml In this example, the configuration in ``vpc.yaml`` will be merged into the running current configuration, with the current configuration's values taking priority over the values in ``vpc.yaml``. Dictionary Stack Names & Hook Paths ::::::::::::::::::::::::::::::::::: To allow remote configs to be selectively overridden, stack names & hook paths can optionally be defined as dictionaries, e.g.:: pre_build: my_route53_hook: path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com stacks: vpc-example: class_path: stacker_blueprints.vpc.VPC locked: false enabled: true bastion-example: class_path: stacker_blueprints.bastion.Bastion locked: false enabled: true Pre & Post Hooks ---------------- Many actions allow for pre & post hooks. These are python methods that are executed before, and after the action is taken for the entire config. Hooks can be enabled or disabled, per hook.
Only the following actions allow pre/post hooks: * build (keywords: *pre_build*, *post_build*) * destroy (keywords: *pre_destroy*, *post_destroy*) There are a few reasons to use these, though the most common is if you want better control over the naming of a resource than what CloudFormation allows. The keyword is a list of dictionaries with the following keys: **path:** the python import path to the hook **data_key:** If set, and the hook returns data (a dictionary), the results will be stored in the context.hook_data with the data_key as its key. **required:** whether to stop execution if the hook fails **enabled:** whether to execute the hook every stacker run. Default: True. This is a bool that grants you the ability to execute a hook per environment when combined with a variable pulled from an environment file. **args:** a dictionary of arguments to pass to the hook An example using the *create_domain* hook for creating a route53 domain before the build action:: pre_build: - path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com An example of a hook using the ``create_domain_bool`` variable from the environment file to determine if the hook should run. Set ``create_domain_bool: true`` or ``create_domain_bool: false`` in the environment file to determine if the hook should run in the environment stacker is running against:: pre_build: - path: stacker.hooks.route53.create_domain required: true enabled: ${create_domain_bool} args: domain: mydomain.com Tags ---- CloudFormation supports arbitrary key-value pair tags. All stack-level tags, including automatically created tags, are propagated to resources that AWS CloudFormation supports. See `AWS CloudFormation Resource Tags Type`_ for more details. If no tags are specified, the `stacker_namespace` tag is applied to your stack with the value of `namespace` as the tag value. If you prefer to apply a custom set of tags, specify the top-level keyword `tags` as a map.
Example:: tags: "hello": world "my_tag:with_colons_in_key": ${dynamic_tag_value_from_my_env} simple_tag: simple value If you prefer to have no tags applied to your stacks (versus the default tags that stacker applies), specify an empty map for the top-level keyword:: tags: {} .. _`AWS CloudFormation Resource Tags Type`: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html Mappings -------- Mappings are dictionaries that are provided as Mappings_ to each CloudFormation stack that stacker produces. These can be useful for providing things like different AMIs for different instance types in different regions:: mappings: AmiMap: us-east-1: NAT: ami-ad227cc4 ubuntu1404: ami-74e27e1c bastion: ami-74e27e1c us-west-2: NAT: ami-290f4119 ubuntu1404: ami-5189a661 bastion: ami-5189a661 These can be used in each blueprint/stack as usual. Lookups ------- Lookups allow you to create custom methods which take a value and are resolved at build time. The resolved values are passed to the `Blueprints `_ before they are rendered. For more information, see the `Lookups `_ documentation. stacker provides some common `lookups `_, but it is sometimes useful to have your own custom lookup that doesn't get shipped with stacker. You can register your own lookups by defining a `lookups` key:: lookups: custom: path.to.lookup.handler The key name for the lookup will be used as the type name when registering the lookup. The value should be the path to a valid lookup handler. You can then use these within your config:: conf_value: ${custom some-input-here} Stacks ------ This is the core part of the config - this is where you define each of the stacks that will be deployed in the environment. The top level keyword *stacks* is populated with a list of dictionaries, each representing a single stack to be built. A stack has the following keys: **name:** The logical name for this stack, which can be used in conjunction with the ``output`` lookup.
The value here must be unique within the config. If no ``stack_name`` is provided, the value here will be used for the name of the CloudFormation stack. **class_path:** The python class path to the Blueprint to be used. Specify this or ``template_path`` for the stack. **template_path:** Path to raw CloudFormation template (JSON or YAML). Specify this or ``class_path`` for the stack. Path can be specified relative to the current working directory (e.g. templates stored alongside the Config), or relative to a directory in the python ``sys.path`` (i.e. for loading templates retrieved via ``packages_sources``). **description:** A short description to apply to the stack. This overwrites any description provided in the Blueprint. See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-description-structure.html **variables:** A dictionary of Variables_ to pass into the Blueprint when rendering the CloudFormation template. Variables_ can be any valid YAML data structure. **locked:** (optional) If set to true, the stack is locked and will not be updated unless the stack is passed to stacker via the *--force* flag. This is useful for *risky* stacks that you don't want to take the risk of allowing CloudFormation to update, but still want to make sure get launched when the environment is first created. When ``locked``, it's not necessary to specify a ``class_path`` or ``template_path``. **enabled:** (optional) If set to false, the stack is disabled, and will not be built or updated. This can allow you to disable stacks in different environments. **protected:** (optional) When running an update in non-interactive mode, if a stack has *protected* set to *true* and would get changed, stacker will switch to interactive mode for that stack, allowing you to approve/skip the change. **requires:** (optional) a list of other stacks this stack requires. 
This is for explicit dependencies - you do not need to set this if you refer to another stack in a Parameter, so this is rarely necessary. **required_by:** (optional) a list of other stacks or targets that require this stack. It's an inverse to ``requires``. **tags:** (optional) a dictionary of CloudFormation tags to apply to this stack. This will be combined with the global tags, but these tags will take precedence. **stack_name:** (optional) If provided, this will be used as the name of the CloudFormation stack. Unlike ``name``, the value doesn't need to be unique within the config, since you could have multiple stacks with the same name, but in different regions or accounts. (note: the namespace from the environment will be prepended to this) **region**: (optional): If provided, specifies the name of the region that the CloudFormation stack should reside in. If not provided, the default region will be used (``AWS_DEFAULT_REGION``, ``~/.aws/config`` or the ``--region`` flag). If both ``region`` and ``profile`` are specified, the value here takes precedence over the value in the profile. **profile**: (optional): If provided, specifies the name of an AWS profile to use when performing AWS API calls for this stack. This can be used to provision stacks in multiple accounts or regions. **stack_policy_path**: (optional): If provided, specifies the path to a JSON formatted stack policy that will be applied when the CloudFormation stack is created and updated. You can use stack policies to prevent CloudFormation from making updates to protected resources (e.g. databases). See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html **in_progress_behavior**: (optional): If provided, specifies the behavior for when a stack is in `CREATE_IN_PROGRESS` or `UPDATE_IN_PROGRESS`. By default, stacker will raise an exception if the stack is in an `IN_PROGRESS` state.
You can set this option to `wait` and stacker will wait for the previous update to complete before attempting to update the stack. **notification_arns**: (optional): If provided, accepts a list of None or many AWS SNS Topic ARNs which will be notified of this stack's CloudFormation state changes. Stacks Example ~~~~~~~~~~~~~~ Here's an example from stacker_blueprints_, used to create a VPC:: stacks: - name: vpc-example class_path: stacker_blueprints.vpc.VPC locked: false enabled: true variables: InstanceType: t2.small SshKeyName: default ImageName: NAT AZCount: 2 PublicSubnets: - 10.128.0.0/24 - 10.128.1.0/24 - 10.128.2.0/24 - 10.128.3.0/24 PrivateSubnets: - 10.128.8.0/22 - 10.128.12.0/22 - 10.128.16.0/22 - 10.128.20.0/22 CidrBlock: 10.128.0.0/16 Targets ------- In stacker, **targets** can be used as a lightweight method to group a number of stacks together, as a named "target" in the graph. Internally, this adds a node to the underlying DAG, which can then be used alongside the `--targets` flag. If you're familiar with the concept of "targets" in systemd, the concept is the same. **name:** The logical name for this target. **requires:** (optional) a list of stacks or other targets this target requires. **required_by:** (optional) a list of stacks or other targets that require this target. Here's an example of a target that will execute all "database" stacks:: targets: - name: databases stacks: - name: dbA class_path: blueprints.DB required_by: - databases - name: dbB class_path: blueprints.DB required_by: - databases Custom Log Formats ------------------ By default, stacker uses the following `log_formats`:: log_formats: info: "[%(asctime)s] %(message)s" color: "[%(asctime)s] \033[%(color)sm%(message)s\033[39m" debug: "[%(asctime)s] %(levelname)s %(threadName)s %(name)s:%(lineno)d(%(funcName)s): %(message)s" You may optionally provide custom `log_formats`. 
In this example, we add the environment name to each log line:: log_formats: info: "[%(asctime)s] ${environment} %(message)s" color: "[%(asctime)s] ${environment} \033[%(color)sm%(message)s\033[39m" You may use any of the standard Python `logging module format attributes `_ when building your `log_formats`. Variables ========== Variables are values that will be passed into a `Blueprint `_ before it is rendered. Variables can be any valid YAML data structure and can leverage Lookups_ to expand values at build time. The following concepts make working with variables within large templates easier: YAML anchors & references ------------------------- If you have a common set of variables that you need to pass around in many places, it can be annoying to have to copy and paste them in multiple places. Instead, using a feature of YAML known as `anchors & references`_, you can define common values in a single place and then refer to them with a simple syntax. For example, say you pass a common domain name to each of your stacks, each of them taking it as a Variable. Rather than having to enter the domain into each stack (and hopefully not typo'ing any of them) you could do the following:: domain_name: &domain mydomain.com Now you have an anchor called **domain** that you can use in place of any value in the config to provide the value **mydomain.com**. You use the anchor with a reference:: stacks: - name: vpc class_path: stacker_blueprints.vpc.VPC variables: DomainName: *domain Even more powerful is the ability to anchor entire dictionaries, and then reference them in another dictionary, effectively providing it with default values. 
For example:: common_variables: &common_variables DomainName: mydomain.com InstanceType: m3.medium AMI: ami-12345abc Now, rather than having to provide each of those variables to every stack that could use them, you can just do this instead:: stacks: - name: vpc class_path: stacker_blueprints.vpc.VPC variables: << : *common_variables InstanceType: c4.xlarge # override the InstanceType in this stack Using Outputs as Variables --------------------------- Since stacker encourages the breaking up of your CloudFormation stacks into entirely separate stacks, sometimes you'll need to pass values from one stack to another. The way this is handled in stacker is by having one stack provide Outputs_ for all the values that another stack may need, and then using those as the inputs for another stack's Variables_. stacker makes this easier for you by providing a syntax for Variables_ that will cause stacker to automatically look up the values of Outputs_ from another stack in its config. To do so, use the following format for the Variable on the target stack:: MyParameter: ${output OtherStack::OutputName} Since referencing Outputs_ from stacks is the most common use case, `output` is the default lookup type. For more information see Lookups_. This example is taken from stacker_blueprints_ example config - when building things inside a VPC, you will need to pass the *VpcId* of the VPC that you want the resources to be located in. 
If the *vpc* stack provides an Output called *VpcId*, you can reference it easily:: domain_name: &domain my_domain stacks: - name: vpc class_path: stacker_blueprints.vpc.VPC variables: DomainName: *domain - name: webservers class_path: stacker_blueprints.asg.AutoscalingGroup variables: DomainName: *domain VpcId: ${output vpc::VpcId} # gets the VpcId Output from the vpc stack Note: Doing this creates an implicit dependency from the *webservers* stack to the *vpc* stack, which will cause stacker to submit the *vpc* stack, and then wait until it is complete before it submits the *webservers* stack. Multi Account/Region Provisioning --------------------------------- You can use stacker to manage CloudFormation stacks in multiple accounts and regions, and reference outputs across them. As an example, let's say you had 3 accounts you wanted to manage: #) OpsAccount: An AWS account that has IAM users for employees. #) ProdAccount: An AWS account for a "production" environment. #) StageAccount: An AWS account for a "staging" environment. You want employees with IAM user accounts in OpsAccount to be able to assume roles in both the ProdAccount and StageAccount. You can use stacker to easily manage this:: stacks: # Create some stacks in both the "prod" and "stage" accounts with IAM roles # that employees can use. - name: prod/roles profile: prod class_path: blueprints.Roles - name: stage/roles profile: stage class_path: blueprints.Roles # Create a stack in the "ops" account and grant each employee access to # assume the roles we created above. - name: users profile: ops class_path: blueprints.IAMUsers variables: Users: john-smith: Roles: - ${output prod/roles::EmployeeRoleARN} - ${output stage/roles::EmployeeRoleARN} Note how I was able to reference outputs from stacks in multiple accounts using the `output` lookup! Environments ============ A pretty common use case is to have separate environments that you want to look mostly the same, though with some slight modifications.
For example, you might want a *production* and a *staging* environment. The production environment likely needs more instances, and often those instances will be of a larger instance type. Environments allow you to use your existing stacker config, but provide different values based on the environment file chosen on the command line. For more information, see the `Environments `_ documentation. Translators =========== .. note:: Translators have been deprecated in favor of Lookups_ and will be removed in a future release. Translators allow you to create custom methods which take a value, then modify it before passing it on to the stack. Currently this is used to allow you to pass a KMS encrypted string as a Parameter, then have KMS decrypt it before submitting it to CloudFormation. For more information, see the `Translators `_ documentation. .. _`anchors & references`: https://en.wikipedia.org/wiki/YAML#Repeated_nodes .. _Mappings: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html .. _Outputs: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/outputs-section-structure.html .. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints .. _`AWS profiles`: https://docs.aws.amazon.com/cli/latest/userguide/cli-multiple-profiles.html ================================================ FILE: docs/environments.rst ================================================ ============ Environments ============ When running stacker, you can optionally provide an "environment" file. The environment file defines values, which can then be referred to by name from your stack config file. The environment file is interpreted as YAML if it ends in `.yaml` or `.yml`, otherwise it's interpreted as simple key/value pairs. Key/Value environments ---------------------- The stacker config file will be interpolated as a `string.Template `_ using the key/value pairs from the environment file. 
The format of the file is a single key/value per line, separated by a colon (**:**), like this:: vpcID: vpc-12345678 Provided the key/value vpcID above, you will now be able to use this in your configs for the specific environment you are deploying into. They act as keys that can be used in your config file, providing a sort of templating ability. This allows you to change the values of your config based on the environment you are in. For example, if you have a *webserver* stack, and you need to provide it a variable for the instance size it should use, you would have something like this in your config file:: stacks: - name: webservers class_path: stacker_blueprints.asg.AutoscalingGroup variables: InstanceType: m3.medium But what if you needed more CPU in your production environment, but not in your staging? Without Environments, you'd need a separate config for each. With environments, you can simply define two different environment files with the appropriate *InstanceType* in each, and then use the key in the environment files in your config. For example:: # in the file: stage.env web_instance_type: m3.medium # in the file: prod.env web_instance_type: c4.xlarge # in your config file: stacks: - name: webservers class_path: stacker_blueprints.asg.AutoscalingGroup variables: InstanceType: ${web_instance_type} YAML environments ----------------- YAML environments allow for more complex environment configuration rather than simple text substitution, and support YAML features like anchors and references. To build on the example above, let's define a stack that's a little more complex:: stacks: - name: webservers class_path: stacker_blueprints.asg.AutoscalingGroup variables: InstanceType: ${web_instance_type} IngressCIDRsByPort: ${ingress_cidrs_by_port} We've defined a stack which expects a list of ingress CIDR's allowed access to each port. 
Our environment files would look like this:: # in the file: stage.yml web_instance_type: m3.medium ingress_cidrs_by_port: 80: - 192.168.1.0/8 8080: - 0.0.0.0/0 # in the file: prod.yml web_instance_type: c4.xlarge ingress_cidrs_by_port: 80: - 192.168.1.0/8 443: - 10.0.0.0/16 - 10.1.0.0/16 The YAML format allows for specifying lists, maps, and supports all `pyyaml` functionality allowed in the `safe_load()` function. Variable substitution in the YAML case is a bit more complex than in the `string.Template` case. Objects can only be substituted for variables in the case where we perform a full substitution, such as this:: vpcID: ${vpc_variable} We can not substitute an object in a sub-string, such as this:: vpcID: prefix-${vpc_variable} It makes no sense to substitute a complex object in this case, and we will raise an error if that happens. You can still perform this substitution with primitives; numbers, strings, but not dicts or lists. .. note:: Namespace defined in the environment file has been deprecated in favor of defining the namespace in the config and will be removed in a future release. ================================================ FILE: docs/index.rst ================================================ .. stacker documentation master file, created by sphinx-quickstart on Fri Aug 14 09:59:29 2015. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to stacker's documentation! =================================== stacker is a tool and library used to create & update multiple CloudFormation stacks. It was originally written at Remind_ and released to the open source community. stacker Blueprints are written in troposphere_, though the purpose of most templates is to keep them as generic as possible and then use configuration to modify them. At Remind we use stacker to manage all of our Cloudformation stacks - both in development, staging and production without any major issues.
Main Features ------------- - Easily `Create/Update `_/`Destroy `_ many stacks in parallel (though with an understanding of cross-stack dependencies) - Makes it easy to manage large environments in a single config, while still allowing you to break each part of the environment up into its own completely separate stack. - Manages dependencies between stacks, only launching one after all the stacks it depends on are finished. - Only updates stacks that have changed and that have not been explicitly locked or disabled. - Easily pass Outputs from one stack in as Variables on another (which also automatically provides an implicit dependency) - Use `Environments `_ to manage slightly different configuration in different environments. - Use `Lookups `_ to allow dynamic fetching or altering of data used in Variables. - A diff command for diffing your config against what is running in a live CloudFormation environment. - A small library of pre-shared Blueprints can be found at the stacker_blueprints_ repo, making things like setting up a VPC easy. Contents: .. toctree:: :maxdepth: 2 organizations_using_stacker terminology config environments translators lookups commands blueprints templates API Docs Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` .. _Remind: http://www.remind.com/ .. _troposphere: https://github.com/cloudtools/troposphere .. _stacker_blueprints: https://github.com/cloudtools/stacker_blueprints ================================================ FILE: docs/lookups.rst ================================================ ======= Lookups ======= Stacker provides the ability to dynamically replace values in the config via a concept called lookups. A lookup is meant to take a value and convert it by calling out to another service or system. A lookup is denoted in the config with the ``${<lookup type> <lookup input>}`` syntax. If ``<lookup type>`` isn't provided, stacker will fall back to use the ``output`` lookup. Lookups are only resolved within `Variables `_.
They can be nested in any part of a YAML data structure and within another lookup itself. .. note:: If a lookup has a non-string return value, it can be the only lookup within a value. i.e. if `custom` returns a list, this would raise an exception:: Variable: ${custom something}, ${output otherStack::Output} This is valid:: Variable: ${custom something} For example, given the following:: stacks: - name: sg class_path: some.stack.blueprint.Blueprint variables: Roles: - ${output otherStack::IAMRole} Values: Env: Custom: ${custom ${output otherStack::Output}} DBUrl: postgres://${output dbStack::User}@${output dbStack::HostName} The Blueprint would have access to the following resolved variables dictionary:: # variables { "Roles": ["other-stack-iam-role"], "Values": { "Env": { "Custom": "custom-output", "DBUrl": "postgres://user@hostname", }, }, } stacker includes the following lookup types: - `output lookup`_ - `ami lookup`_ - `custom lookup`_ - `default lookup`_ - `dynamodb lookup`_ - `envvar lookup`_ - `file lookup`_ - `hook_data lookup`_ - `kms lookup`_ - `rxref lookup`_ - `ssmstore lookup`_ - `xref lookup`_ .. _`output lookup`: Output Lookup ------------- The ``output`` lookup takes a value of the format: ``<stack name>::<output name>`` and retrieves the output from the given stack name within the current namespace. stacker treats output lookups differently than other lookups by automatically adding the referenced stack in the lookup as a requirement to the stack whose variable the output value is being passed to. You can specify an output lookup with the following syntax:: ConfVariable: ${output someStack::SomeOutput} .. _`default lookup`: default Lookup -------------- The ``default`` lookup type will check if a value exists for the variable in the environment file, then fall back to a default defined in the stacker config if the environment file doesn't contain the variable.
This allows defaults to be set at the config file level, while granting the user the ability to override that value per environment. Format of value:: <env_var>::<default value> For example:: Groups: ${default app_security_groups::sg-12345,sg-67890} If `app_security_groups` is defined in the environment file, its defined value will be returned. Otherwise, `sg-12345,sg-67890` will be the returned value. .. note:: The ``default`` lookup only supports checking if a variable is defined in an environment file. It does not support other embedded lookups to see if they exist. Only checking variables in the environment file is supported. If you attempt to have the default lookup perform any other lookup that fails, stacker will throw an exception for that lookup and will stop your build before it gets a chance to fall back to the default in your config. .. _`kms lookup`: KMS Lookup ---------- The ``kms`` lookup type decrypts its input value. As an example, if you have a database and it has a parameter called ``DBPassword`` that you don't want to store in clear text in your config (maybe because you want to check it into your version control system to share with the team), you could instead encrypt the value using ``kms``. For example:: # We use the aws cli to get the encrypted value for the string # "PASSWORD" using the master key called 'myStackerKey' in us-east-1 $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \ --plaintext "PASSWORD" --output text --query CiphertextBlob CiD6bC8t2Y<...encrypted blob...> # In stacker we would reference the encrypted value like: DBPassword: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>} # The above would resolve to DBPassword: PASSWORD This requires that the person using stacker has access to the master key used to encrypt the value. It is also possible to store the encrypted blob in a file (useful if the value is large) using the ``file://`` prefix, ie:: DockerConfig: ${kms file://dockercfg} ..
note:: Lookups resolve the path specified with `file://` relative to the location of the config file, not where the stacker command is run. .. _`xref lookup`: XRef Lookup ----------- The ``xref`` lookup type is very similar to the ``output`` lookup type, the difference being that ``xref`` resolves output values from stacks that aren't contained within the current stacker namespace, but are existing stacks containing outputs within the same region on the AWS account you are deploying into. ``xref`` allows you to lookup these outputs from the stacks already on your account by specifying the stacks fully qualified name in the CloudFormation console. Where the ``output`` type will take a stack name and use the current context to expand the fully qualified stack name based on the namespace, ``xref`` skips this expansion because it assumes you've provided it with the fully qualified stack name already. This allows you to reference output values from any CloudFormation stack in the same region. Also, unlike the ``output`` lookup type, ``xref`` doesn't impact stack requirements. For example:: ConfVariable: ${xref fully-qualified-stack::SomeOutput} .. _`rxref lookup`: RXRef Lookup ------------ The ``rxref`` lookup type is very similar to the ``xref`` lookup type, the difference being that ``rxref`` will lookup output values from stacks that are relative to the current namespace but external to the stack, but will not resolve them. ``rxref`` assumes the stack containing the output already exists. Where the ``xref`` type assumes you provided a fully qualified stack name, ``rxref``, like ``output`` expands and retrieves the output from the given stack name within the current namespace, even if not defined in the stacker config you provided it. Because there is no requirement to keep all stacks defined within the same stacker YAML config, you might need the ability to read outputs from other stacks deployed by stacker into your same account under the same namespace. 
``rxref`` gives you that ability. This is useful if you want to break up very large configs into smaller groupings. Also, unlike the ``output`` lookup type, ``rxref`` doesn't impact stack requirements. For example:: # in stacker.env namespace: MyNamespace # in stacker.yml ConfVariable: ${rxref my-stack::SomeOutput} # the above would effectively resolve to ConfVariable: ${xref MyNamespace-my-stack::SomeOutput} Although possible, it is not recommended to use ``rxref`` for stacks defined within the same stacker YAML config. .. _`file lookup`: File Lookup ----------- The ``file`` lookup type allows the loading of arbitrary data from files on disk. The lookup additionally supports using a ``codec`` to manipulate or wrap the file contents prior to injecting it. The parameterized-b64 ``codec`` is particularly useful to allow the interpolation of CloudFormation parameters in a UserData attribute of an instance or launch configuration. Basic examples:: # We've written a file to /some/path: $ echo "hello there" > /some/path # In stacker we would reference the contents of this file with the following conf_key: ${file plain:file://some/path} # The above would resolve to conf_key: hello there # Or, if we wanted a base64 encoded copy of the file data conf_key: ${file base64:file://some/path} # The above would resolve to conf_key: aGVsbG8gdGhlcmUK Supported codecs: - plain - load the contents of the file untouched. This is the only codec that should be used with raw Cloudformation templates (the other codecs are intended for blueprints). - base64 - encode the plain text file at the given path with base64 prior to returning it - parameterized - the same as plain, but additionally supports referencing CloudFormation parameters to create userdata that's supplemented with information from the template, as is commonly needed in EC2 UserData.
For example, given a template parameter of BucketName, the file could contain the following text:: #!/bin/sh aws s3 sync s3://{{BucketName}}/somepath /somepath and then you could use something like this in the YAML config file:: UserData: ${file parameterized:/path/to/file} resulting in the UserData parameter being defined as:: { "Fn::Join" : ["", [ "#!/bin/sh\naws s3 sync s3://", {"Ref" : "BucketName"}, "/somepath /somepath" ]] } - parameterized-b64 - the same as parameterized, with the results additionally wrapped in { "Fn::Base64": ... } , which is what you actually need for EC2 UserData - json - decode the file as JSON and return the resulting object - json-parameterized - Same as ``json``, but applying templating rules from ``parameterized`` to every object *value*. Note that object *keys* are not modified. Example (an external PolicyDocument):: { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "some:Action" ], "Resource": "{{MyResource}}" } ] } - yaml - decode the file as YAML and return the resulting object. All strings are returned as ``unicode`` even in Python 2. - yaml-parameterized - Same as ``json-parameterized``, but using YAML. Example:: Version: 2012-10-17 Statement - Effect: Allow Action: - "some:Action" Resource: "{{MyResource}}" When using parameterized-b64 for UserData, you should use a local_parameter defined as such:: from troposphere import AWSHelperFn "UserData": { "type": AWSHelperFn, "description": "Instance user data", "default": Ref("AWS::NoValue") } and then assign UserData in a LaunchConfiguration or Instance to self.get_variables()["UserData"]. Note that we use AWSHelperFn as the type because the parameterized-b64 codec returns either a Base64 or a GenericHelperFn troposphere object. .. _`ssmstore lookup`: SSM Parameter Store Lookup -------------------------- The ``ssmstore`` lookup type retrieves a value from the Simple Systems Manager Parameter Store. 
As an example, if you have a database and it has a parameter called ``DBUser`` that you don't want to store in clear text in your config, you could instead store it as a SSM parameter named ``MyDBUser``. For example:: # We use the aws cli to store the database username $ aws ssm put-parameter --name "MyDBUser" --type "String" \ --value "root" # In stacker we would reference the value like: DBUser: ${ssmstore us-east-1@MyDBUser} # Which would resolve to: DBUser: root Encrypted values ("SecureStrings") can also be used, which will be automatically decrypted (assuming the Stacker user has access to the associated KMS key). Care should be taken when using this with encrypted values (i.e. a safe policy is to only use it with ``no_echo`` CFNString values) The region can be omitted (e.g. ``DBUser: ${ssmstore MyDBUser}``), in which case ``us-east-1`` will be assumed. .. _`dynamodb lookup`: DynamoDb Lookup -------------------------- The ``dynamodb`` lookup type retrieves a value from a DynamoDb table. As an example, if you have a Dynamo Table named ``TestTable`` and it has an Item with a Primary Partition key called ``TestKey`` and a value named ``BucketName`` , you can look it up by using Stacker. The lookup key in this case is TestVal For example:: # We can reference that dynamo value BucketName: ${dynamodb us-east-1:TestTable@TestKey:TestVal.BucketName} # Which would resolve to: BucketName: stacker-test-bucket You can lookup other data types by putting the data type in the lookup. Valid values are "S"(String), "N"(Number), "M"(Map), "L"(List). For example:: ServerCount: ${dynamodb us-east-1:TestTable@TestKey:TestVal.ServerCount[N]} This would return an int value, rather than a string You can lookup values inside of a map: For example:: ServerCount: ${dynamodb us-east-1:TestTable@TestKey:TestVal.ServerInfo[M]. ServerCount[N]} .. 
_`envvar lookup`: Shell Environment Lookup ------------------------ The ``envvar`` lookup type retrieves a value from a variable in the shell's environment. Example:: # Set an environment variable in the current shell. $ export DATABASE_USER=root # In the stacker config we could reference the value: DBUser: ${envvar DATABASE_USER} # Which would resolve to: DBUser: root You can also get the variable name from a file, by using the ``file://`` prefix in the lookup, like so:: DBUser: ${envvar file://dbuser_file.txt} .. _`ami lookup`: EC2 AMI Lookup -------------- The ``ami`` lookup is meant to search for the most recent AMI created that matches the given filters. Valid arguments:: region OPTIONAL ONCE: e.g. us-east-1@ owners (comma delimited) REQUIRED ONCE: aws_account_id | amazon | self name_regex (a regex) REQUIRED ONCE: e.g. my-ubuntu-server-[0-9]+ executable_users (comma delimited) OPTIONAL ONCE: aws_account_id | amazon | self Any other arguments specified are sent as filters to the aws api For example, "architecture:x86_64" will add a filter. Example:: # Grabs the most recently created AMI that is owned by either this account, # amazon, or the account id 888888888888 that has a name that matches # the regex "server[0-9]+" and has "i386" as its architecture. # Note: The region is optional, and defaults to the current stacker region ImageId: ${ami [@]owners:self,888888888888,amazon name_regex:server[0-9]+ architecture:i386} .. _`hook_data lookup`: Hook Data Lookup ---------------- When using hooks, you can have the hook store results in the `hook_data`_ dictionary on the context by setting *data_key* in the hook config. This lookup lets you look up values in that dictionary. A good example of this is when you use the `aws_lambda hook`_ to upload AWS Lambda code, then need to pass that code object as the *Code* variable in the `aws_lambda blueprint`_ dictionary. 
Example:: # If you set the "data_key" config on the aws_lambda hook to be "myfunction" # and you name the function package "TheCode" you can get the troposphere # awslambda.Code object with: Code: ${hook_data myfunction::TheCode} .. _`custom lookup`: Custom Lookup -------------- A custom lookup may be registered within the config. For more information see `Configuring Lookups `_. .. _`hook_data`: http://stacker.readthedocs.io/en/latest/config.html#pre-post-hooks .. _`aws_lambda hook`: http://stacker.readthedocs.io/en/latest/api/stacker.hooks.html#stacker.hooks.aws_lambda.upload_lambda_functions .. _`aws_lambda blueprint`: https://github.com/cloudtools/stacker_blueprints/blob/master/stacker_blueprints/aws_lambda.py ================================================ FILE: docs/organizations_using_stacker.rst ================================================ =========================== Organizations using stacker =========================== Below is a list of organizations that currently use stacker in some sense. If you are using stacker, please submit a PR and add your company below! Remind_ Remind helps educators send quick, simple messages to students and parents on any device. We believe that when communication improves, relationships get stronger. Education gets better. Remind is the original author of stacker, and has been using it to manage the infrastructure in multiple environments (including production) since early 2015. .. _Remind: https://www.remind.com/ `Onica`_ Onica is a global technology consulting company at the forefront of cloud computing. Through collaboration with Amazon Web Services, we help customers embrace a broad spectrum of innovative solutions. From migration strategy to operational excellence, cloud native development, and immersive transformation. Onica is a full spectrum AWS integrator. .. 
_`Onica`: https://www.onica.com AltoStack_ AltoStack is a technology and services consultancy specialising in Cloud Consultancy, DevOps, Continuous Delivery and Configuration Management. From strategy and operations to culture and technology, AltoStack helps businesses identify and address opportunities for growth and profitability. We are an Amazon Web Services - (AWS) APN Consulting Partner. .. _AltoStack: https://altostack.io/ Cobli_ Cobli develops cutting-edge solutions for fleet management efficiency and intelligence in South America. We bring advanced tracking, analysis and predictions to fleets of any size by connecting vehicles to an easy to use platform through smart devices. Cobli manages most of its AWS infrastructure using stacker, and we encourage our developers to contribute to free-software whenever possible. .. _Cobli: https://cobli.co/ ================================================ FILE: docs/templates.rst ================================================ ========== Templates ========== CloudFormation templates can be provided via python Blueprints_ or JSON/YAML. JSON/YAML templates are specified for stacks via the ``template_path`` config option (see `Stacks `_). Jinja2 Templating ================= Templates with a ``.j2`` extension will be parsed using `Jinja2 `_. The stacker ``context`` and ``mappings`` objects and stack ``variables`` objects are available for use in the template: .. code-block:: yaml Description: TestTemplate Resources: Bucket: Type: AWS::S3::Bucket Properties: BucketName: {{ context.environment.foo }}-{{ variables.myparamname }} ================================================ FILE: docs/terminology.rst ================================================ =========== Terminology =========== blueprint ========= .. _blueprints: A python class that is responsible for creating a CloudFormation template. Usually this is built using troposphere_. 
config ====== A YAML config file that defines the `stack definitions`_ for all of the stacks you want stacker to manage. environment =========== A set of variables that can be used inside the config, allowing you to slightly adjust configs based on which environment you are launching. namespace ========= A way to uniquely identify a stack. Used to determine the naming of many things, such as the S3 bucket where compiled templates are stored, as well as the prefix for stack names. stack definition ================ .. _stack definitions: Defines the stack_ you want to build, usually there are multiple of these in the config_. It also defines the variables_ to be used when building the stack_. stack ===== .. _stacks: The resulting stack of resources that is created by CloudFormation when it executes a template. Each stack managed by stacker is defined by a `stack definition`_ in the config_. output ====== A CloudFormation Template concept. Stacks can output values, allowing easy access to those values. Often used to export the unique ID's of resources that templates create. Stacker makes it simple to pull outputs from one stack and then use them as a variable_ in another stack. variable ======== .. _variables: Dynamic variables that are passed into stacks when they are being built. Variables are defined within the config_. lookup ====== A method for expanding values in the config_ at build time. By default lookups are used to reference Output values from other stacks_ within the same namespace_. provider ======== Provider that supports provisioning rendered blueprints_. By default, an AWS provider is used. context ======= Context is responsible for translating the values passed in via the command line and specified in the config_ to stacks_. .. _troposphere: https://github.com/cloudtools/troposphere .. 
_CloudFormation Parameters: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html ================================================ FILE: docs/translators.rst ================================================ =========== Translators =========== .. note:: Translators have been deprecated in favor of `Lookups `_ and will be removed in a future release. Stacker provides the ability to dynamically replace values in the config via a concept called translators. A translator is meant to take a value and convert it by calling out to another service or system. This is initially meant to deal with encrypting fields in your config. Translators are custom YAML constructors. As an example, if you have a database and it has a parameter called ``DBPassword`` that you don't want to store in clear text in your config (maybe because you want to check it into your version control system to share with the team), you could instead encrypt the value using ``kms``. For example:: # We use the aws cli to get the encrypted value for the string # "PASSWORD" using the master key called 'myStackerKey' in us-east-1 $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \ --plaintext "PASSWORD" --output text --query CiphertextBlob CiD6bC8t2Y<...encrypted blob...> # In stacker we would reference the encrypted value like: DBPassword: !kms us-east-1@CiD6bC8t2Y<...encrypted blob...> # The above would resolve to DBPassword: PASSWORD This requires that the person using stacker has access to the master key used to encrypt the value. It is also possible to store the encrypted blob in a file (useful if the value is large) using the `file://` prefix, ie:: DockerConfig: !kms file://dockercfg .. note:: Translators resolve the path specified with `file://` relative to the location of the config file, not where the stacker command is run. 
================================================ FILE: examples/cross-account/.aws/config ================================================ # The master account is like the root of our AWS account tree. It's the # entrypoint for all other profiles to sts.AssumeRole from. [profile master] region = us-east-1 role_arn = arn:aws:iam:::role/Stacker role_session_name = stacker credential_source = Environment [profile prod] region = us-east-1 role_arn = arn:aws:iam:::role/Stacker role_session_name = stacker source_profile = master [profile stage] region = us-east-1 role_arn = arn:aws:iam:::role/Stacker role_session_name = stacker source_profile = master ================================================ FILE: examples/cross-account/README.md ================================================ This is a secure example setup to support cross-account provisioning of stacks with stacker. It: 1. Sets up an appropriate [AWS Config File](https://docs.aws.amazon.com/cli/latest/topic/config-vars.html) in [.aws/config] for stacker to use, with profiles for a "master", "prod" and "stage" AWS account. 2. Configures a stacker bucket in the "master" account, with permissions that allows CloudFormation in "sub" accounts to fetch templates. ## Setup ### Create IAM roles First things first, we need to create some IAM roles that stacker can assume to make changes in each AWS account. This is generally a manual step after you've created a new AWS account. In each account, create a new stack using the [stacker-role.yaml](./templates/stacker-role.yaml) CloudFormation template. This will create an IAM role called `Stacker` in the target account, with a trust policy that will allow the `Stacker` role in the master account to `sts:AssumeRole` it. Once the roles have been created, update the `role_arn`'s in [.aws/config] to match the ones that were just created. 
```console $ aws cloudformation describe-stacks \ --profile \ --stack-name \ --query 'Stacks[0].Outputs' --output text StackerRole arn:aws:iam:::role/Stacker ``` ### GetSessionToken In order for stacker to be able to call `sts:AssumeRole` with the roles we've specified in [.aws/config], we'll need to pass it credentials via environment variables (see [`credential_source = Environment`](./.aws/config)) with appropriate permissions. Generally, the best way to do this is to obtain temporary credentials via the `sts:GetSessionToken` API, while passing an MFA OTP. Assuming you have an IAM user in your master account, you can get temporary credentials using the AWS CLI: ```console $ aws sts get-session-token \ --serial-number arn:aws:iam:::mfa/ \ --token-code ``` At Remind, we like to use [aws-vault], which allows us to simplify this to: ```console $ aws-vault exec default -- env AWS_VAULT=default AWS_DEFAULT_REGION=us-east-1 AWS_REGION=us-east-1 AWS_ACCESS_KEY_ID=ASIAJ...ICSXSQ AWS_SECRET_ACCESS_KEY=4oFx...LSNjpFq AWS_SESSION_TOKEN=FQoDYXdzED...V6Wrdko2KjW1QU= AWS_SECURITY_TOKEN=FQoDYXdzED...V6Wrdko2KjW1QU= ``` For the rest of this guide, I'll use `aws-vault` for simplicity. **NOTE**: You'll need to ensure that this IAM user has access to call `sts:AssumeRole` on the `Stacker` IAM role in the "master" account. ### Bootstrap Stacker Bucket After we have some IAM roles that stacker can assume, and some temporary credentials, we'll want to create a stacker bucket in the master account, and allow the Stacker roles in sub-accounts access to fetch templates from it. To do that, first, change the "Roles" variable in [stacker.yaml], then: ```console $ aws-vault exec default # GetSessionToken + MFA $ AWS_CONFIG_FILE=.aws/config stacker build --profile master --stacks stacker-bucket stacker.yaml ``` Once the bucket has been created, replace `stacker_bucket` with the name of the bucket in [stacker.yaml]. 
```console $ aws cloudformation describe-stacks \ --profile master \ --stack-name stacker-bucket \ --query 'Stacks[0].Outputs' --output text BucketId stacker-bucket-1234 ``` ### Provision stacks Now that everything is setup, you can add new stacks to your config file, and target them to a specific AWS account using the `profile` option. For example, if I wanted to create a new VPC in both the "production" and "staging" accounts: ```yaml stacks: - name: prod/vpc stack_name: vpc class_path: stacker_blueprints.vpc.VPC profile: prod # target this to the production account - name: stage/vpc stack_name: vpc class_path: stacker_blueprints.vpc.VPC profile: stage # target this to the staging account ``` ```console $ AWS_CONFIG_FILE=.aws/config stacker build --profile master stacker.yaml ``` [.aws/config]: ./.aws/config [stacker.yaml]: ./stacker.yaml [aws-vault]: https://github.com/99designs/aws-vault ================================================ FILE: examples/cross-account/stacker.yaml ================================================ --- namespace: '' # We'll set this to an empty string until we've provisioned the # "stacker-bucket" stack below. stacker_bucket: '' stacks: # This stack will provision an S3 bucket for stacker to use to upload # templates. This will also configure the bucket with a bucket policy # allowing CloudFormation in other accounts to fetch templates from it. - name: stacker-bucket # We're going to "target" this stack in our "master" account. 
profile: master template_path: templates/stacker-bucket.yaml variables: # Change these to the correct AWS account IDs, must be comma seperated list Roles: arn:aws:iam:::role/Stacker, arn:aws:iam:::role/Stacker ================================================ FILE: examples/cross-account/templates/stacker-bucket.yaml ================================================ --- AWSTemplateFormatVersion: "2010-09-09" Description: A bucket for stacker to store CloudFormation templates Parameters: Roles: Type: CommaDelimitedList Description: A list of IAM roles that will be given read access on the bucket. Resources: StackerBucket: Type: AWS::S3::Bucket Properties: BucketEncryption: ServerSideEncryptionConfiguration: - ServerSideEncryptionByDefault: SSEAlgorithm: AES256 BucketPolicy: Type: AWS::S3::BucketPolicy Properties: Bucket: Ref: StackerBucket PolicyDocument: Statement: - Action: - s3:GetObject Effect: Allow Principal: AWS: Ref: Roles Resource: - Fn::Sub: arn:aws:s3:::${StackerBucket}/* Outputs: BucketId: Value: Ref: StackerBucket ================================================ FILE: examples/cross-account/templates/stacker-role.yaml ================================================ --- AWSTemplateFormatVersion: "2010-09-09" Description: A role that stacker can assume Parameters: MasterAccountId: Type: String Description: The 12-digit ID for the master account MinLength: 12 MaxLength: 12 AllowedPattern: "[0-9]+" ConstraintDescription: Must contain a 12 digit account ID RoleName: Type: String Description: The name of the stacker role. Default: Stacker Conditions: # Check if we're creating this role in the master account. 
InMasterAccount: Fn::Equals: - { Ref: "AWS::AccountId" } - { Ref: "MasterAccountId" } Resources: StackerRole: Type: AWS::IAM::Role Properties: RoleName: Ref: RoleName AssumeRolePolicyDocument: Version: "2012-10-17" Statement: Fn::If: - InMasterAccount - Effect: Allow Principal: AWS: Fn::Sub: "arn:aws:iam::${MasterAccountId}:root" Action: sts:AssumeRole Condition: 'Null': aws:MultiFactorAuthAge: false - Effect: Allow Principal: AWS: Fn::Sub: "arn:aws:iam::${MasterAccountId}:role/${RoleName}" Action: sts:AssumeRole Condition: 'Null': aws:MultiFactorAuthAge: false # Generally, Stacker will need fairly wide open permissions, since it will be # managing all resources in an account. StackerPolicies: Type: AWS::IAM::Policy Properties: PolicyName: Stacker PolicyDocument: Version: "2012-10-17" Statement: - Effect: Allow Action: ["*"] Resource: "*" Roles: - Ref: StackerRole Outputs: StackerRole: Value: Fn::GetAtt: - StackerRole - Arn ================================================ FILE: requirements.in ================================================ troposphere>=3.0.0 botocore>=1.12.111 boto3>=1.9.111,<2.0 PyYAML>=3.13b1 awacs>=0.6.0 gitpython>=3.0 jinja2>=2.7 schematics>=2.1.0 formic2 python-dateutil>=2.0,<3.0 MarkupSafe>=2 more-itertools rsa>=4.7 python-jose future ================================================ FILE: scripts/compare_env ================================================ #!/usr/bin/env python """ A script to compare environment files. 
""" import argparse import os.path from stacker.environment import parse_environment def parse_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "-i", "--ignore-changed", action="store_true", help="Only print added & deleted keys, not changed keys.") parser.add_argument( "-s", "--show-changes", action="store_true", help="Print content changes.") parser.add_argument( "first_env", type=str, help="The first environment file to compare.") parser.add_argument( "second_env", type=str, help="The second environment file to compare.") return parser.parse_args() def parse_env_file(path): expanded_path = os.path.expanduser(path) with open(expanded_path) as fd: return parse_environment(fd.read()) def main(): args = parse_args() first_env = parse_env_file(args.first_env) second_env = parse_env_file(args.second_env) first_env_keys = set(first_env.keys()) second_env_keys = set(second_env.keys()) common_keys = first_env_keys & second_env_keys removed_keys = first_env_keys - second_env_keys added_keys = second_env_keys - first_env_keys changed_keys = set() for k in common_keys: if first_env[k] != second_env[k]: changed_keys.add(k) print "-- Added keys:" print " %s" % ", ".join(added_keys) print print "-- Removed keys:" print " %s" % ", ".join(removed_keys) print print "-- Changed keys:" if not args.show_changes: print " %s" % ", ".join(changed_keys) if args.show_changes: for k in changed_keys: print " %s:" % (k) print " < %s" % (first_env[k]) print " > %s" % (second_env[k]) if __name__ == "__main__": main() ================================================ FILE: scripts/docker-stacker ================================================ #!/bin/bash # This script is meant to be used from within the Docker image for stacker. It # simply installs the stacks at /stacks and then runs stacker. 
set -e cd /stacks python setup.py install exec stacker $@ ================================================ FILE: scripts/stacker ================================================ #!/usr/bin/env python from stacker.logger import setup_logging from stacker.commands import Stacker if __name__ == "__main__": stacker = Stacker(setup_logging=setup_logging) args = stacker.parse_args() stacker.configure(args) args.run(args) ================================================ FILE: scripts/stacker.cmd ================================================ @echo OFF REM=""" setlocal set PythonExe="" set PythonExeFlags= for %%i in (cmd bat exe) do ( for %%j in (python.%%i) do ( call :SetPythonExe "%%~$PATH:j" ) ) for /f "tokens=2 delims==" %%i in ('assoc .py') do ( for /f "tokens=2 delims==" %%j in ('ftype %%i') do ( for /f "tokens=1" %%k in ("%%j") do ( call :SetPythonExe %%k ) ) ) %PythonExe% -x %PythonExeFlags% "%~f0" %* exit /B %ERRORLEVEL% goto :EOF :SetPythonExe if not ["%~1"]==[""] ( if [%PythonExe%]==[""] ( set PythonExe="%~1" ) ) goto :EOF """ # =================================================== # Python script starts here # Above helper adapted from https://github.com/aws/aws-cli/blob/1.11.121/bin/aws.cmd # =================================================== #!/usr/bin/env python from stacker.logger import setup_logging from stacker.commands import Stacker if __name__ == "__main__": stacker = Stacker(setup_logging=setup_logging) args = stacker.parse_args() stacker.configure(args) args.run(args) ================================================ FILE: setup.cfg ================================================ [metadata] description-file = README.rst [aliases] test = pytest [tool:pytest] testpaths = stacker/tests cov = stacker filterwarnings = ignore::DeprecationWarning ================================================ FILE: setup.py ================================================ import os from setuptools import setup, find_packages VERSION = "1.7.2" src_dir = 
os.path.dirname(__file__) def get_install_requirements(path): content = open(os.path.join(os.path.dirname(__file__), path)).read() return [req for req in content.split("\n") if req != "" and not req.startswith("#")] install_requires = get_install_requirements("requirements.in") setup_requires = ['pytest-runner'] tests_require = get_install_requirements("test-requirements.in") scripts = [ "scripts/compare_env", "scripts/docker-stacker", "scripts/stacker.cmd", "scripts/stacker", ] def read(filename): full_path = os.path.join(src_dir, filename) with open(full_path) as fd: return fd.read() if __name__ == "__main__": setup( name="stacker", version=VERSION, author="Michael Barrett", author_email="loki77@gmail.com", license="New BSD license", url="https://github.com/cloudtools/stacker", description="AWS CloudFormation Stack manager", long_description=read("README.rst"), packages=find_packages(), scripts=scripts, install_requires=install_requires, tests_require=tests_require, setup_requires=setup_requires, extras_require=dict(testing=tests_require), classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], ) ================================================ FILE: stacker/__init__.py ================================================ __version__ = "1.7.2" ================================================ FILE: stacker/actions/__init__.py ================================================ ================================================ FILE: stacker/actions/base.py ================================================ import os import sys import logging import threading from ..dag import walk, ThreadedWalker, UnlimitedSemaphore from ..plan import Step, build_plan, build_graph import botocore.exceptions from stacker.session_cache import get_session 
# NOTE(review): this region of the original file also imports project-level
# names (stacker.exceptions.PlanFailed, ..status.COMPLETE,
# stacker.util.ensure_s3_bucket / get_s3_endpoint); those names are referenced
# below and must remain importable at module top.
logger = logging.getLogger(__name__)

# After submitting a stack update/create, this controls how long we'll wait
# between calls to DescribeStacks to check on its status. Most stack updates
# take at least a couple minutes, so 30 seconds is pretty reasonable and
# inline with the suggested value in
# https://github.com/boto/botocore/blob/1.6.1/botocore/data/cloudformation/2010-05-15/waiters-2.json#L22
#
# This can be controlled via an environment variable, mostly for testing.
STACK_POLL_TIME = int(os.environ.get("STACKER_STACK_POLL_TIME", 30))


def build_walker(concurrency):
    """Return a function suitable for passing to
    :class:`stacker.plan.Plan` for walking the graph.

    Args:
        concurrency (int): If 1 (no parallelism), returns a simple
            topological walker that doesn't use any multithreading. If 0,
            returns a walker that will walk the graph as fast as the graph
            topology allows. If greater than 1, returns a walker that will
            only execute a maximum of ``concurrency`` steps at any given
            time.

    Returns:
        func: returns a function to walk a :class:`stacker.dag.DAG`.
    """
    if concurrency == 1:
        return walk

    semaphore = UnlimitedSemaphore()
    if concurrency > 1:
        semaphore = threading.Semaphore(concurrency)

    return ThreadedWalker(semaphore).walk


def plan(description, stack_action, context, tail=None, reverse=False):
    """A simple helper that builds a graph based plan from a set of stacks.

    Args:
        description (str): a description of the plan.
        stack_action (func): a function to call for each stack.
        context (:class:`stacker.context.Context`): a
            :class:`stacker.context.Context` to build the plan from.
        tail (func): an optional function to call to tail the stack
            progress.
        reverse (bool): if True, execute the graph in reverse (useful for
            destroy actions).

    Returns:
        :class:`plan.Plan`: The resulting plan object
    """
    def target_fn(*args, **kwargs):
        # Targets perform no work themselves; they exist purely to group
        # dependencies, so they complete immediately.
        return COMPLETE

    steps = [
        Step(stack, fn=stack_action, watch_func=tail)
        for stack in context.get_stacks()]

    steps += [
        Step(target, fn=target_fn)
        for target in context.get_targets()]

    graph = build_graph(steps)

    return build_plan(
        description=description,
        graph=graph,
        targets=context.stack_names,
        reverse=reverse)


def stack_template_key_name(blueprint):
    """Given a blueprint, produce an appropriate key name.

    Args:
        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
            object to create the key from.

    Returns:
        string: Key name resulting from blueprint.
    """
    name = blueprint.name
    return "stack_templates/%s/%s-%s.json" % (blueprint.context.get_fqn(name),
                                              name,
                                              blueprint.version)


def stack_template_url(bucket_name, blueprint, endpoint):
    """Produces an s3 url for a given blueprint.

    Args:
        bucket_name (string): The name of the S3 bucket where the resulting
            templates are stored.
        blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint
            object to create the URL to.
        endpoint (string): The s3 endpoint used for the bucket.

    Returns:
        string: S3 URL.
    """
    key_name = stack_template_key_name(blueprint)
    return "%s/%s/%s" % (endpoint, bucket_name, key_name)


class BaseAction(object):
    """Actions perform the actual work of each Command.

    Each action is tied to a :class:`stacker.commands.base.BaseCommand`, and
    is responsible for building the :class:`stacker.plan.Plan` that will be
    executed to perform that command.

    Args:
        context (:class:`stacker.context.Context`): The stacker context for
            the current run.
        provider_builder
            (:class:`stacker.providers.base.BaseProviderBuilder`, optional):
            An object that will build a provider that will be interacted
            with in order to perform the necessary actions.
    """
""" def __init__(self, context, provider_builder=None, cancel=None): self.context = context self.provider_builder = provider_builder self.bucket_name = context.bucket_name self.cancel = cancel or threading.Event() self.bucket_region = context.config.stacker_bucket_region if not self.bucket_region and provider_builder: self.bucket_region = provider_builder.region self.s3_conn = get_session(self.bucket_region).client('s3') def ensure_cfn_bucket(self): """The CloudFormation bucket where templates will be stored.""" if self.bucket_name: ensure_s3_bucket(self.s3_conn, self.bucket_name, self.bucket_region) def stack_template_url(self, blueprint): return stack_template_url( self.bucket_name, blueprint, get_s3_endpoint(self.s3_conn) ) def s3_stack_push(self, blueprint, force=False): """Pushes the rendered blueprint's template to S3. Verifies that the template doesn't already exist in S3 before pushing. Returns the URL to the template in S3. """ key_name = stack_template_key_name(blueprint) template_url = self.stack_template_url(blueprint) try: template_exists = self.s3_conn.head_object( Bucket=self.bucket_name, Key=key_name) is not None except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == '404': template_exists = False else: raise if template_exists and not force: logger.debug("Cloudformation template %s already exists.", template_url) return template_url self.s3_conn.put_object(Bucket=self.bucket_name, Key=key_name, Body=blueprint.rendered, ServerSideEncryption='AES256', ACL='bucket-owner-full-control') logger.debug("Blueprint %s pushed to %s.", blueprint.name, template_url) return template_url def execute(self, *args, **kwargs): try: self.pre_run(*args, **kwargs) self.run(*args, **kwargs) self.post_run(*args, **kwargs) except PlanFailed as e: logger.error(str(e)) sys.exit(1) def pre_run(self, *args, **kwargs): pass def run(self, *args, **kwargs): raise NotImplementedError("Subclass must implement \"run\" method") def post_run(self, *args, 
**kwargs): pass def build_provider(self, stack): """Builds a :class:`stacker.providers.base.Provider` suitable for operating on the given :class:`stacker.Stack`.""" return self.provider_builder.build(region=stack.region, profile=stack.profile) @property def provider(self): """Some actions need a generic provider using the default region (e.g. hooks).""" return self.provider_builder.build() def _tail_stack(self, stack, cancel, retries=0, **kwargs): provider = self.build_provider(stack) return provider.tail_stack(stack, cancel, retries, **kwargs) ================================================ FILE: stacker/actions/build.py ================================================ import logging from .base import BaseAction, plan, build_walker from .base import STACK_POLL_TIME from ..providers.base import Template from stacker.hooks import utils from ..exceptions import ( MissingParameterException, StackDidNotChange, StackDoesNotExist, CancelExecution, ) from ..status import ( NotSubmittedStatus, NotUpdatedStatus, DidNotChangeStatus, SubmittedStatus, CompleteStatus, FailedStatus, SkippedStatus, PENDING, WAITING, SUBMITTED, INTERRUPTED ) logger = logging.getLogger(__name__) def build_stack_tags(stack): """Builds a common set of tags to attach to a stack""" return [{'Key': t[0], 'Value': t[1]} for t in stack.tags.items()] def should_update(stack): """Tests whether a stack should be submitted for updates to CF. Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be updated, return True. """ if stack.locked: if not stack.force: logger.debug("Stack %s locked and not in --force list. " "Refusing to update.", stack.name) return False else: logger.debug("Stack %s locked, but is in --force " "list.", stack.name) return True def should_submit(stack): """Tests whether a stack should be submitted to CF for update/create Args: stack (:class:`stacker.stack.Stack`): The stack object to check. 
Returns: bool: If the stack should be submitted, return True. """ if stack.enabled: return True logger.debug("Stack %s is not enabled. Skipping.", stack.name) return False def should_ensure_cfn_bucket(outline, dump): """Test whether access to the cloudformation template bucket is required Args: outline (bool): The outline action. dump (bool): The dump action. Returns: bool: If access to CF bucket is needed, return True. """ return not outline and not dump def _resolve_parameters(parameters, blueprint): """Resolves CloudFormation Parameters for a given blueprint. Given a list of parameters, handles: - discard any parameters that the blueprint does not use - discard any empty values - convert booleans to strings suitable for CloudFormation Args: parameters (dict): A dictionary of parameters provided by the stack definition blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint object that is having the parameters applied to it. Returns: dict: The resolved parameters. """ params = {} param_defs = blueprint.get_parameter_definitions() for key, value in parameters.items(): if key not in param_defs: logger.debug("Blueprint %s does not use parameter %s.", blueprint.name, key) continue if value is None: logger.debug("Got None value for parameter %s, not submitting it " "to cloudformation, default value should be used.", key) continue if isinstance(value, bool): logger.debug("Converting parameter %s boolean \"%s\" to string.", key, value) value = str(value).lower() params[key] = value return params class UsePreviousParameterValue(object): """ A simple class used to indicate a Parameter should use it's existng value. """ pass def _handle_missing_parameters(parameter_values, all_params, required_params, existing_stack=None): """Handles any missing parameters. If an existing_stack is provided, look up missing parameters there. 
Args: parameter_values (dict): key/value dictionary of stack definition parameters all_params (list): A list of all the parameters used by the template/blueprint. required_params (list): A list of all the parameters required by the template/blueprint. existing_stack (dict): A dict representation of the stack. If provided, will be searched for any missing parameters. Returns: list of tuples: The final list of key/value pairs returned as a list of tuples. Raises: MissingParameterException: Raised if a required parameter is still missing. """ missing_params = list(set(all_params) - set(parameter_values.keys())) if existing_stack and 'Parameters' in existing_stack: stack_parameters = [ p["ParameterKey"] for p in existing_stack["Parameters"] ] for p in missing_params: if p in stack_parameters: logger.debug( "Using previous value for parameter %s from existing " "stack", p ) parameter_values[p] = UsePreviousParameterValue final_missing = list(set(required_params) - set(parameter_values.keys())) if final_missing: raise MissingParameterException(final_missing) return list(parameter_values.items()) def handle_hooks(stage, hooks, provider, context, dump, outline): """Handle pre/post hooks. Args: stage (str): The name of the hook stage - pre_build/post_build. hooks (list): A list of dictionaries containing the hooks to execute. provider (:class:`stacker.provider.base.BaseProvider`): The provider the current stack is using. context (:class:`stacker.context.Context`): The current stacker context. dump (bool): Whether running with dump set or not. outline (bool): Whether running with outline set or not. """ if not outline and not dump and hooks: utils.handle_hooks( stage=stage, hooks=hooks, provider=provider, context=context ) class Action(BaseAction): """Responsible for building & coordinating CloudFormation stacks. Generates the build plan based on stack dependencies (these dependencies are determined automatically based on output lookups from other stacks). 
The plan can then either be printed out as an outline or executed. If executed, each stack will get launched in order which entails: - Pushing the generated CloudFormation template to S3 if it has changed - Submitting either a build or update of the given stack to the :class:`stacker.provider.base.Provider`. """ def build_parameters(self, stack, provider_stack=None): """Builds the CloudFormation Parameters for our stack. Args: stack (:class:`stacker.stack.Stack`): A stacker stack provider_stack (dict): An optional Stacker provider object Returns: dict: The parameters for the given stack """ resolved = _resolve_parameters(stack.parameter_values, stack.blueprint) required_parameters = list(stack.required_parameter_definitions) all_parameters = list(stack.all_parameter_definitions) parameters = _handle_missing_parameters(resolved, all_parameters, required_parameters, provider_stack) param_list = [] for key, value in parameters: param_dict = {"ParameterKey": key} if value is UsePreviousParameterValue: param_dict["UsePreviousValue"] = True else: param_dict["ParameterValue"] = str(value) param_list.append(param_dict) return param_list def _launch_stack(self, stack, **kwargs): """Handles the creating or updating of a stack in CloudFormation. Also makes sure that we don't try to create or update a stack while it is already updating or creating. 
""" old_status = kwargs.get("status") wait_time = 0 if old_status is PENDING else STACK_POLL_TIME if self.cancel.wait(wait_time): return INTERRUPTED if not should_submit(stack): return NotSubmittedStatus() provider = self.build_provider(stack) try: provider_stack = provider.get_stack(stack.fqn) except StackDoesNotExist: provider_stack = None if provider_stack and not should_update(stack): stack.set_outputs( self.provider.get_output_dict(provider_stack)) return NotUpdatedStatus() recreate = False if provider_stack and old_status == SUBMITTED: logger.debug( "Stack %s provider status: %s", stack.fqn, provider.get_stack_status(provider_stack), ) if provider.is_stack_rolling_back(provider_stack): if 'rolling back' in old_status.reason: return old_status logger.debug("Stack %s entered a roll back", stack.fqn) if 'updating' in old_status.reason: reason = 'rolling back update' else: reason = 'rolling back new stack' return SubmittedStatus(reason) elif provider.is_stack_in_progress(provider_stack): logger.debug("Stack %s in progress.", stack.fqn) return old_status elif provider.is_stack_destroyed(provider_stack): logger.debug("Stack %s finished deleting", stack.fqn) recreate = True # Continue with creation afterwards # Failure must be checked *before* completion, as both will be true # when completing a rollback, and we don't want to consider it as # a successful update. 
elif provider.is_stack_failed(provider_stack): reason = old_status.reason if 'rolling' in reason: reason = reason.replace('rolling', 'rolled') status_reason = provider.get_rollback_status_reason(stack.fqn) logger.info( "%s Stack Roll Back Reason: " + status_reason, stack.fqn) return FailedStatus(reason) elif provider.is_stack_completed(provider_stack): stack.set_outputs( provider.get_output_dict(provider_stack)) return CompleteStatus(old_status.reason) else: return old_status logger.debug("Resolving stack %s", stack.fqn) stack.resolve(self.context, self.provider) logger.debug("Launching stack %s now.", stack.fqn) template = self._template(stack.blueprint) stack_policy = self._stack_policy(stack) tags = build_stack_tags(stack) parameters = self.build_parameters(stack, provider_stack) force_change_set = stack.blueprint.requires_change_set if recreate: logger.debug("Re-creating stack: %s", stack.fqn) provider.create_stack(stack.fqn, template, parameters, tags, stack_policy=stack_policy) return SubmittedStatus("re-creating stack") elif not provider_stack: logger.debug("Creating new stack: %s", stack.fqn) provider.create_stack(stack.fqn, template, parameters, tags, force_change_set, stack_policy=stack_policy, notification_arns=stack.notification_arns) return SubmittedStatus("creating new stack") try: wait = stack.in_progress_behavior == "wait" if wait and provider.is_stack_in_progress(provider_stack): return WAITING if provider.prepare_stack_for_update(provider_stack, tags): existing_params = provider_stack.get('Parameters', []) provider.update_stack( stack.fqn, template, existing_params, parameters, tags, force_interactive=stack.protected, force_change_set=force_change_set, stack_policy=stack_policy, notification_arns=stack.notification_arns ) logger.debug("Updating existing stack: %s", stack.fqn) return SubmittedStatus("updating existing stack") else: return SubmittedStatus("destroying stack for re-creation") except CancelExecution: 
stack.set_outputs(provider.get_output_dict(provider_stack)) return SkippedStatus(reason="canceled execution") except StackDidNotChange: stack.set_outputs(provider.get_output_dict(provider_stack)) return DidNotChangeStatus() def _template(self, blueprint): """Generates a suitable template based on whether or not an S3 bucket is set. If an S3 bucket is set, then the template will be uploaded to S3 first, and CreateStack/UpdateStack operations will use the uploaded template. If not bucket is set, then the template will be inlined. """ if self.bucket_name: return Template(url=self.s3_stack_push(blueprint)) else: return Template(body=blueprint.rendered) def _stack_policy(self, stack): """Returns a Template object for the stacks stack policy, or None if the stack doesn't have a stack policy.""" if stack.stack_policy: return Template(body=stack.stack_policy) def _generate_plan(self, tail=False): return plan( description="Create/Update stacks", stack_action=self._launch_stack, tail=self._tail_stack if tail else None, context=self.context) def pre_run(self, outline=False, dump=False, *args, **kwargs): """Any steps that need to be taken prior to running the action.""" if should_ensure_cfn_bucket(outline, dump): self.ensure_cfn_bucket() hooks = self.context.config.pre_build handle_hooks( "pre_build", hooks, self.provider, self.context, dump, outline ) def run(self, concurrency=0, outline=False, tail=False, dump=False, *args, **kwargs): """Kicks off the build/update of the stacks in the stack_definitions. This is the main entry point for the Builder. 
""" plan = self._generate_plan(tail=tail) if not plan.keys(): logger.warn('WARNING: No stacks detected (error in config?)') if not outline and not dump: plan.outline(logging.DEBUG) logger.debug("Launching stacks: %s", ", ".join(plan.keys())) walker = build_walker(concurrency) plan.execute(walker) else: if outline: plan.outline() if dump: plan.dump(directory=dump, context=self.context, provider=self.provider) def post_run(self, outline=False, dump=False, *args, **kwargs): """Any steps that need to be taken after running the action.""" hooks = self.context.config.post_build handle_hooks( "post_build", hooks, self.provider, self.context, dump, outline ) ================================================ FILE: stacker/actions/destroy.py ================================================ import logging from .base import BaseAction, plan, build_walker from .base import STACK_POLL_TIME from ..exceptions import StackDoesNotExist from stacker.hooks.utils import handle_hooks from ..status import ( CompleteStatus, SubmittedStatus, PENDING, SUBMITTED, INTERRUPTED ) from ..status import StackDoesNotExist as StackDoesNotExistStatus logger = logging.getLogger(__name__) DestroyedStatus = CompleteStatus("stack destroyed") DestroyingStatus = SubmittedStatus("submitted for destruction") class Action(BaseAction): """Responsible for destroying CloudFormation stacks. Generates a destruction plan based on stack dependencies. Stack dependencies are reversed from the build action. For example, if a Stack B requires Stack A during build, during destroy Stack A requires Stack B be destroyed first. The plan defaults to printing an outline of what will be destroyed. If forced to execute, each stack will get destroyed in order. 
""" def _generate_plan(self, tail=False): return plan( description="Destroy stacks", stack_action=self._destroy_stack, tail=self._tail_stack if tail else None, context=self.context, reverse=True) def _destroy_stack(self, stack, **kwargs): old_status = kwargs.get("status") wait_time = 0 if old_status is PENDING else STACK_POLL_TIME if self.cancel.wait(wait_time): return INTERRUPTED provider = self.build_provider(stack) try: provider_stack = provider.get_stack(stack.fqn) except StackDoesNotExist: logger.debug("Stack %s does not exist.", stack.fqn) # Once the stack has been destroyed, it doesn't exist. If the # status of the step was SUBMITTED, we know we just deleted it, # otherwise it should be skipped if kwargs.get("status", None) == SUBMITTED: return DestroyedStatus else: return StackDoesNotExistStatus() logger.debug( "Stack %s provider status: %s", provider.get_stack_name(provider_stack), provider.get_stack_status(provider_stack), ) if provider.is_stack_destroyed(provider_stack): return DestroyedStatus elif provider.is_stack_in_progress(provider_stack): return DestroyingStatus else: logger.debug("Destroying stack: %s", stack.fqn) provider.destroy_stack(provider_stack) return DestroyingStatus def pre_run(self, outline=False, *args, **kwargs): """Any steps that need to be taken prior to running the action.""" pre_destroy = self.context.config.pre_destroy if not outline and pre_destroy: handle_hooks( stage="pre_destroy", hooks=pre_destroy, provider=self.provider, context=self.context) def run(self, force, concurrency=0, tail=False, *args, **kwargs): plan = self._generate_plan(tail=tail) if not plan.keys(): logger.warn('WARNING: No stacks detected (error in config?)') if force: # need to generate a new plan to log since the outline sets the # steps to COMPLETE in order to log them plan.outline(logging.DEBUG) walker = build_walker(concurrency) plan.execute(walker) else: plan.outline(message="To execute this plan, run with \"--force\" " "flag.") def post_run(self, 
outline=False, *args, **kwargs): """Any steps that need to be taken after running the action.""" post_destroy = self.context.config.post_destroy if not outline and post_destroy: handle_hooks( stage="post_destroy", hooks=post_destroy, provider=self.provider, context=self.context) ================================================ FILE: stacker/actions/diff.py ================================================ import logging from operator import attrgetter from .base import plan, build_walker from . import build from .. import exceptions from ..status import ( NotSubmittedStatus, NotUpdatedStatus, COMPLETE, INTERRUPTED, ) logger = logging.getLogger(__name__) class DictValue(object): ADDED = "ADDED" REMOVED = "REMOVED" MODIFIED = "MODIFIED" UNMODIFIED = "UNMODIFIED" formatter = "%s%s = %s" def __init__(self, key, old_value, new_value): self.key = key self.old_value = old_value self.new_value = new_value def __eq__(self, other): return self.__dict__ == other.__dict__ def changes(self): """Returns a list of changes to represent the diff between old and new value. 
class DictValue(object):
    """Represents a single key's difference between two dictionaries."""

    ADDED = "ADDED"
    REMOVED = "REMOVED"
    MODIFIED = "MODIFIED"
    UNMODIFIED = "UNMODIFIED"

    # diff-style line: marker char, key, value
    formatter = "%s%s = %s"

    def __init__(self, key, old_value, new_value):
        self.key = key
        self.old_value = old_value
        self.new_value = new_value

    def __eq__(self, other):
        # fix: previously compared ``other.__dict__`` unconditionally, which
        # raised AttributeError when compared against objects without a
        # ``__dict__`` (e.g. strings). Returning NotImplemented defers to
        # the default comparison instead.
        if not isinstance(other, DictValue):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def changes(self):
        """Returns a list of changes to represent the diff between
        old and new value.

        Returns:
            list: [string] representation of the change (if any)
                between old and new value
        """
        output = []
        if self.status() is self.UNMODIFIED:
            output = [self.formatter % (' ', self.key, self.old_value)]
        elif self.status() is self.ADDED:
            output.append(self.formatter % ('+', self.key, self.new_value))
        elif self.status() is self.REMOVED:
            output.append(self.formatter % ('-', self.key, self.old_value))
        elif self.status() is self.MODIFIED:
            output.append(self.formatter % ('-', self.key, self.old_value))
            output.append(self.formatter % ('+', self.key, self.new_value))
        return output

    def status(self):
        """Classify this entry as ADDED/REMOVED/MODIFIED/UNMODIFIED."""
        if self.old_value == self.new_value:
            return self.UNMODIFIED
        elif self.old_value is None:
            return self.ADDED
        elif self.new_value is None:
            return self.REMOVED
        else:
            return self.MODIFIED


def diff_dictionaries(old_dict, new_dict):
    """Diffs two single dimension dictionaries.

    Returns the number of changes and an unordered list expressing the
    common entries and changes.

    Args:
        old_dict(dict): old dictionary
        new_dict(dict): new dictionary

    Returns:
        list: a two element list: the int number of changed records and a
            list of :class:`DictValue` entries sorted by key.
    """
    old_set = set(old_dict)
    new_set = set(new_dict)

    added_set = new_set - old_set
    removed_set = old_set - new_set
    common_set = old_set & new_set

    changes = 0
    output = []
    for key in added_set:
        changes += 1
        output.append(DictValue(key, None, new_dict[key]))

    for key in removed_set:
        changes += 1
        output.append(DictValue(key, old_dict[key], None))

    for key in common_set:
        output.append(DictValue(key, old_dict[key], new_dict[key]))
        # Compare string forms so e.g. 1 vs "1" is not counted as a change.
        if str(old_dict[key]) != str(new_dict[key]):
            changes += 1

    output.sort(key=attrgetter("key"))
    return [changes, output]


def format_params_diff(parameter_diff):
    """Handles the formatting of differences in parameters.

    Args:
        parameter_diff (list): A list of DictValues detailing the
            differences between two dicts returned by
            :func:`stacker.actions.diff.diff_dictionaries`

    Returns:
        string: A formatted string that represents a parameter diff
    """
    params_output = '\n'.join([line for v in parameter_diff
                               for line in v.changes()])
    return """--- Old Parameters
+++ New Parameters
******************
%s\n""" % params_output


def diff_parameters(old_params, new_params):
    """Compares the old vs. new parameters and returns a "diff".

    If there are no changes, we return an empty list.

    Args:
        old_params(dict): old parameters
        new_params(dict): new parameters

    Returns:
        list: A list of differences
    """
    [changes, diff] = diff_dictionaries(old_params, new_params)
    if changes == 0:
        return []
    return diff
""" def _diff_stack(self, stack, **kwargs): """Handles the diffing a stack in CloudFormation vs our config""" if self.cancel.wait(0): return INTERRUPTED if not build.should_submit(stack): return NotSubmittedStatus() provider = self.build_provider(stack) if not build.should_update(stack): stack.set_outputs(provider.get_outputs(stack.fqn)) return NotUpdatedStatus() tags = build.build_stack_tags(stack) stack.resolve(self.context, provider) parameters = self.build_parameters(stack) try: outputs = provider.get_stack_changes( stack, self._template(stack.blueprint), parameters, tags ) stack.set_outputs(outputs) except exceptions.StackDidNotChange: logger.info('No changes: %s', stack.fqn) stack.set_outputs(provider.get_outputs(stack.fqn)) return COMPLETE def _generate_plan(self): return plan( description="Diff stacks", stack_action=self._diff_stack, context=self.context) def run(self, concurrency=0, *args, **kwargs): plan = self._generate_plan() plan.outline(logging.DEBUG) if plan.keys(): logger.info("Diffing stacks: %s", ", ".join(plan.keys())) else: logger.warn('WARNING: No stacks detected (error in config?)') walker = build_walker(concurrency) plan.execute(walker) """Don't ever do anything for pre_run or post_run""" def pre_run(self, *args, **kwargs): pass def post_run(self, *args, **kwargs): pass ================================================ FILE: stacker/actions/graph.py ================================================ import logging import sys import json from .base import BaseAction, plan logger = logging.getLogger(__name__) def each_step(graph): """Returns an iterator that yields each step and it's direct dependencies. 
""" steps = graph.topological_sort() steps.reverse() for step in steps: deps = graph.downstream(step.name) yield (step, deps) def dot_format(out, graph, name="digraph"): """Outputs the graph using the graphviz "dot" format.""" out.write("digraph %s {\n" % name) for step, deps in each_step(graph): for dep in deps: out.write(" \"%s\" -> \"%s\";\n" % (step, dep)) out.write("}\n") def json_format(out, graph): """Outputs the graph in a machine readable JSON format.""" steps = {} for step, deps in each_step(graph): steps[step.name] = {} steps[step.name]["deps"] = [dep.name for dep in deps] json.dump({"steps": steps}, out, indent=4) out.write("\n") FORMATTERS = { "dot": dot_format, "json": json_format, } class Action(BaseAction): def _generate_plan(self): return plan( description="Print graph", stack_action=None, context=self.context) def run(self, format=None, reduce=False, *args, **kwargs): """Generates the underlying graph and prints it. """ plan = self._generate_plan() if reduce: # This will performa a transitive reduction on the underlying # graph, producing less edges. Mostly useful for the "dot" format, # when converting to PNG, so it creates a prettier/cleaner # dependency graph. plan.graph.transitive_reduction() fn = FORMATTERS[format] fn(sys.stdout, plan.graph) sys.stdout.flush() ================================================ FILE: stacker/actions/info.py ================================================ import logging from .base import BaseAction from .. import exceptions logger = logging.getLogger(__name__) class Action(BaseAction): """Get information on CloudFormation stacks. Displays the outputs for the set of CloudFormation stacks. 
""" def run(self, *args, **kwargs): logger.info('Outputs for stacks: %s', self.context.get_fqn()) if not self.context.get_stacks(): logger.warn('WARNING: No stacks detected (error in config?)') for stack in self.context.get_stacks(): provider = self.build_provider(stack) try: provider_stack = provider.get_stack(stack.fqn) except exceptions.StackDoesNotExist: logger.info('Stack "%s" does not exist.' % (stack.fqn,)) continue logger.info('%s:', stack.fqn) if 'Outputs' in provider_stack: for output in provider_stack['Outputs']: logger.info( '\t%s: %s', output['OutputKey'], output['OutputValue'] ) ================================================ FILE: stacker/awscli_yamlhelper.py ================================================ # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import json import yaml from yaml.resolver import ScalarNode, SequenceNode from botocore.compat import six def intrinsics_multi_constructor(loader, tag_prefix, node): """ YAML constructor to parse CloudFormation intrinsics. 
This will return a dictionary with key being the instrinsic name """ # Get the actual tag name excluding the first exclamation tag = node.tag[1:] # Some intrinsic functions doesn't support prefix "Fn::" prefix = "Fn::" if tag in ["Ref", "Condition"]: prefix = "" cfntag = prefix + tag if tag == "GetAtt" and isinstance(node.value, six.string_types): # ShortHand notation for !GetAtt accepts Resource.Attribute format # while the standard notation is to use an array # [Resource, Attribute]. Convert shorthand to standard format value = node.value.split(".", 1) elif isinstance(node, ScalarNode): # Value of this node is scalar value = loader.construct_scalar(node) elif isinstance(node, SequenceNode): # Value of this node is an array (Ex: [1,2]) value = loader.construct_sequence(node) else: # Value of this node is an mapping (ex: {foo: bar}) value = loader.construct_mapping(node) return {cfntag: value} def yaml_dump(dict_to_dump): """ Dumps the dictionary as a YAML document :param dict_to_dump: :return: """ return yaml.safe_dump(dict_to_dump, default_flow_style=False) def yaml_parse(yamlstr): """Parse a yaml string""" try: # PyYAML doesn't support json as well as it should, so if the input # is actually just json it is better to parse it with the standard # json parser. 
return json.loads(yamlstr) except ValueError: yaml.SafeLoader.add_multi_constructor( "!", intrinsics_multi_constructor) return yaml.safe_load(yamlstr) ================================================ FILE: stacker/blueprints/__init__.py ================================================ ================================================ FILE: stacker/blueprints/base.py ================================================ from past.builtins import basestring import copy import hashlib import logging import string from stacker.util import read_value_from_path from stacker.variables import Variable from troposphere import ( Output, Parameter, Ref, Template, ) from ..exceptions import ( MissingVariable, UnresolvedVariable, UnresolvedVariables, ValidatorError, VariableTypeRequired, InvalidUserdataPlaceholder ) from .variables.types import ( CFNType, TroposphereType, ) logger = logging.getLogger(__name__) PARAMETER_PROPERTIES = { "default": "Default", "description": "Description", "no_echo": "NoEcho", "allowed_values": "AllowedValues", "allowed_pattern": "AllowedPattern", "max_length": "MaxLength", "min_length": "MinLength", "max_value": "MaxValue", "min_value": "MinValue", "constraint_description": "ConstraintDescription" } class CFNParameter(object): def __init__(self, name, value): """Wrapper around a value to indicate a CloudFormation Parameter. Args: name (str): the name of the CloudFormation Parameter value (str, list, int or bool): the value we're going to submit as a CloudFormation Parameter. 
""" acceptable_types = [basestring, bool, list, int] acceptable = False for acceptable_type in acceptable_types: if isinstance(value, acceptable_type): acceptable = True if acceptable_type == bool: logger.debug("Converting parameter %s boolean '%s' " "to string.", name, value) value = str(value).lower() break if acceptable_type == int: logger.debug("Converting parameter %s integer '%s' " "to string.", name, value) value = str(value) break if not acceptable: raise ValueError( "CFNParameter (%s) value must be one of %s got: %s" % ( name, "str, int, bool, or list", value)) self.name = name self.value = value def __repr__(self): return "CFNParameter({}: {})".format(self.name, self.value) def to_parameter_value(self): """Return the value to be submitted to CloudFormation""" return self.value @property def ref(self): return Ref(self.name) def build_parameter(name, properties): """Builds a troposphere Parameter with the given properties. Args: name (string): The name of the parameter. properties (dict): Contains the properties that will be applied to the parameter. See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html Returns: :class:`troposphere.Parameter`: The created parameter object. """ p = Parameter(name, Type=properties.get("type")) for name, attr in PARAMETER_PROPERTIES.items(): if name in properties: setattr(p, attr, properties[name]) return p def validate_variable_type(var_name, var_type, value): """Ensures the value is the correct variable type. Args: var_name (str): The name of the defined variable on a blueprint. var_type (type): The type that the value should be. value (obj): The object representing the value provided for the variable Returns: object: Returns the appropriate value object. If the original value was of CFNType, the returned value will be wrapped in CFNParameter. Raises: ValueError: If the `value` isn't of `var_type` and can't be cast as that type, this is raised. 
""" if isinstance(var_type, CFNType): value = CFNParameter(name=var_name, value=value) elif isinstance(var_type, TroposphereType): try: value = var_type.create(value) except Exception as exc: name = "{}.create".format(var_type.resource_name) raise ValidatorError(var_name, name, value, exc) else: if not isinstance(value, var_type): raise ValueError( "Value for variable %s must be of type %s. Actual " "type: %s." % (var_name, var_type, type(value)) ) return value def validate_allowed_values(allowed_values, value): """Support a variable defining which values it allows. Args: allowed_values (Optional[list]): A list of allowed values from the variable definition value (obj): The object representing the value provided for the variable Returns: bool: Boolean for whether or not the value is valid. """ # ignore CFNParameter, troposphere handles these for us if not allowed_values or isinstance(value, CFNParameter): return True return value in allowed_values def resolve_variable(var_name, var_def, provided_variable, blueprint_name): """Resolve a provided variable value against the variable definition. Args: var_name (str): The name of the defined variable on a blueprint. var_def (dict): A dictionary representing the defined variables attributes. provided_variable (:class:`stacker.variables.Variable`): The variable value provided to the blueprint. blueprint_name (str): The name of the blueprint that the variable is being applied to. Returns: object: The resolved variable value, could be any python object. Raises: MissingVariable: Raised when a variable with no default is not provided a value. UnresolvedVariable: Raised when the provided variable is not already resolved. ValueError: Raised when the value is not the right type and cannot be cast as the correct type. Raised by :func:`stacker.blueprints.base.validate_variable_type` ValidatorError: Raised when a validator raises an exception. Wraps the original exception. 
""" try: var_type = var_def["type"] except KeyError: raise VariableTypeRequired(blueprint_name, var_name) if provided_variable: if not provided_variable.resolved: raise UnresolvedVariable(blueprint_name, provided_variable) value = provided_variable.value else: # Variable value not provided, try using the default, if it exists # in the definition try: value = var_def["default"] except KeyError: raise MissingVariable(blueprint_name, var_name) # If no validator, return the value as is, otherwise apply validator validator = var_def.get("validator", lambda v: v) try: value = validator(value) except Exception as exc: raise ValidatorError(var_name, validator.__name__, value, exc) # Ensure that the resulting value is the correct type value = validate_variable_type(var_name, var_type, value) allowed_values = var_def.get("allowed_values") if not validate_allowed_values(allowed_values, value): message = ( "Invalid value passed to '%s' in blueprint: %s. Got: '%s', " "expected one of %s" ) % (var_name, blueprint_name, value, allowed_values) raise ValueError(message) return value def parse_user_data(variables, raw_user_data, blueprint_name): """Parse the given user data and renders it as a template It supports referencing template variables to create userdata that's supplemented with information from the stack, as commonly required when creating EC2 userdata files. For example: Given a raw_user_data string: 'open file ${file}' And a variables dictionary with: {'file': 'test.txt'} parse_user_data would output: open file test.txt Args: variables (dict): variables available to the template raw_user_data (str): the user_data to be parsed blueprint_name (str): the name of the blueprint Returns: str: The parsed user data, with all the variables values and refs replaced with their resolved values. Raises: InvalidUserdataPlaceholder: Raised when a placeholder name in raw_user_data is not valid. E.g ${100} would raise this. 
MissingVariable: Raised when a variable is in the raw_user_data that is not given in the blueprint """ variable_values = {} for key, value in variables.items(): if type(value) is CFNParameter: variable_values[key] = value.to_parameter_value() else: variable_values[key] = value template = string.Template(raw_user_data) res = "" try: res = template.substitute(variable_values) except ValueError as exp: raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0]) except KeyError as key: raise MissingVariable(blueprint_name, key) return res class Blueprint(object): """Base implementation for rendering a troposphere template. Args: name (str): A name for the blueprint. context (:class:`stacker.context.Context`): the context the blueprint is being executed under. mappings (dict, optional): Cloudformation Mappings to be used in the template. """ def __init__(self, name, context, mappings=None, description=None): self.name = name self.context = context self.mappings = mappings self.outputs = {} self.reset_template() self.resolved_variables = None self.description = description if hasattr(self, "PARAMETERS") or hasattr(self, "LOCAL_PARAMETERS"): raise AttributeError("DEPRECATION WARNING: Blueprint %s uses " "deprecated PARAMETERS or " "LOCAL_PARAMETERS, rather than VARIABLES. " "Please update your blueprints. See https://" "stacker.readthedocs.io/en/latest/blueprints." "html#variables for aditional information." % name) def get_parameter_definitions(self): """Get the parameter definitions to submit to CloudFormation. Any variable definition whose `type` is an instance of `CFNType` will be returned as a CloudFormation Parameter. Returns: dict: parameter definitions. Keys are parameter names, the values are dicts containing key/values for various parameter properties. 
""" output = {} for var_name, attrs in self.defined_variables().items(): var_type = attrs.get("type") if isinstance(var_type, CFNType): cfn_attrs = copy.deepcopy(attrs) cfn_attrs["type"] = var_type.parameter_type output[var_name] = cfn_attrs return output def get_output_definitions(self): """Gets the output definitions. Returns: dict: output definitions. Keys are output names, the values are dicts containing key/values for various output properties. """ return {k: output.to_dict() for k, output in self.template.outputs.items()} def get_required_parameter_definitions(self): """Returns all template parameters that do not have a default value. Returns: dict: dict of required CloudFormation Parameters for the blueprint. Will be a dictionary of : . """ required = {} for name, attrs in self.get_parameter_definitions().items(): if "Default" not in attrs: required[name] = attrs return required def get_parameter_values(self): """Return a dictionary of variables with `type` :class:`CFNType`. Returns: dict: variables that need to be submitted as CloudFormation Parameters. Will be a dictionary of : . """ variables = self.get_variables() output = {} for key, value in variables.items(): try: output[key] = value.to_parameter_value() except AttributeError: continue return output def setup_parameters(self): """Add any CloudFormation parameters to the template""" t = self.template parameters = self.get_parameter_definitions() if not parameters: logger.debug("No parameters defined.") return for name, attrs in parameters.items(): p = build_parameter(name, attrs) t.add_parameter(p) def defined_variables(self): """Return a dictionary of variables defined by the blueprint. By default, this will just return the values from `VARIABLES`, but this makes it easy for subclasses to add variables. Returns: dict: variables defined by the blueprint """ return copy.deepcopy(getattr(self, "VARIABLES", {})) def get_variables(self): """Return a dictionary of variables available to the template. 
These variables will have been defined within `VARIABLES` or `self.defined_variables`. Any variable value that contains a lookup will have been resolved. Returns: dict: variables available to the template Raises: """ if self.resolved_variables is None: raise UnresolvedVariables(self.name) return self.resolved_variables def get_cfn_parameters(self): """Return a dictionary of variables with `type` :class:`CFNType`. Returns: dict: variables that need to be submitted as CloudFormation Parameters. """ variables = self.get_variables() output = {} for key, value in variables.items(): if hasattr(value, "to_parameter_value"): output[key] = value.to_parameter_value() return output def resolve_variables(self, provided_variables): """Resolve the values of the blueprint variables. This will resolve the values of the `VARIABLES` with values from the env file, the config, and any lookups resolved. Args: provided_variables (list of :class:`stacker.variables.Variable`): list of provided variables """ self.resolved_variables = {} defined_variables = self.defined_variables() variable_dict = dict((var.name, var) for var in provided_variables) for var_name, var_def in defined_variables.items(): value = resolve_variable( var_name, var_def, variable_dict.get(var_name), self.name ) self.resolved_variables[var_name] = value def import_mappings(self): if not self.mappings: return for name, mapping in self.mappings.items(): logger.debug("Adding mapping %s.", name) self.template.add_mapping(name, mapping) def reset_template(self): self.template = Template() self._rendered = None self._version = None def render_template(self): """Render the Blueprint to a CloudFormation template""" self.import_mappings() self.create_template() if self.description: self.set_template_description(self.description) self.setup_parameters() rendered = self.template.to_json(indent=self.context.template_indent) version = hashlib.md5(rendered.encode()).hexdigest()[:8] return (version, rendered) def to_json(self, 
variables=None): """Render the blueprint and return the template in json form. Args: variables (dict): Optional dictionary providing/overriding variable values. Returns: str: the rendered CFN JSON template """ variables_to_resolve = [] if variables: for key, value in variables.items(): variables_to_resolve.append(Variable(key, value)) for k in self.get_parameter_definitions(): if not variables or k not in variables: # The provided value for a CFN parameter has no effect in this # context (generating the CFN template), so any string can be # provided for its value - just needs to be something variables_to_resolve.append(Variable(k, 'unused_value')) self.resolve_variables(variables_to_resolve) return self.render_template()[1] def read_user_data(self, user_data_path): """Reads and parses a user_data file. Args: user_data_path (str): path to the userdata file Returns: str: the parsed user data file """ raw_user_data = read_value_from_path(user_data_path) variables = self.get_variables() return parse_user_data(variables, raw_user_data, self.name) def set_template_description(self, description): """Adds a description to the Template Args: description (str): A description to be added to the resulting template. """ self.template.set_description(description) def add_output(self, name, value): """Simple helper for adding outputs. Args: name (str): The name of the output to create. value (str): The value to put in the output. 
""" self.template.add_output(Output(name, Value=value)) @property def requires_change_set(self): """Returns true if the underlying template has transforms.""" return self.template.transform is not None @property def rendered(self): if not self._rendered: self._version, self._rendered = self.render_template() return self._rendered @property def version(self): if not self._version: self._version, self._rendered = self.render_template() return self._version def create_template(self): raise NotImplementedError ================================================ FILE: stacker/blueprints/raw.py ================================================ """Blueprint representing raw template module.""" import hashlib import json import os import sys from jinja2 import Template from ..util import parse_cloudformation_template from ..exceptions import InvalidConfig, UnresolvedVariable from .base import Blueprint def get_template_path(filename): """Find raw template in working directory or in sys.path. template_path from config may refer to templates colocated with the Stacker config, or files in remote package_sources. Here, we emulate python module loading to find the path to the template. Args: filename (str): Template filename. Returns: Optional[str]: Path to file, or None if no file found """ if os.path.isfile(filename): return os.path.abspath(filename) for i in sys.path: if os.path.isfile(os.path.join(i, filename)): return os.path.abspath(os.path.join(i, filename)) return None def get_template_params(template): """Parse a CFN template for defined parameters. Args: template (dict): Parsed CFN template. Returns: dict: Template parameters. """ params = {} if 'Parameters' in template: params = template['Parameters'] return params def resolve_variable(provided_variable, blueprint_name): """Resolve a provided variable value against the variable definition. This acts as a subset of resolve_variable logic in the base module, leaving out everything that doesn't apply to CFN parameters. 
Args: provided_variable (:class:`stacker.variables.Variable`): The variable value provided to the blueprint. blueprint_name (str): The name of the blueprint that the variable is being applied to. Returns: object: The resolved variable string value. Raises: UnresolvedVariable: Raised when the provided variable is not already resolved. """ value = None if provided_variable: if not provided_variable.resolved: raise UnresolvedVariable(blueprint_name, provided_variable) value = provided_variable.value return value class RawTemplateBlueprint(Blueprint): """Blueprint class for blueprints auto-generated from raw templates.""" def __init__(self, name, context, raw_template_path, mappings=None, # noqa pylint: disable=too-many-arguments description=None): # pylint: disable=unused-argument """Initialize RawTemplateBlueprint object.""" self.name = name self.context = context self.mappings = mappings self.resolved_variables = None self.raw_template_path = raw_template_path self._rendered = None self._version = None def to_json(self, variables=None): # pylint: disable=unused-argument """Return the template in JSON. Args: variables (dict): Unused in this subclass (variables won't affect the template). Returns: str: the rendered CFN JSON template """ # load -> dumps will produce json from json or yaml templates return json.dumps(self.to_dict(), sort_keys=True, indent=4) def to_dict(self): """Return the template as a python dictionary. Returns: dict: the loaded template as a python dictionary """ return parse_cloudformation_template(self.rendered) def render_template(self): """Load template and generate its md5 hash.""" return (self.version, self.rendered) def get_parameter_definitions(self): """Get the parameter definitions to submit to CloudFormation. Returns: dict: parameter definitions. Keys are parameter names, the values are dicts containing key/values for various parameter properties. 
""" return get_template_params(self.to_dict()) def get_output_definitions(self): """Gets the output definitions. Returns: dict: output definitions. Keys are output names, the values are dicts containing key/values for various output properties. """ return self.to_dict().get('Outputs', {}) def resolve_variables(self, provided_variables): """Resolve the values of the blueprint variables. This will resolve the values of the template parameters with values from the env file, the config, and any lookups resolved. The resolution is run twice, in case the blueprint is jinja2 templated and requires provided variables to render. Args: provided_variables (list of :class:`stacker.variables.Variable`): list of provided variables """ # Pass 1 to set resolved_variables to provided variables self.resolved_variables = {} variable_dict = dict((var.name, var) for var in provided_variables) for var_name, _var_def in variable_dict.items(): value = resolve_variable( variable_dict.get(var_name), self.name ) if value is not None: self.resolved_variables[var_name] = value # Pass 2 to render the blueprint and set resolved_variables according # to defined variables defined_variables = self.get_parameter_definitions() self.resolved_variables = {} variable_dict = dict((var.name, var) for var in provided_variables) for var_name, _var_def in defined_variables.items(): value = resolve_variable( variable_dict.get(var_name), self.name ) if value is not None: self.resolved_variables[var_name] = value def get_parameter_values(self): """Return a dictionary of variables with `type` :class:`CFNType`. Returns: dict: variables that need to be submitted as CloudFormation Parameters. Will be a dictionary of : . 
""" return self.resolved_variables @property def requires_change_set(self): """Return True if the underlying template has transforms.""" return bool("Transform" in self.to_dict()) @property def rendered(self): """Return (generating first if needed) rendered template.""" if not self._rendered: template_path = get_template_path(self.raw_template_path) if template_path: with open(template_path, 'r') as template: if len(os.path.splitext(template_path)) == 2 and ( os.path.splitext(template_path)[1] == '.j2'): self._rendered = Template(template.read()).render( context=self.context, mappings=self.mappings, name=self.name, variables=self.resolved_variables ) else: self._rendered = template.read() else: raise InvalidConfig( 'Could not find template %s' % self.raw_template_path ) return self._rendered @property def version(self): """Return (generating first if needed) version hash.""" if not self._version: self._version = hashlib.md5(self.rendered.encode()).hexdigest()[:8] return self._version ================================================ FILE: stacker/blueprints/testutil.py ================================================ import difflib import json import unittest import os.path from glob import glob from stacker.config import parse as parse_config from stacker.context import Context from stacker.util import load_object_from_string from stacker.variables import Variable def diff(a, b): """A human readable differ.""" return '\n'.join( list( difflib.Differ().compare( a.splitlines(), b.splitlines() ) ) ) class BlueprintTestCase(unittest.TestCase): OUTPUT_PATH = "tests/fixtures/blueprints" def assertRenderedBlueprint(self, blueprint): # noqa: N802 expected_output = "%s/%s.json" % (self.OUTPUT_PATH, blueprint.name) rendered_dict = blueprint.template.to_dict() rendered_text = json.dumps(rendered_dict, indent=4, sort_keys=True) with open(expected_output + "-result", "w") as fd: fd.write(rendered_text) with open(expected_output) as fd: expected_dict = json.loads(fd.read()) 
expected_text = json.dumps(expected_dict, indent=4, sort_keys=True) self.assertEquals(rendered_dict, expected_dict, diff(rendered_text, expected_text)) class YamlDirTestGenerator(object): """Generate blueprint tests from yaml config files. This class creates blueprint tests from yaml files with a syntax similar to stackers' configuration syntax. For example, --- namespace: test stacks: - name: test_sample class_path: stacker_blueprints.test.Sample variables: var1: value1 will create a test for the specified blueprint, passing that variable as part of the test. The test will generate a .json file for this blueprint, and compare it with the stored result. By default, the generator looks for files named 'test_*.yaml' in its same directory. In order to use it, subclass it in a directory containing such tests, and name the class with a pattern that will include it in nosetests' tests (for example, TestGenerator). The subclass may override some properties: @property base_class: by default, the generated tests are subclasses of stacker.blueprints.testutil.BlueprintTestCase. In order to change this, set this property to the desired base class. @property yaml_dirs: by default, the directory where the generator is subclassed is searched for test files. Override this array for specifying more directories. These must be relative to the directory in which the subclass lives in. Globs may be used. Default: [ '.' ]. Example override: [ '.', 'tests/*/' ] @property yaml_filename: by default, the generator looks for files named 'test_*.yaml'. Use this to change this pattern. Globs may be used. There's an example of this use in the tests/ subdir of stacker_blueprints. """ def __init__(self): self.classdir = os.path.relpath( self.__class__.__module__.replace('.', '/')) if not os.path.isdir(self.classdir): self.classdir = os.path.dirname(self.classdir) # These properties can be overriden from the test generator subclass. 
@property def base_class(self): return BlueprintTestCase @property def yaml_dirs(self): return ['.'] @property def yaml_filename(self): return 'test_*.yaml' def test_generator(self): # Search for tests in given paths configs = [] for d in self.yaml_dirs: configs.extend( glob('%s/%s/%s' % (self.classdir, d, self.yaml_filename))) class ConfigTest(self.base_class): def __init__(self, config, stack, filepath): self.config = config self.stack = stack self.description = "%s (%s)" % (stack.name, filepath) def __call__(self): # Use the context property of the baseclass, if present. # If not, default to a basic context. try: ctx = self.context except AttributeError: ctx = Context(config=self.config, environment={'environment': 'test'}) configvars = self.stack.variables or {} variables = [Variable(k, v) for k, v in configvars.iteritems()] blueprint_class = load_object_from_string( self.stack.class_path) blueprint = blueprint_class(self.stack.name, ctx) blueprint.resolve_variables(variables or []) blueprint.setup_parameters() blueprint.create_template() self.assertRenderedBlueprint(blueprint) def assertEquals(self, a, b, msg): # noqa: N802 assert a == b, msg for f in configs: with open(f) as test: config = parse_config(test.read()) config.validate() for stack in config.stacks: # Nosetests supports "test generators", which allows us to # yield a callable object which will be wrapped as a test # case. # # http://nose.readthedocs.io/en/latest/writing_tests.html#test-generators yield ConfigTest(config, stack, filepath=f) ================================================ FILE: stacker/blueprints/variables/__init__.py ================================================ ================================================ FILE: stacker/blueprints/variables/types.py ================================================ class TroposphereType(object): def __init__(self, defined_type, many=False, optional=False, validate=True): """Represents a Troposphere type. 
:class:`Troposphere` will convert the value provided to the variable to the specified Troposphere type. Both resource and parameter classes (which are just used to configure other resources) are acceptable as configuration values. Complete resource definitions must be dictionaries, with the keys identifying the resource titles, and the values being used as the constructor parameters. Parameter classes can be defined as dictionariy or a list of dictionaries. In either case, the keys and values will be used directly as constructor parameters. Args: defined_type (type): Troposphere type many (bool): Whether or not multiple resources can be constructed. If the defined type is a resource, multiple resources can be passed as a dictionary of dictionaries. If it is a parameter class, multiple resources are passed as a list. optional (bool): Whether an undefined/null configured value is acceptable. In that case a value of ``None`` will be passed to the template, even if ``many`` is enabled. validate (bool): Whether to validate the generated object on creation. Should be left enabled unless the object will be augmented with mandatory parameters in the template code, such that it must be validated at a later point. """ self._validate_type(defined_type) self._type = defined_type self._many = many self._optional = optional self._validate = validate def _validate_type(self, defined_type): if not hasattr(defined_type, "from_dict"): raise ValueError("Type must have `from_dict` attribute") @property def resource_name(self): return ( getattr(self._type, 'resource_name', None) or self._type.__name__ ) def create(self, value): """Create the troposphere type from the value. Args: value (Union[dict, list]): A dictionary or list of dictionaries (see class documentation for details) to use as parameters to create the Troposphere type instance. Each dictionary will be passed to the `from_dict` method of the type. 
Returns: Union[list, type]: Returns the value converted to the troposphere type """ # Explicitly check with len such that non-sequence types throw. if self._optional and (value is None or len(value) == 0): return None if hasattr(self._type, 'resource_type'): # Our type is a resource, so ensure we have a dict of title to # parameters if not isinstance(value, dict): raise ValueError("Resources must be specified as a dict of " "title to parameters") if not self._many and len(value) > 1: raise ValueError("Only one resource can be provided for this " "TroposphereType variable") result = [ self._type.from_dict(title, v) for title, v in value.items() ] else: # Our type is for properties, not a resource, so don't use # titles if self._many: result = [self._type.from_dict(None, v) for v in value] elif not isinstance(value, dict): raise ValueError("TroposphereType for a single non-resource" "type must be specified as a dict of " "parameters") else: result = [self._type.from_dict(None, value)] if self._validate: for v in result: v._validate_props() return result[0] if not self._many else result class CFNType(object): def __init__(self, parameter_type): """Represents a CloudFormation Parameter Type. :class:`CFNType`` can be used as the `type` for a Blueprint variable. Unlike other variables, a variable with `type` :class:`CFNType`, will be submitted to CloudFormation as a Parameter. 
Args: parameter_type (str): An AWS specific parameter type (http://goo.gl/PthovJ) """ self.parameter_type = parameter_type # General CFN types CFNString = CFNType("String") CFNNumber = CFNType("Number") CFNNumberList = CFNType("List") CFNCommaDelimitedList = CFNType("CommaDelimitedList") # AWS-Specific Parameter Types # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-specific-parameter-types EC2AvailabilityZoneName = CFNType("AWS::EC2::AvailabilityZone::Name") EC2ImageId = CFNType("AWS::EC2::Image::Id") EC2InstanceId = CFNType("AWS::EC2::Instance::Id") EC2KeyPairKeyName = CFNType("AWS::EC2::KeyPair::KeyName") EC2SecurityGroupGroupName = CFNType("AWS::EC2::SecurityGroup::GroupName") EC2SecurityGroupId = CFNType("AWS::EC2::SecurityGroup::Id") EC2SubnetId = CFNType("AWS::EC2::Subnet::Id") EC2VolumeId = CFNType("AWS::EC2::Volume::Id") EC2VPCId = CFNType("AWS::EC2::VPC::Id") Route53HostedZoneId = CFNType("AWS::Route53::HostedZone::Id") EC2AvailabilityZoneNameList = CFNType("List") EC2ImageIdList = CFNType("List") EC2InstanceIdList = CFNType("List") EC2SecurityGroupGroupNameList = CFNType( "List") EC2SecurityGroupIdList = CFNType("List") EC2SubnetIdList = CFNType("List") EC2VolumeIdList = CFNType("List") EC2VPCIdList = CFNType("List") Route53HostedZoneIdList = CFNType("List") # SSM Parameter Types # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#aws-ssm-parameter-types SSMParameterName = CFNType("AWS::SSM::Parameter::Name") SSMParameterValueString = CFNType("AWS::SSM::Parameter::Value") SSMParameterValueStringList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueCommaDelimitedList = CFNType( "AWS::SSM::Parameter::Value") # Each AWS-specific type here is repeated from the the list above SSMParameterValueEC2AvailabilityZoneName = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2ImageId = CFNType( "AWS::SSM::Parameter::Value") 
SSMParameterValueEC2InstanceId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2KeyPairKeyName = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2SecurityGroupGroupName = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2SecurityGroupId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2SubnetId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2VolumeId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2VPCId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueRoute53HostedZoneId = CFNType( "AWS::SSM::Parameter::Value") SSMParameterValueEC2AvailabilityZoneNameList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2ImageIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2InstanceIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2SecurityGroupGroupNameList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2SecurityGroupIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2SubnetIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2VolumeIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueEC2VPCIdList = CFNType( "AWS::SSM::Parameter::Value>") SSMParameterValueRoute53HostedZoneIdList = CFNType( "AWS::SSM::Parameter::Value>") ================================================ FILE: stacker/commands/__init__.py ================================================ from .stacker import Stacker # NOQA ================================================ FILE: stacker/commands/stacker/__init__.py ================================================ import logging from .build import Build from .destroy import Destroy from .info import Info from .diff import Diff from .graph import Graph from .base import BaseCommand from ...config import render_parse_load as load_config from ...context import Context from ...providers.aws import default from ... import __version__ from ... 
import session_cache logger = logging.getLogger(__name__) class Stacker(BaseCommand): name = "stacker" subcommands = (Build, Destroy, Info, Diff, Graph) def configure(self, options, **kwargs): session_cache.default_profile = options.profile self.config = load_config( options.config.read(), environment=options.environment, validate=True, ) options.provider_builder = default.ProviderBuilder( region=options.region, interactive=options.interactive, replacements_only=options.replacements_only, recreate_failed=options.recreate_failed, service_role=self.config.service_role, ) options.context = Context( environment=options.environment, config=self.config, # Allow subcommands to provide any specific kwargs to the Context # that it wants. **options.get_context_kwargs(options) ) super(Stacker, self).configure(options, **kwargs) if options.interactive: logger.info("Using interactive AWS provider mode.") else: logger.info("Using default AWS provider mode") def add_arguments(self, parser): parser.add_argument("--version", action="version", version="%%(prog)s %s" % (__version__,)) ================================================ FILE: stacker/commands/stacker/base.py ================================================ import argparse import threading import signal from collections.abc import Mapping import logging import os.path from ...environment import ( DictWithSourceType, parse_environment, parse_yaml_environment ) logger = logging.getLogger(__name__) SIGNAL_NAMES = { signal.SIGINT: "SIGINT", signal.SIGTERM: "SIGTERM", } def cancel(): """Returns a threading.Event() that will get set when SIGTERM, or SIGINT are triggered. This can be used to cancel execution of threads. 
""" cancel = threading.Event() def cancel_execution(signum, frame): signame = SIGNAL_NAMES.get(signum, signum) logger.info("Signal %s received, quitting " "(this can take some time)...", signame) cancel.set() signal.signal(signal.SIGINT, cancel_execution) signal.signal(signal.SIGTERM, cancel_execution) return cancel class KeyValueAction(argparse.Action): def __init__(self, option_strings, dest, default=None, nargs=None, **kwargs): if nargs: raise ValueError("nargs not allowed") default = default or {} super(KeyValueAction, self).__init__(option_strings, dest, nargs, default=default, **kwargs) def __call__(self, parser, namespace, values, option_string=None): if not isinstance(values, Mapping): raise ValueError("type must be \"key_value\"") if not getattr(namespace, self.dest): setattr(namespace, self.dest, {}) getattr(namespace, self.dest).update(values) def key_value_arg(string): try: k, v = string.split("=", 1) except ValueError: raise argparse.ArgumentTypeError( "%s does not match KEY=VALUE format." % string) return {k: v} def environment_file(input_file): """Reads a stacker environment file and returns the resulting data.""" is_yaml = os.path.splitext(input_file)[1].lower() in ['.yaml', '.yml'] with open(input_file) as fd: if is_yaml: return parse_yaml_environment(fd.read()) else: return parse_environment(fd.read()) class BaseCommand(object): """Base class for all stacker subcommands. The way argparse handles common arguments that should be passed to the subparser is confusing. You can add arguments to the parent parser that will get passed to the subparser, but these then need to be provided on the command line before specifying the subparser. Furthermore, when viewing the help for a subcommand, you can't view these parameters. 
By including shared parameters for stacker commands within this subclass, we don't have to redundantly add the parameters we want on all subclasses within each subparser and these shared parameters are treated as normal arguments to the subcommand. """ name = None description = None subcommands = tuple() subcommands_help = None def __init__(self, setup_logging=None, *args, **kwargs): self.setup_logging = setup_logging if not self.name: raise ValueError("Subcommands must set \"name\": %s" % (self,)) def add_subcommands(self, parser): if self.subcommands: subparsers = parser.add_subparsers(help=self.subcommands_help) for subcommand_class in self.subcommands: subcommand = subcommand_class() subparser = subparsers.add_parser( subcommand.name, description=subcommand.description, ) subcommand.add_arguments(subparser) subparser.set_defaults(run=subcommand.run) subparser.set_defaults( get_context_kwargs=subcommand.get_context_kwargs) def parse_args(self, *vargs): parser = argparse.ArgumentParser(description=self.description) self.add_subcommands(parser) self.add_arguments(parser) args = parser.parse_args(*vargs) args.environment.update(args.cli_envs) return args def run(self, options, **kwargs): pass def configure(self, options, **kwargs): if self.setup_logging: self.setup_logging(options.verbose, self.config.log_formats) def get_context_kwargs(self, options, **kwargs): """Return a dictionary of kwargs that will be used with the Context. This allows commands to pass in any specific arguments they define to the context. Args: options (:class:`argparse.Namespace`): arguments that have been passed via the command line Returns: dict: Dictionary that will be passed to Context initializer as kwargs. """ return {} def add_arguments(self, parser): parser.add_argument( "-e", "--env", dest="cli_envs", metavar="ENV=VALUE", type=key_value_arg, action=KeyValueAction, default={}, help="Adds environment key/value pairs from the command line. " "Overrides your environment file settings. 
Can be specified " "more than once.") parser.add_argument( "-r", "--region", help="The default AWS region to use for all AWS API calls.") parser.add_argument( "-p", "--profile", help="The default AWS profile to use for all AWS API calls. If " "not specified, the default will be according to http://bo" "to3.readthedocs.io/en/latest/guide/configuration.html.") parser.add_argument( "-v", "--verbose", action="count", default=0, help="Increase output verbosity. May be specified up to twice.") parser.add_argument( "environment", type=environment_file, nargs='?', default=DictWithSourceType('simple'), help="Path to an environment file. The file can be a simple " "`key: value` pair environment file, or a YAML file ending in" ".yaml or .yml. In the simple key:value case, values in the " "environment file can be used in the stack config as if it " "were a string.Template type: " "https://docs.python.org/2/library/" "string.html#template-strings. In the YAML case, variable" "references in the stack config are replaced with the objects" "in the environment after parsing") parser.add_argument( "config", type=argparse.FileType(), help="The config file where stack configuration is located. Must " "be in yaml format. If `-` is provided, then the config will " "be read from stdin.") parser.add_argument( "-i", "--interactive", action="store_true", help="Enable interactive mode. If specified, this will use the " "AWS interactive provider, which leverages Cloudformation " "Change Sets to display changes before running " "cloudformation templates. You'll be asked if you want to " "execute each change set. 
If you only want to authorize " "replacements, run with \"--replacements-only\" as well.") parser.add_argument( "--replacements-only", action="store_true", help="If interactive mode is enabled, stacker will only prompt to " "authorize replacements.") parser.add_argument( "--recreate-failed", action="store_true", help="Destroy and re-create stacks that are stuck in a failed " "state from an initial deployment when updating.") ================================================ FILE: stacker/commands/stacker/build.py ================================================ """Launches or updates CloudFormation stacks based on the given config. Stacker is smart enough to figure out if anything (the template or parameters) have changed for a given stack. If nothing has changed, stacker will correctly skip executing anything against the stack. """ from .base import BaseCommand, cancel from ...actions import build class Build(BaseCommand): name = "build" description = __doc__ def add_arguments(self, parser): super(Build, self).add_arguments(parser) parser.add_argument("-o", "--outline", action="store_true", help="Print an outline of what steps will be " "taken to build the stacks") parser.add_argument("--force", action="append", default=[], metavar="STACKNAME", type=str, help="If a stackname is provided to --force, it " "will be updated, even if it is locked in " "the config.") parser.add_argument("--targets", "--stacks", action="append", metavar="STACKNAME", type=str, help="Only work on the stacks given, and their " "dependencies. Can be specified more than " "once. If not specified then stacker will " "work on all stacks in the config file.") parser.add_argument("-j", "--max-parallel", action="store", type=int, default=0, help="The maximum number of stacks to execute in " "parallel. 
If not provided, the value will " "be constrained based on the underlying " "graph.") parser.add_argument("-t", "--tail", action="store_true", help="Tail the CloudFormation logs while working " "with stacks") parser.add_argument("-d", "--dump", action="store", type=str, help="Dump the rendered Cloudformation templates " "to a directory") def run(self, options, **kwargs): super(Build, self).run(options, **kwargs) action = build.Action(options.context, provider_builder=options.provider_builder, cancel=cancel()) action.execute(concurrency=options.max_parallel, outline=options.outline, tail=options.tail, dump=options.dump) def get_context_kwargs(self, options, **kwargs): return {"stack_names": options.targets, "force_stacks": options.force} ================================================ FILE: stacker/commands/stacker/destroy.py ================================================ """Destroys CloudFormation stacks based on the given config. Stacker will determine the order in which stacks should be destroyed based on any manual requirements they specify or output values they rely on from other stacks. """ from .base import BaseCommand, cancel from ...actions import destroy class Destroy(BaseCommand): name = "destroy" description = __doc__ def add_arguments(self, parser): super(Destroy, self).add_arguments(parser) parser.add_argument("-f", "--force", action="store_true", help="Whether or not you want to go through " " with destroying the stacks") parser.add_argument("--targets", "--stacks", action="append", metavar="STACKNAME", type=str, help="Only work on the stacks given. Can be " "specified more than once. If not specified " "then stacker will work on all stacks in the " "config file.") parser.add_argument("-j", "--max-parallel", action="store", type=int, default=0, help="The maximum number of stacks to execute in " "parallel. 
If not provided, the value will " "be constrained based on the underlying " "graph.") parser.add_argument("-t", "--tail", action="store_true", help="Tail the CloudFormation logs while working " "with stacks") def run(self, options, **kwargs): super(Destroy, self).run(options, **kwargs) action = destroy.Action(options.context, provider_builder=options.provider_builder, cancel=cancel()) action.execute(concurrency=options.max_parallel, force=options.force, tail=options.tail) def get_context_kwargs(self, options, **kwargs): return {"stack_names": options.targets} ================================================ FILE: stacker/commands/stacker/diff.py ================================================ """ Diffs the config against the currently running CloudFormation stacks Sometimes small changes can have big impacts. Run "stacker diff" before "stacker build" to detect bad things(tm) from happening in advance! """ from .base import BaseCommand from ...actions import diff class Diff(BaseCommand): name = "diff" description = __doc__ def add_arguments(self, parser): super(Diff, self).add_arguments(parser) parser.add_argument("--force", action="append", default=[], metavar="STACKNAME", type=str, help="If a stackname is provided to --force, it " "will be diffed, even if it is locked in " "the config.") parser.add_argument("--stacks", action="append", metavar="STACKNAME", type=str, help="Only work on the stacks given. Can be " "specified more than once. 
If not specified " "then stacker will work on all stacks in the " "config file.") def run(self, options, **kwargs): super(Diff, self).run(options, **kwargs) action = diff.Action(options.context, provider_builder=options.provider_builder) action.execute() def get_context_kwargs(self, options, **kwargs): return {"stack_names": options.stacks, "force_stacks": options.force} ================================================ FILE: stacker/commands/stacker/graph.py ================================================ """Prints the the relationships between steps as a graph. """ from .base import BaseCommand from ...actions import graph class Graph(BaseCommand): name = "graph" description = __doc__ def add_arguments(self, parser): super(Graph, self).add_arguments(parser) parser.add_argument("-f", "--format", default="dot", choices=graph.FORMATTERS, help="The format to print the graph in.") parser.add_argument("--reduce", action="store_true", help="When provided, this will create a " "graph with less edges, by performing " "a transitive reduction on the underlying " "graph. While this will produce a less " "noisy graph, it is slower.") def run(self, options, **kwargs): super(Graph, self).run(options, **kwargs) action = graph.Action(options.context, provider_builder=options.provider_builder) action.execute( format=options.format, reduce=options.reduce) ================================================ FILE: stacker/commands/stacker/info.py ================================================ """Gets information on the CloudFormation stacks based on the given config.""" from .base import BaseCommand from ...actions import info class Info(BaseCommand): name = "info" description = __doc__ def add_arguments(self, parser): super(Info, self).add_arguments(parser) parser.add_argument("--stacks", action="append", metavar="STACKNAME", type=str, help="Only work on the stacks given. Can be " "specified more than once. 
If not specified " "then stacker will work on all stacks in the " "config file.") def run(self, options, **kwargs): super(Info, self).run(options, **kwargs) action = info.Action(options.context, provider_builder=options.provider_builder) action.execute() def get_context_kwargs(self, options, **kwargs): return {"stack_names": options.stacks} ================================================ FILE: stacker/config/__init__.py ================================================ from past.types import basestring import copy import sys import logging import re from string import Template from io import StringIO from schematics import Model from schematics.exceptions import ValidationError from schematics.exceptions import ( BaseError as SchematicsError, UndefinedValueError ) from schematics.types import ( ModelType, ListType, StringType, BooleanType, DictType, BaseType ) import yaml from ..lookups import register_lookup_handler from ..util import merge_map, yaml_to_ordered_dict, SourceProcessor from .. import exceptions from ..environment import DictWithSourceType # register translators (yaml constructors) from .translators import * # NOQA logger = logging.getLogger(__name__) def render_parse_load(raw_config, environment=None, validate=True): """Encapsulates the render -> parse -> validate -> load process. Args: raw_config (str): the raw stacker configuration string. environment (dict, optional): any environment values that should be passed to the config validate (bool): if provided, the config is validated before being loaded. Returns: :class:`Config`: the parsed stacker config. """ pre_rendered = render(raw_config, environment) rendered = process_remote_sources(pre_rendered, environment) config = parse(rendered) # For backwards compatibility, if the config doesn't specify a namespace, # we fall back to fetching it from the environment, if provided. 
if config.namespace is None: namespace = environment.get("namespace") if namespace: logger.warn("DEPRECATION WARNING: specifying namespace in the " "environment is deprecated. See " "https://stacker.readthedocs.io/en/latest/config.html" "#namespace " "for more info.") config.namespace = namespace if validate: config.validate() return load(config) def render(raw_config, environment=None): """Renders a config, using it as a template with the environment. Args: raw_config (str): the raw stacker configuration string. environment (DictWithSourceType, optional): any environment values that should be passed to the config Returns: str: the stacker configuration populated with any values passed from the environment """ if not environment: environment = {} # If we have a naked dict, we got here through the old non-YAML path, so # we can't have a YAML config file. is_yaml = False if type(environment) == DictWithSourceType: is_yaml = environment.source_type == 'yaml' if is_yaml: # First, read the config as yaml config = yaml.safe_load(raw_config) # Next, we need to walk the yaml structure, and find all things which # look like variable references. This regular expression is copied from # string.template to match variable references identically as the # simple configuration case below. We've got two cases of this pattern, # since python 2.7 doesn't support re.fullmatch(), so we have to add # the end of line anchor to the inner patterns. 
idpattern = r'[_a-z][_a-z0-9]*' pattern = r""" %(delim)s(?: (?P%(id)s) | # delimiter and a Python identifier {(?P%(id)s)} # delimiter and a braced identifier ) """ % {'delim': re.escape('$'), 'id': idpattern, } full_pattern = r""" %(delim)s(?: (?P%(id)s)$ | # delimiter and a Python identifier {(?P%(id)s)}$ # delimiter and a braced identifier ) """ % {'delim': re.escape('$'), 'id': idpattern, } exp = re.compile(pattern, re.IGNORECASE | re.VERBOSE) full_exp = re.compile(full_pattern, re.IGNORECASE | re.VERBOSE) new_config = substitute_references(config, environment, exp, full_exp) # Now, re-encode the whole thing as YAML and return that. return yaml.safe_dump(new_config) else: t = Template(raw_config) buff = StringIO() try: substituted = t.substitute(environment) except KeyError as e: raise exceptions.MissingEnvironment(e.args[0]) except ValueError: # Support "invalid" placeholders for lookup placeholders. substituted = t.safe_substitute(environment) if not isinstance(substituted, str): substituted = substituted.decode('utf-8') buff.write(substituted) buff.seek(0) return buff.read() def substitute_references(root, environment, exp, full_exp): # We need to check for something being a string in both python 2.7 and # 3+. The aliases in the future package don't work for yaml sourced # strings, so we have to spin our own. def isstr(s): try: return isinstance(s, basestring) except NameError: return isinstance(s, str) if isinstance(root, list): result = [] for x in root: result.append(substitute_references(x, environment, exp, full_exp)) return result elif isinstance(root, dict): result = {} for k, v in root.items(): result[k] = substitute_references(v, environment, exp, full_exp) return result elif isstr(root): # Strings are the special type where all substitutions happen. If we # encounter a string object in the expression tree, we need to perform # one of two different kinds of matches on it. 
        # First, if the entire
        # string is a variable, we can replace it with an arbitrary object;
        # dict, list, primitive. If the string contains variables within it,
        # then we have to do string substitution.
        match_obj = full_exp.match(root.strip())
        if match_obj:
            matches = match_obj.groupdict()
            var_name = matches['named'] or matches['braced']
            if var_name is not None:
                value = environment.get(var_name)
                if value is None:
                    raise exceptions.MissingEnvironment(var_name)
                # Whole-string match: the environment value is returned as-is
                # (may be a dict/list, not just a string).
                return value

        # Returns if an object is a basic type. Once again, the future package
        # overrides don't work for string here, so we have to special case it
        def is_basic_type(o):
            if isstr(o):
                return True
            basic_types = [int, bool, float]
            for t in basic_types:
                if isinstance(o, t):
                    return True
            return False

        # If we got here, then we didn't have any full matches, now perform
        # partial substitutions within a string.
        def replace(mo):
            name = mo.groupdict()['braced'] or mo.groupdict()['named']
            if not name:
                # Neither group matched; leave the matched text untouched.
                return root[mo.start():mo.end()]
            val = environment.get(name)
            if val is None:
                raise exceptions.MissingEnvironment(name)
            if not is_basic_type(val):
                # Embedded references can only expand to scalar-ish values;
                # a dict/list cannot be spliced into the middle of a string.
                raise exceptions.WrongEnvironmentType(name)
            return str(val)

        value = exp.sub(replace, root)
        return value

    # In all other unhandled cases, return a copy of the input
    return copy.copy(root)


def parse(raw_config):
    """Parse a raw yaml formatted stacker config.

    Args:
        raw_config (str): the raw stacker configuration string in yaml
            format.

    Returns:
        :class:`Config`: the parsed stacker config.

    """
    # Convert any applicable dictionaries back into lists
    # This is necessary due to the move from lists for these top level config
    # values to either lists or OrderedDicts.
    # Eventually we should probably just make them OrderedDicts only.
    config_dict = yaml_to_ordered_dict(raw_config)

    if config_dict:
        for top_level_key in ['stacks', 'pre_build', 'post_build',
                              'pre_destroy', 'post_destroy']:
            top_level_value = config_dict.get(top_level_key)
            if isinstance(top_level_value, dict):
                tmp_list = []
                for key, value in top_level_value.items():
                    tmp_dict = copy.deepcopy(value)
                    if top_level_key == 'stacks':
                        # In dict form the stack name is the mapping key;
                        # carry it into the list entry.
                        tmp_dict['name'] = key
                    tmp_list.append(tmp_dict)
                config_dict[top_level_key] = tmp_list

    # Top-level excess keys are removed by Config._convert, so enabling strict
    # mode is fine here.
    try:
        return Config(config_dict, strict=True)
    except SchematicsError as e:
        raise exceptions.InvalidConfig(e.errors)


def load(config):
    """Loads a stacker configuration by modifying sys paths, loading lookups,
    etc.

    Args:
        config (:class:`Config`): the stacker config to load.

    Returns:
        :class:`Config`: the stacker config provided above.

    """
    if config.sys_path:
        logger.debug("Appending %s to sys.path.", config.sys_path)
        sys.path.append(config.sys_path)
        logger.debug("sys.path is now %s", sys.path)
    if config.lookups:
        for key, handler in config.lookups.items():
            register_lookup_handler(key, handler)

    return config


def dump(config):
    """Dumps a stacker Config object as yaml.

    Args:
        config (:class:`Config`): the stacker Config object.

    Returns:
        str: the yaml formatted stacker Config.

    """
    # NOTE(review): safe_dump with encoding='utf-8' returns bytes, not str —
    # confirm the documented return type is what callers expect.
    return yaml.safe_dump(
        config.to_primitive(),
        default_flow_style=False,
        encoding='utf-8',
        allow_unicode=True)


def process_remote_sources(raw_config, environment=None):
    """Stage remote package sources and merge in remote configs.

    Args:
        raw_config (str): the raw stacker configuration string.
environment (dict, optional): any environment values that should be passed to the config Returns: str: the raw stacker configuration string """ config = yaml.safe_load(raw_config) if config and config.get('package_sources'): processor = SourceProcessor( sources=config['package_sources'], stacker_cache_dir=config.get('stacker_cache_dir') ) processor.get_package_sources() if processor.configs_to_merge: for i in processor.configs_to_merge: logger.debug("Merging in remote config \"%s\"", i) remote_config = yaml.safe_load(open(i)) config = merge_map(remote_config, config) # Call the render again as the package_sources may have merged in # additional environment lookups if not environment: environment = {} return render(str(config), environment) return raw_config def not_empty_list(value): if not value or len(value) < 1: raise ValidationError("Should have more than one element.") return value class AnyType(BaseType): pass class LocalPackageSource(Model): source = StringType(required=True) paths = ListType(StringType, serialize_when_none=False) configs = ListType(StringType, serialize_when_none=False) class GitPackageSource(Model): uri = StringType(required=True) tag = StringType(serialize_when_none=False) branch = StringType(serialize_when_none=False) commit = StringType(serialize_when_none=False) paths = ListType(StringType, serialize_when_none=False) configs = ListType(StringType, serialize_when_none=False) class S3PackageSource(Model): bucket = StringType(required=True) key = StringType(required=True) use_latest = BooleanType(serialize_when_none=False) requester_pays = BooleanType(serialize_when_none=False) paths = ListType(StringType, serialize_when_none=False) configs = ListType(StringType, serialize_when_none=False) class PackageSources(Model): local = ListType(ModelType(LocalPackageSource)) git = ListType(ModelType(GitPackageSource)) s3 = ListType(ModelType(S3PackageSource)) class Hook(Model): path = StringType(required=True) required = BooleanType(default=True) 
enabled = BooleanType(default=True) data_key = StringType(serialize_when_none=False) args = DictType(AnyType) class Target(Model): name = StringType(required=True) requires = ListType(StringType, serialize_when_none=False) required_by = ListType(StringType, serialize_when_none=False) class Stack(Model): name = StringType(required=True) stack_name = StringType(serialize_when_none=False) region = StringType(serialize_when_none=False) profile = StringType(serialize_when_none=False) class_path = StringType(serialize_when_none=False) template_path = StringType(serialize_when_none=False) description = StringType(serialize_when_none=False) requires = ListType(StringType, serialize_when_none=False) required_by = ListType(StringType, serialize_when_none=False) locked = BooleanType(default=False) enabled = BooleanType(default=True) protected = BooleanType(default=False) variables = DictType(AnyType, serialize_when_none=False) parameters = DictType(AnyType, serialize_when_none=False) tags = DictType(StringType, serialize_when_none=False) stack_policy_path = StringType(serialize_when_none=False) in_progress_behavior = StringType(serialize_when_none=False) notification_arns = ListType( StringType, serialize_when_none=False, default=[]) def validate_class_path(self, data, value): if value and data["template_path"]: raise ValidationError( "template_path cannot be present when " "class_path is provided.") self.validate_stack_source(data) def validate_template_path(self, data, value): if value and data["class_path"]: raise ValidationError( "class_path cannot be present when " "template_path is provided.") self.validate_stack_source(data) def validate_stack_source(self, data): # Locked stacks don't actually need a template, since they're # read-only. 
if data["locked"]: return if not (data["class_path"] or data["template_path"]): raise ValidationError( "class_path or template_path is required.") def validate_parameters(self, data, value): if value: stack_name = data['name'] raise ValidationError( "DEPRECATION: Stack definition %s contains " "deprecated 'parameters', rather than 'variables'. You are" " required to update your config. See https://stacker.rea" "dthedocs.io/en/latest/config.html#variables for " "additional information." % stack_name) return value class Config(Model): """This is the Python representation of a stacker config file. This is used internally by stacker to parse and validate a yaml formatted stacker configuration file, but can also be used in scripts to generate a stacker config file before handing it off to stacker to build/destroy. Example:: from stacker.config import dump, Config, Stack vpc = Stack({ "name": "vpc", "class_path": "blueprints.VPC"}) config = Config() config.namespace = "prod" config.stacks = [vpc] print dump(config) """ namespace = StringType(required=True) namespace_delimiter = StringType(serialize_when_none=False) stacker_bucket = StringType(serialize_when_none=False) stacker_bucket_region = StringType(serialize_when_none=False) stacker_cache_dir = StringType(serialize_when_none=False) sys_path = StringType(serialize_when_none=False) package_sources = ModelType(PackageSources, serialize_when_none=False) service_role = StringType(serialize_when_none=False) pre_build = ListType(ModelType(Hook), serialize_when_none=False) post_build = ListType(ModelType(Hook), serialize_when_none=False) pre_destroy = ListType(ModelType(Hook), serialize_when_none=False) post_destroy = ListType(ModelType(Hook), serialize_when_none=False) tags = DictType(StringType, serialize_when_none=False) template_indent = StringType(serialize_when_none=False) mappings = DictType( DictType(DictType(StringType)), serialize_when_none=False) lookups = DictType(StringType, serialize_when_none=False) targets = 
ListType( ModelType(Target), serialize_when_none=False) stacks = ListType( ModelType(Stack), default=[]) log_formats = DictType(StringType, serialize_when_none=False) def _remove_excess_keys(self, data): excess_keys = set(data.keys()) excess_keys -= self._schema.valid_input_keys if not excess_keys: return data logger.debug('Removing excess keys from config input: %s', excess_keys) clean_data = data.copy() for key in excess_keys: del clean_data[key] return clean_data def _convert(self, raw_data=None, context=None, **kwargs): if raw_data is not None: # Remove excess top-level keys, since we want to allow them to be # used for custom user variables to be reference later. This is # preferable to just disabling strict mode, as we can still # disallow excess keys in the inner models. raw_data = self._remove_excess_keys(raw_data) return super(Config, self)._convert(raw_data=raw_data, context=context, **kwargs) def validate(self, *args, **kwargs): try: return super(Config, self).validate(*args, **kwargs) except UndefinedValueError as e: raise exceptions.InvalidConfig([e.message]) except SchematicsError as e: raise exceptions.InvalidConfig(e.errors) def validate_stacks(self, data, value): if value: stack_names = [stack.name for stack in value] if len(set(stack_names)) != len(stack_names): # only loop / enumerate if there is an issue. for i, stack_name in enumerate(stack_names): if stack_names.count(stack_name) != 1: raise ValidationError( "Duplicate stack %s found at index %d." 
% (stack_name, i)) ================================================ FILE: stacker/config/translators/__init__.py ================================================ import yaml from .kms import kms_simple_constructor yaml.add_constructor('!kms', kms_simple_constructor) ================================================ FILE: stacker/config/translators/kms.py ================================================ # NOTE: The translator is going to be deprecated in favor of the lookup from ...lookups.handlers.kms import KmsLookup def kms_simple_constructor(loader, node): value = loader.construct_scalar(node) return KmsLookup.handler(value) ================================================ FILE: stacker/context.py ================================================ import collections.abc import logging from stacker.config import Config from .stack import Stack from .target import Target logger = logging.getLogger(__name__) DEFAULT_NAMESPACE_DELIMITER = "-" DEFAULT_TEMPLATE_INDENT = 4 def get_fqn(base_fqn, delimiter, name=None): """Return the fully qualified name of an object within this context. If the name passed already appears to be a fully qualified name, it will be returned with no further processing. """ if name and name.startswith("%s%s" % (base_fqn, delimiter)): return name return delimiter.join([_f for _f in [base_fqn, name] if _f]) class Context(object): """The context under which the current stacks are being executed. The stacker Context is responsible for translating the values passed in via the command line and specified in the config to `Stack` objects. Args: environment (dict): A dictionary used to pass in information about the environment. Useful for templating. stack_names (list): A list of stack_names to operate on. If not passed, usually all stacks defined in the config will be operated on. config (:class:`stacker.config.Config`): The stacker configuration being operated on. force_stacks (list): A list of stacks to force work on. Used to work on locked stacks. 
""" def __init__(self, environment=None, stack_names=None, config=None, force_stacks=None): self.environment = environment self.stack_names = stack_names or [] self.config = config or Config() self.force_stacks = force_stacks or [] self.hook_data = {} @property def namespace(self): return self.config.namespace @property def namespace_delimiter(self): delimiter = self.config.namespace_delimiter if delimiter is not None: return delimiter return DEFAULT_NAMESPACE_DELIMITER @property def template_indent(self): indent = self.config.template_indent if indent is not None: return int(indent) return DEFAULT_TEMPLATE_INDENT @property def bucket_name(self): if not self.upload_templates_to_s3: return None return self.config.stacker_bucket \ or "stacker-%s" % (self.get_fqn(),) @property def upload_templates_to_s3(self): # Don't upload stack templates to S3 if `stacker_bucket` is explicitly # set to an empty string. if self.config.stacker_bucket == '': logger.debug("Not uploading templates to s3 because " "`stacker_bucket` is explicity set to an " "empty string") return False # If no namespace is specificied, and there's no explicit stacker # bucket specified, don't upload to s3. This makes sense because we # can't realistically auto generate a stacker bucket name in this case. if not self.namespace and not self.config.stacker_bucket: logger.debug("Not uploading templates to s3 because " "there is no namespace set, and no " "stacker_bucket set") return False return True @property def tags(self): tags = self.config.tags if tags is not None: return tags if self.namespace: return {"stacker_namespace": self.namespace} return {} @property def _base_fqn(self): return self.namespace.replace(".", "-").lower() @property def mappings(self): return self.config.mappings or {} def _get_stack_definitions(self): return self.config.stacks def get_targets(self): """Returns the named targets that are specified in the config. 
Returns: list: a list of :class:`stacker.target.Target` objects """ if not hasattr(self, "_targets"): targets = [] for target_def in self.config.targets or []: target = Target(target_def) targets.append(target) self._targets = targets return self._targets def get_stacks(self): """Get the stacks for the current action. Handles configuring the :class:`stacker.stack.Stack` objects that will be used in the current action. Returns: list: a list of :class:`stacker.stack.Stack` objects """ if not hasattr(self, "_stacks"): stacks = [] definitions = self._get_stack_definitions() for stack_def in definitions: stack = Stack( definition=stack_def, context=self, mappings=self.mappings, force=stack_def.name in self.force_stacks, locked=stack_def.locked, enabled=stack_def.enabled, protected=stack_def.protected, notification_arns=stack_def.notification_arns ) stacks.append(stack) self._stacks = stacks return self._stacks def get_stack(self, name): for stack in self.get_stacks(): if stack.name == name: return stack def get_stacks_dict(self): return dict((stack.fqn, stack) for stack in self.get_stacks()) def get_fqn(self, name=None): """Return the fully qualified name of an object within this context. If the name passed already appears to be a fully qualified name, it will be returned with no further processing. """ return get_fqn(self._base_fqn, self.namespace_delimiter, name) def set_hook_data(self, key, data): """Set hook data for the given key. Args: key(str): The key to store the hook data in. data(:class:`collections.Mapping`): A dictionary of data to store, as returned from a hook. """ if not isinstance(data, collections.abc.Mapping): raise ValueError("Hook (key: %s) data must be an instance of " "collections.Mapping (a dictionary for " "example)." 
% key) if key in self.hook_data: raise KeyError("Hook data for key %s already exists, each hook " "must have a unique data_key.", key) self.hook_data[key] = data ================================================ FILE: stacker/dag/__init__.py ================================================ import logging from threading import Thread from copy import copy, deepcopy import collections.abc from collections import deque, OrderedDict logger = logging.getLogger(__name__) class DAGValidationError(Exception): pass class DAG(object): """ Directed acyclic graph implementation. """ def __init__(self): """ Construct a new DAG with no nodes or edges. """ self.reset_graph() def add_node(self, node_name): """ Add a node if it does not exist yet, or error out. Args: node_name (str): The unique name of the node to add. Raises: KeyError: Raised if a node with the same name already exist in the graph """ graph = self.graph if node_name in graph: raise KeyError('node %s already exists' % node_name) graph[node_name] = set() def add_node_if_not_exists(self, node_name): """ Add a node if it does not exist yet, ignoring duplicates. Args: node_name (str): The name of the node to add. """ try: self.add_node(node_name) except KeyError: pass def delete_node(self, node_name): """ Deletes this node and all edges referencing it. Args: node_name (str): The name of the node to delete. Raises: KeyError: Raised if the node does not exist in the graph. """ graph = self.graph if node_name not in graph: raise KeyError('node %s does not exist' % node_name) graph.pop(node_name) for node, edges in graph.items(): if node_name in edges: edges.remove(node_name) def delete_node_if_exists(self, node_name): """ Deletes this node and all edges referencing it. Ignores any node that is not in the graph, rather than throwing an exception. Args: node_name (str): The name of the node to delete. 
""" try: self.delete_node(node_name) except KeyError: pass def add_edge(self, ind_node, dep_node): """ Add an edge (dependency) between the specified nodes. Args: ind_node (str): The independent node to add an edge to. dep_node (str): The dependent node that has a dependency on the ind_node. Raises: KeyError: Either the ind_node, or dep_node do not exist. DAGValidationError: Raised if the resulting graph is invalid. """ graph = self.graph if ind_node not in graph: raise KeyError('independent node %s does not exist' % ind_node) if dep_node not in graph: raise KeyError('dependent node %s does not exist' % dep_node) test_graph = deepcopy(graph) test_graph[ind_node].add(dep_node) test_dag = DAG() test_dag.graph = test_graph is_valid, message = test_dag.validate() if is_valid: graph[ind_node].add(dep_node) else: raise DAGValidationError(message) def delete_edge(self, ind_node, dep_node): """ Delete an edge from the graph. Args: ind_node (str): The independent node to delete an edge from. dep_node (str): The dependent node that has a dependency on the ind_node. Raises: KeyError: Raised when the edge doesn't already exist. """ graph = self.graph if dep_node not in graph.get(ind_node, []): raise KeyError( "No edge exists between %s and %s." % (ind_node, dep_node) ) graph[ind_node].remove(dep_node) def transpose(self): """ Builds a new graph with the edges reversed. Returns: :class:`stacker.dag.DAG`: The transposed graph. """ graph = self.graph transposed = DAG() for node, edges in graph.items(): transposed.add_node(node) for node, edges in graph.items(): # for each edge A -> B, transpose it so that B -> A for edge in edges: transposed.add_edge(edge, node) return transposed def walk(self, walk_func): """ Walks each node of the graph in reverse topological order. This can be used to perform a set of operations, where the next operation depends on the previous operation. It's important to note that walking happens serially, and is not paralellized. 
Args: walk_func (:class:`types.FunctionType`): The function to be called on each node of the graph. """ nodes = self.topological_sort() # Reverse so we start with nodes that have no dependencies. nodes.reverse() for n in nodes: walk_func(n) def transitive_reduction(self): """ Performs a transitive reduction on the DAG. The transitive reduction of a graph is a graph with as few edges as possible with the same reachability as the original graph. See https://en.wikipedia.org/wiki/Transitive_reduction """ combinations = [] for node, edges in self.graph.items(): combinations += [[node, edge] for edge in edges] while True: new_combinations = [] for comb1 in combinations: for comb2 in combinations: if not comb1[-1] == comb2[0]: continue new_entry = comb1 + comb2[1:] if new_entry not in combinations: new_combinations.append(new_entry) if not new_combinations: break combinations += new_combinations constructed = {(c[0], c[-1]) for c in combinations if len(c) != 2} for node, edges in self.graph.items(): bad_nodes = {e for n, e in constructed if node == n} self.graph[node] = edges - bad_nodes def rename_edges(self, old_node_name, new_node_name): """ Change references to a node in existing edges. Args: old_node_name (str): The old name for the node. new_node_name (str): The new name for the node. """ graph = self.graph for node, edges in graph.items(): if node == old_node_name: graph[new_node_name] = copy(edges) del graph[old_node_name] else: if old_node_name in edges: edges.remove(old_node_name) edges.add(new_node_name) def predecessors(self, node): """ Returns a list of all immediate predecessors of the given node Args: node (str): The node whose predecessors you want to find. Returns: list: A list of nodes that are immediate predecessors to node. """ graph = self.graph return [key for key in graph if node in graph[key]] def downstream(self, node): """ Returns a list of all nodes this node has edges towards. Args: node (str): The node whose downstream nodes you want to find. 
Returns: list: A list of nodes that are immediately downstream from the node. """ graph = self.graph if node not in graph: raise KeyError('node %s is not in graph' % node) return list(graph[node]) def all_downstreams(self, node): """Returns a list of all nodes ultimately downstream of the given node in the dependency graph, in topological order. Args: node (str): The node whose downstream nodes you want to find. Returns: list: A list of nodes that are downstream from the node. """ nodes = [node] nodes_seen = set() i = 0 while i < len(nodes): downstreams = self.downstream(nodes[i]) for downstream_node in downstreams: if downstream_node not in nodes_seen: nodes_seen.add(downstream_node) nodes.append(downstream_node) i += 1 return [ node_ for node_ in self.topological_sort() if node_ in nodes_seen ] def filter(self, nodes): """ Returns a new DAG with only the given nodes and their dependencies. Args: nodes (list): The nodes you are interested in. Returns: :class:`stacker.dag.DAG`: The filtered graph. """ filtered_dag = DAG() # Add only the nodes we need. for node in nodes: filtered_dag.add_node_if_not_exists(node) for edge in self.all_downstreams(node): filtered_dag.add_node_if_not_exists(edge) # Now, rebuild the graph for each node that's present. for node, edges in self.graph.items(): if node in filtered_dag.graph: filtered_dag.graph[node] = edges return filtered_dag def all_leaves(self): """ Return a list of all leaves (nodes with no downstreams) Returns: list: A list of all the nodes with no downstreams. """ graph = self.graph return [key for key in graph if not graph[key]] def from_dict(self, graph_dict): """ Reset the graph and build it from the passed dictionary. The dictionary takes the form of {node_name: [directed edges]} Args: graph_dict (dict): The dictionary used to create the graph. Raises: TypeError: Raised if the value of items in the dict are not lists. 
""" self.reset_graph() for new_node in graph_dict: self.add_node(new_node) for ind_node, dep_nodes in graph_dict.items(): if not isinstance(dep_nodes, collections.abc.Iterable): raise TypeError('%s: dict values must be lists' % ind_node) for dep_node in dep_nodes: self.add_edge(ind_node, dep_node) def reset_graph(self): """ Restore the graph to an empty state. """ self.graph = OrderedDict() def ind_nodes(self): """ Returns a list of all nodes in the graph with no dependencies. Returns: list: A list of all independent nodes. """ graph = self.graph dependent_nodes = set( node for dependents in graph.values() for node in dependents) return [node_ for node_ in graph if node_ not in dependent_nodes] def validate(self): """ Returns (Boolean, message) of whether DAG is valid. """ if len(self.ind_nodes()) == 0: return (False, 'no independent nodes detected') try: self.topological_sort() except ValueError as e: return (False, str(e)) return (True, 'valid') def topological_sort(self): """ Returns a topological ordering of the DAG. Returns: list: A list of topologically sorted nodes in the graph. Raises: ValueError: Raised if the graph is not acyclic. """ graph = self.graph in_degree = {} for u in graph: in_degree[u] = 0 for u in graph: for v in graph[u]: in_degree[v] += 1 queue = deque() for u in in_degree: if in_degree[u] == 0: queue.appendleft(u) sorted_graph = [] while queue: u = queue.pop() sorted_graph.append(u) for v in sorted(graph[u]): in_degree[v] -= 1 if in_degree[v] == 0: queue.appendleft(v) if len(sorted_graph) == len(graph): return sorted_graph else: raise ValueError('graph is not acyclic') def size(self): return len(self) def __len__(self): return len(self.graph) def walk(dag, walk_func): return dag.walk(walk_func) class UnlimitedSemaphore(object): """UnlimitedSemaphore implements the same interface as threading.Semaphore, but acquire's always succeed. 
""" def acquire(self, *args): pass def release(self): pass class ThreadedWalker(object): """A DAG walker that walks the graph as quickly as the graph topology allows, using threads. Args: semaphore (threading.Semaphore): a semaphore object which can be used to control how many steps are executed in parallel. """ def __init__(self, semaphore): self.semaphore = semaphore def walk(self, dag, walk_func): """ Walks each node of the graph, in parallel if it can. The walk_func is only called when the nodes dependencies have been satisfied """ # First, we'll topologically sort all of the nodes, with nodes that # have no dependencies first. We do this to ensure that we don't call # .join on a thread that hasn't yet been started. # # TODO(ejholmes): An alternative would be to ensure that Thread.join # blocks if the thread has not yet been started. nodes = dag.topological_sort() nodes.reverse() # This maps a node name to a thread of execution. threads = {} # Blocks until all of the given nodes have completed execution (whether # successfully, or errored). Returns True if all nodes returned True. def wait_for(nodes): for node in nodes: thread = threads[node] while thread.is_alive(): threads[node].join(0.5) # For each node in the graph, we're going to allocate a thread to # execute. The thread will block executing walk_func, until all of the # nodes dependencies have executed. for node in nodes: def fn(n, deps): if deps: logger.debug( "%s waiting for %s to complete", n, ", ".join(deps)) # Wait for all dependencies to complete. wait_for(deps) logger.debug("%s starting", n) self.semaphore.acquire() try: return walk_func(n) finally: self.semaphore.release() deps = dag.all_downstreams(node) threads[node] = Thread(target=fn, args=(node, deps), name=node) # Start up all of the threads. for node in nodes: threads[node].start() # Wait for all threads to complete executing. 
wait_for(nodes) ================================================ FILE: stacker/environment.py ================================================ import yaml class DictWithSourceType(dict): """An environment dict which keeps track of its source. Environment files may be loaded from simple key/value files, or from structured YAML files, and we need to render them using a different strategy based on their source. This class adds a source_type property to a dict which keeps track of whether the source for the dict is yaml or simple. """ def __init__(self, source_type, *args): dict.__init__(self, args) if source_type not in ['yaml', 'simple']: raise ValueError('source_type must be yaml or simple') self.source_type = source_type def parse_environment(raw_environment): environment = DictWithSourceType('simple') for line in raw_environment.split('\n'): line = line.strip() if not line: continue if line.startswith('#'): continue try: key, value = line.split(':', 1) except ValueError: raise ValueError('Environment must be in key: value format') environment[key] = value.strip() return environment def parse_yaml_environment(raw_environment): environment = DictWithSourceType('yaml') parsed_env = yaml.safe_load(raw_environment) if type(parsed_env) != dict: raise ValueError('Environment must be valid YAML') environment.update(parsed_env) return environment ================================================ FILE: stacker/exceptions.py ================================================ class InvalidConfig(Exception): def __init__(self, errors): super(InvalidConfig, self).__init__(errors) self.errors = errors class InvalidLookupCombination(Exception): def __init__(self, lookup, lookups, value, *args, **kwargs): message = ( "Lookup: \"{}\" has non-string return value, must be only lookup " "present (not {}) in \"{}\"" ).format(str(lookup), len(lookups), value) super(InvalidLookupCombination, self).__init__(message, *args, **kwargs) class InvalidLookupConcatenation(Exception): """ 
Intermediary Exception to be converted to InvalidLookupCombination once it bubbles up there """ def __init__(self, lookup, lookups, *args, **kwargs): self.lookup = lookup self.lookups = lookups super(InvalidLookupConcatenation, self).__init__("", *args, **kwargs) class UnknownLookupType(Exception): def __init__(self, lookup_type, *args, **kwargs): message = "Unknown lookup type: \"{}\"".format(lookup_type) super(UnknownLookupType, self).__init__(message, *args, **kwargs) class FailedVariableLookup(Exception): def __init__(self, variable_name, lookup, error, *args, **kwargs): self.lookup = lookup self.error = error message = "Couldn't resolve lookup in variable `%s`, " % variable_name message += "lookup: ${%s}: " % repr(lookup) message += "(%s) %s" % (error.__class__, error) super(FailedVariableLookup, self).__init__(message, *args, **kwargs) class FailedLookup(Exception): """ Intermediary Exception to be converted to FailedVariableLookup once it bubbles up there """ def __init__(self, lookup, error, *args, **kwargs): self.lookup = lookup self.error = error super(FailedLookup, self).__init__("Failed lookup", *args, **kwargs) class InvalidUserdataPlaceholder(Exception): def __init__(self, blueprint_name, exception_message, *args, **kwargs): message = exception_message + ". " message += "Could not parse userdata in blueprint \"%s\". " % ( blueprint_name) message += "Make sure to escape all $ symbols with a $$." 
super(InvalidUserdataPlaceholder, self).__init__( message, *args, **kwargs) class UnresolvedVariables(Exception): def __init__(self, blueprint_name, *args, **kwargs): message = "Blueprint: \"%s\" hasn't resolved it's variables" % ( blueprint_name) super(UnresolvedVariables, self).__init__(message, *args, **kwargs) class UnresolvedVariable(Exception): def __init__(self, blueprint_name, variable, *args, **kwargs): message = ( "Variable \"%s\" in blueprint \"%s\" hasn't been resolved" % ( variable.name, blueprint_name ) ) super(UnresolvedVariable, self).__init__(message, *args, **kwargs) class UnresolvedVariableValue(Exception): """ Intermediary Exception to be converted to UnresolvedVariable once it bubbles up there """ def __init__(self, lookup, *args, **kwargs): self.lookup = lookup super(UnresolvedVariableValue, self).__init__( "Unresolved lookup", *args, **kwargs) class MissingVariable(Exception): def __init__(self, blueprint_name, variable_name, *args, **kwargs): message = "Variable \"%s\" in blueprint \"%s\" is missing" % ( variable_name, blueprint_name) super(MissingVariable, self).__init__(message, *args, **kwargs) class VariableTypeRequired(Exception): def __init__(self, blueprint_name, variable_name, *args, **kwargs): message = ( "Variable \"%s\" in blueprint \"%s\" does not have a type" % ( variable_name, blueprint_name) ) super(VariableTypeRequired, self).__init__(message, *args, **kwargs) class StackDoesNotExist(Exception): def __init__(self, stack_name, *args, **kwargs): message = ("Stack: \"%s\" does not exist in outputs or the lookup is " "not available in this stacker run") % (stack_name,) super(StackDoesNotExist, self).__init__(message, *args, **kwargs) class MissingParameterException(Exception): def __init__(self, parameters, *args, **kwargs): self.parameters = parameters message = "Missing required cloudformation parameters: %s" % ( ", ".join(parameters), ) super(MissingParameterException, self).__init__(message, *args, **kwargs) class 
OutputDoesNotExist(Exception): def __init__(self, stack_name, output, *args, **kwargs): self.stack_name = stack_name self.output = output message = "Output %s does not exist on stack %s" % (output, stack_name) super(OutputDoesNotExist, self).__init__(message, *args, **kwargs) class MissingEnvironment(Exception): def __init__(self, key, *args, **kwargs): self.key = key message = "Environment missing key %s." % (key,) super(MissingEnvironment, self).__init__(message, *args, **kwargs) class WrongEnvironmentType(Exception): def __init__(self, key, *args, **kwargs): self.key = key message = "Environment key %s can't be merged into a string" % (key,) super(WrongEnvironmentType, self).__init__(message, *args, **kwargs) class ImproperlyConfigured(Exception): def __init__(self, cls, error, *args, **kwargs): message = "Class \"%s\" is improperly configured: %s" % ( cls, error, ) super(ImproperlyConfigured, self).__init__(message, *args, **kwargs) class StackDidNotChange(Exception): """Exception raised when there are no changes to be made by the provider. """ class CancelExecution(Exception): """Exception raised when we want to cancel executing the plan.""" class ValidatorError(Exception): """Used for errors raised by custom validators of blueprint variables. """ def __init__(self, variable, validator, value, exception=None): self.variable = variable self.validator = validator self.value = value self.exception = exception self.message = ("Validator '%s' failed for variable '%s' with value " "'%s'") % (self.validator, self.variable, self.value) if self.exception: self.message += ": %s: %s" % (self.exception.__class__.__name__, str(self.exception)) def __str__(self): return self.message class ChangesetDidNotStabilize(Exception): def __init__(self, change_set_id): self.id = change_set_id message = "Changeset '%s' did not reach a completed state." 
% ( change_set_id ) super(ChangesetDidNotStabilize, self).__init__(message) class UnhandledChangeSetStatus(Exception): def __init__(self, stack_name, change_set_id, status, status_reason): self.stack_name = stack_name self.id = change_set_id self.status = status self.status_reason = status_reason message = ( "Changeset '%s' on stack '%s' returned an unhandled status " "'%s: %s'." % (change_set_id, stack_name, status, status_reason) ) super(UnhandledChangeSetStatus, self).__init__(message) class UnableToExecuteChangeSet(Exception): def __init__(self, stack_name, change_set_id, execution_status): self.stack_name = stack_name self.id = change_set_id self.execution_status = execution_status message = ("Changeset '%s' on stack '%s' had bad execution status: " "%s" % (change_set_id, stack_name, execution_status)) super(UnableToExecuteChangeSet, self).__init__(message) class StackUpdateBadStatus(Exception): def __init__(self, stack_name, stack_status, reason, *args, **kwargs): self.stack_name = stack_name self.stack_status = stack_status message = ("Stack: \"%s\" cannot be updated nor re-created from state " "%s: %s" % (stack_name, stack_status, reason)) super(StackUpdateBadStatus, self).__init__(message, *args, **kwargs) class PlanFailed(Exception): def __init__(self, failed_steps, *args, **kwargs): self.failed_steps = failed_steps step_names = ', '.join(step.name for step in failed_steps) message = "The following steps failed: %s" % (step_names,) super(PlanFailed, self).__init__(message, *args, **kwargs) class GraphError(Exception): """Raised when the graph is invalid (e.g. 
acyclic dependencies) """ def __init__(self, exception, stack, dependency): self.stack = stack self.dependency = dependency self.exception = exception message = ( "Error detected when adding '%s' " "as a dependency of '%s': %s" ) % (dependency, stack, str(exception)) super(GraphError, self).__init__(message) ================================================ FILE: stacker/hooks/__init__.py ================================================ ================================================ FILE: stacker/hooks/aws_lambda.py ================================================ from past.builtins import basestring import os import os.path import stat import logging import hashlib from io import BytesIO as StringIO from zipfile import ZipFile, ZIP_DEFLATED import botocore import formic from troposphere.awslambda import Code from stacker.session_cache import get_session from stacker.util import ( get_config_directory, ensure_s3_bucket, ) """Mask to retrieve only UNIX file permissions from the external attributes field of a ZIP entry. """ ZIP_PERMS_MASK = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) << 16 logger = logging.getLogger(__name__) def _zip_files(files, root): """Generates a ZIP file in-memory from a list of files. Files will be stored in the archive with relative names, and have their UNIX permissions forced to 755 or 644 (depending on whether they are user-executable in the source filesystem). Args: files (list[str]): file names to add to the archive, relative to ``root``. root (str): base directory to retrieve files from. Returns: str: content of the ZIP file as a byte string. str: A calculated hash of all the files. """ zip_data = StringIO() with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file: for fname in files: zip_file.write(os.path.join(root, fname), fname) # Fix file permissions to avoid any issues - only care whether a file # is executable or not, choosing between modes 755 and 644 accordingly. 
for zip_entry in zip_file.filelist: perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16 if perms & stat.S_IXUSR != 0: new_perms = 0o755 else: new_perms = 0o644 if new_perms != perms: logger.debug("lambda: fixing perms: %s: %o => %o", zip_entry.filename, perms, new_perms) new_attr = ((zip_entry.external_attr & ~ZIP_PERMS_MASK) | (new_perms << 16)) zip_entry.external_attr = new_attr contents = zip_data.getvalue() zip_data.close() content_hash = _calculate_hash(files, root) return contents, content_hash def _calculate_hash(files, root): """ Returns a hash of all of the given files at the given root. Args: files (list[str]): file names to include in the hash calculation, relative to ``root``. root (str): base directory to analyze files in. Returns: str: A hash of the hashes of the given files. """ file_hash = hashlib.md5() for fname in sorted(files): f = os.path.join(root, fname) file_hash.update((fname + "\0").encode()) with open(f, "rb") as fd: for chunk in iter(lambda: fd.read(4096), ""): if not chunk: break file_hash.update(chunk) file_hash.update("\0".encode()) return file_hash.hexdigest() def _calculate_prebuilt_hash(f): file_hash = hashlib.md5() while True: chunk = f.read(4096) if not chunk: break file_hash.update(chunk) return file_hash.hexdigest() def _find_files(root, includes, excludes, follow_symlinks): """List files inside a directory based on include and exclude rules. This is a more advanced version of `glob.glob`, that accepts multiple complex patterns. Args: root (str): base directory to list files from. includes (list[str]): inclusion patterns. Only files matching those patterns will be included in the result. excludes (list[str]): exclusion patterns. Files matching those patterns will be excluded from the result. Exclusions take precedence over inclusions. follow_symlinks (bool): If true, symlinks will be included in the resulting zip file Yields: str: a file name relative to the root. 
Note: Documentation for the patterns can be found at http://www.aviser.asia/formic/doc/index.html """ root = os.path.abspath(root) file_set = formic.FileSet( directory=root, include=includes, exclude=excludes, symlinks=follow_symlinks, ) for filename in file_set.qualified_files(absolute=False): yield filename def _zip_from_file_patterns(root, includes, excludes, follow_symlinks): """Generates a ZIP file in-memory from file search patterns. Args: root (str): base directory to list files from. includes (list[str]): inclusion patterns. Only files matching those patterns will be included in the result. excludes (list[str]): exclusion patterns. Files matching those patterns will be excluded from the result. Exclusions take precedence over inclusions. follow_symlinks (bool): If true, symlinks will be included in the resulting zip file See Also: :func:`_zip_files`, :func:`_find_files`. Raises: RuntimeError: when the generated archive would be empty. """ logger.info('lambda: base directory: %s', root) files = list(_find_files(root, includes, excludes, follow_symlinks)) if not files: raise RuntimeError('Empty list of files for Lambda payload. Check ' 'your include/exclude options for errors.') logger.info('lambda: adding %d files:', len(files)) for fname in files: logger.debug('lambda: + %s', fname) return _zip_files(files, root) def _head_object(s3_conn, bucket, key): """Retrieve information about an object in S3 if it exists. Args: s3_conn (botocore.client.S3): S3 connection to use for operations. bucket (str): name of the bucket containing the key. key (str): name of the key to lookup. Returns: dict: S3 object information, or None if the object does not exist. See the AWS documentation for explanation of the contents. Raises: botocore.exceptions.ClientError: any error from boto3 other than key not found is passed through. 
""" try: return s3_conn.head_object(Bucket=bucket, Key=key) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == '404': return None else: raise def _upload_code(s3_conn, bucket, prefix, name, contents, content_hash, payload_acl): """Upload a ZIP file to S3 for use by Lambda. The key used for the upload will be unique based on the checksum of the contents. No changes will be made if the contents in S3 already match the expected contents. Args: s3_conn (botocore.client.S3): S3 connection to use for operations. bucket (str): name of the bucket to create. prefix (str): S3 prefix to prepend to the constructed key name for the uploaded file name (str): desired name of the Lambda function. Will be used to construct a key name for the uploaded file. contents (str): byte string with the content of the file upload. content_hash (str): md5 hash of the contents to be uploaded. payload_acl (str): The canned S3 object ACL to be applied to the uploaded payload Returns: troposphere.awslambda.Code: CloudFormation Lambda Code object, pointing to the uploaded payload in S3. Raises: botocore.exceptions.ClientError: any error from boto3 is passed through. """ logger.debug('lambda: ZIP hash: %s', content_hash) key = '{}lambda-{}-{}.zip'.format(prefix, name, content_hash) if _head_object(s3_conn, bucket, key): logger.info('lambda: object %s already exists, not uploading', key) else: logger.info('lambda: uploading object %s', key) s3_conn.put_object(Bucket=bucket, Key=key, Body=contents, ContentType='application/zip', ACL=payload_acl) return Code(S3Bucket=bucket, S3Key=key) def _check_pattern_list(patterns, key, default=None): """Validates file search patterns from user configuration. Acceptable input is a string (which will be converted to a singleton list), a list of strings, or anything falsy (such as None or an empty dictionary). Empty or unset input will be converted to a default. Args: patterns: input from user configuration (YAML). 
key (str): name of the configuration key the input came from, used for error display purposes. Keyword Args: default: value to return in case the input is empty or unset. Returns: list[str]: validated list of patterns Raises: ValueError: if the input is unacceptable. """ if not patterns: return default if isinstance(patterns, basestring): return [patterns] if isinstance(patterns, list): if all(isinstance(p, basestring) for p in patterns): return patterns raise ValueError("Invalid file patterns in key '{}': must be a string or " 'list of strings'.format(key)) def _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options, path, payload_acl): logging.debug('lambda: using prebuilt ZIP %s', path) with open(path, 'rb') as zip_file: # Default to the MD5 of the ZIP if no explicit version is provided version = options.get('version') if not version: version = _calculate_prebuilt_hash(zip_file) zip_file.seek(0) return _upload_code(s3_conn, bucket, prefix, name, zip_file, version, payload_acl) def _build_and_upload_zip(s3_conn, bucket, prefix, name, options, path, follow_symlinks, payload_acl): includes = _check_pattern_list(options.get('include'), 'include', default=['**']) excludes = _check_pattern_list(options.get('exclude'), 'exclude', default=[]) # os.path.join will ignore other parameters if the right-most one is an # absolute path, which is exactly what we want. zip_contents, zip_version = _zip_from_file_patterns( path, includes, excludes, follow_symlinks) version = options.get('version') or zip_version return _upload_code(s3_conn, bucket, prefix, name, zip_contents, version, payload_acl) def _upload_function(s3_conn, bucket, prefix, name, options, follow_symlinks, payload_acl): """Builds a Lambda payload from user configuration and uploads it to S3. Args: s3_conn (botocore.client.S3): S3 connection to use for operations. bucket (str): name of the bucket to upload to. 
prefix (str): S3 prefix to prepend to the constructed key name for the uploaded file name (str): desired name of the Lambda function. Will be used to construct a key name for the uploaded file. options (dict): configuration for how to build the payload. Consists of the following keys: * path: base path to retrieve files from (mandatory). If not absolute, it will be interpreted as relative to the stacker configuration file directory, then converted to an absolute path. See :func:`stacker.util.get_config_directory`. * include: file patterns to include in the payload (optional). * exclude: file patterns to exclude from the payload (optional). follow_symlinks (bool): If true, symlinks will be included in the resulting zip file payload_acl (str): The canned S3 object ACL to be applied to the uploaded payload Returns: troposphere.awslambda.Code: CloudFormation AWS Lambda Code object, pointing to the uploaded object in S3. Raises: ValueError: if any configuration is invalid. botocore.exceptions.ClientError: any error from boto3 is passed through. """ try: path = os.path.expanduser(options['path']) except KeyError as e: raise ValueError( "missing required property '{}' in function '{}'".format( e.args[0], name)) if not os.path.isabs(path): path = os.path.abspath(os.path.join(get_config_directory(), path)) if path.endswith('.zip') and os.path.isfile(path): logging.debug('lambda: using prebuilt zip: %s', path) return _upload_prebuilt_zip(s3_conn, bucket, prefix, name, options, path, payload_acl) elif os.path.isdir(path): logging.debug('lambda: building from directory: %s', path) return _build_and_upload_zip(s3_conn, bucket, prefix, name, options, path, follow_symlinks, payload_acl) else: raise ValueError('Path must be an existing ZIP file or directory') def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region, provider_region): """Returns the appropriate region to use when uploading functions. 
Select the appropriate region for the bucket where lambdas are uploaded in. Args: custom_bucket (str, None): The custom bucket name provided by the `bucket` kwarg of the aws_lambda hook, if provided. hook_region (str): The contents of the `bucket_region` argument to the hook. stacker_bucket_region (str): The contents of the `stacker_bucket_region` global setting. provider_region (str): The region being used by the provider. Returns: str: The appropriate region string. """ region = None if custom_bucket: region = hook_region else: region = stacker_bucket_region return region or provider_region def upload_lambda_functions(context, provider, **kwargs): """Builds Lambda payloads from user configuration and uploads them to S3. Constructs ZIP archives containing files matching specified patterns for each function, uploads the result to Amazon S3, then stores objects (of type :class:`troposphere.awslambda.Code`) in the context's hook data, ready to be referenced in blueprints. Configuration consists of some global options, and a dictionary of function specifications. In the specifications, each key indicating the name of the function (used for generating names for artifacts), and the value determines what files to include in the ZIP (see more details below). Payloads are uploaded to either a custom bucket or stackers default bucket, with the key containing it's checksum, to allow repeated uploads to be skipped in subsequent runs. The configuration settings are documented as keyword arguments below. Keyword Arguments: bucket (str, optional): Custom bucket to upload functions to. Omitting it will cause the default stacker bucket to be used. bucket_region (str, optional): The region in which the bucket should exist. If not given, the region will be either be that of the global `stacker_bucket_region` setting, or else the region in use by the provider. prefix (str, optional): S3 key prefix to prepend to the uploaded zip name. 
follow_symlinks (bool, optional): Will determine if symlinks should be followed and included with the zip artifact. Default: False payload_acl (str, optional): The canned S3 object ACL to be applied to the uploaded payload. Default: private functions (dict): Configurations of desired payloads to build. Keys correspond to function names, used to derive key names for the payload. Each value should itself be a dictionary, with the following data: * path (str): Base directory or path of a ZIP file of the Lambda function payload content. If it not an absolute path, it will be considered relative to the directory containing the stacker configuration file in use. When a directory, files contained will be added to the payload ZIP, according to the include and exclude patterns. If not patterns are provided, all files in the directory (respecting default exclusions) will be used. Files are stored in the archive with path names relative to this directory. So, for example, all the files contained directly under this directory will be added to the root of the ZIP file. When a ZIP file, it will be uploaded directly to S3. The hash of whole ZIP file will be used as the version key by default, which may cause spurious rebuilds when building the ZIP in different environments. To avoid that, explicitly provide a `version` option. * include(str or list[str], optional): Pattern or list of patterns of files to include in the payload. If provided, only files that match these patterns will be included in the payload. Omitting it is equivalent to accepting all files that are not otherwise excluded. * exclude(str or list[str], optional): Pattern or list of patterns of files to exclude from the payload. If provided, any files that match will be ignored, regardless of whether they match an inclusion pattern. Commonly ignored files are already excluded by default, such as ``.git``, ``.svn``, ``__pycache__``, ``*.pyc``, ``.gitignore``, etc. 
* version(str, optional): Value to use as the version for the current function, which will be used to determine if a payload already exists in S3. The value can be any string, such as a version number or a git commit. Note that when setting this value, to re-build/re-upload a payload you must change the version manually. Examples: .. Hook configuration. .. code-block:: yaml pre_build: - path: stacker.hooks.aws_lambda.upload_lambda_functions required: true enabled: true data_key: lambda args: bucket: custom-bucket follow_symlinks: true prefix: cloudformation-custom-resources/ payload_acl: authenticated-read functions: MyFunction: path: ./lambda_functions include: - '*.py' - '*.txt' exclude: - '*.pyc' - test/ .. Blueprint usage .. code-block:: python from troposphere.awslambda import Function from stacker.blueprints.base import Blueprint class LambdaBlueprint(Blueprint): def create_template(self): code = self.context.hook_data['lambda']['MyFunction'] self.template.add_resource( Function( 'MyFunction', Code=code, Handler='my_function.handler', Role='...', Runtime='python2.7' ) ) """ custom_bucket = kwargs.get('bucket') if not custom_bucket: bucket_name = context.bucket_name logger.info("lambda: using default bucket from stacker: %s", bucket_name) else: bucket_name = custom_bucket logger.info("lambda: using custom bucket: %s", bucket_name) custom_bucket_region = kwargs.get("bucket_region") if not custom_bucket and custom_bucket_region: raise ValueError("Cannot specify `bucket_region` without specifying " "`bucket`.") bucket_region = select_bucket_region( custom_bucket, custom_bucket_region, context.config.stacker_bucket_region, provider.region ) # Check if we should walk / follow symlinks follow_symlinks = kwargs.get('follow_symlinks', False) if not isinstance(follow_symlinks, bool): raise ValueError('follow_symlinks option must be a boolean') # Check for S3 object acl. 
Valid values from: # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl payload_acl = kwargs.get('payload_acl', 'private') # Always use the global client for s3 session = get_session(bucket_region) s3_client = session.client('s3') ensure_s3_bucket(s3_client, bucket_name, bucket_region) prefix = kwargs.get('prefix', '') results = {} for name, options in kwargs['functions'].items(): results[name] = _upload_function(s3_client, bucket_name, prefix, name, options, follow_symlinks, payload_acl) return results ================================================ FILE: stacker/hooks/command.py ================================================ import logging import os from subprocess import PIPE, Popen from stacker.exceptions import ImproperlyConfigured logger = logging.getLogger(__name__) def _devnull(): return open(os.devnull, 'wb') def run_command(provider, context, command, capture=False, interactive=False, ignore_status=False, quiet=False, stdin=None, env=None, **kwargs): """Run a custom command as a hook Keyword Arguments: command (list or str): Command to run capture (bool, optional): If enabled, capture the command's stdout and stderr, and return them in the hook result. Default: false interactive (bool, optional): If enabled, allow the command to interact with stdin. Otherwise, stdin will be set to the null device. Default: false ignore_status (bool, optional): Don't fail the hook if the command returns a non-zero status. Default: false quiet (bool, optional): Redirect the command's stdout and stderr to the null device, silencing all output. Should not be enaled if `capture` is also enabled. Default: false stdin (str, optional): String to send to the stdin of the command. Implicitly disables `interactive`. env (dict, optional): Dictionary of environment variable overrides for the command context. Will be merged with the current environment. **kwargs: Any other arguments will be forwarded to the `subprocess.Popen` function. 
Interesting ones include: `cwd` and `shell`. Examples: .. code-block:: yaml pre_build: - path: stacker.hooks.command.run_command required: true enabled: true data_key: copy_env args: command: ['cp', 'environment.template', 'environment'] - path: stacker.hooks.command.run_command required: true enabled: true data_key: get_git_commit args: command: ['git', 'rev-parse', 'HEAD'] cwd: ./my-git-repo capture: true - path: stacker.hooks.command.run_command args: command: `cd $PROJECT_DIR/project; npm install' env: PROJECT_DIR: ./my-project shell: true """ if quiet and capture: raise ImproperlyConfigured( __name__ + '.run_command', 'Cannot enable `quiet` and `capture` options simultaneously') if quiet: out_err_type = _devnull() elif capture: out_err_type = PIPE else: out_err_type = None if interactive: in_type = None elif stdin: in_type = PIPE else: in_type = _devnull() if env: full_env = os.environ.copy() full_env.update(env) env = full_env logger.info('Running command: %s', command) proc = Popen(command, stdin=in_type, stdout=out_err_type, stderr=out_err_type, env=env, **kwargs) try: out, err = proc.communicate(stdin) status = proc.wait() if status == 0 or ignore_status: return { 'returncode': proc.returncode, 'stdout': out, 'stderr': err } # Don't print the command line again if we already did earlier if logger.isEnabledFor(logging.INFO): logger.warn('Command failed with returncode %d', status) else: logger.warn('Command failed with returncode %d: %s', status, command) return None finally: if proc.returncode is None: proc.kill() ================================================ FILE: stacker/hooks/ecs.py ================================================ # A lot of this code exists to deal w/ the broken ECS connect_to_region # function, and will be removed once this pull request is accepted: # https://github.com/boto/boto/pull/3143 from past.builtins import basestring import logging from stacker.session_cache import get_session logger = logging.getLogger(__name__) def 
create_clusters(provider, context, **kwargs): """Creates ECS clusters. Expects a "clusters" argument, which should contain a list of cluster names to create. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded. """ conn = get_session(provider.region).client('ecs') try: clusters = kwargs["clusters"] except KeyError: logger.error("setup_clusters hook missing \"clusters\" argument") return False if isinstance(clusters, basestring): clusters = [clusters] cluster_info = {} for cluster in clusters: logger.debug("Creating ECS cluster: %s", cluster) r = conn.create_cluster(clusterName=cluster) cluster_info[r["cluster"]["clusterName"]] = r return {"clusters": cluster_info} ================================================ FILE: stacker/hooks/iam.py ================================================ import copy import logging from stacker.session_cache import get_session from botocore.exceptions import ClientError from awacs.aws import Statement, Allow, Policy from awacs import ecs from awacs.helpers.trust import get_ecs_assumerole_policy from . import utils logger = logging.getLogger(__name__) def create_ecs_service_role(provider, context, **kwargs): """Used to create the ecsServieRole, which has to be named exactly that currently, so cannot be created via CloudFormation. See: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded. 
""" role_name = kwargs.get("role_name", "ecsServiceRole") client = get_session(provider.region).client('iam') try: client.create_role( RoleName=role_name, AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json() ) except ClientError as e: if "already exists" in str(e): pass else: raise policy = Policy( Version='2012-10-17', Statement=[ Statement( Effect=Allow, Resource=["*"], Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance, ecs.DiscoverPollEndpoint, ecs.Poll, ecs.Action("Submit*")] ) ]) client.put_role_policy( RoleName=role_name, PolicyName="AmazonEC2ContainerServiceRolePolicy", PolicyDocument=policy.to_json() ) return True def _get_cert_arn_from_response(response): result = copy.deepcopy(response) # GET response returns this extra key if "ServerCertificate" in response: result = response["ServerCertificate"] return result["ServerCertificateMetadata"]["Arn"] def get_cert_contents(kwargs): """Builds parameters with server cert file contents. Args: kwargs(dict): The keyword args passed to ensure_server_cert_exists, optionally containing the paths to the cert, key and chain files. Returns: dict: A dictionary containing the appropriate parameters to supply to upload_server_certificate. An empty dictionary if there is a problem. 
""" paths = { "certificate": kwargs.get("path_to_certificate"), "private_key": kwargs.get("path_to_private_key"), "chain": kwargs.get("path_to_chain"), } for key, value in paths.items(): if value is not None: continue path = input("Path to %s (skip): " % (key,)) if path == "skip" or not path.strip(): continue paths[key] = path parameters = { "ServerCertificateName": kwargs.get("cert_name"), } for key, path in paths.items(): if not path: continue # Allow passing of file like object for tests try: contents = path.read() except AttributeError: with open(utils.full_path(path)) as read_file: contents = read_file.read() if key == "certificate": parameters["CertificateBody"] = contents elif key == "private_key": parameters["PrivateKey"] = contents elif key == "chain": parameters["CertificateChain"] = contents return parameters def ensure_server_cert_exists(provider, context, **kwargs): client = get_session(provider.region).client('iam') cert_name = kwargs["cert_name"] status = "unknown" try: response = client.get_server_certificate( ServerCertificateName=cert_name ) cert_arn = _get_cert_arn_from_response(response) status = "exists" logger.info("certificate exists: %s (%s)", cert_name, cert_arn) except ClientError: if kwargs.get("prompt", True): upload = input( "Certificate '%s' wasn't found. Upload it now? 
(yes/no) " % ( cert_name, ) ) if upload != "yes": return False parameters = get_cert_contents(kwargs) if not parameters: return False response = client.upload_server_certificate(**parameters) cert_arn = _get_cert_arn_from_response(response) status = "uploaded" logger.info( "uploaded certificate: %s (%s)", cert_name, cert_arn, ) return { "status": status, "cert_name": cert_name, "cert_arn": cert_arn, } ================================================ FILE: stacker/hooks/keypair.py ================================================ import logging import os import sys from botocore.exceptions import ClientError from stacker.session_cache import get_session from stacker.hooks import utils from stacker.ui import get_raw_input logger = logging.getLogger(__name__) KEYPAIR_LOG_MESSAGE = "keypair: %s (%s) %s" def get_existing_key_pair(ec2, keypair_name): resp = ec2.describe_key_pairs() keypair = next((kp for kp in resp["KeyPairs"] if kp["KeyName"] == keypair_name), None) if keypair: logger.info(KEYPAIR_LOG_MESSAGE, keypair["KeyName"], keypair["KeyFingerprint"], "exists") return { "status": "exists", "key_name": keypair["KeyName"], "fingerprint": keypair["KeyFingerprint"], } logger.info("keypair: \"%s\" not found", keypair_name) return None def import_key_pair(ec2, keypair_name, public_key_data): keypair = ec2.import_key_pair( KeyName=keypair_name, PublicKeyMaterial=public_key_data.strip(), DryRun=False) logger.info(KEYPAIR_LOG_MESSAGE, keypair["KeyName"], keypair["KeyFingerprint"], "imported") return keypair def read_public_key_file(path): try: with open(utils.full_path(path), 'rb') as f: data = f.read() if not data.startswith(b"ssh-rsa"): raise ValueError( "Bad public key data, must be an RSA key in SSH authorized " "keys format (beginning with `ssh-rsa`)") return data.strip() except (ValueError, IOError, OSError) as e: logger.error("Failed to read public key file {}: {}".format( path, e)) return None def create_key_pair_from_public_key_file(ec2, keypair_name, 
public_key_path): public_key_data = read_public_key_file(public_key_path) if not public_key_data: return None keypair = import_key_pair(ec2, keypair_name, public_key_data) return { "status": "imported", "key_name": keypair["KeyName"], "fingerprint": keypair["KeyFingerprint"], } def create_key_pair_in_ssm(ec2, ssm, keypair_name, parameter_name, kms_key_id=None): keypair = create_key_pair(ec2, keypair_name) try: kms_key_label = 'default' kms_args = {} if kms_key_id: kms_key_label = kms_key_id kms_args = {"KeyId": kms_key_id} logger.info("Storing generated key in SSM parameter \"%s\" " "using KMS key \"%s\"", parameter_name, kms_key_label) ssm.put_parameter( Name=parameter_name, Description="SSH private key for KeyPair \"{}\" " "(generated by Stacker)".format(keypair_name), Value=keypair["KeyMaterial"], Type="SecureString", Overwrite=False, **kms_args) except ClientError: # Erase the key pair if we failed to store it in SSM, since the # private key will be lost anyway logger.exception("Failed to store generated key in SSM, deleting " "created key pair as private key will be lost") ec2.delete_key_pair(KeyName=keypair_name, DryRun=False) return None return { "status": "created", "key_name": keypair["KeyName"], "fingerprint": keypair["KeyFingerprint"], } def create_key_pair(ec2, keypair_name): keypair = ec2.create_key_pair(KeyName=keypair_name, DryRun=False) logger.info(KEYPAIR_LOG_MESSAGE, keypair["KeyName"], keypair["KeyFingerprint"], "created") return keypair def create_key_pair_local(ec2, keypair_name, dest_dir): dest_dir = utils.full_path(dest_dir) if not os.path.isdir(dest_dir): logger.error("\"%s\" is not a valid directory", dest_dir) return None file_name = "{0}.pem".format(keypair_name) key_path = os.path.join(dest_dir, file_name) if os.path.isfile(key_path): # This mimics the old boto2 keypair.save error logger.error("\"%s\" already exists in \"%s\" directory", file_name, dest_dir) return None # Open the file before creating the key pair to catch errors early 
with open(key_path, "wb") as f: keypair = create_key_pair(ec2, keypair_name) f.write(keypair["KeyMaterial"].encode("ascii")) return { "status": "created", "key_name": keypair["KeyName"], "fingerprint": keypair["KeyFingerprint"], "file_path": key_path } def interactive_prompt(keypair_name, ): if not sys.stdin.isatty(): return None, None try: while True: action = get_raw_input( "import or create keypair \"%s\"? (import/create/cancel) " % ( keypair_name, ) ) if action.lower() == "cancel": break if action.lower() in ("i", "import"): path = get_raw_input("path to keypair file: ") return "import", path.strip() if action.lower() == "create": path = get_raw_input("directory to save keyfile: ") return "create", path.strip() except (EOFError, KeyboardInterrupt): return None, None return None, None def ensure_keypair_exists(provider, context, **kwargs): """Ensure a specific keypair exists within AWS. If the key doesn't exist, upload it. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance keypair (str): name of the key pair to create ssm_parameter_name (str, optional): path to an SSM store parameter to receive the generated private key, instead of importing it or storing it locally. ssm_key_id (str, optional): ID of a KMS key to encrypt the SSM parameter with. If omitted, the default key will be used. public_key_path (str, optional): path to a public key file to be imported instead of generating a new key. Incompatible with the SSM options, as the private key will not be available for storing. 
Returns: In case of failure ``False``, otherwise a dict containing: status (str): one of "exists", "imported" or "created" key_name (str): name of the key pair fingerprint (str): fingerprint of the key pair file_path (str, optional): if a new key was created, the path to the file where the private key was stored """ keypair_name = kwargs["keypair"] ssm_parameter_name = kwargs.get("ssm_parameter_name") ssm_key_id = kwargs.get("ssm_key_id") public_key_path = kwargs.get("public_key_path") if public_key_path and ssm_parameter_name: logger.error("public_key_path and ssm_parameter_name cannot be " "specified at the same time") return False session = get_session(region=provider.region, profile=kwargs.get("profile")) ec2 = session.client("ec2") keypair = get_existing_key_pair(ec2, keypair_name) if keypair: return keypair if public_key_path: keypair = create_key_pair_from_public_key_file( ec2, keypair_name, public_key_path) elif ssm_parameter_name: ssm = session.client('ssm') keypair = create_key_pair_in_ssm( ec2, ssm, keypair_name, ssm_parameter_name, ssm_key_id) else: action, path = interactive_prompt(keypair_name) if action == "import": keypair = create_key_pair_from_public_key_file( ec2, keypair_name, path) elif action == "create": keypair = create_key_pair_local(ec2, keypair_name, path) else: logger.warning("no action to find keypair, failing") if not keypair: return False return keypair ================================================ FILE: stacker/hooks/route53.py ================================================ import logging from stacker.session_cache import get_session from stacker.util import create_route53_zone logger = logging.getLogger(__name__) def create_domain(provider, context, **kwargs): """Create a domain within route53. Args: provider (:class:`stacker.providers.base.BaseProvider`): provider instance context (:class:`stacker.context.Context`): context instance Returns: boolean for whether or not the hook succeeded. 
""" session = get_session(provider.region) client = session.client("route53") domain = kwargs.get("domain") if not domain: logger.error("domain argument or BaseDomain variable not provided.") return False zone_id = create_route53_zone(client, domain) return {"domain": domain, "zone_id": zone_id} ================================================ FILE: stacker/hooks/utils.py ================================================ import os import sys import collections.abc import logging from stacker.util import load_object_from_string logger = logging.getLogger(__name__) def full_path(path): return os.path.abspath(os.path.expanduser(path)) def handle_hooks(stage, hooks, provider, context): """ Used to handle pre/post_build hooks. These are pieces of code that we want to run before/after the builder builds the stacks. Args: stage (string): The current stage (pre_run, post_run, etc). hooks (list): A list of :class:`stacker.config.Hook` containing the hooks to execute. provider (:class:`stacker.provider.base.BaseProvider`): The provider the current stack is using. context (:class:`stacker.context.Context`): The current stacker context. """ if not hooks: logger.debug("No %s hooks defined.", stage) return hook_paths = [] for i, h in enumerate(hooks): try: hook_paths.append(h.path) except KeyError: raise ValueError("%s hook #%d missing path." 
% (stage, i)) logger.info("Executing %s hooks: %s", stage, ", ".join(hook_paths)) for hook in hooks: data_key = hook.data_key required = hook.required kwargs = hook.args or {} enabled = hook.enabled if not enabled: logger.debug("hook with method %s is disabled, skipping", hook.path) continue try: method = load_object_from_string(hook.path) except (AttributeError, ImportError): logger.exception("Unable to load method at %s:", hook.path) if required: raise continue try: result = method(context=context, provider=provider, **kwargs) except Exception: logger.exception("Method %s threw an exception:", hook.path) if required: raise continue if not result: if required: logger.error("Required hook %s failed. Return value: %s", hook.path, result) sys.exit(1) logger.warning("Non-required hook %s failed. Return value: %s", hook.path, result) else: if isinstance(result, collections.abc.Mapping): if data_key: logger.debug("Adding result for hook %s to context in " "data_key %s.", hook.path, data_key) context.set_hook_data(data_key, result) else: logger.debug("Hook %s returned result data, but no data " "key set, so ignoring.", hook.path) ================================================ FILE: stacker/logger/__init__.py ================================================ import sys import logging DEBUG_FORMAT = ("[%(asctime)s] %(levelname)s %(threadName)s " "%(name)s:%(lineno)d(%(funcName)s): %(message)s") INFO_FORMAT = ("[%(asctime)s] %(message)s") COLOR_FORMAT = ("[%(asctime)s] \033[%(color)sm%(message)s\033[39m") ISO_8601 = "%Y-%m-%dT%H:%M:%S" class ColorFormatter(logging.Formatter): """ Handles colorizing formatted log messages if color provided. """ def format(self, record): if 'color' not in record.__dict__: record.__dict__['color'] = 37 msg = super(ColorFormatter, self).format(record) return msg def setup_logging(verbosity, formats=None): """ Configure a proper logger based on verbosity and optional log formats. 
Args: verbosity (int): 0, 1, 2 formats (dict): Optional, looks for `info`, `color`, and `debug` keys which may override the associated default log formats. """ if formats is None: formats = {} log_level = logging.INFO log_format = formats.get("info", INFO_FORMAT) if sys.stdout.isatty(): log_format = formats.get("color", COLOR_FORMAT) if verbosity > 0: log_level = logging.DEBUG log_format = formats.get("debug", DEBUG_FORMAT) if verbosity < 2: logging.getLogger("botocore").setLevel(logging.CRITICAL) hdlr = logging.StreamHandler() hdlr.setFormatter(ColorFormatter(log_format, ISO_8601)) logging.root.addHandler(hdlr) logging.root.setLevel(log_level) ================================================ FILE: stacker/lookups/__init__.py ================================================ from past.builtins import basestring from collections import namedtuple import re # export resolve_lookups at this level from .registry import resolve_lookups # NOQA from .registry import register_lookup_handler # NOQA # TODO: we can remove the optionality of of the type in a later release, it # is only included to allow for an error to be thrown while people are # converting their configuration files to 1.0 LOOKUP_REGEX = re.compile(""" \$\{ # opening brace for the lookup ((?P[._\-a-zA-Z0-9]*(?=\s)) # type of lookup, must be followed by a # space ?\s* # any number of spaces separating the # type from the input (?P[@\+\/,\.\?_\-a-zA-Z0-9\:\s=\[\]\*]+) # the input value to the lookup )\} # closing brace of the lookup """, re.VERBOSE) Lookup = namedtuple("Lookup", ("type", "input", "raw")) def extract_lookups_from_string(value): """Extract any lookups within a string. 
Args: value (str): string value we're extracting lookups from Returns: list: list of :class:`stacker.lookups.Lookup` if any """ lookups = set() for match in LOOKUP_REGEX.finditer(value): groupdict = match.groupdict() raw = match.groups()[0] lookup_type = groupdict["type"] lookup_input = groupdict["input"] lookups.add(Lookup(lookup_type, lookup_input, raw)) return lookups def extract_lookups(value): """Recursively extracts any stack lookups within the data structure. Args: value (one of str, list, dict): a structure that contains lookups to output values Returns: list: list of lookups if any """ lookups = set() if isinstance(value, basestring): lookups = lookups.union(extract_lookups_from_string(value)) elif isinstance(value, list): for v in value: lookups = lookups.union(extract_lookups(v)) elif isinstance(value, dict): for v in value.values(): lookups = lookups.union(extract_lookups(v)) return lookups ================================================ FILE: stacker/lookups/handlers/__init__.py ================================================ class LookupHandler(object): @classmethod def handle(cls, value, context, provider): """ Perform the actual lookup :param value: Parameter(s) given to this lookup :type value: str :param context: :param provider: :return: Looked-up value :rtype: str """ raise NotImplementedError() @classmethod def dependencies(cls, lookup_data): """ Calculate any dependencies required to perform this lookup. Note that lookup_data may not be (completely) resolved at this time. :param lookup_data: Parameter(s) given to this lookup :type lookup_data VariableValue :return: Set of stack names (str) this lookup depends on :rtype: set """ del lookup_data # unused in this implementation return set() ================================================ FILE: stacker/lookups/handlers/ami.py ================================================ from stacker.session_cache import get_session import re import operator from . 
import LookupHandler from ...util import read_value_from_path TYPE_NAME = "ami" class ImageNotFound(Exception): def __init__(self, search_string): self.search_string = search_string message = ("Unable to find ec2 image with search string: {}").format( search_string ) super(ImageNotFound, self).__init__(message) class AmiLookup(LookupHandler): @classmethod def handle(cls, value, provider, **kwargs): """Fetch the most recent AMI Id using a filter For example: ${ami [@]owners:self,account,amazon name_regex:serverX-[0-9]+ architecture:x64,i386} The above fetches the most recent AMI where owner is self account or amazon and the ami name matches the regex described, the architecture will be either x64 or i386 You can also optionally specify the region in which to perform the AMI lookup. Valid arguments: owners (comma delimited) REQUIRED ONCE: aws_account_id | amazon | self name_regex (a regex) REQUIRED ONCE: e.g. my-ubuntu-server-[0-9]+ executable_users (comma delimited) OPTIONAL ONCE: aws_account_id | amazon | self Any other arguments specified are sent as filters to the aws api For example, "architecture:x86_64" will add a filter """ # noqa value = read_value_from_path(value) if "@" in value: region, value = value.split("@", 1) else: region = provider.region ec2 = get_session(region).client('ec2') values = {} describe_args = {} # now find any other arguments that can be filters matches = re.findall('([0-9a-zA-z_-]+:[^\s$]+)', value) for match in matches: k, v = match.split(':', 1) values[k] = v if not values.get('owners'): raise Exception("'owners' value required when using ami") owners = values.pop('owners').split(',') describe_args["Owners"] = owners if not values.get('name_regex'): raise Exception("'name_regex' value required when using ami") name_regex = values.pop('name_regex') executable_users = None if values.get('executable_users'): executable_users = values.pop('executable_users').split(',') describe_args["ExecutableUsers"] = executable_users filters = [] for 
k, v in values.items(): filters.append({"Name": k, "Values": v.split(',')}) describe_args["Filters"] = filters result = ec2.describe_images(**describe_args) images = sorted(result['Images'], key=operator.itemgetter('CreationDate'), reverse=True) for image in images: if re.match("^%s$" % name_regex, image.get('Name', '')): return image['ImageId'] raise ImageNotFound(value) ================================================ FILE: stacker/lookups/handlers/default.py ================================================ from . import LookupHandler TYPE_NAME = "default" class DefaultLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Use a value from the environment or fall back to a default if the environment doesn't contain the variable. Format of value: :: For example: Groups: ${default app_security_groups::sg-12345,sg-67890} If `app_security_groups` is defined in the environment, its defined value will be returned. Otherwise, `sg-12345,sg-67890` will be the returned value. This allows defaults to be set at the config file level. """ try: env_var_name, default_val = value.split("::", 1) except ValueError: raise ValueError("Invalid value for default: %s. Must be in " ":: format." % value) if env_var_name in kwargs['context'].environment: return kwargs['context'].environment[env_var_name] else: return default_val ================================================ FILE: stacker/lookups/handlers/dynamodb.py ================================================ from botocore.exceptions import ClientError import re from stacker.session_cache import get_session from . import LookupHandler from ...util import read_value_from_path TYPE_NAME = 'dynamodb' class DynamodbLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Get a value from a dynamodb table dynamodb field types should be in the following format: [:]@:.... Note: The region is optional, and defaults to the environment's `AWS_DEFAULT_REGION` if not specified. 
""" value = read_value_from_path(value) table_info = None table_keys = None region = None table_name = None if '@' in value: table_info, table_keys = value.split('@', 1) if ':' in table_info: region, table_name = table_info.split(':', 1) else: table_name = table_info else: raise ValueError('Please make sure to include a tablename') if not table_name: raise ValueError('Please make sure to include a dynamodb table ' 'name') table_lookup, table_keys = table_keys.split(':', 1) table_keys = table_keys.split('.') key_dict = _lookup_key_parse(table_keys) new_keys = key_dict['new_keys'] clean_table_keys = key_dict['clean_table_keys'] projection_expression = _build_projection_expression(clean_table_keys) # lookup the data from dynamodb dynamodb = get_session(region).client('dynamodb') try: response = dynamodb.get_item( TableName=table_name, Key={ table_lookup: new_keys[0] }, ProjectionExpression=projection_expression ) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': raise ValueError( 'Cannot find the dynamodb table: {}'.format(table_name)) elif e.response['Error']['Code'] == 'ValidationException': raise ValueError( 'No dynamodb record matched the partition key: ' '{}'.format(table_lookup)) else: raise ValueError('The dynamodb lookup {} had an error: ' '{}'.format(value, e)) # find and return the key from the dynamo data returned if 'Item' in response: return (_get_val_from_ddb_data(response['Item'], new_keys[1:])) else: raise ValueError( 'The dynamodb record could not be found using the following ' 'key: {}'.format(new_keys[0])) def _lookup_key_parse(table_keys): """Return the order in which the stacks should be executed. Args: dependencies (dict): a dictionary where each key should be the fully qualified name of a stack whose value is an array of fully qualified stack names that the stack depends on. This is used to generate the order in which the stacks should be executed. 
Returns: dict: includes a dict of lookup types with data types ('new_keys') and a list of the lookups with without ('clean_table_keys') """ # we need to parse the key lookup passed in regex_matcher = '\[([^\]]+)]' valid_dynamodb_datatypes = ['M', 'S', 'N', 'L'] clean_table_keys = [] new_keys = [] for key in table_keys: match = re.search(regex_matcher, key) if match: # the datatypes are pulled from the dynamodb docs if match.group(1) in valid_dynamodb_datatypes: match_val = str(match.group(1)) key = key.replace(match.group(0), '') new_keys.append({match_val: key}) clean_table_keys.append(key) else: raise ValueError( ('Stacker does not support looking up the datatype: {}') .format(str(match.group(1)))) else: new_keys.append({'S': key}) clean_table_keys.append(key) key_dict = {} key_dict['new_keys'] = new_keys key_dict['clean_table_keys'] = clean_table_keys return key_dict def _build_projection_expression(clean_table_keys): """Given cleaned up keys, this will return a projection expression for the dynamodb lookup. Args: clean_table_keys (dict): keys without the data types attached Returns: str: A projection expression for the dynamodb lookup. """ projection_expression = '' for key in clean_table_keys[:-1]: projection_expression += ('{},').format(key) projection_expression += clean_table_keys[-1] return projection_expression def _get_val_from_ddb_data(data, keylist): """Given a dictionary of dynamodb data (including the datatypes) and a properly structured keylist, it will return the value of the lookup Args: data (dict): the raw dynamodb data keylist(list): a list of keys to lookup. 
This must include the datatype Returns: various: It returns the value from the dynamodb record, and casts it to a matching python datatype """ next_type = None # iterate through the keylist to find the matching key/datatype for k in keylist: for k1 in k: if next_type is None: data = data[k[k1]] else: temp_dict = data[next_type] data = temp_dict[k[k1]] next_type = k1 if next_type == 'L': # if type is list, convert it to a list and return return _convert_ddb_list_to_list(data[next_type]) if next_type == 'N': # TODO: handle various types of 'number' datatypes, (e.g. int, double) # if a number, convert to an int and return return int(data[next_type]) # else, just assume its a string and return return str(data[next_type]) def _convert_ddb_list_to_list(conversion_list): """Given a dynamodb list, it will return a python list without the dynamodb datatypes Args: conversion_list (dict): a dynamodb list which includes the datatypes Returns: list: Returns a sanitized list without the dynamodb datatypes """ ret_list = [] for v in conversion_list: for v1 in v: ret_list.append(v[v1]) return ret_list ================================================ FILE: stacker/lookups/handlers/envvar.py ================================================ import os from . import LookupHandler from ...util import read_value_from_path TYPE_NAME = "envvar" class EnvvarLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Retrieve an environment variable. 
        For example:

            # In stacker we would reference the environment variable like this:
            conf_key: ${envvar ENV_VAR_NAME}

        You can optionally store the value in a file, ie:

            $ cat envvar_value.txt
            ENV_VAR_NAME

        and reference it within stacker (NOTE: the path should be relative
        to the stacker config file):

            conf_key: ${envvar file://envvar_value.txt}

        # Both of the above would resolve to
        conf_key: ENV_VALUE
        """
        value = read_value_from_path(value)

        try:
            return os.environ[value]
        except KeyError:
            raise ValueError('EnvVar "{}" does not exist'.format(value))


================================================
FILE: stacker/lookups/handlers/file.py
================================================
import base64
import json
import re
from collections.abc import Mapping, Sequence

import yaml
from troposphere import GenericHelperFn, Base64

from . import LookupHandler
from ...util import read_value_from_path


TYPE_NAME = "file"

# Matches "{{Name}}" placeholders inside file contents; the captured group
# becomes a CloudFormation {"Ref": ...}.
_PARAMETER_PATTERN = re.compile(r'{{([::|\w]+)}}')


class FileLookup(LookupHandler):
    @classmethod
    def handle(cls, value, **kwargs):
        """Translate a filename into the file contents.

        Fields should use the following format (placeholders reconstructed
        from the split below)::

            <codec>:<path>

        For example::

            # We've written a file to /some/path:
            $ echo "hello there" > /some/path

            # In stacker we would reference the contents of this file with the
            # following
            conf_key: ${file plain:file://some/path}

            # The above would resolve to
            conf_key: hello there

            # Or, if we used wanted a base64 encoded copy of the file data
            conf_key: ${file base64:file://some/path}

            # The above would resolve to
            conf_key: aGVsbG8gdGhlcmUK

        Supported codecs:

        - plain

        - base64 - encode the plain text file at the given path with base64
          prior to returning it

        - parameterized - the same as plain, but additionally supports
          referencing template parameters to create userdata that's
          supplemented with information from the template, as is commonly
          needed in EC2 UserData. For example, given a template parameter of
          BucketName, the file could contain the following text::

            #!/bin/sh
            aws s3 sync s3://{{BucketName}}/somepath /somepath

          and then you could use something like this in the YAML config
          file::

            UserData: ${file parameterized:/path/to/file}

          resulting in the UserData parameter being defined as::

            { "Fn::Join" : ["", [
                "#!/bin/sh\\naws s3 sync s3://",
                {"Ref" : "BucketName"},
                "/somepath /somepath"
            ]] }

        - parameterized-b64 - the same as parameterized, with the results
          additionally wrapped in *{ "Fn::Base64": ... }* , which is what you
          actually need for EC2 UserData

        When using parameterized-b64 for UserData, you should use a variable
        defined as such:

        .. code-block:: python

            from troposphere import AWSHelperFn

            "UserData": {
                "type": AWSHelperFn,
                "description": "Instance user data",
                "default": Ref("AWS::NoValue")
            }

        and then assign UserData in a LaunchConfiguration or Instance to
        *self.get_variables()["UserData"]*.

        Note that we use AWSHelperFn as the type because the parameterized-b64
        codec returns either a Base64 or a GenericHelperFn troposphere object
        """
        try:
            codec, path = value.split(":", 1)
        except ValueError:
            raise TypeError(
                "File value must be of the format"
                " \":\" (got %s)" % (value)
            )
        value = read_value_from_path(path)
        return CODECS[codec](value)


def _parameterize_string(raw):
    """Substitute placeholders in a string using CloudFormation references

    Args:
        raw (`str`): String to be processed. Byte strings are not
            supported; decode them before passing them to this function.

    Returns:
        `str` | :class:`troposphere.GenericHelperFn`: An expression with
            placeholders from the input replaced, suitable to be passed to
            Troposphere to be included in CloudFormation template. This will
            be the input string without modification if no substitutions are
            found, and a composition of CloudFormation calls otherwise.
    """
    parts = []
    s_index = 0

    # Walk the matches, interleaving the literal text between them with
    # {"Ref": ...} dicts for each placeholder.
    for match in _PARAMETER_PATTERN.finditer(raw):
        parts.append(raw[s_index:match.start()])
        parts.append({u"Ref": match.group(1)})
        s_index = match.end()

    # No placeholder matched: return the raw string wrapped unchanged.
    if not parts:
        return GenericHelperFn(raw)

    parts.append(raw[s_index:])
    return GenericHelperFn({u"Fn::Join": [u"", parts]})


def parameterized_codec(raw, b64):
    """Parameterize a string, possibly encoding it as Base64 afterwards

    Args:
        raw (`str` | `bytes`): String to be processed. Byte strings will be
            interpreted as UTF-8.
        b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation
            call

    Returns:
        :class:`troposphere.AWSHelperFn`: output to be included in a
        CloudFormation template.
    """
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')

    result = _parameterize_string(raw)

    # Note, since we want a raw JSON object (not a string) output in the
    # template, we wrap the result in GenericHelperFn (not needed if we're
    # using Base64)
    return Base64(result.data) if b64 else result


def _parameterize_obj(obj):
    """Recursively parameterize all strings contained in an object.

    Parameterizes all values of a Mapping, all items of a Sequence, an
    unicode string, or pass other objects through unmodified.

    Byte strings will be interpreted as UTF-8.

    Args:
        obj: data to parameterize

    Return:
        A parameterized object to be included in a CloudFormation template.
        Mappings are converted to `dict`, Sequences are converted to `list`,
        and strings possibly replaced by compositions of function calls.
    """
    if isinstance(obj, Mapping):
        return dict((key, _parameterize_obj(value))
                    for key, value in obj.items())
    elif isinstance(obj, bytes):
        return _parameterize_string(obj.decode('utf8'))
    elif isinstance(obj, str):
        return _parameterize_string(obj)
    elif isinstance(obj, Sequence):
        return list(_parameterize_obj(item) for item in obj)
    else:
        return obj


class SafeUnicodeLoader(yaml.SafeLoader):
    # Keep scalars as plain (unicode) strings instead of SafeLoader's
    # default str handling.
    def construct_yaml_str(self, node):
        return self.construct_scalar(node)


def yaml_codec(raw, parameterized=False):
    # Parse YAML; optionally substitute {{...}} placeholders afterwards.
    data = yaml.load(raw, Loader=SafeUnicodeLoader)
    return _parameterize_obj(data) if parameterized else data


def json_codec(raw, parameterized=False):
    # Parse JSON; optionally substitute {{...}} placeholders afterwards.
    data = json.loads(raw)
    return _parameterize_obj(data) if parameterized else data


# Dispatch table used by FileLookup.handle; keys are the <codec> prefix of
# the lookup value.
CODECS = {
    "plain": lambda x: x,
    "base64": lambda x: base64.b64encode(x.encode('utf8')).decode('utf-8'),
    "parameterized": lambda x: parameterized_codec(x, False),
    "parameterized-b64": lambda x: parameterized_codec(x, True),
    "yaml": lambda x: yaml_codec(x, parameterized=False),
    "yaml-parameterized": lambda x: yaml_codec(x, parameterized=True),
    "json": lambda x: json_codec(x, parameterized=False),
    "json-parameterized": lambda x: json_codec(x, parameterized=True),
}


================================================
FILE: stacker/lookups/handlers/hook_data.py
================================================
from . import LookupHandler


TYPE_NAME = "hook_data"


class HookDataLookup(LookupHandler):
    @classmethod
    def handle(cls, value, context, **kwargs):
        """Returns the value of a key for a given hook in hook_data.

        Format of value (placeholders reconstructed from the split below):

            <hook_name>::<key>
        """
        try:
            hook_name, key = value.split("::")
        except ValueError:
            raise ValueError("Invalid value for hook_data: %s. Must be in "
                             ":: format." % value)

        return context.hook_data[hook_name][key]


================================================
FILE: stacker/lookups/handlers/kms.py
================================================
import codecs
import sys

from stacker.session_cache import get_session

from .
import LookupHandler from ...util import read_value_from_path TYPE_NAME = "kms" class KmsLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Decrypt the specified value with a master key in KMS. kmssimple field types should be in the following format: [@] Note: The region is optional, and defaults to the environment's `AWS_DEFAULT_REGION` if not specified. For example: # We use the aws cli to get the encrypted value for the string # "PASSWORD" using the master key called "myStackerKey" in # us-east-1 $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \ --plaintext "PASSWORD" --output text --query CiphertextBlob CiD6bC8t2Y<...encrypted blob...> # In stacker we would reference the encrypted value like: conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>} You can optionally store the encrypted value in a file, ie: kms_value.txt us-east-1@CiD6bC8t2Y<...encrypted blob...> and reference it within stacker (NOTE: the path should be relative to the stacker config file): conf_key: ${kms file://kms_value.txt} # Both of the above would resolve to conf_key: PASSWORD """ value = read_value_from_path(value) region = None if "@" in value: region, value = value.split("@", 1) kms = get_session(region).client('kms') # encode str value as an utf-8 bytestring for use with codecs.decode. value = value.encode('utf-8') # get raw but still encrypted value from base64 version. decoded = codecs.decode(value, 'base64') # check python version in your system python3_or_later = sys.version_info[0] >= 3 # decrypt and return the plain text raw value. if python3_or_later: return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]\ .decode('utf-8') else: return kms.decrypt(CiphertextBlob=decoded)["Plaintext"] ================================================ FILE: stacker/lookups/handlers/output.py ================================================ import re from collections import namedtuple from . 
import LookupHandler TYPE_NAME = "output" Output = namedtuple("Output", ("stack_name", "output_name")) class OutputLookup(LookupHandler): @classmethod def handle(cls, value, context=None, **kwargs): """Fetch an output from the designated stack. Args: value (str): string with the following format: ::, ie. some-stack::SomeOutput context (:class:`stacker.context.Context`): stacker context Returns: str: output from the specified stack """ if context is None: raise ValueError('Context is required') d = deconstruct(value) stack = context.get_stack(d.stack_name) return stack.outputs[d.output_name] @classmethod def dependencies(cls, lookup_data): # try to get the stack name stack_name = '' for data_item in lookup_data: if not data_item.resolved(): # We encountered an unresolved substitution. # StackName is calculated dynamically based on context: # e.g. ${output ${default var::source}::name} # Stop here return set() stack_name = stack_name + data_item.value() match = re.search(r'::', stack_name) if match: stack_name = stack_name[0:match.start()] return {stack_name} # else: try to append the next item # We added all lookup_data, and still couldn't find a `::`... # Probably an error... return set() def deconstruct(value): try: stack_name, output_name = value.split("::") except ValueError: raise ValueError("output handler requires syntax " "of ::. Got: %s" % value) return Output(stack_name, output_name) ================================================ FILE: stacker/lookups/handlers/rxref.py ================================================ """Handler for fetching outputs from fully qualified stacks. The `output` handler supports fetching outputs from stacks created within a sigle config file. Sometimes it's useful to fetch outputs from stacks created outside of the current config file. `rxref` supports this by not using the :class:`stacker.context.Context` to expand the fqn of the stack. 
Example: conf_value: ${rxref some-relative-fully-qualified-stack-name::SomeOutputName} """ from . import LookupHandler from .output import deconstruct TYPE_NAME = "rxref" class RxrefLookup(LookupHandler): @classmethod def handle(cls, value, provider=None, context=None, **kwargs): """Fetch an output from the designated stack. Args: value (str): string with the following format: ::, ie. some-stack::SomeOutput provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider context (:class:`stacker.context.Context`): stacker context Returns: str: output from the specified stack """ if provider is None: raise ValueError('Provider is required') if context is None: raise ValueError('Context is required') d = deconstruct(value) stack_fqn = context.get_fqn(d.stack_name) output = provider.get_output(stack_fqn, d.output_name) return output ================================================ FILE: stacker/lookups/handlers/split.py ================================================ from . import LookupHandler TYPE_NAME = "split" class SplitLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Split the supplied string on the given delimiter, providing a list. Format of value: :: For example: Subnets: ${split ,::subnet-1,subnet-2,subnet-3} Would result in the variable `Subnets` getting a list consisting of: ["subnet-1", "subnet-2", "subnet-3"] This is particularly useful when getting an output from another stack that contains a list. For example, the standard vpc blueprint outputs the list of Subnets it creates as a pair of Outputs (PublicSubnets, PrivateSubnets) that are comma separated, so you could use this in your config: Subnets: ${split ,::${output vpc::PrivateSubnets}} """ try: delimiter, text = value.split("::", 1) except ValueError: raise ValueError("Invalid value for split: %s. Must be in " ":: format." 
% value) return text.split(delimiter) ================================================ FILE: stacker/lookups/handlers/ssmstore.py ================================================ from stacker.session_cache import get_session from . import LookupHandler from ...util import read_value_from_path TYPE_NAME = "ssmstore" class SsmstoreLookup(LookupHandler): @classmethod def handle(cls, value, **kwargs): """Retrieve (and decrypt if applicable) a parameter from AWS SSM Parameter Store. ssmstore field types should be in the following format: [@]ssmkey Note: The region is optional, and defaults to us-east-1 if not given. For example: # In stacker we would reference the encrypted value like: conf_key: ${ssmstore us-east-1@ssmkey} You can optionally store the value in a file, ie: ssmstore_value.txt us-east-1@ssmkey and reference it within stacker (NOTE: the path should be relative to the stacker config file): conf_key: ${ssmstore file://ssmstore_value.txt} # Both of the above would resolve to conf_key: PASSWORD """ value = read_value_from_path(value) region = "us-east-1" if "@" in value: region, value = value.split("@", 1) client = get_session(region).client("ssm") response = client.get_parameters( Names=[ value, ], WithDecryption=True ) if 'Parameters' in response: return str(response['Parameters'][0]['Value']) raise ValueError('SSMKey "{}" does not exist in region {}'.format( value, region)) ================================================ FILE: stacker/lookups/handlers/xref.py ================================================ """Handler for fetching outputs from fully qualified stacks. The `output` handler supports fetching outputs from stacks created within a sigle config file. Sometimes it's useful to fetch outputs from stacks created outside of the current config file. `xref` supports this by not using the :class:`stacker.context.Context` to expand the fqn of the stack. Example: conf_value: ${xref some-fully-qualified-stack-name::SomeOutputName} """ from . 
import LookupHandler from .output import deconstruct TYPE_NAME = "xref" class XrefLookup(LookupHandler): @classmethod def handle(cls, value, provider=None, **kwargs): """Fetch an output from the designated stack. Args: value (str): string with the following format: ::, ie. some-stack::SomeOutput provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider Returns: str: output from the specified stack """ if provider is None: raise ValueError('Provider is required') d = deconstruct(value) stack_fqn = d.stack_name output = provider.get_output(stack_fqn, d.output_name) return output ================================================ FILE: stacker/lookups/registry.py ================================================ import logging import warnings from past.builtins import basestring from ..exceptions import UnknownLookupType, FailedVariableLookup from ..util import load_object_from_string from .handlers import output from .handlers import kms from .handlers import xref from .handlers import ssmstore from .handlers import dynamodb from .handlers import envvar from .handlers import rxref from .handlers import ami from .handlers import file as file_handler from .handlers import split from .handlers import default from .handlers import hook_data LOOKUP_HANDLERS = {} def register_lookup_handler(lookup_type, handler_or_path): """Register a lookup handler. Args: lookup_type (str): Name to register the handler under handler_or_path (OneOf[func, str]): a function or a path to a handler """ handler = handler_or_path if isinstance(handler_or_path, basestring): handler = load_object_from_string(handler_or_path) LOOKUP_HANDLERS[lookup_type] = handler if type(handler) != type: # Hander is a not a new-style handler logger = logging.getLogger(__name__) logger.warning("Registering lookup `%s`: Please upgrade to use the " "new style of Lookups." % lookup_type) warnings.warn( # For some reason, this does not show up... 
# Leaving it in anyway "Lookup `%s`: Please upgrade to use the new style of Lookups" "." % lookup_type, DeprecationWarning, stacklevel=2, ) def unregister_lookup_handler(lookup_type): """Unregister the specified lookup type. This is useful when testing various lookup types if you want to unregister the lookup type after the test runs. Args: lookup_type (str): Name of the lookup type to unregister """ LOOKUP_HANDLERS.pop(lookup_type, None) def resolve_lookups(variable, context, provider): """Resolve a set of lookups. Args: variable (:class:`stacker.variables.Variable`): The variable resolving it's lookups. context (:class:`stacker.context.Context`): stacker context provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider Returns: dict: dict of Lookup -> resolved value """ resolved_lookups = {} for lookup in variable.lookups: try: handler = LOOKUP_HANDLERS[lookup.type] except KeyError: raise UnknownLookupType(lookup) try: resolved_lookups[lookup] = handler( value=lookup.input, context=context, provider=provider, ) except Exception as e: raise FailedVariableLookup(variable.name, lookup, e) return resolved_lookups register_lookup_handler(output.TYPE_NAME, output.OutputLookup) register_lookup_handler(kms.TYPE_NAME, kms.KmsLookup) register_lookup_handler(ssmstore.TYPE_NAME, ssmstore.SsmstoreLookup) register_lookup_handler(envvar.TYPE_NAME, envvar.EnvvarLookup) register_lookup_handler(xref.TYPE_NAME, xref.XrefLookup) register_lookup_handler(rxref.TYPE_NAME, rxref.RxrefLookup) register_lookup_handler(ami.TYPE_NAME, ami.AmiLookup) register_lookup_handler(file_handler.TYPE_NAME, file_handler.FileLookup) register_lookup_handler(split.TYPE_NAME, split.SplitLookup) register_lookup_handler(default.TYPE_NAME, default.DefaultLookup) register_lookup_handler(hook_data.TYPE_NAME, hook_data.HookDataLookup) register_lookup_handler(dynamodb.TYPE_NAME, dynamodb.DynamodbLookup) ================================================ FILE: stacker/plan.py 
================================================ import os import logging import time import uuid import threading from .util import stack_template_key_name from .exceptions import ( GraphError, PlanFailed, ) from .ui import ui from .dag import DAG, DAGValidationError, walk from .status import ( FailedStatus, PENDING, SUBMITTED, COMPLETE, SKIPPED, FAILED, ) logger = logging.getLogger(__name__) COLOR_CODES = { SUBMITTED.code: 33, # yellow COMPLETE.code: 32, # green FAILED.code: 31, # red } def log_step(step): msg = "%s: %s" % (step, step.status.name) if step.status.reason: msg += " (%s)" % (step.status.reason) color_code = COLOR_CODES.get(step.status.code, 37) ui.info(msg, extra={"color": color_code}) class Step(object): """State machine for executing generic actions related to stacks. Args: stack (:class:`stacker.stack.Stack`): the stack associated with this step fn (func): the function to run to execute the step. This function will be ran multiple times until the step is "done". watch_func (func): an optional function that will be called to "tail" the step action. """ def __init__(self, stack, fn, watch_func=None): self.stack = stack self.status = PENDING self.last_updated = time.time() self.fn = fn self.watch_func = watch_func def __repr__(self): return "" % (self.stack.name,) def __str__(self): return self.stack.name def run(self): """Runs this step until it has completed successfully, or been skipped. 
""" stop_watcher = threading.Event() watcher = None if self.watch_func: watcher = threading.Thread( target=self.watch_func, args=(self.stack, stop_watcher) ) watcher.start() try: while not self.done: self._run_once() finally: if watcher: stop_watcher.set() watcher.join() return self.ok def _run_once(self): try: status = self.fn(self.stack, status=self.status) except Exception as e: logger.exception(e) status = FailedStatus(reason=str(e)) self.set_status(status) return status @property def name(self): return self.stack.name @property def requires(self): return self.stack.requires @property def required_by(self): return self.stack.required_by @property def completed(self): """Returns True if the step is in a COMPLETE state.""" return self.status == COMPLETE @property def skipped(self): """Returns True if the step is in a SKIPPED state.""" return self.status == SKIPPED @property def failed(self): """Returns True if the step is in a FAILED state.""" return self.status == FAILED @property def done(self): """Returns True if the step is finished (either COMPLETE, SKIPPED or FAILED) """ return self.completed or self.skipped or self.failed @property def ok(self): """Returns True if the step is finished (either COMPLETE or SKIPPED)""" return self.completed or self.skipped @property def submitted(self): """Returns True if the step is SUBMITTED, COMPLETE, or SKIPPED.""" return self.status >= SUBMITTED def set_status(self, status): """Sets the current step's status. Args: status (:class:`Status ` object): The status to set the step to. 
""" if status is not self.status: logger.debug("Setting %s state to %s.", self.stack.name, status.name) self.status = status self.last_updated = time.time() if self.stack.logging: log_step(self) def complete(self): """A shortcut for set_status(COMPLETE)""" self.set_status(COMPLETE) def skip(self): """A shortcut for set_status(SKIPPED)""" self.set_status(SKIPPED) def submit(self): """A shortcut for set_status(SUBMITTED)""" self.set_status(SUBMITTED) def build_plan(description, graph, targets=None, reverse=False): """Builds a plan from a list of steps. Args: description (str): an arbitrary string to describe the plan. graph (:class:`Graph`): a list of :class:`Graph` to execute. targets (list): an optional list of step names to filter the graph to. If provided, only these steps, and their transitive dependencies will be executed. If no targets are specified, every node in the graph will be executed. reverse (bool): If provided, the graph will be walked in reverse order (dependencies last). """ # If we want to execute the plan in reverse (e.g. Destroy), transpose the # graph. if reverse: graph = graph.transposed() # If we only want to build a specific target, filter the graph. if targets: nodes = [] for target in targets: for k, step in graph.steps.items(): if step.name == target: nodes.append(step.name) graph = graph.filtered(nodes) return Plan(description=description, graph=graph) def build_graph(steps): """Builds a graph of steps. Args: steps (list): a list of :class:`Step` objects to execute. """ graph = Graph() for step in steps: graph.add_step(step) for step in steps: for dep in step.requires: graph.connect(step.name, dep) for parent in step.required_by: graph.connect(parent, step.name) return graph class Graph(object): """Graph represents a graph of steps. The :class:`Graph` helps organize the steps needed to execute a particular action for a set of :class:`stacker.stack.Stack` objects. 
When initialized with a set of steps, it will first build a Directed Acyclic Graph from the steps and their dependencies. Example: >>> dag = DAG() >>> a = Step("a", fn=build) >>> b = Step("b", fn=build) >>> dag.add_step(a) >>> dag.add_step(b) >>> dag.connect(a, b) Args: steps (list): an optional list of :class:`Step` objects to execute. dag (:class:`stacker.dag.DAG`): an optional :class:`stacker.dag.DAG` object. If one is not provided, a new one will be initialized. """ def __init__(self, steps=None, dag=None): self.steps = steps or {} self.dag = dag or DAG() def add_step(self, step): self.steps[step.name] = step self.dag.add_node(step.name) def connect(self, step, dep): try: self.dag.add_edge(step, dep) except KeyError as e: raise GraphError(e, step, dep) except DAGValidationError as e: raise GraphError(e, step, dep) def transitive_reduction(self): self.dag.transitive_reduction() def walk(self, walker, walk_func): def fn(step_name): step = self.steps[step_name] return walk_func(step) return walker(self.dag, fn) def downstream(self, step_name): """Returns the direct dependencies of the given step""" return list(self.steps[dep] for dep in self.dag.downstream(step_name)) def transposed(self): """Returns a "transposed" version of this graph. Useful for walking in reverse. """ return Graph(steps=self.steps, dag=self.dag.transpose()) def filtered(self, step_names): """Returns a "filtered" version of this graph.""" return Graph(steps=self.steps, dag=self.dag.filter(step_names)) def topological_sort(self): nodes = self.dag.topological_sort() return [self.steps[step_name] for step_name in nodes] def to_dict(self): return self.dag.graph class Plan(object): """A convenience class for working on a Graph. Args: description (str): description of the plan. graph (:class:`Graph`): a graph of steps. 
""" def __init__(self, description, graph): self.id = uuid.uuid4() self.description = description self.graph = graph def outline(self, level=logging.INFO, message=""): """Print an outline of the actions the plan is going to take. The outline will represent the rough ordering of the steps that will be taken. Args: level (int, optional): a valid log level that should be used to log the outline message (str, optional): a message that will be logged to the user after the outline has been logged. """ steps = 1 logger.log(level, "Plan \"%s\":", self.description) for step in self.steps: logger.log( level, " - step: %s: target: \"%s\", action: \"%s\"", steps, step.name, step.fn.__name__, ) steps += 1 if message: logger.log(level, message) def dump(self, directory, context, provider=None): logger.info("Dumping \"%s\"...", self.description) directory = os.path.expanduser(directory) if not os.path.exists(directory): os.makedirs(directory) def walk_func(step): step.stack.resolve( context=context, provider=provider, ) blueprint = step.stack.blueprint filename = stack_template_key_name(blueprint) path = os.path.join(directory, filename) blueprint_dir = os.path.dirname(path) if not os.path.exists(blueprint_dir): os.makedirs(blueprint_dir) logger.info("Writing stack \"%s\" -> %s", step.name, path) with open(path, "w") as f: f.write(blueprint.rendered) return True return self.graph.walk(walk, walk_func) def execute(self, *args, **kwargs): """Walks each step in the underlying graph, and raises an exception if any of the steps fail. Raises: PlanFailed: Raised if any of the steps fail. """ self.walk(*args, **kwargs) failed_steps = [step for step in self.steps if step.status == FAILED] if failed_steps: raise PlanFailed(failed_steps) def walk(self, walker): """Walks each step in the underlying graph, in topological order. Args: walker (func): a walker function to be passed to :class:`stacker.dag.DAG` to walk the graph. 
""" def walk_func(step): # Before we execute the step, we need to ensure that it's # transitive dependencies are all in an "ok" state. If not, we # won't execute this step. for dep in self.graph.downstream(step.name): if not dep.ok: step.set_status(FailedStatus("dependency has failed")) return step.ok return step.run() return self.graph.walk(walker, walk_func) @property def steps(self): steps = self.graph.topological_sort() steps.reverse() return steps @property def step_names(self): return [step.name for step in self.steps] def keys(self): return self.step_names ================================================ FILE: stacker/providers/__init__.py ================================================ ================================================ FILE: stacker/providers/aws/__init__.py ================================================ ================================================ FILE: stacker/providers/aws/default.py ================================================ import json import yaml import logging import time import urllib.parse import sys # thread safe, memoized, provider builder. from threading import Lock import botocore.exceptions from botocore.config import Config from ..base import BaseProvider from ... import exceptions from ...ui import ui from ...util import parse_cloudformation_template from stacker.session_cache import get_session from ...actions.diff import ( DictValue, diff_parameters, format_params_diff as format_diff ) logger = logging.getLogger(__name__) # This value controls the maximum number of times a CloudFormation API call # will be attempted, after being throttled. 
This value is used in an # exponential backoff algorithm to determine how long the client should wait # until attempting a retry: # # base * growth_factor ^ (attempts - 1) # # A value of 10 here would cause the worst case wait time for the last retry to # be ~8 mins: # # 1 * 2 ^ (10 - 1) = 512 seconds # # References: # https://github.com/boto/botocore/blob/1.6.1/botocore/retryhandler.py#L39-L58 # https://github.com/boto/botocore/blob/1.6.1/botocore/data/_retry.json#L97-L121 MAX_ATTEMPTS = 10 # Updated this to 15 retries with a 1 second sleep between retries. This is # only used when a call to `get_events` fails due to the stack not being # found. This is often the case because Cloudformation is taking too long # to create the stack. 15 seconds should, hopefully, be plenty of time for # the stack to start showing up in the API. MAX_TAIL_RETRIES = 15 TAIL_RETRY_SLEEP = 1 GET_EVENTS_SLEEP = 1 DEFAULT_CAPABILITIES = ["CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"] def get_cloudformation_client(session): config = Config( retries=dict( max_attempts=MAX_ATTEMPTS ) ) return session.client('cloudformation', config=config) def get_output_dict(stack): """Returns a dict of key/values for the outputs for a given CF stack. Args: stack (dict): The stack object to get outputs from. Returns: dict: A dictionary with key/values for each output on the stack. """ outputs = {} if 'Outputs' not in stack: return outputs for output in stack['Outputs']: logger.debug(" %s %s: %s", stack['StackName'], output['OutputKey'], output['OutputValue']) outputs[output['OutputKey']] = output['OutputValue'] return outputs def s3_fallback(fqn, template, parameters, tags, method, change_set_name=None, service_role=None): logger.warn("DEPRECATION WARNING: Falling back to legacy " "stacker S3 bucket region for templates. 
See " "http://stacker.readthedocs.io/en/latest/config.html#s3-bucket" " for more information.") # extra line break on purpose to avoid status updates removing URL # from view logger.warn("\n") logger.debug("Modifying the S3 TemplateURL to point to " "us-east-1 endpoint") template_url = template.url template_url_parsed = urllib.parse.urlparse(template_url) template_url_parsed = template_url_parsed._replace( netloc="s3.amazonaws.com") template_url = urllib.parse.urlunparse(template_url_parsed) logger.debug("Using template_url: %s", template_url) args = generate_cloudformation_args( fqn, parameters, tags, template, service_role=service_role, change_set_name=get_change_set_name() ) response = method(**args) return response def get_change_set_name(): """Return a valid Change Set Name. The name has to satisfy the following regex: [a-zA-Z][-a-zA-Z0-9]* And must be unique across all change sets. """ return 'change-set-{}'.format(int(time.time())) def requires_replacement(changeset): """Return the changes within the changeset that require replacement. Args: changeset (list): List of changes Returns: list: A list of changes that require replacement, if any. """ return [r for r in changeset if r["ResourceChange"].get( "Replacement", False) == "True"] def output_full_changeset(full_changeset=None, params_diff=None, answer=None, fqn=None): """Optionally output full changeset. Args: full_changeset (list, optional): A list of the full changeset that will be output if the user specifies verbose. params_diff (list, optional): A list of DictValue detailing the differences between two parameters returned by :func:`stacker.actions.diff.diff_dictionaries` answer (str, optional): predetermined answer to the prompt if it has already been answered or inferred. fqn (str, optional): fully qualified name of the stack. """ if not answer: answer = ui.ask('Show full change set? 
[y/n] ').lower() if answer == 'n': return if answer in ['y', 'v']: if fqn: msg = '%s full changeset' % (fqn) else: msg = 'Full changeset' if params_diff: logger.info( "%s:\n\n%s\n%s", msg, format_params_diff(params_diff), yaml.safe_dump(full_changeset), ) else: logger.info( "%s:\n%s", msg, yaml.safe_dump(full_changeset), ) return raise exceptions.CancelExecution def ask_for_approval(full_changeset=None, params_diff=None, include_verbose=False, fqn=None): """Prompt the user for approval to execute a change set. Args: full_changeset (list, optional): A list of the full changeset that will be output if the user specifies verbose. params_diff (list, optional): A list of DictValue detailing the differences between two parameters returned by :func:`stacker.actions.diff.diff_dictionaries` include_verbose (bool, optional): Boolean for whether or not to include the verbose option. fqn (str): fully qualified name of the stack. """ approval_options = ['y', 'n'] if include_verbose: approval_options.append('v') approve = ui.ask("Execute the above changes? [{}] ".format( '/'.join(approval_options))).lower() if include_verbose and approve == "v": output_full_changeset(full_changeset=full_changeset, params_diff=params_diff, answer=approve, fqn=fqn) return ask_for_approval(fqn=fqn) elif approve != "y": raise exceptions.CancelExecution def output_summary(fqn, action, changeset, params_diff, replacements_only=False): """Log a summary of the changeset. 
Args: fqn (string): fully qualified name of the stack action (string): action to include in the log message changeset (list): AWS changeset params_diff (list): A list of dictionaries detailing the differences between two parameters returned by :func:`stacker.actions.diff.diff_dictionaries` replacements_only (bool, optional): boolean for whether or not we only want to list replacements """ replacements = [] changes = [] for change in changeset: resource = change['ResourceChange'] replacement = resource.get('Replacement') == 'True' summary = '- %s %s (%s)' % ( resource['Action'], resource['LogicalResourceId'], resource['ResourceType'], ) if replacement: replacements.append(summary) else: changes.append(summary) summary = '' if params_diff: summary += summarize_params_diff(params_diff) if replacements: if not replacements_only: summary += 'Replacements:\n' summary += '\n'.join(replacements) if changes: if summary: summary += '\n' summary += 'Changes:\n%s' % ('\n'.join(changes)) logger.info('%s %s:\n%s', fqn, action, summary) def format_params_diff(params_diff): """ Just a wrapper for stacker.actions.diff.format_params_diff for testing purposes. """ return format_diff(params_diff) def summarize_params_diff(params_diff): summary = '' added_summary = [v.key for v in params_diff if v.status() is DictValue.ADDED] if added_summary: summary += 'Parameters Added: %s\n' % ', '.join(added_summary) removed_summary = [v.key for v in params_diff if v.status() is DictValue.REMOVED] if removed_summary: summary += 'Parameters Removed: %s\n' % ', '.join(removed_summary) modified_summary = [v.key for v in params_diff if v.status() is DictValue.MODIFIED] if modified_summary: summary += 'Parameters Modified: %s\n' % ', '.join(modified_summary) return summary def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25, sleep_time=.5, max_sleep=3): """ Checks state of a changeset, returning when it is in a complete state. 
Since changesets can take a little bit of time to get into a complete state, we need to poll it until it does so. This will try to get the state `try_count` times, waiting `sleep_time` * 2 seconds between each try up to the `max_sleep` number of seconds. If, after that time, the changeset is not in a complete state it fails. These default settings will wait a little over one minute. Args: cfn_client (:class:`botocore.client.CloudFormation`): Used to query cloudformation. change_set_id (str): The unique changeset id to wait for. try_count (int): Number of times to try the call. sleep_time (int): Time to sleep between attempts. max_sleep (int): Max time to sleep during backoff Return: dict: The response from cloudformation for the describe_change_set call. """ complete = False response = None for i in range(try_count): response = cfn_client.describe_change_set( ChangeSetName=change_set_id, ) complete = response["Status"] in ("FAILED", "CREATE_COMPLETE") if complete: break if sleep_time == max_sleep: logger.debug( "Still waiting on changeset for another %s seconds", sleep_time ) time.sleep(sleep_time) # exponential backoff with max sleep_time = min(sleep_time * 2, max_sleep) if not complete: raise exceptions.ChangesetDidNotStabilize(change_set_id) return response def create_change_set( cfn_client, fqn, template, parameters, tags, change_set_type='UPDATE', replacements_only=False, service_role=None, notification_arns=None ): logger.debug("Attempting to create change set of type %s for stack: %s.", change_set_type, fqn) args = generate_cloudformation_args( fqn, parameters, tags, template, change_set_type=change_set_type, service_role=service_role, change_set_name=get_change_set_name(), notification_arns=notification_arns ) try: response = cfn_client.create_change_set(**args) except botocore.exceptions.ClientError as e: if e.response['Error']['Message'] == ('TemplateURL must reference ' 'a valid S3 object to which ' 'you have access.'): response = s3_fallback(fqn, 
template, parameters, tags, cfn_client.create_change_set, get_change_set_name(), service_role) else: raise change_set_id = response["Id"] response = wait_till_change_set_complete( cfn_client, change_set_id ) status = response["Status"] if status == "FAILED": status_reason = response["StatusReason"] if ("didn't contain changes" in response["StatusReason"] or "No updates are to be performed" in response["StatusReason"]): logger.debug( "Stack %s did not change, not updating and removing " "changeset.", fqn, ) cfn_client.delete_change_set(ChangeSetName=change_set_id) raise exceptions.StackDidNotChange() logger.warn( "Got strange status, '%s' for changeset '%s'. Not deleting for " "further investigation - you will need to delete the changeset " "manually.", status, change_set_id ) raise exceptions.UnhandledChangeSetStatus( fqn, change_set_id, status, status_reason ) execution_status = response["ExecutionStatus"] if execution_status != "AVAILABLE": raise exceptions.UnableToExecuteChangeSet(fqn, change_set_id, execution_status) changes = response["Changes"] return changes, change_set_id def check_tags_contain(actual, expected): """Check if a set of AWS resource tags is contained in another Every tag key in `expected` must be present in `actual`, and have the same value. Extra keys in `actual` but not in `expected` are ignored. Args: actual (list): Set of tags to be verified, usually from the description of a resource. Each item must be a `dict` containing `Key` and `Value` items. expected (list): Set of tags that must be present in `actual` (in the same format). 
""" actual_set = set((item["Key"], item["Value"]) for item in actual) expected_set = set((item["Key"], item["Value"]) for item in expected) return actual_set >= expected_set def generate_cloudformation_args( stack_name, parameters, tags, template, capabilities=DEFAULT_CAPABILITIES, change_set_type=None, service_role=None, stack_policy=None, change_set_name=None, notification_arns=None, ): """Used to generate the args for common cloudformation API interactions. This is used for create_stack/update_stack/create_change_set calls in cloudformation. Args: stack_name (str): The fully qualified stack name in Cloudformation. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. template (:class:`stacker.provider.base.Template`): The template object. capabilities (list, optional): A list of capabilities to use when updating Cloudformation. change_set_type (str, optional): An optional change set type to use with create_change_set. service_role (str, optional): An optional service role to use when interacting with Cloudformation. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. change_set_name (str, optional): An optional change set name to use with create_change_set. notification_arns (list, optional): An optional list of SNS topic ARNs to send CloudFormation Events to. Returns: dict: A dictionary of arguments to be used in the Cloudformation API call. 
""" args = { "StackName": stack_name, "Parameters": parameters, "Tags": tags, "Capabilities": capabilities, } if service_role: args["RoleARN"] = service_role if change_set_name: args["ChangeSetName"] = change_set_name if notification_arns: args["NotificationARNs"] = notification_arns if change_set_type: args["ChangeSetType"] = change_set_type if template.url: args["TemplateURL"] = template.url else: args["TemplateBody"] = template.body # When creating args for CreateChangeSet, don't include the stack policy, # since ChangeSets don't support it. if not change_set_name: args.update(generate_stack_policy_args(stack_policy)) return args def generate_stack_policy_args(stack_policy=None): """ Converts a stack policy object into keyword args. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. Returns: dict: A dictionary of keyword arguments to be used elsewhere. """ args = {} if stack_policy: logger.debug("Stack has a stack policy") if stack_policy.url: # stacker currently does not support uploading stack policies to # S3, so this will never get hit (unless your implementing S3 # uploads, and then you're probably reading this comment about why # the exception below was raised :)) # # args["StackPolicyURL"] = stack_policy.url raise NotImplementedError else: args["StackPolicyBody"] = stack_policy.body return args class ProviderBuilder(object): """Implements a Memoized ProviderBuilder for the AWS provider.""" def __init__(self, region=None, **kwargs): self.region = region self.kwargs = kwargs self.providers = {} self.lock = Lock() def build(self, region=None, profile=None): """Get or create the provider for the given region and profile.""" with self.lock: # memoization lookup key derived from region + profile. key = "{}-{}".format(profile, region) try: # assume provider is in provider dictionary. provider = self.providers[key] except KeyError: msg = "Missed memoized lookup ({}), creating new AWS Provider." 
logger.debug(msg.format(key)) if not region: region = self.region # memoize the result for later. self.providers[key] = Provider( get_session(region=region, profile=profile), region=region, **self.kwargs ) provider = self.providers[key] return provider class Provider(BaseProvider): """AWS CloudFormation Provider""" DELETED_STATUS = "DELETE_COMPLETE" IN_PROGRESS_STATUSES = ( "CREATE_IN_PROGRESS", "IMPORT_IN_PROGRESS", "UPDATE_IN_PROGRESS", "DELETE_IN_PROGRESS", "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", ) ROLLING_BACK_STATUSES = ( "ROLLBACK_IN_PROGRESS", "IMPORT_ROLLBACK_IN_PROGRESS", "UPDATE_ROLLBACK_IN_PROGRESS" ) FAILED_STATUSES = ( "CREATE_FAILED", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE", "DELETE_FAILED", "IMPORT_ROLLBACK_FAILED", "UPDATE_ROLLBACK_FAILED", # Note: UPDATE_ROLLBACK_COMPLETE is in both the FAILED and COMPLETE # sets, because we need to wait for it when a rollback is triggered, # but still mark the stack as failed. "UPDATE_ROLLBACK_COMPLETE", ) COMPLETE_STATUSES = ( "CREATE_COMPLETE", "DELETE_COMPLETE", "IMPORT_COMPLETE", "UPDATE_COMPLETE", "IMPORT_ROLLBACK_COMPLETE", "UPDATE_ROLLBACK_COMPLETE", ) RECREATION_STATUSES = ( "CREATE_FAILED", "ROLLBACK_FAILED", "ROLLBACK_COMPLETE" ) REVIEW_STATUS = "REVIEW_IN_PROGRESS" def __init__(self, session, region=None, interactive=False, replacements_only=False, recreate_failed=False, service_role=None, **kwargs): self._outputs = {} self.region = region self.cloudformation = get_cloudformation_client(session) self.interactive = interactive # replacements only is only used in interactive mode self.replacements_only = interactive and replacements_only self.recreate_failed = interactive or recreate_failed self.service_role = service_role def get_stack(self, stack_name, **kwargs): try: return self.cloudformation.describe_stacks( StackName=stack_name)['Stacks'][0] except botocore.exceptions.ClientError as e: if "does not exist" not in str(e): raise raise exceptions.StackDoesNotExist(stack_name) def get_stack_status(self, 
stack, **kwargs): return stack['StackStatus'] def is_stack_completed(self, stack, **kwargs): return self.get_stack_status(stack) in self.COMPLETE_STATUSES def is_stack_in_progress(self, stack, **kwargs): return self.get_stack_status(stack) in self.IN_PROGRESS_STATUSES def is_stack_destroyed(self, stack, **kwargs): return self.get_stack_status(stack) == self.DELETED_STATUS def is_stack_recreatable(self, stack, **kwargs): return self.get_stack_status(stack) in self.RECREATION_STATUSES def is_stack_rolling_back(self, stack, **kwargs): return self.get_stack_status(stack) in self.ROLLING_BACK_STATUSES def is_stack_failed(self, stack, **kwargs): return self.get_stack_status(stack) in self.FAILED_STATUSES def is_stack_in_review(self, stack, **kwargs): return self.get_stack_status(stack) == self.REVIEW_STATUS def tail_stack(self, stack, cancel, log_func=None, **kwargs): def _log_func(e): event_args = [e['ResourceStatus'], e['ResourceType'], e.get('ResourceStatusReason', None)] # filter out any values that are empty event_args = [arg for arg in event_args if arg] template = " ".join(["[%s]"] + ["%s" for _ in event_args]) logger.info(template, *([stack.fqn] + event_args)) log_func = log_func or _log_func logger.info("Tailing stack: %s", stack.fqn) attempts = 0 while True: attempts += 1 try: self.tail(stack.fqn, cancel=cancel, log_func=log_func, include_initial=False) break except botocore.exceptions.ClientError as e: if "does not exist" in str(e) and attempts < MAX_TAIL_RETRIES: # stack might be in the process of launching, wait for a # second and try again if cancel.wait(TAIL_RETRY_SLEEP): return continue else: raise @staticmethod def _tail_print(e): print("%s %s %s" % (e['ResourceStatus'], e['ResourceType'], e['EventId'])) def get_events(self, stack_name, chronological=True): """Get the events in batches and return in chronological order""" next_token = None event_list = [] while True: if next_token is not None: events = self.cloudformation.describe_stack_events( 
StackName=stack_name, NextToken=next_token ) else: events = self.cloudformation.describe_stack_events( StackName=stack_name ) event_list.append(events['StackEvents']) next_token = events.get('NextToken', None) if next_token is None: break time.sleep(GET_EVENTS_SLEEP) if chronological: return reversed(sum(event_list, [])) else: return sum(event_list, []) def get_rollback_status_reason(self, stack_name): """Process events and returns latest roll back reason""" event = next((item for item in self.get_events(stack_name, False) if item["ResourceStatus"] == "UPDATE_ROLLBACK_IN_PROGRESS"), None) if event: reason = event["ResourceStatusReason"] return reason else: event = next((item for item in self.get_events(stack_name) if item["ResourceStatus"] == "ROLLBACK_IN_PROGRESS"), None) reason = event["ResourceStatusReason"] return reason def tail(self, stack_name, cancel, log_func=_tail_print, sleep_time=5, include_initial=True): """Show and then tail the event log""" # First dump the full list of events in chronological order and keep # track of the events we've seen already seen = set() initial_events = self.get_events(stack_name) for e in initial_events: if include_initial: log_func(e) seen.add(e['EventId']) # Now keep looping through and dump the new events while True: events = self.get_events(stack_name) for e in events: if e['EventId'] not in seen: log_func(e) seen.add(e['EventId']) if cancel.wait(sleep_time): return def destroy_stack(self, stack, **kwargs): logger.debug("Destroying stack: %s" % (self.get_stack_name(stack))) args = {"StackName": self.get_stack_name(stack)} if self.service_role: args["RoleARN"] = self.service_role self.cloudformation.delete_stack(**args) return True def create_stack( self, fqn, template, parameters, tags, force_change_set=False, stack_policy=None, notification_arns=None, **kwargs ): """Create a new Cloudformation stack. Args: fqn (str): The fully qualified name of the Cloudformation stack. 
template (:class:`stacker.providers.base.Template`): A Template object to use when creating the stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. force_change_set (bool): Whether or not to force change set use. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. notification_arns (list, optional): An optional list of SNS topic ARNs to send CloudFormation Events to. """ logger.debug("Attempting to create stack %s:.", fqn) logger.debug(" parameters: %s", parameters) logger.debug(" tags: %s", tags) if template.url: logger.debug(" template_url: %s", template.url) else: logger.debug(" no template url, uploading template " "directly.") if force_change_set: logger.debug("force_change_set set to True, creating stack with " "changeset.") _changes, change_set_id = create_change_set( self.cloudformation, fqn, template, parameters, tags, 'CREATE', service_role=self.service_role, **kwargs ) self.cloudformation.execute_change_set( ChangeSetName=change_set_id, ) else: args = generate_cloudformation_args( fqn, parameters, tags, template, service_role=self.service_role, stack_policy=stack_policy, notification_arns=notification_arns ) try: self.cloudformation.create_stack(**args) except botocore.exceptions.ClientError as e: if e.response['Error']['Message'] == ('TemplateURL must ' 'reference a valid S3 ' 'object to which you ' 'have access.'): s3_fallback(fqn, template, parameters, tags, self.cloudformation.create_stack, self.service_role) else: raise def select_update_method(self, force_interactive, force_change_set): """Select the correct update method when updating a stack. Args: force_interactive (str): Whether or not to force interactive mode no matter what mode the provider is in. force_change_set (bool): Whether or not to force change set use. 
Returns: function: The correct object method to use when updating. """ if self.interactive or force_interactive: return self.interactive_update_stack elif force_change_set: return self.noninteractive_changeset_update else: return self.default_update_stack def prepare_stack_for_update(self, stack, tags): """Prepare a stack for updating It may involve deleting the stack if is has failed it's initial creation. The deletion is only allowed if: - The stack contains all the tags configured in the current context; - The stack is in one of the statuses considered safe to re-create - ``recreate_failed`` is enabled, due to either being explicitly enabled by the user, or because interactive mode is on. Args: stack (dict): a stack object returned from get_stack tags (list): list of expected tags that must be present in the stack if it must be re-created Returns: bool: True if the stack can be updated, False if it must be re-created """ if self.is_stack_destroyed(stack): return False elif self.is_stack_completed(stack): return True stack_name = self.get_stack_name(stack) stack_status = self.get_stack_status(stack) if self.is_stack_in_progress(stack): raise exceptions.StackUpdateBadStatus( stack_name, stack_status, 'Update already in-progress') if not self.is_stack_recreatable(stack): raise exceptions.StackUpdateBadStatus( stack_name, stack_status, 'Unsupported state for re-creation') if not self.recreate_failed: raise exceptions.StackUpdateBadStatus( stack_name, stack_status, 'Stack re-creation is disabled. 
Run stacker again with the ' '--recreate-failed option to force it to be deleted and ' 'created from scratch.') stack_tags = self.get_stack_tags(stack) if not check_tags_contain(stack_tags, tags): raise exceptions.StackUpdateBadStatus( stack_name, stack_status, 'Tags differ from current configuration, possibly not created ' 'with stacker') if self.interactive: sys.stdout.write( 'The \"%s\" stack is in a failed state (%s).\n' 'It cannot be updated, but it can be deleted and re-created.\n' 'All its current resources will IRREVERSIBLY DESTROYED.\n' 'Proceed carefully!\n\n' % (stack_name, stack_status)) sys.stdout.flush() ask_for_approval(include_verbose=False, fqn=stack_name) logger.warn('Destroying stack \"%s\" for re-creation', stack_name) self.destroy_stack(stack) return False def update_stack(self, fqn, template, old_parameters, parameters, tags, force_interactive=False, force_change_set=False, stack_policy=None, **kwargs): """Update a Cloudformation stack. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. force_interactive (bool): A flag that indicates whether the update should be interactive. If set to True, interactive mode will be used no matter if the provider is in interactive mode or not. False will follow the behavior of the provider. force_change_set (bool): A flag that indicates whether the update must be executed with a change set. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. 
""" logger.debug("Attempting to update stack %s:", fqn) logger.debug(" parameters: %s", parameters) logger.debug(" tags: %s", tags) if template.url: logger.debug(" template_url: %s", template.url) else: logger.debug(" no template url, uploading template directly.") update_method = self.select_update_method(force_interactive, force_change_set) return update_method(fqn, template, old_parameters, parameters, stack_policy=stack_policy, tags=tags, **kwargs) def deal_with_changeset_stack_policy(self, fqn, stack_policy): """ Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. """ if stack_policy: kwargs = generate_stack_policy_args(stack_policy) kwargs["StackName"] = fqn logger.debug("Setting stack policy on %s.", fqn) self.cloudformation.set_stack_policy(**kwargs) def interactive_update_stack(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs): """Update a Cloudformation stack in interactive mode. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. 
""" logger.debug("Using interactive provider mode for %s.", fqn) changes, change_set_id = create_change_set( self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs ) old_parameters_as_dict = self.params_as_dict(old_parameters) new_parameters_as_dict = self.params_as_dict( [x if 'ParameterValue' in x else {'ParameterKey': x['ParameterKey'], 'ParameterValue': old_parameters_as_dict[x['ParameterKey']]} for x in parameters] ) params_diff = diff_parameters( old_parameters_as_dict, new_parameters_as_dict) action = "replacements" if self.replacements_only else "changes" full_changeset = changes if self.replacements_only: changes = requires_replacement(changes) if changes or params_diff: ui.lock() try: output_summary(fqn, action, changes, params_diff, replacements_only=self.replacements_only) ask_for_approval( full_changeset=full_changeset, params_diff=params_diff, include_verbose=True, fqn=fqn, ) finally: ui.unlock() self.deal_with_changeset_stack_policy(fqn, stack_policy) self.cloudformation.execute_change_set( ChangeSetName=change_set_id, ) def noninteractive_changeset_update(self, fqn, template, old_parameters, parameters, stack_policy, tags, **kwargs): """Update a Cloudformation stack using a change set. This is required for stacks with a defined Transform (i.e. SAM), as the default update_stack API cannot be used with them. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. 
tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. """ logger.debug("Using noninterative changeset provider mode " "for %s.", fqn) _changes, change_set_id = create_change_set( self.cloudformation, fqn, template, parameters, tags, 'UPDATE', service_role=self.service_role, **kwargs ) self.deal_with_changeset_stack_policy(fqn, stack_policy) self.cloudformation.execute_change_set( ChangeSetName=change_set_id, ) def default_update_stack(self, fqn, template, old_parameters, parameters, tags, stack_policy=None, notification_arns=[], **kwargs): """Update a Cloudformation stack in default mode. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. """ logger.debug("Using default provider mode for %s.", fqn) args = generate_cloudformation_args( fqn, parameters, tags, template, service_role=self.service_role, stack_policy=stack_policy, notification_arns=notification_arns ) try: self.cloudformation.update_stack(**args) except botocore.exceptions.ClientError as e: if "No updates are to be performed." 
in str(e): logger.debug( "Stack %s did not change, not updating.", fqn, ) raise exceptions.StackDidNotChange elif e.response['Error']['Message'] == ('TemplateURL must ' 'reference a valid ' 'S3 object to which ' 'you have access.'): s3_fallback(fqn, template, parameters, tags, self.cloudformation.update_stack, self.service_role) else: raise def get_stack_name(self, stack, **kwargs): return stack['StackName'] def get_stack_tags(self, stack, **kwargs): return stack['Tags'] def get_outputs(self, stack_name, *args, **kwargs): if stack_name not in self._outputs: stack = self.get_stack(stack_name) self._outputs[stack_name] = get_output_dict(stack) return self._outputs[stack_name] def get_output_dict(self, stack): return get_output_dict(stack) def get_stack_info(self, stack): """ Get the template and parameters of the stack currently in AWS Returns [ template, parameters ] """ stack_name = stack['StackId'] try: template = self.cloudformation.get_template( StackName=stack_name)['TemplateBody'] except botocore.exceptions.ClientError as e: if "does not exist" not in str(e): raise raise exceptions.StackDoesNotExist(stack_name) parameters = self.params_as_dict(stack.get('Parameters', [])) if isinstance(template, str): # handle yaml templates template = parse_cloudformation_template(template) return [json.dumps(template), parameters] def get_stack_changes(self, stack, template, parameters, tags, **kwargs): """Get the changes from a ChangeSet. Args: stack (:class:`stacker.stack.Stack`): the stack to get changes template (:class:`stacker.providers.base.Template`): A Template object to compaired to. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. Returns: dict: Stack outputs with inferred changes. 
""" try: stack_details = self.get_stack(stack.fqn) # handling for orphaned changeset temp stacks if self.get_stack_status( stack_details) == self.REVIEW_STATUS: raise exceptions.StackDoesNotExist(stack.fqn) _old_template, old_params = self.get_stack_info( stack_details ) old_template = parse_cloudformation_template(_old_template) change_type = 'UPDATE' except exceptions.StackDoesNotExist: old_params = {} old_template = {} change_type = 'CREATE' changes, change_set_id = create_change_set( self.cloudformation, stack.fqn, template, parameters, tags, change_type, service_role=self.service_role, **kwargs ) new_parameters_as_dict = self.params_as_dict( [x if 'ParameterValue' in x else {'ParameterKey': x['ParameterKey'], 'ParameterValue': old_params[x['ParameterKey']]} for x in parameters] ) params_diff = diff_parameters(old_params, new_parameters_as_dict) if changes or params_diff: ui.lock() try: if self.interactive: output_summary(stack.fqn, 'changes', changes, params_diff, replacements_only=self.replacements_only) output_full_changeset(full_changeset=changes, params_diff=params_diff, fqn=stack.fqn) else: output_full_changeset(full_changeset=changes, params_diff=params_diff, answer='y', fqn=stack.fqn) finally: ui.unlock() self.cloudformation.delete_change_set( ChangeSetName=change_set_id ) # ensure current stack outputs are loaded self.get_outputs(stack.fqn) # infer which outputs may have changed refs_to_invalidate = [] for change in changes: resc_change = change.get('ResourceChange', {}) if resc_change.get('Type') == 'Add': continue # we don't care about anything new # scope of changes that can invalidate a change if resc_change and (resc_change.get('Replacement') == 'True' or 'Properties' in resc_change['Scope']): logger.debug('%s added to invalidation list for %s', resc_change['LogicalResourceId'], stack.fqn) refs_to_invalidate.append(resc_change['LogicalResourceId']) # invalidate cached outputs with inferred changes for output, props in old_template.get('Outputs', 
{}).items(): if any(r in str(props['Value']) for r in refs_to_invalidate): self._outputs[stack.fqn].pop(output) logger.debug('Removed %s from the outputs of %s', output, stack.fqn) # push values for new + invalidated outputs to outputs for output_name, output_params in \ stack.blueprint.get_output_definitions().items(): if output_name not in self._outputs[stack.fqn]: self._outputs[stack.fqn][output_name] = ( ''.format( stack.fqn, output_name, str(output_params['Value']) ) ) # when creating a changeset for a new stack, CFN creates a temporary # stack with a status of REVIEW_IN_PROGRESS. this is only removed if # the changeset is executed or it is manually deleted. if change_type == 'CREATE': try: temp_stack = self.get_stack(stack.fqn) if self.is_stack_in_review(temp_stack): logger.debug('Removing temporary stack that is created ' 'with a ChangeSet of type "CREATE"') self.destroy_stack(temp_stack) except exceptions.StackDoesNotExist: # not an issue if the stack was already cleaned up logger.debug('Stack does not exist: %s', stack.fqn) return self.get_outputs(stack.fqn) @staticmethod def params_as_dict(parameters_list): parameters = dict() for p in parameters_list: parameters[p['ParameterKey']] = p['ParameterValue'] return parameters ================================================ FILE: stacker/providers/base.py ================================================ def not_implemented(method): raise NotImplementedError("Provider does not support '%s' " "method." 
% method) class BaseProviderBuilder(object): def build(self, region=None): not_implemented("build") class BaseProvider(object): def get_stack(self, stack_name, *args, **kwargs): # pylint: disable=unused-argument not_implemented("get_stack") def create_stack(self, *args, **kwargs): # pylint: disable=unused-argument not_implemented("create_stack") def update_stack(self, *args, **kwargs): # pylint: disable=unused-argument not_implemented("update_stack") def destroy_stack(self, *args, **kwargs): # pylint: disable=unused-argument not_implemented("destroy_stack") def get_stack_status(self, stack_name, *args, **kwargs): # pylint: disable=unused-argument not_implemented("get_stack_status") def get_outputs(self, stack_name, *args, **kwargs): # pylint: disable=unused-argument not_implemented("get_outputs") def get_output(self, stack_name, output): # pylint: disable=unused-argument return self.get_outputs(stack_name)[output] class Template(object): """A value object that represents a CloudFormation stack template, which could be optionally uploaded to s3. Presence of the url attribute indicates that the template was uploaded to S3, and the uploaded template should be used for CreateStack/UpdateStack calls. """ def __init__(self, url=None, body=None): self.url = url self.body = body ================================================ FILE: stacker/session_cache.py ================================================ import boto3 import logging from .ui import ui logger = logging.getLogger(__name__) # A global credential cache that can be shared among boto3 sessions. 
This is # inherently threadsafe thanks to the GIL: # https://docs.python.org/3/glossary.html#term-global-interpreter-lock credential_cache = {} default_profile = None def get_session(region, profile=None): """Creates a boto3 session with a cache Args: region (str): The region for the session profile (str): The profile for the session Returns: :class:`boto3.session.Session`: A boto3 session with credential caching """ if profile is None: logger.debug("No AWS profile explicitly provided. " "Falling back to default.") profile = default_profile logger.debug("Building session using profile \"%s\" in region \"%s\"" % (profile, region)) session = boto3.Session(region_name=region, profile_name=profile) c = session._session.get_component('credential_provider') provider = c.get_provider('assume-role') provider.cache = credential_cache provider._prompter = ui.getpass return session ================================================ FILE: stacker/stack.py ================================================ import copy from . import util from .variables import ( Variable, resolve_variables, ) from .blueprints.raw import RawTemplateBlueprint def _gather_variables(stack_def): """Merges context provided & stack defined variables. If multiple stacks have a variable with the same name, we can specify the value for a specific stack by passing in the variable name as: `::`. This variable value will only be used for that specific stack. Order of precedence: - context defined stack specific variables (ie. SomeStack::SomeVariable) - context defined non-specific variables - variable defined within the stack definition Args: stack_def (dict): The stack definition being worked on. Returns: dict: Contains key/value pairs of the collected variables. Raises: AttributeError: Raised when the stack definitition contains an invalid attribute. Currently only when using old parameters, rather than variables. 
""" variable_values = copy.deepcopy(stack_def.variables or {}) return [Variable(k, v) for k, v in variable_values.items()] class Stack(object): """Represents gathered information about a stack to be built/updated. Args: definition (:class:`stacker.config.Stack`): A stack definition. context (:class:`stacker.context.Context`): Current context for building the stack. mappings (dict, optional): Cloudformation mappings passed to the blueprint. locked (bool, optional): Whether or not the stack is locked. force (bool, optional): Whether to force updates on this stack. enabled (bool, optional): Whether this stack is enabled. protected (boot, optional): Whether this stack is protected. notification_arns (list, optional): An optional list of SNS topic ARNs to send CloudFormation Events to. """ def __init__( self, definition, context, variables=None, mappings=None, locked=False, force=False, enabled=True, protected=False, notification_arns=None, ): self.logging = True self.name = definition.name self.fqn = context.get_fqn(definition.stack_name or self.name) self.region = definition.region self.profile = definition.profile self.definition = definition self.variables = _gather_variables(definition) self.mappings = mappings self.locked = locked self.force = force self.enabled = enabled self.protected = protected self.context = context self.outputs = None self.in_progress_behavior = definition.in_progress_behavior self.notification_arns = notification_arns def __repr__(self): return self.fqn @property def required_by(self): return self.definition.required_by or [] @property def requires(self): requires = set(self.definition.requires or []) # Add any dependencies based on output lookups for variable in self.variables: deps = variable.dependencies() if self.name in deps: message = ( "Variable %s in stack %s has a circular reference" ) % (variable.name, self.name) raise ValueError(message) requires.update(deps) return requires @property def stack_policy(self): if not hasattr(self, 
"_stack_policy"): self._stack_policy = None if self.definition.stack_policy_path: with open(self.definition.stack_policy_path) as f: self._stack_policy = f.read() return self._stack_policy @property def blueprint(self): if not hasattr(self, "_blueprint"): kwargs = {} blueprint_class = None if self.definition.class_path: class_path = self.definition.class_path blueprint_class = util.load_object_from_string(class_path) if not hasattr(blueprint_class, "rendered"): raise AttributeError("Stack class %s does not have a " "\"rendered\" " "attribute." % (class_path,)) elif self.definition.template_path: blueprint_class = RawTemplateBlueprint kwargs["raw_template_path"] = self.definition.template_path else: raise AttributeError("Stack does not have a defined class or " "template path.") self._blueprint = blueprint_class( name=self.name, context=self.context, mappings=self.mappings, description=self.definition.description, **kwargs ) return self._blueprint @property def tags(self): """Returns the tags that should be set on this stack. Includes both the global tags, as well as any stack specific tags or overrides. Returns: dict: dictionary of tags """ tags = self.definition.tags or {} return dict(self.context.tags, **tags) @property def parameter_values(self): """Return all CloudFormation Parameters for the stack. CloudFormation Parameters can be specified via Blueprint Variables with a :class:`stacker.blueprints.variables.types.CFNType` `type`. Returns: dict: dictionary of : . """ return self.blueprint.get_parameter_values() @property def all_parameter_definitions(self): """Return a list of all parameters in the blueprint/template.""" return self.blueprint.get_parameter_definitions() @property def required_parameter_definitions(self): """Return all the required CloudFormation Parameters for the stack.""" return self.blueprint.get_required_parameter_definitions() def resolve(self, context, provider): """Resolve the Stack variables. 
This resolves the Stack variables and then prepares the Blueprint for rendering by passing the resolved variables to the Blueprint. Args: context (:class:`stacker.context.Context`): stacker context provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider """ resolve_variables(self.variables, context, provider) self.blueprint.resolve_variables(self.variables) def set_outputs(self, outputs): self.outputs = outputs ================================================ FILE: stacker/status.py ================================================ import operator class Status(object): def __init__(self, name, code, reason=None): self.name = name self.code = code self.reason = reason or getattr(self, "reason", None) def _comparison(self, operator, other): if hasattr(other, "code"): return operator(self.code, other.code) return NotImplemented def __eq__(self, other): return self._comparison(operator.eq, other) def __ne__(self, other): return self._comparison(operator.ne, other) def __lt__(self, other): return self._comparison(operator.lt, other) def __gt__(self, other): return self._comparison(operator.gt, other) def __le__(self, other): return self._comparison(operator.le, other) def __ge__(self, other): return self._comparison(operator.ge, other) class PendingStatus(Status): def __init__(self, reason=None): super(PendingStatus, self).__init__("pending", 0, reason) class SubmittedStatus(Status): def __init__(self, reason=None): super(SubmittedStatus, self).__init__("submitted", 1, reason) class CompleteStatus(Status): def __init__(self, reason=None): super(CompleteStatus, self).__init__("complete", 2, reason) class SkippedStatus(Status): def __init__(self, reason=None): super(SkippedStatus, self).__init__("skipped", 3, reason) class FailedStatus(Status): def __init__(self, reason=None): super(FailedStatus, self).__init__("failed", 4, reason) class NotSubmittedStatus(SkippedStatus): reason = "disabled" class NotUpdatedStatus(SkippedStatus): reason = 
"locked" class DidNotChangeStatus(SkippedStatus): reason = "nochange" class StackDoesNotExist(SkippedStatus): reason = "does not exist in cloudformation" PENDING = PendingStatus() WAITING = PendingStatus(reason="waiting") SUBMITTED = SubmittedStatus() COMPLETE = CompleteStatus() SKIPPED = SkippedStatus() FAILED = FailedStatus() INTERRUPTED = FailedStatus(reason="interrupted") ================================================ FILE: stacker/target.py ================================================ class Target(object): """A "target" is just a node in the stacker graph that does nothing, except specify dependencies. These can be useful as a means of logically grouping a set of stacks together that can be targeted with the `--targets` flag. """ def __init__(self, definition): self.name = definition.name self.requires = definition.requires or [] self.required_by = definition.required_by or [] self.logging = False ================================================ FILE: stacker/tests/__init__.py ================================================ ================================================ FILE: stacker/tests/actions/__init__.py ================================================ ================================================ FILE: stacker/tests/actions/test_base.py ================================================ import unittest import mock import botocore.exceptions from botocore.stub import Stubber, ANY from stacker.actions.base import ( BaseAction ) from stacker.blueprints.base import Blueprint from stacker.providers.aws.default import Provider from stacker.session_cache import get_session from stacker.tests.factories import ( MockProviderBuilder, mock_context, ) MOCK_VERSION = "01234abcdef" class TestBlueprint(Blueprint): @property def version(self): return MOCK_VERSION VARIABLES = { "Param1": {"default": "default", "type": str}, } class TestBaseAction(unittest.TestCase): def test_ensure_cfn_bucket_exists(self): session = get_session("us-east-1") provider = 
Provider(session) action = BaseAction( context=mock_context("mynamespace"), provider_builder=MockProviderBuilder(provider) ) stubber = Stubber(action.s3_conn) stubber.add_response( "head_bucket", service_response={}, expected_params={ "Bucket": ANY, } ) with stubber: action.ensure_cfn_bucket() def test_ensure_cfn_bucket_doesnt_exist_us_east(self): session = get_session("us-east-1") provider = Provider(session) action = BaseAction( context=mock_context("mynamespace"), provider_builder=MockProviderBuilder(provider) ) stubber = Stubber(action.s3_conn) stubber.add_client_error( "head_bucket", service_error_code="NoSuchBucket", service_message="Not Found", http_status_code=404, ) stubber.add_response( "create_bucket", service_response={}, expected_params={ "Bucket": ANY, } ) with stubber: action.ensure_cfn_bucket() def test_ensure_cfn_bucket_doesnt_exist_us_west(self): session = get_session("us-west-1") provider = Provider(session) action = BaseAction( context=mock_context("mynamespace"), provider_builder=MockProviderBuilder(provider, region="us-west-1") ) stubber = Stubber(action.s3_conn) stubber.add_client_error( "head_bucket", service_error_code="NoSuchBucket", service_message="Not Found", http_status_code=404, ) stubber.add_response( "create_bucket", service_response={}, expected_params={ "Bucket": ANY, "CreateBucketConfiguration": { "LocationConstraint": "us-west-1", } } ) with stubber: action.ensure_cfn_bucket() def test_ensure_cfn_forbidden(self): session = get_session("us-west-1") provider = Provider(session) action = BaseAction( context=mock_context("mynamespace"), provider_builder=MockProviderBuilder(provider) ) stubber = Stubber(action.s3_conn) stubber.add_client_error( "head_bucket", service_error_code="AccessDenied", service_message="Forbidden", http_status_code=403, ) with stubber: with self.assertRaises(botocore.exceptions.ClientError): action.ensure_cfn_bucket() def test_stack_template_url(self): context = mock_context("mynamespace") blueprint = 
TestBlueprint(name="myblueprint", context=context) region = "us-east-1" endpoint = "https://example.com" session = get_session(region) provider = Provider(session) action = BaseAction( context=context, provider_builder=MockProviderBuilder(provider, region=region) ) with mock.patch('stacker.actions.base.get_s3_endpoint', autospec=True, return_value=endpoint): self.assertEqual( action.stack_template_url(blueprint), "%s/%s/stack_templates/%s/%s-%s.json" % ( endpoint, "stacker-mynamespace", "mynamespace-myblueprint", "myblueprint", MOCK_VERSION ) ) ================================================ FILE: stacker/tests/actions/test_build.py ================================================ import unittest from collections import namedtuple import mock from stacker import exceptions from stacker.actions import build from stacker.session_cache import get_session from stacker.actions.build import ( _resolve_parameters, _handle_missing_parameters, UsePreviousParameterValue, ) from stacker.blueprints.variables.types import CFNString from stacker.context import Context, Config from stacker.exceptions import StackDidNotChange, StackDoesNotExist from stacker.providers.base import BaseProvider from stacker.providers.aws.default import Provider from stacker.status import ( NotSubmittedStatus, COMPLETE, PENDING, SKIPPED, SUBMITTED, FAILED ) from ..factories import MockThreadingEvent, MockProviderBuilder def mock_stack_parameters(parameters): return { 'Parameters': [ {'ParameterKey': k, 'ParameterValue': v} for k, v in parameters.items() ] } class TestProvider(BaseProvider): def __init__(self, outputs=None, *args, **kwargs): self._outputs = outputs or {} def set_outputs(self, outputs): self._outputs = outputs def get_stack(self, stack_name, **kwargs): if stack_name not in self._outputs: raise exceptions.StackDoesNotExist(stack_name) return {"name": stack_name, "outputs": self._outputs[stack_name]} def get_outputs(self, stack_name, *args, **kwargs): stack = self.get_stack(stack_name) 
return stack["outputs"] class TestBuildAction(unittest.TestCase): def setUp(self): self.context = Context(config=Config({"namespace": "namespace"})) self.provider = TestProvider() self.build_action = build.Action( self.context, provider_builder=MockProviderBuilder(self.provider)) def _get_context(self, **kwargs): config = Config({ "namespace": "namespace", "stacks": [ {"name": "vpc"}, {"name": "bastion", "variables": { "test": "${output vpc::something}"}}, {"name": "db", "variables": { "test": "${output vpc::something}", "else": "${output bastion::something}"}}, {"name": "other", "variables": {}} ], }) return Context(config=config, **kwargs) def test_handle_missing_params(self): existing_stack_param_dict = { "StackName": "teststack", "Address": "192.168.0.1" } existing_stack_params = mock_stack_parameters( existing_stack_param_dict ) all_params = existing_stack_param_dict.keys() required = ["Address"] parameter_values = {"Address": "192.168.0.1"} expected_params = {"StackName": UsePreviousParameterValue, "Address": "192.168.0.1"} result = _handle_missing_parameters(parameter_values, all_params, required, existing_stack_params) self.assertEqual(sorted(result), sorted(list(expected_params.items()))) def test_missing_params_no_existing_stack(self): all_params = ["Address", "StackName"] required = ["Address"] parameter_values = {} with self.assertRaises(exceptions.MissingParameterException) as cm: _handle_missing_parameters(parameter_values, all_params, required) self.assertEqual(cm.exception.parameters, required) def test_existing_stack_params_dont_override_given_params(self): existing_stack_param_dict = { "StackName": "teststack", "Address": "192.168.0.1" } existing_stack_params = mock_stack_parameters( existing_stack_param_dict ) all_params = existing_stack_param_dict.keys() required = ["Address"] parameter_values = {"Address": "10.0.0.1"} result = _handle_missing_parameters(parameter_values, all_params, required, existing_stack_params) self.assertEqual( 
sorted(result), sorted(list(parameter_values.items())) ) def test_generate_plan(self): context = self._get_context() build_action = build.Action(context, cancel=MockThreadingEvent()) plan = build_action._generate_plan() self.assertEqual( { 'db': set(['bastion', 'vpc']), 'bastion': set(['vpc']), 'other': set([]), 'vpc': set([])}, plan.graph.to_dict() ) def test_dont_execute_plan_when_outline_specified(self): context = self._get_context() build_action = build.Action(context, cancel=MockThreadingEvent()) with mock.patch.object(build_action, "_generate_plan") as \ mock_generate_plan: build_action.run(outline=True) self.assertEqual(mock_generate_plan().execute.call_count, 0) def test_execute_plan_when_outline_not_specified(self): context = self._get_context() build_action = build.Action(context, cancel=MockThreadingEvent()) with mock.patch.object(build_action, "_generate_plan") as \ mock_generate_plan: build_action.run(outline=False) self.assertEqual(mock_generate_plan().execute.call_count, 1) def test_should_update(self): test_scenario = namedtuple("test_scenario", ["locked", "force", "result"]) test_scenarios = ( test_scenario(locked=False, force=False, result=True), test_scenario(locked=False, force=True, result=True), test_scenario(locked=True, force=False, result=False), test_scenario(locked=True, force=True, result=True) ) mock_stack = mock.MagicMock(["locked", "force", "name"]) mock_stack.name = "test-stack" for t in test_scenarios: mock_stack.locked = t.locked mock_stack.force = t.force self.assertEqual(build.should_update(mock_stack), t.result) def test_should_ensure_cfn_bucket(self): test_scenarios = [ {"outline": False, "dump": False, "result": True}, {"outline": True, "dump": False, "result": False}, {"outline": False, "dump": True, "result": False}, {"outline": True, "dump": True, "result": False}, {"outline": True, "dump": "DUMP", "result": False} ] for scenario in test_scenarios: outline = scenario["outline"] dump = scenario["dump"] result = 
scenario["result"] try: self.assertEqual( build.should_ensure_cfn_bucket(outline, dump), result) except AssertionError as e: e.args += ("scenario", str(scenario)) raise def test_should_submit(self): test_scenario = namedtuple("test_scenario", ["enabled", "result"]) test_scenarios = ( test_scenario(enabled=False, result=False), test_scenario(enabled=True, result=True), ) mock_stack = mock.MagicMock(["enabled", "name"]) mock_stack.name = "test-stack" for t in test_scenarios: mock_stack.enabled = t.enabled self.assertEqual(build.should_submit(mock_stack), t.result) class TestLaunchStack(TestBuildAction): def setUp(self): self.context = self._get_context() self.session = get_session(region=None) self.provider = Provider(self.session, interactive=False, recreate_failed=False) provider_builder = MockProviderBuilder(self.provider) self.build_action = build.Action(self.context, provider_builder=provider_builder, cancel=MockThreadingEvent()) self.stack = mock.MagicMock() self.stack.region = None self.stack.name = 'vpc' self.stack.fqn = 'vpc' self.stack.blueprint.rendered = '{}' self.stack.locked = False self.stack_status = None plan = self.build_action._generate_plan() self.step = plan.steps[0] self.step.stack = self.stack def patch_object(*args, **kwargs): m = mock.patch.object(*args, **kwargs) self.addCleanup(m.stop) m.start() def get_stack(name, *args, **kwargs): if name != self.stack.name or not self.stack_status: raise StackDoesNotExist(name) return {'StackName': self.stack.name, 'StackStatus': self.stack_status, 'Outputs': [], 'Tags': []} def get_events(name, *args, **kwargs): return [{'ResourceStatus': 'ROLLBACK_IN_PROGRESS', 'ResourceStatusReason': 'CFN fail'}] patch_object(self.provider, 'get_stack', side_effect=get_stack) patch_object(self.provider, 'update_stack') patch_object(self.provider, 'create_stack') patch_object(self.provider, 'destroy_stack') patch_object(self.provider, 'get_events', side_effect=get_events) patch_object(self.build_action, 
"s3_stack_push") def _advance(self, new_provider_status, expected_status, expected_reason): self.stack_status = new_provider_status status = self.step._run_once() self.assertEqual(status, expected_status) self.assertEqual(status.reason, expected_reason) def test_launch_stack_disabled(self): self.assertEqual(self.step.status, PENDING) self.stack.enabled = False self._advance(None, NotSubmittedStatus(), "disabled") def test_launch_stack_create(self): # initial status should be PENDING self.assertEqual(self.step.status, PENDING) # initial run should return SUBMITTED since we've passed off to CF self._advance(None, SUBMITTED, "creating new stack") # status should stay as SUBMITTED when the stack becomes available self._advance('CREATE_IN_PROGRESS', SUBMITTED, "creating new stack") # status should become COMPLETE once the stack finishes self._advance('CREATE_COMPLETE', COMPLETE, "creating new stack") def test_launch_stack_create_rollback(self): # initial status should be PENDING self.assertEqual(self.step.status, PENDING) # initial run should return SUBMITTED since we've passed off to CF self._advance(None, SUBMITTED, "creating new stack") # provider should now return the CF stack since it exists self._advance("CREATE_IN_PROGRESS", SUBMITTED, "creating new stack") # rollback should be noticed self._advance("ROLLBACK_IN_PROGRESS", SUBMITTED, "rolling back new stack") # rollback should not be added twice to the reason self._advance("ROLLBACK_IN_PROGRESS", SUBMITTED, "rolling back new stack") # rollback should finish with failure self._advance("ROLLBACK_COMPLETE", FAILED, "rolled back new stack") def test_launch_stack_recreate(self): self.provider.recreate_failed = True # initial status should be PENDING self.assertEqual(self.step.status, PENDING) # first action with an existing failed stack should be deleting it self._advance("ROLLBACK_COMPLETE", SUBMITTED, "destroying stack for re-creation") # status should stay as submitted during deletion 
class TestFunctions(unittest.TestCase):
    """ test module level functions """

    def setUp(self):
        # Context with a bare namespace; provider and blueprint are mocks.
        self.ctx = Context({"namespace": "test"})
        self.prov = mock.MagicMock()
        self.bp = mock.MagicMock()

    def test_resolve_parameters_unused_parameter(self):
        """Parameters the blueprint does not define are silently dropped."""
        self.bp.get_parameter_definitions.return_value = {
            "a": {
                "type": CFNString,
                "description": "A"},
            "b": {
                "type": CFNString,
                "description": "B"}
        }
        params = {"a": "Apple", "c": "Carrot"}
        p = _resolve_parameters(params, self.bp)
        self.assertNotIn("c", p)
        self.assertIn("a", p)

    def test_resolve_parameters_none_conversion(self):
        """Parameters whose value is None are removed, not passed through."""
        self.bp.get_parameter_definitions.return_value = {
            "a": {
                "type": CFNString,
                "description": "A"},
            "b": {
                "type": CFNString,
                "description": "B"}
        }
        params = {"a": None, "c": "Carrot"}
        p = _resolve_parameters(params, self.bp)
        self.assertNotIn("a", p)

    def test_resolve_parameters_booleans(self):
        """Booleans become the lowercase strings CloudFormation expects."""
        self.bp.get_parameter_definitions.return_value = {
            "a": {
                "type": CFNString,
                "description": "A"},
            "b": {
                "type": CFNString,
                "description": "B"},
        }
        params = {"a": True, "b": False}
        p = _resolve_parameters(params, self.bp)
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual("true", p["a"])
        self.assertEqual("false", p["b"])
destroy.Action(self.context, cancel=MockThreadingEvent()) def test_generate_plan(self): plan = self.action._generate_plan() self.assertEqual( { 'vpc': set( ['db', 'instance', 'bastion']), 'other': set([]), 'bastion': set( ['instance', 'db']), 'instance': set( ['db']), 'db': set( ['other'])}, plan.graph.to_dict() ) def test_only_execute_plan_when_forced(self): with mock.patch.object(self.action, "_generate_plan") as \ mock_generate_plan: self.action.run(force=False) self.assertEqual(mock_generate_plan().execute.call_count, 0) def test_execute_plan_when_forced(self): with mock.patch.object(self.action, "_generate_plan") as \ mock_generate_plan: self.action.run(force=True) self.assertEqual(mock_generate_plan().execute.call_count, 1) def test_destroy_stack_complete_if_state_submitted(self): # Simulate the provider not being able to find the stack (a result of # it being successfully deleted) provider = mock.MagicMock() provider.get_stack.side_effect = StackDoesNotExist("mock") self.action.provider_builder = MockProviderBuilder(provider) status = self.action._destroy_stack(MockStack("vpc"), status=PENDING) # if we haven't processed the step (ie. 
has never been SUBMITTED, # should be skipped) self.assertEqual(status, SKIPPED) status = self.action._destroy_stack(MockStack("vpc"), status=SUBMITTED) # if we have processed the step and then can't find the stack, it means # we successfully deleted it self.assertEqual(status, COMPLETE) def test_destroy_stack_step_statuses(self): mock_provider = mock.MagicMock() stacks_dict = self.context.get_stacks_dict() def get_stack(stack_name): return stacks_dict.get(stack_name) plan = self.action._generate_plan() step = plan.steps[0] # we need the AWS provider to generate the plan, but swap it for # the mock one to make the test easier self.action.provider_builder = MockProviderBuilder(mock_provider) # simulate stack doesn't exist and we haven't submitted anything for # deletion mock_provider.get_stack.side_effect = StackDoesNotExist("mock") step.run() self.assertEqual(step.status, SKIPPED) # simulate stack getting successfully deleted mock_provider.get_stack.side_effect = get_stack mock_provider.is_stack_destroyed.return_value = False mock_provider.is_stack_in_progress.return_value = False step._run_once() self.assertEqual(step.status, SUBMITTED) mock_provider.is_stack_destroyed.return_value = False mock_provider.is_stack_in_progress.return_value = True step._run_once() self.assertEqual(step.status, SUBMITTED) mock_provider.is_stack_destroyed.return_value = True mock_provider.is_stack_in_progress.return_value = False step._run_once() self.assertEqual(step.status, COMPLETE) ================================================ FILE: stacker/tests/actions/test_diff.py ================================================ import unittest from operator import attrgetter from stacker.actions.diff import ( diff_dictionaries, diff_parameters, DictValue ) class TestDictValueFormat(unittest.TestCase): def test_status(self): added = DictValue("k0", None, "value_0") self.assertEqual(added.status(), DictValue.ADDED) removed = DictValue("k1", "value_1", None) self.assertEqual(removed.status(), 
DictValue.REMOVED) modified = DictValue("k2", "value_1", "value_2") self.assertEqual(modified.status(), DictValue.MODIFIED) unmodified = DictValue("k3", "value_1", "value_1") self.assertEqual(unmodified.status(), DictValue.UNMODIFIED) def test_format(self): added = DictValue("k0", None, "value_0") self.assertEqual(added.changes(), ['+%s = %s' % (added.key, added.new_value)]) removed = DictValue("k1", "value_1", None) self.assertEqual(removed.changes(), ['-%s = %s' % (removed.key, removed.old_value)]) modified = DictValue("k2", "value_1", "value_2") self.assertEqual(modified.changes(), [ '-%s = %s' % (modified.key, modified.old_value), '+%s = %s' % (modified.key, modified.new_value) ]) unmodified = DictValue("k3", "value_1", "value_1") self.assertEqual(unmodified.changes(), [' %s = %s' % ( unmodified.key, unmodified.old_value)]) self.assertEqual(unmodified.changes(), [' %s = %s' % ( unmodified.key, unmodified.new_value)]) class TestDiffDictionary(unittest.TestCase): def test_diff_dictionaries(self): old_dict = { "a": "Apple", "b": "Banana", "c": "Corn", } new_dict = { "a": "Apple", "b": "Bob", "d": "Doug", } [count, changes] = diff_dictionaries(old_dict, new_dict) self.assertEqual(count, 3) expected_output = [ DictValue("a", "Apple", "Apple"), DictValue("b", "Banana", "Bob"), DictValue("c", "Corn", None), DictValue("d", None, "Doug"), ] expected_output.sort(key=attrgetter("key")) # compare all the outputs to the expected change for expected_change in expected_output: change = changes.pop(0) self.assertEqual(change, expected_change) # No extra output self.assertEqual(len(changes), 0) class TestDiffParameters(unittest.TestCase): def test_diff_parameters_no_changes(self): old_params = { "a": "Apple" } new_params = { "a": "Apple" } param_diffs = diff_parameters(old_params, new_params) self.assertEquals(param_diffs, []) ================================================ FILE: stacker/tests/blueprints/__init__.py ================================================ 
================================================ FILE: stacker/tests/blueprints/test_base.py ================================================ import unittest import sys from mock import patch from mock import MagicMock from troposphere import ( Base64, Ref, s3, sns ) from stacker.blueprints.base import ( Blueprint, CFNParameter, build_parameter, validate_allowed_values, validate_variable_type, resolve_variable, parse_user_data ) from stacker.blueprints.variables.types import ( CFNCommaDelimitedList, CFNNumber, CFNString, EC2AvailabilityZoneNameList, TroposphereType, ) from stacker.exceptions import ( InvalidLookupCombination, MissingVariable, UnresolvedVariable, UnresolvedVariables, ValidatorError, VariableTypeRequired, InvalidUserdataPlaceholder ) from stacker.variables import Variable from stacker.lookups import register_lookup_handler from ..factories import mock_context def mock_lookup_handler(value, provider=None, context=None, fqn=False, **kwargs): return value register_lookup_handler("mock", mock_lookup_handler) class TestBuildParameter(unittest.TestCase): def test_base_parameter(self): p = build_parameter("BasicParam", {"type": "String"}) p.validate() self.assertEquals(p.Type, "String") class TestBlueprintRendering(unittest.TestCase): def test_to_json(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"default": "default", "type": CFNString}, "Param2": {"type": CFNNumber}, "Param3": {"type": CFNCommaDelimitedList}, "Param4": {"default": "foo", "type": str}, "Param5": {"default": 5, "type": int} } def create_template(self): self.template.set_version('2010-09-09') self.template.set_description('TestBlueprint') expected_json = """{ "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestBlueprint", "Parameters": { "Param1": { "Default": "default", "Type": "String" }, "Param2": { "Type": "Number" }, "Param3": { "Type": "CommaDelimitedList" } }, "Resources": {} }""" self.assertEqual( TestBlueprint(name="test", 
context=mock_context()).to_json(), expected_json, ) class TestBaseBlueprint(unittest.TestCase): def test_add_output(self): output_name = "MyOutput1" output_value = "OutputValue" class TestBlueprint(Blueprint): VARIABLES = {} def create_template(self): self.template.set_version('2010-09-09') self.template.set_description('TestBlueprint') self.add_output(output_name, output_value) bp = TestBlueprint(name="test", context=mock_context()) bp.render_template() self.assertEqual(bp.template.outputs[output_name].properties["Value"], output_value) class TestVariables(unittest.TestCase): def test_defined_variables(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"default": "default", "type": str}, } blueprint = TestBlueprint(name="test", context=MagicMock()) self.assertEqual( blueprint.defined_variables(), blueprint.VARIABLES, ) def test_defined_variables_subclass(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"default": 0, "type": int}, "Param2": {"default": 0, "type": int}, } class TestBlueprintSublcass(TestBlueprint): def defined_variables(self): variables = super(TestBlueprintSublcass, self).defined_variables() variables["Param2"]["default"] = 1 variables["Param3"] = {"default": 1, "type": int} return variables blueprint = TestBlueprintSublcass(name="test", context=MagicMock()) variables = blueprint.defined_variables() self.assertEqual(len(variables), 3) self.assertEqual(variables["Param2"]["default"], 1) def test_get_variables_unresolved_variables(self): class TestBlueprint(Blueprint): pass blueprint = TestBlueprint(name="test", context=MagicMock()) with self.assertRaises(UnresolvedVariables): blueprint.get_variables() def test_set_description(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"default": "default", "type": str}, } def create_template(self): return description = "my blueprint description" context = mock_context() blueprint = TestBlueprint(name="test", context=context, description=description) 
blueprint.render_template() self.assertEquals(description, blueprint.template.description) def test_validate_variable_type_cfntype(self): var_name = "testVar" var_type = CFNString provided_value = "abc" value = validate_variable_type(var_name, var_type, provided_value) self.assertIsInstance(value, CFNParameter) def test_validate_variable_type_cfntype_none_value(self): var_name = "testVar" var_type = CFNString provided_value = None with self.assertRaises(ValueError): validate_variable_type(var_name, var_type, provided_value) def test_validate_variable_type_matching_type(self): var_name = "testVar" var_type = str provided_value = "abc" value = validate_variable_type(var_name, var_type, provided_value) self.assertEqual(value, provided_value) # This tests that validate_variable_type doesn't change the original value # even if it could. IE: A string "1" shouldn't be valid for an int. # See: https://github.com/remind101/stacker/pull/266 def test_strict_validate_variable_type(self): var_name = "testVar" var_type = int provided_value = "1" with self.assertRaises(ValueError): validate_variable_type(var_name, var_type, provided_value) def test_validate_variable_type_invalid_value(self): var_name = "testVar" var_type = int provided_value = "abc" with self.assertRaises(ValueError): validate_variable_type(var_name, var_type, provided_value) def test_resolve_variable_no_type_on_variable_definition(self): var_name = "testVar" var_def = {} provided_variable = None blueprint_name = "testBlueprint" with self.assertRaises(VariableTypeRequired): resolve_variable(var_name, var_def, provided_variable, blueprint_name) def test_resolve_variable_no_provided_with_default(self): var_name = "testVar" default_value = "foo" var_def = {"default": default_value, "type": str} provided_variable = None blueprint_name = "testBlueprint" value = resolve_variable(var_name, var_def, provided_variable, blueprint_name) self.assertEqual(default_value, value) def 
test_resolve_variable_no_provided_without_default(self): var_name = "testVar" var_def = {"type": str} provided_variable = None blueprint_name = "testBlueprint" with self.assertRaises(MissingVariable): resolve_variable(var_name, var_def, provided_variable, blueprint_name) def test_resolve_variable_provided_not_resolved(self): var_name = "testVar" var_def = {"type": str} provided_variable = Variable(var_name, "${mock abc}") blueprint_name = "testBlueprint" with self.assertRaises(UnresolvedVariable): resolve_variable(var_name, var_def, provided_variable, blueprint_name) def _resolve_troposphere_var(self, tpe, value, **kwargs): var_name = "testVar" var_def = {"type": TroposphereType(tpe, **kwargs)} provided_variable = Variable(var_name, value) blueprint_name = "testBlueprint" return resolve_variable(var_name, var_def, provided_variable, blueprint_name) def test_resolve_variable_troposphere_type_resource_single(self): bucket_defs = {"MyBucket": {"BucketName": "some-bucket"}} bucket = self._resolve_troposphere_var(s3.Bucket, bucket_defs) self.assertTrue(isinstance(bucket, s3.Bucket)) self.assertEqual(bucket.properties, bucket_defs[bucket.title]) self.assertEqual(bucket.title, "MyBucket") def test_resolve_variable_troposphere_type_resource_optional(self): bucket = self._resolve_troposphere_var(s3.Bucket, None, optional=True) self.assertEqual(bucket, None) def test_resolve_variable_troposphere_type_value_blank_required(self): with self.assertRaises(ValidatorError): self._resolve_troposphere_var(s3.Bucket, None) def test_resolve_variable_troposphere_type_resource_many(self): bucket_defs = { "FirstBucket": {"BucketName": "some-bucket"}, "SecondBucket": {"BucketName": "some-other-bucket"} } buckets = self._resolve_troposphere_var(s3.Bucket, bucket_defs, many=True) for bucket in buckets: self.assertTrue(isinstance(bucket, s3.Bucket)) self.assertEqual(bucket.properties, bucket_defs[bucket.title]) def test_resolve_variable_troposphere_type_resource_many_empty(self): buckets = 
self._resolve_troposphere_var(s3.Bucket, {}, many=True) self.assertEqual(buckets, []) def test_resolve_variable_troposphere_type_resource_fail(self): # Do this to silence the error reporting here: # https://github.com/cloudtools/troposphere/commit/dc8abd5c with open("/dev/null", "w") as devnull: _stderr = sys.stderr sys.stderr = devnull with self.assertRaises(ValidatorError): self._resolve_troposphere_var(s3.Bucket, {"MyBucket": {"BucketName": 1}}) sys.stderr = _stderr def test_resolve_variable_troposphere_type_props_single(self): sub_defs = {"Endpoint": "test", "Protocol": "lambda"} # Note that sns.Subscription != sns.SubscriptionResource. The former # is a property type, the latter is a complete resource. sub = self._resolve_troposphere_var(sns.Subscription, sub_defs) self.assertTrue(isinstance(sub, sns.Subscription)) self.assertEqual(sub.properties, sub_defs) def test_resolve_variable_troposphere_type_props_optional(self): sub = self._resolve_troposphere_var(sns.Subscription, None, optional=True) self.assertEqual(sub, None) def test_resolve_variable_troposphere_type_props_many(self): sub_defs = [ {"Endpoint": "test1", "Protocol": "lambda"}, {"Endpoint": "test2", "Protocol": "lambda"} ] subs = self._resolve_troposphere_var(sns.Subscription, sub_defs, many=True) for i, sub in enumerate(subs): self.assertTrue(isinstance(sub, sns.Subscription)) self.assertEqual(sub.properties, sub_defs[i]) def test_resolve_variable_troposphere_type_props_many_empty(self): subs = self._resolve_troposphere_var(sns.Subscription, [], many=True) self.assertEqual(subs, []) def test_resolve_variable_troposphere_type_props_fail(self): with self.assertRaises(ValidatorError): self._resolve_troposphere_var(sns.Subscription, {}) def test_resolve_variable_troposphere_type_unvalidated(self): self._resolve_troposphere_var(sns.Subscription, {}, validate=False) def test_resolve_variable_troposphere_type_optional_many(self): res = self._resolve_troposphere_var(sns.Subscription, {}, many=True, 
optional=True) self.assertIsNone(res) def test_resolve_variable_provided_resolved(self): var_name = "testVar" var_def = {"type": str} provided_variable = Variable(var_name, "${mock 1}") provided_variable.resolve(context=MagicMock(), provider=MagicMock()) blueprint_name = "testBlueprint" value = resolve_variable(var_name, var_def, provided_variable, blueprint_name) self.assertEqual(value, "1") def test_resolve_variable_allowed_values(self): var_name = "testVar" var_def = {"type": str, "allowed_values": ["allowed"]} provided_variable = Variable(var_name, "not_allowed") blueprint_name = "testBlueprint" with self.assertRaises(ValueError): resolve_variable(var_name, var_def, provided_variable, blueprint_name) provided_variable = Variable(var_name, "allowed") value = resolve_variable(var_name, var_def, provided_variable, blueprint_name) self.assertEqual(value, "allowed") def test_resolve_variable_validator_valid_value(self): def triple_validator(value): if len(value) != 3: raise ValueError return value var_name = "testVar" var_def = {"type": list, "validator": triple_validator} var_value = [1, 2, 3] provided_variable = Variable(var_name, var_value) blueprint_name = "testBlueprint" value = resolve_variable(var_name, var_def, provided_variable, blueprint_name) self.assertEqual(value, var_value) def test_resolve_variable_validator_invalid_value(self): def triple_validator(value): if len(value) != 3: raise ValueError("Must be a triple.") return value var_name = "testVar" var_def = {"type": list, "validator": triple_validator} var_value = [1, 2] provided_variable = Variable(var_name, var_value) blueprint_name = "testBlueprint" with self.assertRaises(ValidatorError) as cm: resolve_variable(var_name, var_def, provided_variable, blueprint_name) exc = cm.exception.exception # The wrapped exception self.assertIsInstance(exc, ValueError) def test_resolve_variables(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"default": 0, "type": int}, "Param2": {"type": str}, } 
blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [ Variable("Param1", 1), Variable("Param2", "${output other-stack::Output}"), Variable("Param3", 3), ] variables[1]._value._resolve("Test Output") blueprint.resolve_variables(variables) self.assertEqual(blueprint.resolved_variables["Param1"], 1) self.assertEqual(blueprint.resolved_variables["Param2"], "Test Output") self.assertIsNone(blueprint.resolved_variables.get("Param3")) def test_resolve_variables_lookup_returns_non_string(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": list}, } def return_list_something(*_args, **_kwargs): return ["something"] register_lookup_handler("custom", return_list_something) blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", "${custom non-string-return-val}")] for var in variables: var._value.resolve({}, {}) blueprint.resolve_variables(variables) self.assertEqual(blueprint.resolved_variables["Param1"], ["something"]) def test_resolve_variables_lookup_returns_troposphere_obj(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": Base64}, } def return_obj(*_args, **_kwargs): return Base64("test") register_lookup_handler("custom", return_obj) blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", "${custom non-string-return-val}")] for var in variables: var._value.resolve({}, {}) blueprint.resolve_variables(variables) self.assertEqual(blueprint.resolved_variables["Param1"].data, Base64("test").data) def test_resolve_variables_lookup_returns_non_string_invalid_combo(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": list}, } def return_list_something(*_args, **_kwargs): return ["something"] register_lookup_handler("custom", return_list_something) variable = Variable( "Param1", "${custom non-string-return-val},${output some-stack::Output}", ) variable._value[0].resolve({}, {}) with self.assertRaises(InvalidLookupCombination): 
variable.value() def test_get_variables(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int}, "Param2": {"type": str}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", 1), Variable("Param2", "Test Output")] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertEqual(variables["Param1"], 1) self.assertEqual(variables["Param2"], "Test Output") def test_resolve_variables_missing_variable(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int}, "Param2": {"type": str}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", 1)] with self.assertRaises(MissingVariable): blueprint.resolve_variables(variables) def test_resolve_variables_incorrect_type(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", "Something")] with self.assertRaises(ValueError): blueprint.resolve_variables(variables) def test_get_variables_default_value(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int, "default": 1}, "Param2": {"type": str}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param2", "Test Output")] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertEqual(variables["Param1"], 1) self.assertEqual(variables["Param2"], "Test Output") def test_resolve_variables_convert_type(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", 1)] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertTrue(isinstance(variables["Param1"], int)) def test_resolve_variables_cfn_type(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": CFNString}, } blueprint = 
TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", "Value")] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertTrue(isinstance(variables["Param1"], CFNParameter)) def test_resolve_variables_cfn_number(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": CFNNumber}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", 1)] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertTrue(isinstance(variables["Param1"], CFNParameter)) self.assertEqual(variables["Param1"].value, "1") def test_resolve_variables_cfn_type_list(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": EC2AvailabilityZoneNameList}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", ["us-east-1", "us-west-2"])] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertTrue(isinstance(variables["Param1"], CFNParameter)) self.assertEqual(variables["Param1"].value, ["us-east-1", "us-west-2"]) self.assertEqual(variables["Param1"].ref.data, Ref("Param1").data) parameters = blueprint.get_parameter_values() self.assertEqual(parameters["Param1"], ["us-east-1", "us-west-2"]) def test_resolve_variables_cfn_type_list_invalid_value(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": EC2AvailabilityZoneNameList}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", {"main": "us-east-1"})] with self.assertRaises(ValueError): blueprint.resolve_variables(variables) variables = blueprint.get_variables() def test_get_parameter_definitions_cfn_type_list(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": EC2AvailabilityZoneNameList}, } blueprint = TestBlueprint(name="test", context=MagicMock()) parameters = blueprint.get_parameter_definitions() self.assertTrue("Param1" in parameters) parameter 
= parameters["Param1"] self.assertEqual(parameter["type"], "List") def test_get_parameter_definitions_cfn_type(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": CFNString}, } blueprint = TestBlueprint(name="test", context=MagicMock()) parameters = blueprint.get_parameter_definitions() self.assertTrue("Param1" in parameters) parameter = parameters["Param1"] self.assertEqual(parameter["type"], "String") def test_get_required_parameter_definitions_cfn_type(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": CFNString}, } blueprint = TestBlueprint(name="test", context=MagicMock()) blueprint.setup_parameters() params = blueprint.get_required_parameter_definitions() self.assertEqual(list(params.keys())[0], "Param1") def test_get_parameter_values(self): class TestBlueprint(Blueprint): VARIABLES = { "Param1": {"type": int}, "Param2": {"type": CFNString}, } blueprint = TestBlueprint(name="test", context=MagicMock()) variables = [Variable("Param1", 1), Variable("Param2", "Value")] blueprint.resolve_variables(variables) variables = blueprint.get_variables() self.assertEqual(len(variables), 2) parameters = blueprint.get_parameter_values() self.assertEqual(len(parameters), 1) self.assertEqual(parameters["Param2"], "Value") def test_validate_allowed_values(self): allowed_values = ['allowed'] valid = validate_allowed_values(allowed_values, "not_allowed") self.assertFalse(valid) valid = validate_allowed_values(allowed_values, "allowed") self.assertTrue(valid) def test_blueprint_with_parameters_fails(self): class TestBlueprint(Blueprint): PARAMETERS = { "Param2": {"default": 0, "type": "Integer"}, } with self.assertRaises(AttributeError): TestBlueprint(name="test", context=MagicMock()) class TestBlueprint(Blueprint): LOCAL_PARAMETERS = { "Param2": {"default": 0, "type": "Integer"}, } with self.assertRaises(AttributeError): TestBlueprint(name="test", context=MagicMock()) def test_variable_exists_but_value_is_none(self): var_name = "testVar" 
var_def = {"type": str} var_value = None provided_variable = Variable(var_name, var_value) blueprint_name = "testBlueprint" with self.assertRaises(ValueError): resolve_variable(var_name, var_def, provided_variable, blueprint_name) class TestCFNParameter(unittest.TestCase): def test_cfnparameter_convert_boolean(self): p = CFNParameter("myParameter", True) self.assertEqual(p.value, "true") p = CFNParameter("myParameter", False) self.assertEqual(p.value, "false") # Test to make sure other types aren't affected p = CFNParameter("myParameter", 0) self.assertEqual(p.value, "0") p = CFNParameter("myParameter", "myString") self.assertEqual(p.value, "myString") def test_parse_user_data(self): expected = 'name: tom, last: taubkin and $' variables = { 'name': 'tom', 'last': 'taubkin' } raw_user_data = 'name: ${name}, last: $last and $$' blueprint_name = 'test' res = parse_user_data(variables, raw_user_data, blueprint_name) self.assertEqual(res, expected) def test_parse_user_data_missing_variable(self): variables = { 'name': 'tom', } raw_user_data = 'name: ${name}, last: $last and $$' blueprint_name = 'test' with self.assertRaises(MissingVariable): parse_user_data(variables, raw_user_data, blueprint_name) def test_parse_user_data_invaled_placeholder(self): raw_user_data = '$100' blueprint_name = 'test' with self.assertRaises(InvalidUserdataPlaceholder): parse_user_data({}, raw_user_data, blueprint_name) @patch('stacker.blueprints.base.read_value_from_path', return_value='contents') @patch('stacker.blueprints.base.parse_user_data') def test_read_user_data(self, parse_mock, file_mock): class TestBlueprint(Blueprint): VARIABLES = {} blueprint = TestBlueprint(name="blueprint_name", context=MagicMock()) blueprint.resolve_variables({}) blueprint.read_user_data('file://test.txt') file_mock.assert_called_with('file://test.txt') parse_mock.assert_called_with({}, 'contents', 'blueprint_name') ================================================ FILE: stacker/tests/blueprints/test_raw.py 
================================================ """Test module for blueprint-from-raw-template module.""" import json import unittest from mock import MagicMock from stacker.blueprints.raw import ( get_template_params, get_template_path, RawTemplateBlueprint ) from stacker.variables import Variable from ..factories import mock_context RAW_JSON_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json' RAW_YAML_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.yaml' RAW_J2_TEMPLATE_PATH = 'stacker/tests/fixtures/cfn_template.json.j2' def test_get_template_path_local_file(tmpdir): """Verify get_template_path finding a file relative to CWD.""" template_path = tmpdir.join('cfn_template.json') template_path.ensure() with tmpdir.as_cwd(): result = get_template_path('cfn_template.json') assert template_path.samefile(result) def test_get_template_path_invalid_file(tmpdir): """Verify get_template_path with an invalid filename.""" with tmpdir.as_cwd(): assert get_template_path('cfn_template.json') is None def test_get_template_path_file_in_syspath(tmpdir, monkeypatch): """Verify get_template_path with a file in sys.path. This ensures templates are able to be retrieved from remote packages. 
""" template_path = tmpdir.join('cfn_template.json') template_path.ensure() monkeypatch.syspath_prepend(tmpdir) result = get_template_path(template_path.basename) assert template_path.samefile(result) def test_get_template_params(): """Verify get_template_params function operation.""" template_dict = { "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestTemplate", "Parameters": { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } }, "Resources": {} } template_params = { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } } assert get_template_params(template_dict) == template_params class TestBlueprintRendering(unittest.TestCase): """Test class for blueprint rendering.""" def test_to_json(self): """Verify to_json method operation.""" expected_json = json.dumps( { "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestTemplate", "Parameters": { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } }, "Resources": { "Dummy": { "Type": "AWS::SNS::Topic", "Properties": { "DisplayName": {"Ref": "Param1"} } } }, "Outputs": { "DummyId": { "Value": "dummy-1234" } } }, sort_keys=True, indent=4 ) self.assertEqual( RawTemplateBlueprint( name="test", context=mock_context(), raw_template_path=RAW_JSON_TEMPLATE_PATH).to_json(), expected_json ) def test_j2_to_json(self): """Verify jinja2 template parsing.""" expected_json = json.dumps( { "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestTemplate", "Parameters": { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } }, "Resources": { "Dummy": { "Type": "AWS::CloudFormation::WaitConditionHandle" } }, "Outputs": { "DummyId": { "Value": "dummy-bar-param1val-foo-1234" } } }, sort_keys=True, indent=4 ) blueprint = RawTemplateBlueprint( name="stack1", context=mock_context( extra_config_args={'stacks': [{'name': 'stack1', 
'template_path': 'unused', 'variables': { 'Param1': 'param1val', 'bar': 'foo'}}]}, environment={'foo': 'bar'}), raw_template_path=RAW_J2_TEMPLATE_PATH ) blueprint.resolve_variables([Variable("Param1", "param1val"), Variable("bar", "foo")]) self.assertEqual( expected_json, blueprint.to_json() ) class TestVariables(unittest.TestCase): """Test class for blueprint variable methods.""" def test_get_parameter_definitions_json(self): # noqa pylint: disable=invalid-name """Verify get_parameter_definitions method with json raw template.""" blueprint = RawTemplateBlueprint( name="test", context=MagicMock(), raw_template_path=RAW_JSON_TEMPLATE_PATH) parameters = blueprint.get_parameter_definitions() self.assertEqual( parameters, {"Param1": {"Type": "String"}, "Param2": {"Default": "default", "Type": "CommaDelimitedList"}}) def test_get_parameter_definitions_yaml(self): # noqa pylint: disable=invalid-name """Verify get_parameter_definitions method with yaml raw template.""" blueprint = RawTemplateBlueprint( name="test", context=MagicMock(), raw_template_path=RAW_YAML_TEMPLATE_PATH ) parameters = blueprint.get_parameter_definitions() self.assertEqual( parameters, {"Param1": {"Type": "String"}, "Param2": {"Default": "default", "Type": "CommaDelimitedList"}}) def test_get_required_parameter_definitions_json(self): # noqa pylint: disable=invalid-name """Verify get_required_param... method with json raw template.""" blueprint = RawTemplateBlueprint( name="test", context=MagicMock(), raw_template_path=RAW_JSON_TEMPLATE_PATH ) self.assertEqual( blueprint.get_required_parameter_definitions(), {"Param1": {"Type": "String"}}) def test_get_required_parameter_definitions_yaml(self): # noqa pylint: disable=invalid-name """Verify get_required_param... 
method with yaml raw template.""" blueprint = RawTemplateBlueprint( name="test", context=MagicMock(), raw_template_path=RAW_YAML_TEMPLATE_PATH ) self.assertEqual( blueprint.get_required_parameter_definitions(), {"Param1": {"Type": "String"}}) ================================================ FILE: stacker/tests/blueprints/test_testutil.py ================================================ import unittest from troposphere import ecr from ...blueprints.testutil import BlueprintTestCase from ...blueprints.base import Blueprint from ...context import Context from ...variables import Variable class Repositories(Blueprint): """ Simple blueprint to test our test cases. """ VARIABLES = { "Repositories": { "type": list, "description": "A list of repository names to create." } } def create_template(self): t = self.template variables = self.get_variables() for repo in variables["Repositories"]: t.add_resource( ecr.Repository( "%sRepository" % repo, RepositoryName=repo, ) ) class TestRepositories(BlueprintTestCase): def test_create_template_passes(self): ctx = Context({'namespace': 'test'}) blueprint = Repositories('test_repo', ctx) blueprint.resolve_variables([ Variable('Repositories', ["repo1", "repo2"]) ]) blueprint.create_template() self.assertRenderedBlueprint(blueprint) def test_create_template_fails(self): ctx = Context({'namespace': 'test'}) blueprint = Repositories('test_repo', ctx) blueprint.resolve_variables([ Variable('Repositories', ["repo1", "repo2", "repo3"]) ]) blueprint.create_template() with self.assertRaises(AssertionError): self.assertRenderedBlueprint(blueprint) if __name__ == '__main__': unittest.main() ================================================ FILE: stacker/tests/conftest.py ================================================ import logging import os import pytest import py.path logger = logging.getLogger(__name__) @pytest.fixture(scope='session', autouse=True) def aws_credentials(): # Handle change in https://github.com/spulec/moto/issues/1924 # Ensure 
AWS SDK finds some (bogus) credentials in the environment and # doesn't try to use other providers. overrides = { 'AWS_ACCESS_KEY_ID': 'testing', 'AWS_SECRET_ACCESS_KEY': 'testing', 'AWS_DEFAULT_REGION': 'us-east-1' } saved_env = {} for key, value in overrides.items(): logger.info('Overriding env var: {}={}'.format(key, value)) saved_env[key] = os.environ.get(key, None) os.environ[key] = value yield for key, value in saved_env.items(): logger.info('Restoring saved env var: {}={}'.format(key, value)) if value is None: del os.environ[key] else: os.environ[key] = value saved_env.clear() @pytest.fixture(scope="package") def stacker_fixture_dir(): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures') return py.path.local(path) ================================================ FILE: stacker/tests/factories.py ================================================ from mock import MagicMock from stacker.context import Context from stacker.config import Config, Stack from stacker.lookups import Lookup class MockThreadingEvent(object): def wait(self, timeout=None): return False class MockProviderBuilder(object): def __init__(self, provider, region=None): self.provider = provider self.region = region def build(self, region=None, profile=None): return self.provider def mock_provider(**kwargs): return MagicMock(**kwargs) def mock_context(namespace="default", extra_config_args=None, **kwargs): config_args = {"namespace": namespace} if extra_config_args: config_args.update(extra_config_args) config = Config(config_args) if kwargs.get("environment"): return Context( config=config, **kwargs) return Context( config=config, environment={}, **kwargs) def generate_definition(base_name, stack_id, **overrides): definition = { "name": "%s.%d" % (base_name, stack_id), "class_path": "stacker.tests.fixtures.mock_blueprints.%s" % ( base_name.upper()), "requires": [] } definition.update(overrides) return Stack(definition) def mock_lookup(lookup_input, lookup_type, raw=None): 
if raw is None: raw = "%s %s" % (lookup_type, lookup_input) return Lookup(type=lookup_type, input=lookup_input, raw=raw) class SessionStub(object): """Stubber class for boto3 sessions made with session_cache.get_session() This is a helper class that should be used when trying to stub out get_session() calls using the boto3.stubber. Example Usage: @mock.patch('stacker.lookups.handlers.myfile.get_session', return_value=sessionStub(client)) def myfile_test(self, client_stub): ... Attributes: client_stub (:class:`boto3.session.Session`:): boto3 session stub """ def __init__(self, client_stub): self.client_stub = client_stub def client(self, region): """Returns the stubbed client object Args: region (str): So boto3 won't complain Returns: :class:`boto3.session.Session`: The stubbed boto3 session """ return self.client_stub ================================================ FILE: stacker/tests/fixtures/__init__.py ================================================ ================================================ FILE: stacker/tests/fixtures/basic.env ================================================ namespace: test.stacker ================================================ FILE: stacker/tests/fixtures/cfn_template.json ================================================ { "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestTemplate", "Parameters": { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } }, "Resources": { "Dummy": { "Type": "AWS::SNS::Topic", "Properties": { "DisplayName": {"Ref" : "Param1"} } } }, "Outputs": { "DummyId": { "Value": "dummy-1234" } } } ================================================ FILE: stacker/tests/fixtures/cfn_template.json.j2 ================================================ { "AWSTemplateFormatVersion": "2010-09-09", "Description": "TestTemplate", "Parameters": { "Param1": { "Type": "String" }, "Param2": { "Default": "default", "Type": "CommaDelimitedList" } }, "Resources": { "Dummy": { 
"Type": "AWS::CloudFormation::WaitConditionHandle" } }, "Outputs": { "DummyId": { "Value": "dummy-{{ context.environment.foo }}-{{ variables.Param1 }}-{{ variables.bar }}-1234" } } } ================================================ FILE: stacker/tests/fixtures/cfn_template.yaml ================================================ AWSTemplateFormatVersion: "2010-09-09" Description: TestTemplate Parameters: Param1: Type: String Param2: Default: default Type: CommaDelimitedList Resources: Bucket: Type: AWS::S3::Bucket Properties: BucketName: !Join - "-" - - !Ref "AWS::StackName" - !Ref "AWS::Region" Dummy: Type: AWS::CloudFormation::WaitConditionHandle Outputs: DummyId: Value: dummy-1234 ================================================ FILE: stacker/tests/fixtures/keypair/fingerprint ================================================ d7:50:1f:78:55:5f:22:c1:f6:88:c6:5d:82:4f:94:4f ================================================ FILE: stacker/tests/fixtures/keypair/id_rsa ================================================ -----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn NhAAAAAwEAAQAAAQEA7rF34ExOHgT+dDYJUswkhBpyC+vnK+ptx+nGQDTkPj9aP1uAXbXA C97KK+Ihou0jniYKPJMHsjEK4a7eh2ihoK6JkYs9+y0MeGCAHAYuGXdNt5jv1e0XNgoYdf JloC0pgOp4Po9+4qeuOds8bb9IxwM/aSaJWygaSc22ZTzeOWQk5PXJNH0lR0ZelUUkj0HK aouuV6UX/t+czTghgnNZgDjk5sOfUNmugN7fJi+6/dWjOaukDkJttfZXLRTPDux0SZw4Jo RqZ40cBNS8ipLVk24BWeEjVlNl6rrFDtO4yrkscz7plwXlPiRLcdCdbamcCZaRrdkftKje 5ypz5dvocQAAA9DJ0TBmydEwZgAAAAdzc2gtcnNhAAABAQDusXfgTE4eBP50NglSzCSEGn IL6+cr6m3H6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37 LQx4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzb ZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r9 1aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPu mXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hxAAAAAwEAAQAAAQAwMUSy1LUw+nElpYNc ZDs7MNu17HtQMpTXuCt+6y7qIoBmKmNQiFGuE91d3tpLuvVmCOgoMsdrAtvflR741/dKKf 
M8n5B0FjReWZ2ECvtjyOK4HvjNiIEXOBKYPcim/ndSwARnHTHRMWnL5KfewLBA/jbfVBiH fyFPpWkeJ5v2mg3EDCkTCj7mBZwXYkX8uZ1IN6CZJ9kWNaPO3kloTlamgs6pd/5+OmMGWc /vhfJQppaJjW58y7D7zCpncHg3Yf0HZsgWRTGJO93TxuyzDlAXITVGwqcz7InTVQZS1XTx 3FNmIpb0lDtVrKGxwvR/7gP6DpxMlKkzoCg3j1o8tHvBAAAAgQDuZCVAAqQFrY4ZH2TluP SFulXuTiT4mgQivAwI6ysMxjpX1IGBTgDvHXJ0xyW4LN7pCvg8hRAhsPlaNBX24nNfOGmn QMYp/qAZG5JP2vEJmDUKmEJ77Twwmk+k0zXfyZyfo7rgpF4c5W2EFnV7xiMtBTKbAj4HMn qGPYDPGpySTwAAAIEA+w72mMctM2yd9Sxyg5b7ZlhuNyKW1oHcEvLoEpTtru0f8gh7C3HT C0SiuTOth2xoHUWnbo4Yv5FV3gSoQ/rd1sWbkpEZMwbaPGsTA8bkCn2eItsjfrQx+6oY1U HgZDrkjbByB3KQiq+VioKsrUmgfT/UgBq2tSnHqcYB56Eqj0sAAACBAPNkMvCstNJGS4FN nSCGXghoYqKHivZN/IjWP33t/cr72lGp1yCY5S6FCn+JdNrojKYk2VXOSF5xc3fZllbr7W hmhXRr/csQkymXMDkJHnsdhpMeoEZm7wBjUx+hE1+QbNF63kZMe9sjm5y/YRu7W7H6ngme kb5FW97sspLYX8WzAAAAF2RhbmllbGt6YUBkYW5pZWwtcGMubGFuAQID -----END OPENSSH PRIVATE KEY----- ================================================ FILE: stacker/tests/fixtures/keypair/id_rsa.pub ================================================ ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAklOUpkDHrfHY17SbrmTIpNLTGK9Tjom/BWDSUGPl+nafzlHDTYW7hdI4yZ5ew18JH4JW9jbhUFrviQzM7xlELEVf4h9lFX5QVkbPppSwg0cda3Pbv7kOdJ/MTyBlWXFCR+HAo3FXRitBqxiX1nKhXpHAZsMciLq8V6RjsNAQwdsdMFvSlVK/7XAt3FaoJoAsncM1Q9x5+3V0Ww68/eIFmb1zuUFljQJKprrX88XypNDvjYNby6vw/Pb0rwert/EnmZ+AW4OZPnTPI89ZPmVMLuayrD2cE86Z/il8b+gw3r3+1nKatmIkjn2so1d01QraTlMqVSsbxNrRFi9wrf+M7Q== ================================================ FILE: stacker/tests/fixtures/mock_blueprints.py ================================================ from troposphere import GetAtt, Output, Sub, Ref from troposphere import iam from awacs.aws import Policy, Statement, AWSPrincipal import awacs import awacs.s3 import awacs.cloudformation import awacs.iam import awacs.sts import awacs.sns from troposphere.cloudformation import WaitCondition, WaitConditionHandle from stacker.blueprints.base import Blueprint from stacker.blueprints.variables.types import ( CFNCommaDelimitedList, CFNNumber, CFNString, 
EC2KeyPairKeyName, EC2SecurityGroupId, EC2SubnetIdList, EC2VPCId, ) class FunctionalTests(Blueprint): """This creates a stack with an IAM user and access key for running the functional tests for stacker. """ VARIABLES = { "StackerNamespace": { "type": CFNString, "description": "The stacker namespace that the tests will use. " "Access to cloudformation will be restricted to " "only allow access to stacks with this prefix."}, "StackerBucket": { "type": CFNString, "description": "The name of the bucket that the tests will use " "for uploading templates."} } def create_template(self): t = self.template bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*") objects_arn = Sub("arn:aws:s3:::${StackerBucket}*/*") cloudformation_scope = Sub( "arn:aws:cloudformation:*:${AWS::AccountId}:" "stack/${StackerNamespace}-*") sns_scope = Sub( "arn:aws:sns:*:${AWS::AccountId}:" "${StackerNamespace}-*") changeset_scope = "*" # This represents the precise IAM permissions that stacker itself # needs. stacker_policy = iam.Policy( PolicyName="Stacker", PolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Resource=["*"], Action=[awacs.s3.ListAllMyBuckets] ), Statement( Effect="Allow", Resource=[bucket_arn], Action=[ awacs.s3.ListBucket, awacs.s3.GetBucketLocation, awacs.s3.CreateBucket, awacs.s3.DeleteBucket, ] ), Statement( Effect="Allow", Resource=[bucket_arn], Action=[ awacs.s3.GetObject, awacs.s3.GetObjectAcl, awacs.s3.PutObject, awacs.s3.PutObjectAcl, ] ), Statement( Effect="Allow", Resource=[objects_arn], Action=[ awacs.s3.DeleteObject, ] ), Statement( Effect="Allow", Resource=[changeset_scope], Action=[ awacs.cloudformation.DescribeChangeSet, awacs.cloudformation.ExecuteChangeSet, awacs.cloudformation.DeleteChangeSet, ] ), Statement( Effect="Deny", Resource=[Ref("AWS::StackId")], Action=[awacs.cloudformation.Action("*")] ), Statement( Effect="Allow", Resource=[cloudformation_scope], Action=[ awacs.cloudformation.GetTemplate, awacs.cloudformation.CreateChangeSet, 
awacs.cloudformation.DeleteChangeSet, awacs.cloudformation.DeleteStack, awacs.cloudformation.CreateStack, awacs.cloudformation.UpdateStack, awacs.cloudformation.SetStackPolicy, awacs.cloudformation.DescribeStacks, awacs.cloudformation.DescribeStackEvents ] ), Statement( Effect="Allow", Resource=[sns_scope], Action=[ awacs.sns.CreateTopic, awacs.sns.DeleteTopic, awacs.sns.GetTopicAttributes ] ) ] ) ) principal = AWSPrincipal(Ref("AWS::AccountId")) role = t.add_resource( iam.Role( "FunctionalTestRole", AssumeRolePolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Action=[ awacs.sts.AssumeRole], Principal=principal)]), Policies=[ stacker_policy])) assumerole_policy = iam.Policy( PolicyName="AssumeRole", PolicyDocument=Policy( Statement=[ Statement( Effect="Allow", Resource=[GetAtt(role, "Arn")], Action=[ awacs.sts.AssumeRole])])) user = t.add_resource( iam.User( "FunctionalTestUser", Policies=[ stacker_policy, assumerole_policy])) key = t.add_resource( iam.AccessKey( "FunctionalTestKey", Serial=1, UserName=Ref(user))) t.add_output(Output("User", Value=Ref(user))) t.add_output(Output("AccessKeyId", Value=Ref(key))) t.add_output( Output( "SecretAccessKey", Value=GetAtt("FunctionalTestKey", "SecretAccessKey"))) t.add_output( Output( "FunctionalTestRole", Value=GetAtt(role, "Arn"))) class Dummy(Blueprint): VARIABLES = { "StringVariable": { "type": str, "default": ""} } def create_template(self): self.template.add_resource(WaitConditionHandle("Dummy")) self.template.add_output(Output("DummyId", Value="dummy-1234")) self.template.add_output(Output("Region", Value=Ref("AWS::Region"))) class Dummy2(Blueprint): """ This blueprint allows tests of only additional resources to occur. Just swap out the Dummy class for Dummy2 on the same stack. 
""" VARIABLES = { "StringVariable": { "type": str, "default": ""} } def create_template(self): self.template.add_resource(WaitConditionHandle("Dummy")) self.template.add_output(Output("DummyId", Value="dummy-1234")) self.template.add_resource(WaitConditionHandle("Dummy2")) class LongRunningDummy(Blueprint): """ Meant to be an attempt to create a cheap blueprint that takes a little bit of time to create/rollback/destroy to avoid some of the race conditions we've seen in some of our functional tests. """ VARIABLES = { "Count": { "type": int, "description": "The # of WaitConditonHandles to create.", "default": 1, }, "BreakLast": { "type": bool, "description": "Whether or not to break the last WaitConditon " "by creating an invalid WaitConditionHandle.", "default": True, }, "OutputValue": { "type": str, "description": "The value to put in an output to allow for " "updates.", "default": "DefaultOutput", }, } def create_template(self): v = self.get_variables() t = self.template base_name = "Dummy" for i in range(v["Count"]): name = "%s%s" % (base_name, i) last_name = None if i: last_name = "%s%s" % (base_name, i - 1) wch = WaitConditionHandle(name) if last_name is not None: wch.DependsOn = last_name t.add_resource(wch) self.add_output("OutputValue", str(v["OutputValue"])) self.add_output("WCHCount", str(v["Count"])) if v["BreakLast"]: t.add_resource( WaitCondition( "BrokenWaitCondition", Handle=wch.Ref(), # Timeout is made deliberately large so CF rejects it Timeout=2 ** 32, Count=0 ) ) class Broken(Blueprint): """ This blueprint deliberately fails validation, so that it can be used to test re-creation of a failed stack """ VARIABLES = { "StringVariable": { "type": str, "default": ""} } def create_template(self): t = self.template t.add_resource(WaitConditionHandle("BrokenDummy")) t.add_resource(WaitCondition( "BrokenWaitCondition", Handle=Ref("BrokenDummy"), # Timeout is made deliberately large so CF rejects it Timeout=2 ** 32, Count=0)) t.add_output(Output("DummyId", 
Value="dummy-1234")) class VPC(Blueprint): VARIABLES = { "AZCount": { "type": int, "default": 2, }, "PrivateSubnets": { "type": CFNCommaDelimitedList, "description": "Comma separated list of subnets to use for " "non-public hosts. NOTE: Must have as many subnets " "as AZCount"}, "PublicSubnets": { "type": CFNCommaDelimitedList, "description": "Comma separated list of subnets to use for " "public hosts. NOTE: Must have as many subnets " "as AZCount"}, "InstanceType": { "type": CFNString, "description": "NAT EC2 instance type.", "default": "m3.medium"}, "BaseDomain": { "type": CFNString, "default": "", "description": "Base domain for the stack."}, "InternalDomain": { "type": CFNString, "default": "", "description": "Internal domain name, if you have one."}, "CidrBlock": { "type": CFNString, "description": "Base CIDR block for subnets.", "default": "10.128.0.0/16"}, "ImageName": { "type": CFNString, "description": "The image name to use from the AMIMap (usually " "found in the config file.)", "default": "NAT"}, "UseNatGateway": { "type": CFNString, "allowed_values": ["true", "false"], "description": "If set to true, will configure a NAT Gateway" "instead of NAT instances.", "default": "false"}, } def create_template(self): self.template.add_resource(WaitConditionHandle("VPC")) class DiffTester(Blueprint): VARIABLES = { "InstanceType": { "type": CFNString, "description": "NAT EC2 instance type.", "default": "m3.medium"}, "WaitConditionCount": { "type": int, "description": "Number of WaitConditionHandle resources " "to add to the template"} } def create_template(self): for i in range(self.get_variables()["WaitConditionCount"]): self.template.add_resource(WaitConditionHandle("VPC%d" % i)) class Bastion(Blueprint): VARIABLES = { "VpcId": {"type": EC2VPCId, "description": "Vpc Id"}, "DefaultSG": {"type": EC2SecurityGroupId, "description": "Top level security group."}, "PublicSubnets": {"type": EC2SubnetIdList, "description": "Subnets to deploy public " "instances in."}, 
"PrivateSubnets": {"type": EC2SubnetIdList, "description": "Subnets to deploy private " "instances in."}, "AvailabilityZones": {"type": CFNCommaDelimitedList, "description": "Availability Zones to deploy " "instances in."}, "InstanceType": {"type": CFNString, "description": "EC2 Instance Type", "default": "m3.medium"}, "MinSize": {"type": CFNNumber, "description": "Minimum # of instances.", "default": "1"}, "MaxSize": {"type": CFNNumber, "description": "Maximum # of instances.", "default": "5"}, "SshKeyName": {"type": EC2KeyPairKeyName}, "OfficeNetwork": { "type": CFNString, "description": "CIDR block allowed to connect to bastion hosts."}, "ImageName": { "type": CFNString, "description": "The image name to use from the AMIMap (usually " "found in the config file.)", "default": "bastion"}, } def create_template(self): return class PreOneOhBastion(Blueprint): """Used to ensure old blueprints won't be usable in 1.0""" PARAMETERS = { "VpcId": {"type": "AWS::EC2::VPC::Id", "description": "Vpc Id"}, "DefaultSG": {"type": "AWS::EC2::SecurityGroup::Id", "description": "Top level security group."}, "PublicSubnets": {"type": "List", "description": "Subnets to deploy public " "instances in."}, "PrivateSubnets": {"type": "List", "description": "Subnets to deploy private " "instances in."}, "AvailabilityZones": {"type": "CommaDelimitedList", "description": "Availability Zones to deploy " "instances in."}, "InstanceType": {"type": "String", "description": "EC2 Instance Type", "default": "m3.medium"}, "MinSize": {"type": "Number", "description": "Minimum # of instances.", "default": "1"}, "MaxSize": {"type": "Number", "description": "Maximum # of instances.", "default": "5"}, "SshKeyName": {"type": "AWS::EC2::KeyPair::KeyName"}, "OfficeNetwork": { "type": "String", "description": "CIDR block allowed to connect to bastion hosts."}, "ImageName": { "type": "String", "description": "The image name to use from the AMIMap (usually " "found in the config file.)", "default": "bastion"}, 
} def create_template(self): return ================================================ FILE: stacker/tests/fixtures/mock_hooks.py ================================================ def mock_hook(provider, context, **kwargs): return {"result": kwargs["value"]} ================================================ FILE: stacker/tests/fixtures/mock_lookups.py ================================================ TYPE_NAME = "mock" def handler(value, **kwargs): return "mock" ================================================ FILE: stacker/tests/fixtures/not-basic.env ================================================ namespace: test.stacker environment: test ================================================ FILE: stacker/tests/fixtures/parameter_resolution/template.yml ================================================ # used in functional test suites, to fix https://github.com/cloudtools/stacker/pull/615 AWSTemplateFormatVersion: "2010-09-09" Parameters: NormalParam: Type: String SecretParam: Type: String Default: default-secret NoEcho: true Outputs: NormalParam: Value: !Ref "NormalParam" SecretParam: Value: !Ref "SecretParam" Resources: WaitConditionHandle: Type: "AWS::CloudFormation::WaitConditionHandle" ================================================ FILE: stacker/tests/fixtures/vpc-bastion-db-web-pre-1.0.yaml ================================================ # Hooks require a path. # If the build should stop when a hook fails, set required to true. 
# pre_build happens before the build # post_build happens after the build pre_build: - path: stacker.hooks.route53.create_domain required: true enabled: true # Additional args can be passed as a dict of key/value pairs # args: # BaseDomain: foo # post_build: mappings: AmiMap: us-east-1: NAT: ami-ad227cc4 ubuntu1404: &ubuntu1404 ami-74e27e1c # Setting an anchor bastion: *ubuntu1404 # Using the anchor above us-west-2: NAT: ami-290f4119 ubuntu1404west2: &ubuntu1404west2 ami-5189a661 bastion: *ubuntu1404west2 vpc_parameters: &vpc_parameters VpcId: vpc::VpcId # parametrs with ::'s in them refer to :: DefaultSG: vpc::DefaultSG PublicSubnets: vpc::PublicSubnets PrivateSubnets: vpc::PrivateSubnets AvailabilityZones: vpc::AvailabilityZones stacks: - name: vpc class_path: stacker.tests.fixtures.mock_blueprints.VPC variables: InstanceType: m3.medium SshKeyName: default ImageName: NAT # Only build 2 AZs, can be overridden with -p on the command line # Note: If you want more than 4 AZs you should add more subnets below # Also you need at least 2 AZs in order to use the DB because # of the fact that the DB blueprint uses MultiAZ AZCount: 2 # Enough subnets for 4 AZs PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24 PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22 # Uncomment if you want an internal hosted zone for the VPC # If provided, it will be added to the dns search path of the DHCP # Options #InternalDomain: internal - name: bastion class_path: stacker.tests.fixtures.mock_blueprints.Bastion ## !! This should break, parameters not allowed in 1.0 parameters: # Extends the parameters dict with the contents of the vpc_parameters # anchor. Basically we're including all VPC Outputs in the parameters # of the bastion stack. Note: Stacker figures out, automatically, which # parameters the stack actually needs and only submits those to each # stack. 
For example, most stacks are in the PrivateSubnets, but not # the PublicSubnets, but stacker deals with it for you. << : *vpc_parameters InstanceType: m3.medium OfficeNetwork: 203.0.113.0/24 MinSize: 2 MaxSize: 2 SshKeyName: default ImageName: bastion ================================================ FILE: stacker/tests/fixtures/vpc-bastion-db-web.yaml ================================================ # Hooks require a path. # If the build should stop when a hook fails, set required to true. # pre_build happens before the build # post_build happens after the build pre_build: - path: stacker.hooks.route53.create_domain required: true enabled: true # Additional args can be passed as a dict of key/value pairs # args: # BaseDomain: foo # post_build: mappings: AmiMap: us-east-1: NAT: ami-ad227cc4 ubuntu1404: &ubuntu1404 ami-74e27e1c # Setting an anchor bastion: *ubuntu1404 # Using the anchor above us-west-2: NAT: ami-290f4119 ubuntu1404west2: &ubuntu1404west2 ami-5189a661 bastion: *ubuntu1404west2 vpc_parameters: &vpc_parameters VpcId: vpc::VpcId # parametrs with ::'s in them refer to :: DefaultSG: vpc::DefaultSG PublicSubnets: vpc::PublicSubnets PrivateSubnets: vpc::PrivateSubnets AvailabilityZones: vpc::AvailabilityZones stacks: - name: vpc class_path: stacker.tests.fixtures.mock_blueprints.VPC variables: InstanceType: m3.medium SshKeyName: default ImageName: NAT # Only build 2 AZs, can be overridden with -p on the command line # Note: If you want more than 4 AZs you should add more subnets below # Also you need at least 2 AZs in order to use the DB because # of the fact that the DB blueprint uses MultiAZ AZCount: 2 # Enough subnets for 4 AZs PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24 PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22 # Uncomment if you want an internal hosted zone for the VPC # If provided, it will be added to the dns search path of the DHCP # Options #InternalDomain: internal - name: bastion 
class_path: stacker.tests.fixtures.mock_blueprints.Bastion variables: # Extends the parameters dict with the contents of the vpc_parameters # anchor. Basically we're including all VPC Outputs in the parameters # of the bastion stack. Note: Stacker figures out, automatically, which # parameters the stack actually needs and only submits those to each # stack. For example, most stacks are in the PrivateSubnets, but not # the PublicSubnets, but stacker deals with it for you. << : *vpc_parameters InstanceType: m3.medium OfficeNetwork: 203.0.113.0/24 MinSize: 2 MaxSize: 2 SshKeyName: default ImageName: bastion ================================================ FILE: stacker/tests/fixtures/vpc-custom-log-format-info.yaml ================================================ log_formats: info: "[%(asctime)s] ${environment} custom log format - %(message)s" stacks: - name: vpc class_path: stacker.tests.fixtures.mock_blueprints.VPC variables: InstanceType: m3.medium SshKeyName: default ImageName: NAT # Only build 2 AZs, can be overridden with -p on the command line # Note: If you want more than 4 AZs you should add more subnets below # Also you need at least 2 AZs in order to use the DB because # of the fact that the DB blueprint uses MultiAZ AZCount: 2 # Enough subnets for 4 AZs PublicSubnets: 10.128.0.0/24,10.128.1.0/24,10.128.2.0/24,10.128.3.0/24 PrivateSubnets: 10.128.8.0/22,10.128.12.0/22,10.128.16.0/22,10.128.20.0/22 ================================================ FILE: stacker/tests/hooks/__init__.py ================================================ ================================================ FILE: stacker/tests/hooks/test_aws_lambda.py ================================================ import os.path import os import mock import random from io import BytesIO as StringIO from zipfile import ZipFile import boto3 import botocore import pytest from moto import mock_s3 from troposphere.awslambda import Code from stacker.hooks.aws_lambda import ( ZIP_PERMS_MASK, 
_calculate_hash, select_bucket_region, upload_lambda_functions, ) from ..factories import mock_context, mock_provider REGION = "us-east-1" @pytest.fixture def all_files(tmpdir): files = ( 'f1/f1.py', 'f1/f1.pyc', 'f1/__init__.py', 'f1/test/__init__.py', 'f1/test/f1.py', 'f1/test/f1.pyc', 'f1/test2/test.txt', 'f2/f2.js' ) def create(): for file in files: f = tmpdir.join(file) f.write(b'', ensure=True) yield f return list(create()) @pytest.fixture def f1_files(tmpdir, all_files): return [p for p in all_files if p.relto(tmpdir).startswith('f1')] @pytest.fixture def f2_files(tmpdir, all_files): return [p for p in all_files if p.relto(tmpdir).startswith('f2')] @pytest.fixture(scope='package') def prebuilt_zip(stacker_fixture_dir): path = stacker_fixture_dir.join('test.zip') content = path.read_binary() md5 = 'c6fb602d9bde5a522856adabe9949f63' return dict(path=path, md5=md5, contents=content) @pytest.fixture(autouse=True) def s3(): with mock_s3(): yield boto3.client('s3', region_name=REGION) def assert_s3_zip_file_list(s3, bucket, key, files, root=None): object_info = s3.get_object(Bucket=bucket, Key=key) zip_data = StringIO(object_info['Body'].read()) expected_files = set() for f in files: rel_path = os.path.relpath(str(f), str(root)) if root else str(f) expected_files.add(rel_path) found_files = set() with ZipFile(zip_data, 'r') as zip_file: for zip_info in zip_file.infolist(): perms = (zip_info.external_attr & ZIP_PERMS_MASK) >> 16 assert perms in (0o755, 0o644) found_files.add(zip_info.filename) assert found_files == set(expected_files) def assert_s3_zip_contents(s3, bucket, key, contents): object_info = s3.get_object(Bucket=bucket, Key=key) zip_data = object_info['Body'].read() assert zip_data == contents def assert_s3_bucket(s3, bucket, present=True): try: s3.head_bucket(Bucket=bucket) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == '404': if present: pytest.fail('s3: bucket {} does not exist'.format(bucket)) else: raise else: if not 
present: pytest.fail('s3: bucket {} should not exist'.format(bucket)) @pytest.fixture def context(): return mock_context() @pytest.fixture def provider(): return mock_provider(region=REGION) @pytest.fixture def run_hook(context, provider): def run(**kwargs): return upload_lambda_functions(context=context, provider=provider, **kwargs) return run def test_bucket_default(s3, context, run_hook): result = run_hook(functions={}) assert result is not None assert_s3_bucket(s3, context.bucket_name, present=True) def test_bucket_custom(s3, context, run_hook): result = run_hook(bucket='custom', functions={}) assert result is not None assert_s3_bucket(s3, context.bucket_name, present=False) assert_s3_bucket(s3, 'custom', present=True) def test_prefix(tmpdir, s3, all_files, f1_files, run_hook): root = tmpdir.join('f1') results = run_hook( prefix='cloudformation-custom-resources/', functions={ 'MyFunction': { 'path': str(root) } }) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, f1_files, root=root) assert code.S3Key.startswith( 'cloudformation-custom-resources/lambda-MyFunction-') def test_prefix_missing(tmpdir, s3, all_files, f1_files, run_hook): root = tmpdir.join('f1') results = run_hook( functions={ 'MyFunction': { 'path': str(root) } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, f1_files, root=root) assert code.S3Key.startswith('lambda-MyFunction-') def test_path_missing(run_hook): msg = "missing required property 'path' in function 'MyFunction'" with pytest.raises(ValueError, match=msg): run_hook( functions={ 'MyFunction': { } } ) def test_path_non_zip_non_dir(tmpdir, all_files, run_hook): root = tmpdir msg = 'Path must be an existing ZIP file or directory' with pytest.raises(ValueError, match=msg): run_hook( functions={ 'MyFunction': { 'path': str(root.join('test.txt')) } } 
) def test_path_relative(tmpdir, s3, run_hook): root = tmpdir root.join('test/test.py').write(b'', ensure=True) get_config_directory = 'stacker.hooks.aws_lambda.get_config_directory' with mock.patch(get_config_directory, return_value=str(root)): results = run_hook( functions={ 'MyFunction': { 'path': 'test' } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, ['test.py']) def test_path_home_relative(tmpdir, s3, run_hook): root = tmpdir test_path = '~/test' orig_expanduser = os.path.expanduser tmpdir.join('test.py').write(b'') def expanduser(path): return str(root) if path == test_path else orig_expanduser(path) with mock.patch('os.path.expanduser', side_effect=expanduser): results = run_hook( functions={ 'MyFunction': { 'path': test_path } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, ['test.py']) def test_multiple_functions(tmpdir, s3, all_files, f1_files, f2_files, run_hook): root1 = tmpdir.join('f1') root2 = tmpdir.join('f2') results = run_hook( functions={ 'MyFunction': { 'path': str(root1) }, 'OtherFunction': { 'path': str(root2) } } ) assert results is not None f1_code = results.get('MyFunction') assert isinstance(f1_code, Code) assert_s3_zip_file_list(s3, f1_code.S3Bucket, f1_code.S3Key, f1_files, root=root1) f2_code = results.get('OtherFunction') assert isinstance(f2_code, Code) assert_s3_zip_file_list(s3, f2_code.S3Bucket, f2_code.S3Key, f2_files, root=root2) def test_patterns_invalid(tmpdir, run_hook): root = tmpdir msg = ("Invalid file patterns in key 'include': must be a string or " 'list of strings') with pytest.raises(ValueError, match=msg): run_hook( functions={ 'MyFunction': { 'path': str(root), 'include': {'invalid': 'invalid'} } } ) def test_patterns_include(tmpdir, s3, all_files, run_hook): root = tmpdir.join('f1') results = run_hook( functions={ 
'MyFunction': { 'path': str(root), 'include': ['*.py', 'test2/'] } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [ 'f1.py', '__init__.py', 'test/__init__.py', 'test/f1.py', 'test2/test.txt' ]) def test_patterns_exclude(tmpdir, s3, all_files, run_hook): root = tmpdir.join('f1') results = run_hook( functions={ 'MyFunction': { 'path': str(root), 'exclude': ['*.pyc', 'test/'] } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [ 'f1.py', '__init__.py', 'test2/test.txt' ]) @mock_s3 def test_patterns_include_exclude(tmpdir, s3, all_files, run_hook): root = tmpdir.join('f1') results = run_hook(functions={ 'MyFunction': { 'path': str(root), 'include': '*.py', 'exclude': 'test/' } }) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, [ 'f1.py', '__init__.py' ]) def test_patterns_exclude_all(tmpdir, all_files, run_hook): root = tmpdir.join('f1') msg = ('Empty list of files for Lambda payload. 
Check your ' 'include/exclude options for errors.') with pytest.raises(RuntimeError, match=msg): run_hook( functions={ 'MyFunction': { 'path': str(root), 'exclude': ['**'] } } ) def test_idempotence(tmpdir, s3, all_files, run_hook): root = tmpdir.join('f1') bucket_name = 'test' functions = { 'MyFunction': { 'path': str(root) } } s3.create_bucket(Bucket=bucket_name) previous = None for i in range(2): results = run_hook(bucket=bucket_name, functions=functions) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) if not previous: previous = code.S3Key continue assert previous == code.S3Key def test_calculate_hash(tmpdir, all_files, f1_files, f2_files): root = tmpdir all_hash_1 = _calculate_hash(map(str, all_files), str(root)) all_hash_2 = _calculate_hash(map(str, all_files), str(root)) f1_hash = _calculate_hash(map(str, f1_files), str(root)) f2_hash = _calculate_hash(map(str, f2_files), str(root)) assert all_hash_1 == all_hash_2 assert f1_hash != all_hash_1 assert f2_hash != all_hash_1 assert f1_hash != f2_hash def test_calculate_hash_diff_filename_same_contents(tmpdir, all_files): root = tmpdir files = all_files[:2] tmpdir.join(files[0]).write('data', ensure=True) tmpdir.join(files[1]).write('data', ensure=True) hash1 = _calculate_hash([str(files[0])], str(root)) hash2 = _calculate_hash([str(files[1])], str(root)) assert hash1 != hash2 def test_calculate_hash_different_ordering(tmpdir, all_files): root = tmpdir all_files_diff_order = random.sample(all_files, k=len(all_files)) hash1 = _calculate_hash(map(str, all_files), str(root)) hash2 = _calculate_hash(map(str, all_files_diff_order), str(root)) assert hash1 == hash2 @pytest.mark.parametrize( 'case', [ dict( custom_bucket="myBucket", hook_region="us-east-1", stacker_bucket_region="us-west-1", provider_region="eu-west-1", result="us-east-1" ), dict( custom_bucket="myBucket", hook_region=None, stacker_bucket_region="us-west-1", provider_region="eu-west-1", result="eu-west-1"), 
dict( custom_bucket=None, hook_region="us-east-1", stacker_bucket_region="us-west-1", provider_region="eu-west-1", result="us-west-1"), dict( custom_bucket=None, hook_region="us-east-1", stacker_bucket_region=None, provider_region="eu-west-1", result="eu-west-1") ] ) def test_select_bucket_region(case): result = case.pop('result') assert select_bucket_region(**case) == result def test_follow_symlink_nonbool(run_hook): msg = "follow_symlinks option must be a boolean" with pytest.raises(ValueError, match=msg): run_hook( follow_symlinks="raiseValueError", functions={ 'MyFunction': { } } ) @pytest.fixture def linked_dir(tmpdir): linked_dir = tmpdir.join('linked') linked_dir.mksymlinkto(tmpdir.join('f1')) return linked_dir def test_follow_symlink_true(tmpdir, s3, all_files, f1_files, run_hook, linked_dir): root = tmpdir results = run_hook( follow_symlinks=True, functions={ 'MyFunction': { 'path': str(root) } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) linked_files = [p for p in linked_dir.visit() if p.check(file=1)] assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, all_files + linked_files, root=tmpdir) def test_follow_symlink_false(tmpdir, s3, all_files, run_hook, linked_dir): root = tmpdir results = run_hook( follow_symlinks=False, functions={ 'MyFunction': { 'path': str(root) } } ) assert results is not None code = results.get('MyFunction') assert isinstance(code, Code) assert_s3_zip_file_list(s3, code.S3Bucket, code.S3Key, all_files, root=tmpdir) ================================================ FILE: stacker/tests/hooks/test_command.py ================================================ import os import unittest from subprocess import PIPE import mock from stacker.context import Context from stacker.config import Config from stacker.hooks.command import run_command from ..factories import mock_provider class MockProcess(object): def __init__(self, returncode=0, stdout='', stderr=''): self.returncode = returncode 
self.stdout = stdout self.stderr = stderr self.stdin = None def communicate(self, stdin): self.stdin = stdin return (self.stdout, self.stderr) def wait(self): return self.returncode def kill(self): return class TestCommandHook(unittest.TestCase): def setUp(self): self.context = Context( config=Config({'namespace': 'test', 'stacker_bucket': 'test'})) self.provider = mock_provider(region="us-east-1") self.mock_process = MockProcess() self.popen_mock = \ mock.patch('stacker.hooks.command.Popen', return_value=self.mock_process).start() self.devnull = mock.Mock() self.devnull_mock = \ mock.patch('stacker.hooks.command._devnull', return_value=self.devnull).start() def tearDown(self): self.devnull_mock.stop() self.popen_mock.stop() def run_hook(self, **kwargs): real_kwargs = { 'context': self.context, 'provider': self.provider, } real_kwargs.update(kwargs) return run_command(**real_kwargs) def test_command_ok(self): self.mock_process.returncode = 0 self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo']) self.assertEqual( results, {'returncode': 0, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None) def test_command_fail(self): self.mock_process.returncode = 1 self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo']) self.assertEqual(results, None) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None) def test_command_ignore_status(self): self.mock_process.returncode = 1 self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo'], ignore_status=True) self.assertEqual( results, {'returncode': 1, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=None, stderr=None, env=None) def test_command_quiet(self): self.mock_process.returncode = 0 
self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo'], quiet=True) self.assertEqual( results, {'returncode': 0, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=self.devnull, stderr=self.devnull, env=None) def test_command_interactive(self): self.mock_process.returncode = 0 self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo'], interactive=True) self.assertEqual( results, {'returncode': 0, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=None, stdout=None, stderr=None, env=None) def test_command_input(self): self.mock_process.returncode = 0 self.mock_process.stdout = None self.mock_process.stderr = None results = self.run_hook(command=['foo'], stdin='hello world') self.assertEqual( results, {'returncode': 0, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=PIPE, stdout=None, stderr=None, env=None) self.assertEqual(self.mock_process.stdin, 'hello world') def test_command_capture(self): self.mock_process.returncode = 0 self.mock_process.stdout = 'hello' self.mock_process.stderr = 'world' results = self.run_hook(command=['foo'], capture=True) self.assertEqual( results, {'returncode': 0, 'stdout': 'hello', 'stderr': 'world'}) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=PIPE, stderr=PIPE, env=None) def test_command_env(self): self.mock_process.returncode = 0 self.mock_process.stdout = None self.mock_process.stderr = None with mock.patch.dict(os.environ, {'foo': 'bar'}, clear=True): results = self.run_hook(command=['foo'], env={'hello': 'world'}) self.assertEqual(results, {'returncode': 0, 'stdout': None, 'stderr': None}) self.popen_mock.assert_called_once_with( ['foo'], stdin=self.devnull, stdout=None, stderr=None, env={'hello': 'world', 'foo': 'bar'}) ================================================ 
FILE: stacker/tests/hooks/test_ecs.py ================================================ import unittest import boto3 from moto import mock_ecs from testfixtures import LogCapture from stacker.hooks.ecs import create_clusters from ..factories import ( mock_context, mock_provider, ) REGION = "us-east-1" class TestECSHooks(unittest.TestCase): def setUp(self): self.provider = mock_provider(region=REGION) self.context = mock_context(namespace="fake") def test_create_single_cluster(self): with mock_ecs(): cluster = "test-cluster" logger = "stacker.hooks.ecs" client = boto3.client("ecs", region_name=REGION) response = client.list_clusters() self.assertEqual(len(response["clusterArns"]), 0) with LogCapture(logger) as logs: self.assertTrue( create_clusters( provider=self.provider, context=self.context, clusters=cluster, ) ) logs.check( ( logger, "DEBUG", "Creating ECS cluster: %s" % cluster ) ) response = client.list_clusters() self.assertEqual(len(response["clusterArns"]), 1) def test_create_multiple_clusters(self): with mock_ecs(): clusters = ("test-cluster0", "test-cluster1") logger = "stacker.hooks.ecs" client = boto3.client("ecs", region_name=REGION) response = client.list_clusters() self.assertEqual(len(response["clusterArns"]), 0) for cluster in clusters: with LogCapture(logger) as logs: self.assertTrue( create_clusters( provider=self.provider, context=self.context, clusters=cluster, ) ) logs.check( ( logger, "DEBUG", "Creating ECS cluster: %s" % cluster ) ) response = client.list_clusters() self.assertEqual(len(response["clusterArns"]), 2) def test_fail_create_cluster(self): with mock_ecs(): logger = "stacker.hooks.ecs" client = boto3.client("ecs", region_name=REGION) response = client.list_clusters() self.assertEqual(len(response["clusterArns"]), 0) with LogCapture(logger) as logs: create_clusters( provider=self.provider, context=self.context ) logs.check( ( logger, "ERROR", "setup_clusters hook missing \"clusters\" argument" ) ) response = client.list_clusters() 
self.assertEqual(len(response["clusterArns"]), 0) ================================================ FILE: stacker/tests/hooks/test_iam.py ================================================ import unittest import boto3 from botocore.exceptions import ClientError from moto import mock_iam from stacker.hooks.iam import ( create_ecs_service_role, _get_cert_arn_from_response, ) from awacs.helpers.trust import get_ecs_assumerole_policy from ..factories import ( mock_context, mock_provider, ) REGION = "us-east-1" # No test for stacker.hooks.iam.ensure_server_cert_exists until # updated version of moto is imported # (https://github.com/spulec/moto/pull/679) merged class TestIAMHooks(unittest.TestCase): def setUp(self): self.context = mock_context(namespace="fake") self.provider = mock_provider(region=REGION) def test_get_cert_arn_from_response(self): arn = "fake-arn" # Creation response response = { "ServerCertificateMetadata": { "Arn": arn } } self.assertEqual(_get_cert_arn_from_response(response), arn) # Existing cert response response = {"ServerCertificate": response} self.assertEqual(_get_cert_arn_from_response(response), arn) def test_create_service_role(self): role_name = "ecsServiceRole" policy_name = "AmazonEC2ContainerServiceRolePolicy" with mock_iam(): client = boto3.client("iam", region_name=REGION) with self.assertRaises(ClientError): client.get_role(RoleName=role_name) self.assertTrue( create_ecs_service_role( context=self.context, provider=self.provider, ) ) role = client.get_role(RoleName=role_name) self.assertIn("Role", role) self.assertEqual(role_name, role["Role"]["RoleName"]) client.get_role_policy( RoleName=role_name, PolicyName=policy_name ) def test_create_service_role_already_exists(self): role_name = "ecsServiceRole" policy_name = "AmazonEC2ContainerServiceRolePolicy" with mock_iam(): client = boto3.client("iam", region_name=REGION) client.create_role( RoleName=role_name, AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json() ) self.assertTrue( 
create_ecs_service_role( context=self.context, provider=self.provider, ) ) role = client.get_role(RoleName=role_name) self.assertIn("Role", role) self.assertEqual(role_name, role["Role"]["RoleName"]) client.get_role_policy( RoleName=role_name, PolicyName=policy_name ) ================================================ FILE: stacker/tests/hooks/test_keypair.py ================================================ import sys from collections import namedtuple from contextlib import contextmanager import mock import pytest import boto3 from moto import mock_ec2, mock_ssm from stacker.hooks.keypair import ensure_keypair_exists from ..factories import mock_context, mock_provider REGION = "us-east-1" KEY_PAIR_NAME = "FakeKey" SSHKey = namedtuple('SSHKey', 'public_key private_key fingerprint') @pytest.fixture(scope="module") def ssh_key(stacker_fixture_dir): base = stacker_fixture_dir.join('keypair') return SSHKey( private_key=base.join('id_rsa').read_binary(), public_key=base.join('id_rsa.pub').read_binary(), fingerprint=base.join('fingerprint').read_text('ascii').strip()) @pytest.fixture def provider(): return mock_provider(region=REGION) @pytest.fixture def context(): return mock_context(namespace="fake") @pytest.fixture(autouse=True) def ec2(ssh_key): # Force moto to generate a deterministic key pair on creation. 
# Can be replaced by something more sensible when # https://github.com/spulec/moto/pull/2108 is merged key_pair = {'fingerprint': ssh_key.fingerprint, 'material': ssh_key.private_key.decode('ascii')} with mock.patch('moto.ec2.models.random_key_pair', side_effect=[key_pair]): with mock_ec2(): yield @pytest.fixture(autouse=True) def ssm(): with mock_ssm(): yield @contextmanager def mock_input(lines=(), isatty=True): with mock.patch('stacker.hooks.keypair.get_raw_input', side_effect=lines) as m: with mock.patch.object(sys.stdin, 'isatty', return_value=isatty): yield m def assert_key_present(hook_result, key_name, fingerprint): assert hook_result['key_name'] == key_name assert hook_result['fingerprint'] == fingerprint ec2 = boto3.client('ec2') response = ec2.describe_key_pairs(KeyNames=[key_name], DryRun=False) key_pairs = response['KeyPairs'] assert len(key_pairs) == 1 assert key_pairs[0]['KeyName'] == key_name assert key_pairs[0]['KeyFingerprint'] == fingerprint def test_param_validation(provider, context): result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME, ssm_parameter_name='test', public_key_path='test') assert result is False def test_keypair_exists(provider, context): ec2 = boto3.client('ec2') keypair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME) result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME) expected = dict( status='exists', key_name=KEY_PAIR_NAME, fingerprint=keypair['KeyFingerprint']) assert result == expected def test_import_file(tmpdir, provider, context, ssh_key): pkey = tmpdir.join("id_rsa.pub") pkey.write(ssh_key.public_key) result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME, public_key_path=str(pkey)) assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint) assert result['status'] == 'imported' def test_import_bad_key_data(tmpdir, provider, context): pkey = tmpdir.join("id_rsa.pub") pkey.write('garbage') result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME, 
                                   public_key_path=str(pkey))
    assert result is False


@pytest.mark.parametrize('ssm_key_id', (None, 'my-key'))
def test_create_in_ssm(provider, context, ssh_key, ssm_key_id):
    result = ensure_keypair_exists(provider, context, keypair=KEY_PAIR_NAME,
                                   ssm_parameter_name='param',
                                   ssm_key_id=ssm_key_id)
    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)
    assert result['status'] == 'created'

    ssm = boto3.client('ssm')
    param = ssm.get_parameter(Name='param', WithDecryption=True)['Parameter']

    # The private key is stored encrypted in SSM.
    assert param['Value'] == ssh_key.private_key.decode('ascii')
    assert param['Type'] == 'SecureString'

    params = ssm.describe_parameters()['Parameters']
    param_details = next(p for p in params if p['Name'] == 'param')
    assert param_details['Description'] == \
        'SSH private key for KeyPair "{}" (generated by Stacker)'.format(
            KEY_PAIR_NAME)

    # The default ssm key id
    ssm_key_id = ssm_key_id or "alias/aws/ssm"
    assert param_details.get('KeyId') == ssm_key_id


def test_interactive_non_terminal_input(capsys, provider, context):
    with mock_input(isatty=False) as input:
        result = ensure_keypair_exists(provider, context,
                                       keypair=KEY_PAIR_NAME)
        # Without a TTY the hook must not prompt at all.
        input.assert_not_called()
    assert result is False

    output = capsys.readouterr()
    assert len(output.out) == 0
    assert len(output.err) == 0


def test_interactive_retry_cancel(provider, context):
    lines = ['garbage', 'cancel']
    with mock_input(lines) as input:
        result = ensure_keypair_exists(
            provider, context, keypair=KEY_PAIR_NAME)
        # One retry after invalid input, then the user cancels.
        assert input.call_count == 2
    assert result is False


def test_interactive_import(tmpdir, provider, context, ssh_key):
    key_file = tmpdir.join("id_rsa.pub")
    key_file.write(ssh_key.public_key)

    lines = ['import', str(key_file)]
    with mock_input(lines):
        result = ensure_keypair_exists(
            provider, context, keypair=KEY_PAIR_NAME)
    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)
    assert result['status'] == 'imported'


def test_interactive_create(tmpdir, provider, context, ssh_key):
    key_dir = tmpdir.join('keys')
    key_dir.ensure_dir()
    key_file = key_dir.join('{}.pem'.format(KEY_PAIR_NAME))

    lines = ['create', str(key_dir)]
    with mock_input(lines):
        result = ensure_keypair_exists(
            provider, context, keypair=KEY_PAIR_NAME)
    assert_key_present(result, KEY_PAIR_NAME, ssh_key.fingerprint)
    assert result['status'] == 'created'
    # The generated private key is written into the requested directory.
    assert key_file.samefile(result['file_path'])
    assert key_file.read_binary() == ssh_key.private_key


def test_interactive_create_bad_dir(tmpdir, provider, context):
    key_dir = tmpdir.join('missing')

    lines = ['create', str(key_dir)]
    with mock_input(lines):
        result = ensure_keypair_exists(
            provider, context, keypair=KEY_PAIR_NAME)
    assert result is False


def test_interactive_create_existing_file(tmpdir, provider, context):
    key_dir = tmpdir.join('keys')
    key_dir.ensure_dir()
    key_file = key_dir.join('{}.pem'.format(KEY_PAIR_NAME))
    key_file.ensure()

    lines = ['create', str(key_dir)]
    with mock_input(lines):
        result = ensure_keypair_exists(
            provider, context, keypair=KEY_PAIR_NAME)
    assert result is False


================================================
FILE: stacker/tests/lookups/__init__.py
================================================


================================================
FILE: stacker/tests/lookups/handlers/__init__.py
================================================


================================================
FILE: stacker/tests/lookups/handlers/test_ami.py
================================================
import unittest

import mock
from botocore.stub import Stubber

from stacker.lookups.handlers.ami import AmiLookup, ImageNotFound
import boto3

from stacker.tests.factories import SessionStub, mock_provider

REGION = "us-east-1"


class TestAMILookup(unittest.TestCase):
    # Shared stubbed client; a class attribute so the @mock.patch
    # decorators below can reference it at class-definition time.
    client = boto3.client("ec2", region_name=REGION)

    def setUp(self):
        self.stubber = Stubber(self.client)
        self.provider = mock_provider(region=REGION)

    @mock.patch("stacker.lookups.handlers.ami.get_session",
                return_value=SessionStub(client))
    def test_basic_lookup_single_image(self, mock_client):
        image_id = "ami-fffccc111"
self.stubber.add_response( "describe_images", { "Images": [ { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-13T01:17:44.000Z", "State": "available", "ImageId": image_id, "Name": "Fake Image 1", "VirtualizationType": "hvm", } ] } ) with self.stubber: value = AmiLookup.handle( value="owners:self name_regex:Fake\sImage\s\d", provider=self.provider ) self.assertEqual(value, image_id) @mock.patch("stacker.lookups.handlers.ami.get_session", return_value=SessionStub(client)) def test_basic_lookup_with_region(self, mock_client): image_id = "ami-fffccc111" self.stubber.add_response( "describe_images", { "Images": [ { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-13T01:17:44.000Z", "State": "available", "ImageId": image_id, "Name": "Fake Image 1", "VirtualizationType": "hvm", } ] } ) with self.stubber: value = AmiLookup.handle( value="us-west-1@owners:self name_regex:Fake\sImage\s\d", provider=self.provider ) self.assertEqual(value, image_id) @mock.patch("stacker.lookups.handlers.ami.get_session", return_value=SessionStub(client)) def test_basic_lookup_multiple_images(self, mock_client): image_id = "ami-fffccc111" self.stubber.add_response( "describe_images", { "Images": [ { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-13T01:17:44.000Z", "State": "available", "ImageId": "ami-fffccc110", "Name": "Fake Image 1", "VirtualizationType": "hvm", }, { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-14T01:17:44.000Z", "State": "available", "ImageId": image_id, "Name": "Fake Image 2", "VirtualizationType": "hvm", }, ] } ) with self.stubber: value = AmiLookup.handle( value="owners:self name_regex:Fake\sImage\s\d", provider=self.provider ) self.assertEqual(value, image_id) @mock.patch("stacker.lookups.handlers.ami.get_session", return_value=SessionStub(client)) def test_basic_lookup_multiple_images_name_match(self, mock_client): image_id = "ami-fffccc111" 
self.stubber.add_response( "describe_images", { "Images": [ { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-13T01:17:44.000Z", "State": "available", "ImageId": "ami-fffccc110", "Name": "Fa---ke Image 1", "VirtualizationType": "hvm", }, { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-14T01:17:44.000Z", "State": "available", "ImageId": image_id, "Name": "Fake Image 2", "VirtualizationType": "hvm", }, ] } ) with self.stubber: value = AmiLookup.handle( value="owners:self name_regex:Fake\sImage\s\d", provider=self.provider ) self.assertEqual(value, image_id) @mock.patch("stacker.lookups.handlers.ami.get_session", return_value=SessionStub(client)) def test_basic_lookup_no_matching_images(self, mock_client): self.stubber.add_response( "describe_images", { "Images": [] } ) with self.stubber: with self.assertRaises(ImageNotFound): AmiLookup.handle( value="owners:self name_regex:Fake\sImage\s\d", provider=self.provider ) @mock.patch("stacker.lookups.handlers.ami.get_session", return_value=SessionStub(client)) def test_basic_lookup_no_matching_images_from_name(self, mock_client): image_id = "ami-fffccc111" self.stubber.add_response( "describe_images", { "Images": [ { "OwnerId": "897883143566", "Architecture": "x86_64", "CreationDate": "2011-02-13T01:17:44.000Z", "State": "available", "ImageId": image_id, "Name": "Fake Image 1", "VirtualizationType": "hvm", } ] } ) with self.stubber: with self.assertRaises(ImageNotFound): AmiLookup.handle( value="owners:self name_regex:MyImage\s\d", provider=self.provider ) ================================================ FILE: stacker/tests/lookups/handlers/test_default.py ================================================ from mock import MagicMock import unittest from stacker.context import Context from stacker.lookups.handlers.default import DefaultLookup class TestDefaultLookup(unittest.TestCase): def setUp(self): self.provider = MagicMock() self.context = Context( 
environment={ 'namespace': 'test', 'env_var': 'val_in_env'} ) def test_env_var_present(self): lookup_val = "env_var::fallback" value = DefaultLookup.handle(lookup_val, provider=self.provider, context=self.context) assert value == 'val_in_env' def test_env_var_missing(self): lookup_val = "bad_env_var::fallback" value = DefaultLookup.handle(lookup_val, provider=self.provider, context=self.context) assert value == 'fallback' def test_invalid_value(self): value = "env_var:fallback" with self.assertRaises(ValueError): DefaultLookup.handle(value) ================================================ FILE: stacker/tests/lookups/handlers/test_dynamodb.py ================================================ import unittest import mock from botocore.stub import Stubber from stacker.lookups.handlers.dynamodb import DynamodbLookup import boto3 from stacker.tests.factories import SessionStub class TestDynamoDBHandler(unittest.TestCase): client = boto3.client('dynamodb', region_name='us-east-1') def setUp(self): self.stubber = Stubber(self.client) self.get_parameters_response = {'Item': {'TestMap': {'M': { 'String1': {'S': 'StringVal1'}, 'List1': {'L': [ {'S': 'ListVal1'}, {'S': 'ListVal2'}]}, 'Number1': {'N': '12345'}, }}}} @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_handler(self, mock_client): expected_params = { 'TableName': 'TestTable', 'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,String1' } base_lookup_key = 'TestTable@TestKey:TestVal.TestMap[M].String1' base_lookup_key_valid = 'StringVal1' self.stubber.add_response('get_item', self.get_parameters_response, expected_params) with self.stubber: value = DynamodbLookup.handle(base_lookup_key) self.assertEqual(value, base_lookup_key_valid) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_number_handler(self, mock_client): expected_params = { 'TableName': 'TestTable', 
'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,Number1' } base_lookup_key = 'TestTable@TestKey:TestVal.' \ 'TestMap[M].Number1[N]' base_lookup_key_valid = 12345 self.stubber.add_response('get_item', self.get_parameters_response, expected_params) with self.stubber: value = DynamodbLookup.handle(base_lookup_key) self.assertEqual(value, base_lookup_key_valid) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_list_handler(self, mock_client): expected_params = { 'TableName': 'TestTable', 'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,List1' } base_lookup_key = 'TestTable@TestKey:TestVal.' \ 'TestMap[M].List1[L]' base_lookup_key_valid = ['ListVal1', 'ListVal2'] self.stubber.add_response('get_item', self.get_parameters_response, expected_params) with self.stubber: value = DynamodbLookup.handle(base_lookup_key) self.assertEqual(value, base_lookup_key_valid) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_empty_table_handler(self, mock_client): expected_params = { 'TableName': '', 'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,String1' } base_lookup_key = '@TestKey:TestVal.TestMap[M].String1' self.stubber.add_response('get_item', self.get_parameters_response, expected_params) with self.stubber: try: DynamodbLookup.handle(base_lookup_key) except ValueError as e: self.assertEqual( 'Please make sure to include a dynamodb table name', str(e)) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_missing_table_handler(self, mock_client): expected_params = { 'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,String1' } base_lookup_key = 'TestKey:TestVal.TestMap[M].String1' self.stubber.add_response('get_item', self.get_parameters_response, expected_params) with 
self.stubber: try: DynamodbLookup.handle(base_lookup_key) except ValueError as e: self.assertEqual( 'Please make sure to include a tablename', str(e)) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_invalid_table_handler(self, mock_client): expected_params = { 'TableName': 'FakeTable', 'Key': { 'TestKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,String1' } base_lookup_key = 'FakeTable@TestKey:TestVal.TestMap[M].String1' service_error_code = 'ResourceNotFoundException' self.stubber.add_client_error('get_item', service_error_code=service_error_code, expected_params=expected_params) with self.stubber: try: DynamodbLookup.handle(base_lookup_key) except ValueError as e: self.assertEqual( 'Cannot find the dynamodb table: FakeTable', str(e)) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_invalid_partition_key_handler(self, mock_client): expected_params = { 'TableName': 'TestTable', 'Key': { 'FakeKey': {'S': 'TestVal'} }, 'ProjectionExpression': 'TestVal,TestMap,String1' } base_lookup_key = 'TestTable@FakeKey:TestVal.TestMap[M].String1' service_error_code = 'ValidationException' self.stubber.add_client_error('get_item', service_error_code=service_error_code, expected_params=expected_params) with self.stubber: try: DynamodbLookup.handle(base_lookup_key) except ValueError as e: self.assertEqual( 'No dynamodb record matched the partition key: FakeKey', str(e)) @mock.patch('stacker.lookups.handlers.dynamodb.get_session', return_value=SessionStub(client)) def test_dynamodb_invalid_partition_val_handler(self, mock_client): expected_params = { 'TableName': 'TestTable', 'Key': { 'TestKey': {'S': 'FakeVal'} }, 'ProjectionExpression': 'FakeVal,TestMap,String1' } empty_response = {'ResponseMetadata': {}} base_lookup_key = 'TestTable@TestKey:FakeVal.TestMap[M].String1' self.stubber.add_response('get_item', empty_response, 
expected_params) with self.stubber: try: DynamodbLookup.handle(base_lookup_key) except ValueError as e: self.assertEqual( 'The dynamodb record could not be found using ' 'the following key: {\'S\': \'FakeVal\'}', str(e)) ================================================ FILE: stacker/tests/lookups/handlers/test_envvar.py ================================================ import unittest from stacker.lookups.handlers.envvar import EnvvarLookup import os class TestEnvVarHandler(unittest.TestCase): def setUp(self): self.testkey = 'STACKER_ENVVAR_TESTCASE' self.invalidtestkey = 'STACKER_INVALID_ENVVAR_TESTCASE' self.testval = 'TestVal' os.environ[self.testkey] = self.testval def test_valid_envvar(self): value = EnvvarLookup.handle(self.testkey) self.assertEqual(value, self.testval) def test_invalid_envvar(self): with self.assertRaises(ValueError): EnvvarLookup.handle(self.invalidtestkey) ================================================ FILE: stacker/tests/lookups/handlers/test_file.py ================================================ # encoding: utf-8 import unittest import mock import base64 import yaml import json from troposphere import Base64, GenericHelperFn, Join from stacker.lookups.handlers.file import (json_codec, FileLookup, parameterized_codec, yaml_codec) def to_template_dict(obj): """Extract the CFN template dict of an object for test comparisons""" if hasattr(obj, 'to_dict') and callable(obj.to_dict): return obj.to_dict() elif isinstance(obj, dict): return dict((key, to_template_dict(value)) for (key, value) in obj.items()) elif isinstance(obj, (list, tuple)): return type(obj)(to_template_dict(item) for item in obj) else: return obj class TestFileTranslator(unittest.TestCase): @staticmethod def assertTemplateEqual(left, right): """ Assert that two codec results are equivalent Convert both sides to their template representations, since Troposphere objects are not natively comparable """ return to_template_dict(left) == to_template_dict(right) def 
test_parameterized_codec_b64(self):
        # Interpolated content wrapped in Fn::Base64 when base64=True.
        expected = Base64(
            Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here'])
        )
        out = parameterized_codec(u'Test {{Interpolation}} Here', True)
        self.assertEqual(Base64, out.__class__)
        self.assertTemplateEqual(expected, out)

    def test_parameterized_codec_plain(self):
        expected = Join(u'', [u'Test ', {u'Ref': u'Interpolation'}, u' Here'])

        out = parameterized_codec(u'Test {{Interpolation}} Here', False)
        self.assertEqual(GenericHelperFn, out.__class__)
        self.assertTemplateEqual(expected, out)

    def test_parameterized_codec_plain_no_interpolation(self):
        # No {{...}} placeholders: content passes through unchanged.
        expected = u'Test Without Interpolation Here'

        out = parameterized_codec(u'Test Without Interpolation Here', False)
        self.assertEqual(GenericHelperFn, out.__class__)
        self.assertTemplateEqual(expected, out)

    def test_yaml_codec_raw(self):
        structured = {
            u'Test': [1, None, u'unicode ✓', {u'some': u'obj'}]
        }
        # Note: must use safe_dump, since regular dump adds !python/unicode
        # tags, which we don't want, or we can't be sure we're correctly
        # loading string as unicode.
        raw = yaml.safe_dump(structured)

        out = yaml_codec(raw, parameterized=False)
        self.assertEqual(structured, out)

    def test_yaml_codec_parameterized(self):
        processed = {
            u'Test': Join(u'', [u'Test ', {u'Ref': u'Interpolation'},
                                u' Here'])
        }
        structured = {
            u'Test': u'Test {{Interpolation}} Here'
        }
        raw = yaml.safe_dump(structured)

        out = yaml_codec(raw, parameterized=True)
        self.assertTemplateEqual(processed, out)

    def test_json_codec_raw(self):
        structured = {
            u'Test': [1, None, u'str', {u'some': u'obj'}]
        }
        raw = json.dumps(structured)

        out = json_codec(raw, parameterized=False)
        self.assertEqual(structured, out)

    def test_json_codec_parameterized(self):
        processed = {
            u'Test': Join(u'', [u'Test ', {u'Ref': u'Interpolation'},
                                u' Here'])
        }
        structured = {
            u'Test': u'Test {{Interpolation}} Here'
        }
        raw = json.dumps(structured)

        out = json_codec(raw, parameterized=True)
        self.assertTemplateEqual(processed, out)

    @mock.patch('stacker.lookups.handlers.file.read_value_from_path',
                return_value='')
    def test_file_loaded(self, content_mock):
        # The file:// URI portion must be passed through to the reader.
        FileLookup.handle(u'plain:file://tmp/test')
        content_mock.assert_called_with(u'file://tmp/test')

    @mock.patch('stacker.lookups.handlers.file.read_value_from_path',
                return_value=u'Hello, world')
    def test_handler_plain(self, _):
        out = FileLookup.handle(u'plain:file://tmp/test')
        self.assertEqual(u'Hello, world', out)

    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_b64(self, content_mock):
        plain = u'Hello, world'
        encoded = base64.b64encode(plain.encode('utf8')).decode('utf-8')

        content_mock.return_value = plain
        out = FileLookup.handle(u'base64:file://tmp/test')
        self.assertEqual(encoded, out)

    @mock.patch('stacker.lookups.handlers.file.parameterized_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_parameterized(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'parameterized:file://tmp/test')
        # Second positional arg is the base64 flag: False for plain.
        codec_mock.assert_called_once_with(content_mock.return_value, False)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.parameterized_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_parameterized_b64(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'parameterized-b64:file://tmp/test')
        codec_mock.assert_called_once_with(content_mock.return_value, True)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.yaml_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_yaml(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'yaml:file://tmp/test')
        codec_mock.assert_called_once_with(content_mock.return_value,
                                           parameterized=False)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.yaml_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_yaml_parameterized(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'yaml-parameterized:file://tmp/test')
        codec_mock.assert_called_once_with(content_mock.return_value,
                                           parameterized=True)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.json_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_json(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'json:file://tmp/test')
        codec_mock.assert_called_once_with(content_mock.return_value,
                                           parameterized=False)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.json_codec')
    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_handler_json_parameterized(self, content_mock, codec_mock):
        result = mock.Mock()
        codec_mock.return_value = result

        out = FileLookup.handle(u'json-parameterized:file://tmp/test')
        codec_mock.assert_called_once_with(content_mock.return_value,
                                           parameterized=True)
        self.assertEqual(result, out)

    @mock.patch('stacker.lookups.handlers.file.read_value_from_path')
    def test_unknown_codec(self, _):
        # Unregistered codec names surface as a KeyError from the registry.
        with self.assertRaises(KeyError):
            FileLookup.handle(u'bad:file://tmp/test')


================================================
FILE: stacker/tests/lookups/handlers/test_hook_data.py
================================================
import unittest

from stacker.context import Context
from stacker.lookups.handlers.hook_data import HookDataLookup


class TestHookDataLookup(unittest.TestCase):
    """Tests for the hook_data lookup handler."""

    def setUp(self):
        self.ctx = Context({"namespace": "test-ns"})
        self.ctx.set_hook_data("fake_hook", {"result": "good"})

    def test_valid_hook_data(self):
        value = HookDataLookup.handle("fake_hook::result",
                                      context=self.ctx)
        self.assertEqual(value, "good")

    def test_invalid_hook_data(self):
        with self.assertRaises(KeyError):
            HookDataLookup.handle("fake_hook::bad_key",
                                  context=self.ctx)

    def test_bad_value_hook_data(self):
        # Missing the "::" separator entirely.
        with self.assertRaises(ValueError):
            HookDataLookup.handle("fake_hook",
                                  context=self.ctx)


================================================
FILE: stacker/tests/lookups/handlers/test_output.py
================================================
from mock import MagicMock
import unittest

from stacker.stack import Stack
from ...factories import generate_definition
from stacker.lookups.handlers.output import OutputLookup


class TestOutputHandler(unittest.TestCase):
    """Tests for the output lookup handler."""

    def setUp(self):
        self.context = MagicMock()

    def test_output_handler(self):
        stack = Stack(
            definition=generate_definition("vpc", 1),
            context=self.context)
        stack.set_outputs({
            "SomeOutput": "Test Output"})
        self.context.get_stack.return_value = stack
        value = OutputLookup.handle("stack-name::SomeOutput",
                                    context=self.context)
        self.assertEqual(value, "Test Output")
        self.assertEqual(self.context.get_stack.call_count, 1)
        args = self.context.get_stack.call_args
self.assertEqual(args[0][0], "stack-name") ================================================ FILE: stacker/tests/lookups/handlers/test_rxref.py ================================================ from mock import MagicMock import unittest from stacker.lookups.handlers.rxref import RxrefLookup from ....context import Context from ....config import Config class TestRxrefHandler(unittest.TestCase): def setUp(self): self.provider = MagicMock() self.context = Context( config=Config({"namespace": "ns"}) ) def test_rxref_handler(self): self.provider.get_output.return_value = "Test Output" value = RxrefLookup.handle("fully-qualified-stack-name::SomeOutput", provider=self.provider, context=self.context) self.assertEqual(value, "Test Output") args = self.provider.get_output.call_args self.assertEqual(args[0][0], "ns-fully-qualified-stack-name") self.assertEqual(args[0][1], "SomeOutput") ================================================ FILE: stacker/tests/lookups/handlers/test_split.py ================================================ import unittest from stacker.lookups.handlers.split import SplitLookup class TestSplitLookup(unittest.TestCase): def test_single_character_split(self): value = ",::a,b,c" expected = ["a", "b", "c"] assert SplitLookup.handle(value) == expected def test_multi_character_split(self): value = ",,::a,,b,c" expected = ["a", "b,c"] assert SplitLookup.handle(value) == expected def test_invalid_value_split(self): value = ",:a,b,c" with self.assertRaises(ValueError): SplitLookup.handle(value) ================================================ FILE: stacker/tests/lookups/handlers/test_ssmstore.py ================================================ import unittest import mock from botocore.stub import Stubber from stacker.lookups.handlers.ssmstore import SsmstoreLookup import boto3 from stacker.tests.factories import SessionStub class TestSSMStoreHandler(unittest.TestCase): client = boto3.client('ssm', region_name='us-east-1') def setUp(self): self.stubber = 
Stubber(self.client) self.get_parameters_response = { 'Parameters': [ { 'Name': 'ssmkey', 'Type': 'String', 'Value': 'ssmvalue' } ], 'InvalidParameters': [ 'invalidssmparam' ] } self.invalid_get_parameters_response = { 'InvalidParameters': [ 'ssmkey' ] } self.expected_params = { 'Names': ['ssmkey'], 'WithDecryption': True } self.ssmkey = "ssmkey" self.ssmvalue = "ssmvalue" @mock.patch('stacker.lookups.handlers.ssmstore.get_session', return_value=SessionStub(client)) def test_ssmstore_handler(self, mock_client): self.stubber.add_response('get_parameters', self.get_parameters_response, self.expected_params) with self.stubber: value = SsmstoreLookup.handle(self.ssmkey) self.assertEqual(value, self.ssmvalue) self.assertIsInstance(value, str) @mock.patch('stacker.lookups.handlers.ssmstore.get_session', return_value=SessionStub(client)) def test_ssmstore_invalid_value_handler(self, mock_client): self.stubber.add_response('get_parameters', self.invalid_get_parameters_response, self.expected_params) with self.stubber: try: SsmstoreLookup.handle(self.ssmkey) except ValueError: assert True @mock.patch('stacker.lookups.handlers.ssmstore.get_session', return_value=SessionStub(client)) def test_ssmstore_handler_with_region(self, mock_client): self.stubber.add_response('get_parameters', self.get_parameters_response, self.expected_params) region = "us-east-1" temp_value = "%s@%s" % (region, self.ssmkey) with self.stubber: value = SsmstoreLookup.handle(temp_value) self.assertEqual(value, self.ssmvalue) ================================================ FILE: stacker/tests/lookups/handlers/test_xref.py ================================================ from mock import MagicMock import unittest from stacker.lookups.handlers.xref import XrefLookup class TestXrefHandler(unittest.TestCase): def setUp(self): self.provider = MagicMock() self.context = MagicMock() def test_xref_handler(self): self.provider.get_output.return_value = "Test Output" value = 
XrefLookup.handle("fully-qualified-stack-name::SomeOutput", provider=self.provider, context=self.context) self.assertEqual(value, "Test Output") self.assertEqual(self.context.get_fqn.call_count, 0) args = self.provider.get_output.call_args self.assertEqual(args[0][0], "fully-qualified-stack-name") self.assertEqual(args[0][1], "SomeOutput") ================================================ FILE: stacker/tests/lookups/test_registry.py ================================================ import unittest from mock import MagicMock from stacker.exceptions import ( UnknownLookupType, FailedVariableLookup, ) from stacker.lookups.registry import LOOKUP_HANDLERS from stacker.variables import Variable, VariableValueLookup from ..factories import ( mock_context, mock_provider, ) class TestRegistry(unittest.TestCase): def setUp(self): self.ctx = mock_context() self.provider = mock_provider() def test_autoloaded_lookup_handlers(self): handlers = [ "output", "xref", "kms", "ssmstore", "envvar", "rxref", "ami", "file", "split", "default", "hook_data", "dynamodb", ] for handler in handlers: try: LOOKUP_HANDLERS[handler] except KeyError: self.assertTrue( False, "Lookup handler: '{}' was not registered".format(handler), ) def test_resolve_lookups_string_unknown_lookup(self): with self.assertRaises(UnknownLookupType): Variable("MyVar", "${bad_lookup foo}") def test_resolve_lookups_list_unknown_lookup(self): with self.assertRaises(UnknownLookupType): Variable( "MyVar", [ "${bad_lookup foo}", "random string", ] ) def resolve_lookups_with_output_handler_raise_valueerror(self, variable): """Mock output handler to throw ValueError, then run resolve_lookups on the given variable. 
""" mock_handler = MagicMock(side_effect=ValueError("Error")) # find the only lookup in the variable for value in variable._value: if isinstance(value, VariableValueLookup): value.handler = mock_handler with self.assertRaises(FailedVariableLookup) as cm: variable.resolve(self.ctx, self.provider) self.assertIsInstance(cm.exception.error, ValueError) def test_resolve_lookups_string_failed_variable_lookup(self): variable = Variable("MyVar", "${output foo::bar}") self.resolve_lookups_with_output_handler_raise_valueerror(variable) def test_resolve_lookups_list_failed_variable_lookup(self): variable = Variable( "MyVar", [ "random string", "${output foo::bar}", "random string", ] ) self.resolve_lookups_with_output_handler_raise_valueerror(variable) ================================================ FILE: stacker/tests/providers/__init__.py ================================================ ================================================ FILE: stacker/tests/providers/aws/__init__.py ================================================ ================================================ FILE: stacker/tests/providers/aws/test_default.py ================================================ import copy from datetime import datetime import os.path import random import string import threading import unittest from mock import patch, MagicMock from botocore.stub import Stubber from botocore.exceptions import ClientError, UnStubbedResponseError import boto3 from stacker.actions.diff import DictValue from stacker.providers.base import Template from stacker.session_cache import get_session from stacker.providers.aws import default from stacker.providers.aws.default import ( DEFAULT_CAPABILITIES, MAX_TAIL_RETRIES, Provider, requires_replacement, ask_for_approval, wait_till_change_set_complete, create_change_set, summarize_params_diff, generate_cloudformation_args, output_full_changeset ) from stacker import exceptions from stacker.stack import Stack def random_string(length=12): """ Returns a 
random string of variable length. Args: length (int): The # of characters to use in the random string. Returns: str: The random string. """ return ''.join( [random.choice(string.ascii_letters) for _ in range(length)]) def generate_describe_stacks_stack(stack_name, creation_time=None, stack_status="CREATE_COMPLETE", tags=None): tags = tags or [] return { "StackName": stack_name, "StackId": stack_name, "CreationTime": creation_time or datetime(2015, 1, 1), "StackStatus": stack_status, "Tags": tags } def generate_get_template(file_name='cfn_template.json', stages_available=['Original']): fixture_dir = os.path.join(os.path.dirname(__file__), '../../fixtures') with open(os.path.join(fixture_dir, file_name), 'r') as f: return { "StagesAvailable": stages_available, "TemplateBody": f.read() } def generate_stack_object(stack_name, outputs=None): mock_stack = MagicMock(['name', 'fqn', 'blueprint']) if not outputs: outputs = { "FakeOutput": { "Value": {"Ref": "FakeResource"} } } mock_stack.name = stack_name mock_stack.fqn = stack_name mock_stack.blueprint = MagicMock(['get_output_definitions']) mock_stack.blueprint.get_output_definitions = MagicMock( return_value=outputs ) return mock_stack def generate_resource_change(replacement=True): resource_change = { "Action": "Modify", "Details": [], "LogicalResourceId": "Fake", "PhysicalResourceId": "arn:aws:fake", "Replacement": "True" if replacement else "False", "ResourceType": "AWS::Fake", "Scope": ["Properties"], } return { "ResourceChange": resource_change, "Type": "Resource", } def generate_change_set_response(status, execution_status="AVAILABLE", changes=[], status_reason="FAKE"): return { "ChangeSetName": "string", "ChangeSetId": "string", "StackId": "string", "StackName": "string", "Description": "string", "Parameters": [ { "ParameterKey": "string", "ParameterValue": "string", "UsePreviousValue": False }, ], "CreationTime": datetime(2015, 1, 1), "ExecutionStatus": execution_status, "Status": status, "StatusReason": 
status_reason, "NotificationARNs": [ "string", ], "Capabilities": [ "CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND" ], "Tags": [ { "Key": "string", "Value": "string" }, ], "Changes": changes, "NextToken": "string" } def generate_change(action="Modify", resource_type="EC2::Instance", replacement="False", requires_recreation="Never"): """ Generate a minimal change for a changeset """ return { "Type": "Resource", "ResourceChange": { "Action": action, "LogicalResourceId": random_string(), "PhysicalResourceId": random_string(), "ResourceType": resource_type, "Replacement": replacement, "Scope": ["Properties"], "Details": [ { "Target": { "Attribute": "Properties", "Name": random_string(), "RequiresRecreation": requires_recreation }, "Evaluation": "Static", "ChangeSource": "ResourceReference", "CausingEntity": random_string() }, ] } } class TestMethods(unittest.TestCase): def setUp(self): self.cfn = boto3.client("cloudformation") self.stubber = Stubber(self.cfn) def test_requires_replacement(self): changeset = [ generate_resource_change(), generate_resource_change(replacement=False), generate_resource_change(), ] replacement = requires_replacement(changeset) self.assertEqual(len(replacement), 2) for resource in replacement: self.assertEqual(resource["ResourceChange"]["Replacement"], "True") def test_summarize_params_diff(self): unmodified_param = DictValue("ParamA", "new-param-value", "new-param-value") modified_param = DictValue("ParamB", "param-b-old-value", "param-b-new-value-delta") added_param = DictValue("ParamC", None, "param-c-new-value") removed_param = DictValue("ParamD", "param-d-old-value", None) params_diff = [ unmodified_param, modified_param, added_param, removed_param, ] self.assertEqual(summarize_params_diff([]), "") self.assertEqual(summarize_params_diff(params_diff), '\n'.join([ "Parameters Added: ParamC", "Parameters Removed: ParamD", "Parameters Modified: ParamB\n", ])) only_modified_params_diff = [modified_param] 
self.assertEqual(summarize_params_diff(only_modified_params_diff), "Parameters Modified: ParamB\n") only_added_params_diff = [added_param] self.assertEqual(summarize_params_diff(only_added_params_diff), "Parameters Added: ParamC\n") only_removed_params_diff = [removed_param] self.assertEqual(summarize_params_diff(only_removed_params_diff), "Parameters Removed: ParamD\n") def test_ask_for_approval(self): get_input_path = "stacker.ui.get_raw_input" with patch(get_input_path, return_value="y"): self.assertIsNone(ask_for_approval([], [], None)) for v in ("n", "N", "x", "\n"): with patch(get_input_path, return_value=v): with self.assertRaises(exceptions.CancelExecution): ask_for_approval([], []) with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input: with patch( "stacker.providers.aws.default.output_full_changeset" ) as mock_full_changeset: with self.assertRaises(exceptions.CancelExecution): ask_for_approval([], [], True) self.assertEqual(mock_full_changeset.call_count, 1) self.assertEqual(mock_get_input.call_count, 2) def test_ask_for_approval_with_params_diff(self): get_input_path = "stacker.ui.get_raw_input" params_diff = [ DictValue('ParamA', None, 'new-param-value'), DictValue('ParamB', 'param-b-old-value', 'param-b-new-value-delta') ] with patch(get_input_path, return_value="y"): self.assertIsNone(ask_for_approval([], params_diff, None)) for v in ("n", "N", "x", "\n"): with patch(get_input_path, return_value=v): with self.assertRaises(exceptions.CancelExecution): ask_for_approval([], params_diff) with patch(get_input_path, side_effect=["v", "n"]) as mock_get_input: with patch( "stacker.providers.aws.default.output_full_changeset" ) as mock_full_changeset: with self.assertRaises(exceptions.CancelExecution): ask_for_approval([], params_diff, True) self.assertEqual(mock_full_changeset.call_count, 1) self.assertEqual(mock_get_input.call_count, 2) @patch("stacker.providers.aws.default.format_params_diff") 
@patch('stacker.providers.aws.default.yaml.safe_dump') def test_output_full_changeset(self, mock_safe_dump, patched_format): get_input_path = "stacker.ui.get_raw_input" safe_dump_counter = 0 for v in ['y', 'v', 'Y', 'V']: with patch(get_input_path, return_value=v) as prompt: self.assertIsNone(output_full_changeset(full_changeset=[], params_diff=[], fqn=None)) self.assertEqual(prompt.call_count, 1) safe_dump_counter += 1 self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) self.assertEqual(patched_format.call_count, 0) for v in ['n', 'N']: with patch(get_input_path, return_value=v) as prompt: output_full_changeset(full_changeset=[], params_diff=[], answer=None, fqn=None) self.assertEqual(prompt.call_count, 1) self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) self.assertEqual(patched_format.call_count, 0) with self.assertRaises(exceptions.CancelExecution): output_full_changeset(full_changeset=[], params_diff=[], answer='x', fqn=None) output_full_changeset(full_changeset=[], params_diff=['mock'], answer='y', fqn=None) safe_dump_counter += 1 self.assertEqual(mock_safe_dump.call_count, safe_dump_counter) self.assertEqual(patched_format.call_count, 1) def test_wait_till_change_set_complete_success(self): self.stubber.add_response( "describe_change_set", generate_change_set_response("CREATE_COMPLETE") ) with self.stubber: wait_till_change_set_complete(self.cfn, "FAKEID") self.stubber.add_response( "describe_change_set", generate_change_set_response("FAILED") ) with self.stubber: wait_till_change_set_complete(self.cfn, "FAKEID") def test_wait_till_change_set_complete_failed(self): # Need 2 responses for try_count for i in range(2): self.stubber.add_response( "describe_change_set", generate_change_set_response("CREATE_PENDING") ) with self.stubber: with self.assertRaises(exceptions.ChangesetDidNotStabilize): wait_till_change_set_complete(self.cfn, "FAKEID", try_count=2, sleep_time=.1) def test_create_change_set_stack_did_not_change(self): 
self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) self.stubber.add_response( "describe_change_set", generate_change_set_response( "FAILED", status_reason="Stack didn't contain changes." ) ) self.stubber.add_response( "delete_change_set", {}, expected_params={"ChangeSetName": "CHANGESETID"} ) with self.stubber: with self.assertRaises(exceptions.StackDidNotChange): create_change_set( cfn_client=self.cfn, fqn="my-fake-stack", template=Template(url="http://fake.template.url.com/"), parameters=[], tags=[] ) def test_create_change_set_unhandled_failed_status(self): self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) self.stubber.add_response( "describe_change_set", generate_change_set_response( "FAILED", status_reason="Some random bad thing." ) ) with self.stubber: with self.assertRaises(exceptions.UnhandledChangeSetStatus): create_change_set( cfn_client=self.cfn, fqn="my-fake-stack", template=Template(url="http://fake.template.url.com/"), parameters=[], tags=[] ) def test_create_change_set_bad_execution_status(self): self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="UNAVAILABLE", ) ) with self.stubber: with self.assertRaises(exceptions.UnableToExecuteChangeSet): create_change_set( cfn_client=self.cfn, fqn="my-fake-stack", template=Template(url="http://fake.template.url.com/"), parameters=[], tags=[] ) def test_generate_cloudformation_args(self): stack_name = "mystack" template_url = "http://fake.s3url.com/blah.json" template_body = '{"fake_body": "woot"}' std_args = { "stack_name": stack_name, "parameters": [], "tags": [], "template": Template(url=template_url) } std_return = { "StackName": stack_name, "Parameters": [], "Tags": [], "Capabilities": DEFAULT_CAPABILITIES, "TemplateURL": template_url, } result = 
generate_cloudformation_args(**std_args) self.assertEqual(result, std_return) result = generate_cloudformation_args(service_role="FakeRole", **std_args) service_role_result = copy.deepcopy(std_return) service_role_result["RoleARN"] = "FakeRole" self.assertEqual(result, service_role_result) result = generate_cloudformation_args(change_set_name="MyChanges", **std_args) change_set_result = copy.deepcopy(std_return) change_set_result["ChangeSetName"] = "MyChanges" self.assertEqual(result, change_set_result) # Check stack policy stack_policy = Template(body="{}") result = generate_cloudformation_args(stack_policy=stack_policy, **std_args) stack_policy_result = copy.deepcopy(std_return) stack_policy_result["StackPolicyBody"] = "{}" self.assertEqual(result, stack_policy_result) # If not TemplateURL is provided, use TemplateBody std_args["template"] = Template(body=template_body) template_body_result = copy.deepcopy(std_return) del(template_body_result["TemplateURL"]) template_body_result["TemplateBody"] = template_body result = generate_cloudformation_args(**std_args) self.assertEqual(result, template_body_result) def test_generate_cloudformation_args_with_notification_arns(self): stack_name = "mystack" template_url = "http://fake.s3url.com/blah.json" std_args = { "stack_name": stack_name, "parameters": [], "tags": [], "template": Template(url=template_url), "notification_arns": [ "arn:aws:sns:us-east-1:1234567890:test-cf-deploy-notify-sns-topic-CfDeployNotify" # noqa ] } std_return = { "StackName": stack_name, "Parameters": [], "Tags": [], "Capabilities": DEFAULT_CAPABILITIES, "TemplateURL": template_url, "NotificationARNs": [ "arn:aws:sns:us-east-1:1234567890:test-cf-deploy-notify-sns-topic-CfDeployNotify" # noqa ] } result = generate_cloudformation_args(**std_args) self.assertEqual(result, std_return) class TestProviderDefaultMode(unittest.TestCase): def setUp(self): region = "us-east-1" self.session = get_session(region=region) self.provider = Provider( self.session, 
region=region, recreate_failed=False) self.stubber = Stubber(self.provider.cloudformation) def test_get_stack_stack_does_not_exist(self): stack_name = "MockStack" self.stubber.add_client_error( "describe_stacks", service_error_code="ValidationError", service_message="Stack with id %s does not exist" % stack_name, expected_params={"StackName": stack_name} ) with self.assertRaises(exceptions.StackDoesNotExist): with self.stubber: self.provider.get_stack(stack_name) def test_get_stack_stack_exists(self): stack_name = "MockStack" stack_response = { "Stacks": [generate_describe_stacks_stack(stack_name)] } self.stubber.add_response( "describe_stacks", stack_response, expected_params={"StackName": stack_name} ) with self.stubber: response = self.provider.get_stack(stack_name) self.assertEqual(response["StackName"], stack_name) def test_select_update_method(self): for i in [[{'force_interactive': True, 'force_change_set': False}, self.provider.interactive_update_stack], [{'force_interactive': False, 'force_change_set': False}, self.provider.default_update_stack], [{'force_interactive': False, 'force_change_set': True}, self.provider.noninteractive_changeset_update], [{'force_interactive': True, 'force_change_set': True}, self.provider.interactive_update_stack]]: self.assertEquals( self.provider.select_update_method(**i[0]), i[1] ) def test_prepare_stack_for_update_completed(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="UPDATE_COMPLETE") with self.stubber: self.assertTrue( self.provider.prepare_stack_for_update(stack, [])) def test_prepare_stack_for_update_in_progress(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="UPDATE_IN_PROGRESS") with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: with self.stubber: self.provider.prepare_stack_for_update(stack, []) self.assertIn('in-progress', str(raised.exception)) def 
test_prepare_stack_for_update_non_recreatable(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="REVIEW_IN_PROGRESS") with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: with self.stubber: self.provider.prepare_stack_for_update(stack, []) self.assertIn('Unsupported state', str(raised.exception)) def test_prepare_stack_for_update_disallowed(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="ROLLBACK_COMPLETE") with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: with self.stubber: self.provider.prepare_stack_for_update(stack, []) self.assertIn('re-creation is disabled', str(raised.exception)) # Ensure we point out to the user how to enable re-creation self.assertIn('--recreate-failed', str(raised.exception)) def test_prepare_stack_for_update_bad_tags(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="ROLLBACK_COMPLETE") self.provider.recreate_failed = True with self.assertRaises(exceptions.StackUpdateBadStatus) as raised: with self.stubber: self.provider.prepare_stack_for_update( stack, tags=[{'Key': 'stacker_namespace', 'Value': 'test'}]) self.assertIn('tags differ', str(raised.exception).lower()) def test_prepare_stack_for_update_recreate(self): stack_name = "MockStack" stack = generate_describe_stacks_stack( stack_name, stack_status="ROLLBACK_COMPLETE") self.stubber.add_response( "delete_stack", {}, expected_params={"StackName": stack_name} ) self.provider.recreate_failed = True with self.stubber: self.assertFalse( self.provider.prepare_stack_for_update(stack, [])) def test_noninteractive_changeset_update_no_stack_policy(self): stack_name = "MockStack" self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", 
execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("execute_change_set", {}) with self.stubber: self.provider.noninteractive_changeset_update( fqn=stack_name, template=Template(url="http://fake.template.url.com/"), old_parameters=[], parameters=[], stack_policy=None, tags=[], ) def test_noninteractive_changeset_update_with_stack_policy(self): stack_name = "MockStack" self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("set_stack_policy", {}) self.stubber.add_response("execute_change_set", {}) with self.stubber: self.provider.noninteractive_changeset_update( fqn=stack_name, template=Template(url="http://fake.template.url.com/"), old_parameters=[], parameters=[], stack_policy=Template(body="{}"), tags=[], ) @patch('stacker.providers.aws.default.output_full_changeset') def test_get_stack_changes_update(self, mock_output_full_cs): stack_name = "MockStack" mock_stack = generate_stack_object(stack_name) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack(stack_name)]} ) self.stubber.add_response( 'get_template', generate_get_template('cfn_template.yaml') ) self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': stack_name} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("delete_change_set", {}) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack(stack_name)]} ) with self.stubber: result = self.provider.get_stack_changes( stack=mock_stack, template=Template( url="http://fake.template.url.com/" ), 
parameters=[], tags=[]) mock_output_full_cs.assert_called_with(full_changeset=changes, params_diff=[], fqn=stack_name, answer='y') expected_outputs = { 'FakeOutput': ''.format( str({"Ref": "FakeResource"}) ) } self.assertEqual(self.provider.get_outputs(stack_name), expected_outputs) self.assertEqual(result, expected_outputs) @patch('stacker.providers.aws.default.output_full_changeset') def test_get_stack_changes_create(self, mock_output_full_cs): stack_name = "MockStack" mock_stack = generate_stack_object(stack_name) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack( stack_name, stack_status='REVIEW_IN_PROGRESS' )]} ) self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': stack_name} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("delete_change_set", {}) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack( stack_name, stack_status='REVIEW_IN_PROGRESS' )]} ) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack( stack_name, stack_status='REVIEW_IN_PROGRESS' )]} ) self.stubber.add_response("delete_stack", {}) with self.stubber: self.provider.get_stack_changes( stack=mock_stack, template=Template( url="http://fake.template.url.com/" ), parameters=[], tags=[]) mock_output_full_cs.assert_called_with(full_changeset=changes, params_diff=[], fqn=stack_name, answer='y') def test_tail_stack_retry_on_missing_stack(self): stack_name = "SlowToCreateStack" stack = MagicMock(spec=Stack) stack.fqn = "my-namespace-{}".format(stack_name) default.TAIL_RETRY_SLEEP = .01 # Ensure the stack never appears before we run out of retries for i in range(MAX_TAIL_RETRIES + 5): self.stubber.add_client_error( "describe_stack_events", service_error_code="ValidationError", 
service_message="Stack [{}] does not exist".format(stack_name), http_status_code=400, response_meta={"attempt": i + 1}, ) with self.stubber: try: self.provider.tail_stack(stack, threading.Event()) except ClientError as exc: self.assertEqual( exc.response["ResponseMetadata"]["attempt"], MAX_TAIL_RETRIES ) def test_tail_stack_retry_on_missing_stack_eventual_success(self): stack_name = "SlowToCreateStack" stack = MagicMock(spec=Stack) stack.fqn = "my-namespace-{}".format(stack_name) default.TAIL_RETRY_SLEEP = .01 default.GET_EVENTS_SLEEP = .01 rcvd_events = [] def mock_log_func(e): rcvd_events.append(e) def valid_event_response(stack, event_id): return { "StackEvents": [ { "StackId": stack.fqn + "12345", "EventId": event_id, "StackName": stack.fqn, "Timestamp": datetime.now() }, ] } # Ensure the stack never appears before we run out of retries for i in range(3): self.stubber.add_client_error( "describe_stack_events", service_error_code="ValidationError", service_message="Stack [{}] does not exist".format(stack_name), http_status_code=400, response_meta={"attempt": i + 1}, ) self.stubber.add_response( "describe_stack_events", valid_event_response(stack, "InitialEvents") ) self.stubber.add_response( "describe_stack_events", valid_event_response(stack, "Event1") ) with self.stubber: try: self.provider.tail_stack(stack, threading.Event(), log_func=mock_log_func) except UnStubbedResponseError: # Eventually we run out of responses - could not happen in # regular execution # normally this would just be dealt with when the threads were # shutdown, but doing so here is a little difficult because # we can't control the `tail_stack` loop pass self.assertEqual(rcvd_events[0]["EventId"], "Event1") class TestProviderInteractiveMode(unittest.TestCase): def setUp(self): region = "us-east-1" self.session = get_session(region=region) self.provider = Provider( self.session, interactive=True, recreate_failed=True) self.stubber = Stubber(self.provider.cloudformation) def 
test_successful_init(self): replacements = True p = Provider(self.session, interactive=True, replacements_only=replacements) self.assertEqual(p.replacements_only, replacements) @patch("stacker.providers.aws.default.ask_for_approval") def test_update_stack_execute_success_no_stack_policy(self, patched_approval): stack_name = "my-fake-stack" self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("execute_change_set", {}) with self.stubber: self.provider.update_stack( fqn=stack_name, template=Template(url="http://fake.template.url.com/"), old_parameters=[], parameters=[], tags=[] ) patched_approval.assert_called_with(full_changeset=changes, params_diff=[], include_verbose=True, fqn=stack_name) self.assertEqual(patched_approval.call_count, 1) @patch("stacker.providers.aws.default.ask_for_approval") def test_update_stack_execute_success_with_stack_policy(self, patched_approval): stack_name = "my-fake-stack" self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': 'STACKID'} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("set_stack_policy", {}) self.stubber.add_response("execute_change_set", {}) with self.stubber: self.provider.update_stack( fqn=stack_name, template=Template(url="http://fake.template.url.com/"), old_parameters=[], parameters=[], tags=[], stack_policy=Template(body="{}"), ) patched_approval.assert_called_with(full_changeset=changes, params_diff=[], include_verbose=True, fqn=stack_name) self.assertEqual(patched_approval.call_count, 1) def test_select_update_method(self): for i in 
[[{'force_interactive': False, 'force_change_set': False}, self.provider.interactive_update_stack], [{'force_interactive': True, 'force_change_set': False}, self.provider.interactive_update_stack], [{'force_interactive': False, 'force_change_set': True}, self.provider.interactive_update_stack], [{'force_interactive': True, 'force_change_set': True}, self.provider.interactive_update_stack]]: self.assertEquals( self.provider.select_update_method(**i[0]), i[1] ) @patch('stacker.providers.aws.default.output_full_changeset') @patch('stacker.providers.aws.default.output_summary') def test_get_stack_changes_interactive(self, mock_output_summary, mock_output_full_cs): stack_name = "MockStack" mock_stack = generate_stack_object(stack_name) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack(stack_name)]} ) self.stubber.add_response( 'get_template', generate_get_template('cfn_template.yaml') ) self.stubber.add_response( "create_change_set", {'Id': 'CHANGESETID', 'StackId': stack_name} ) changes = [] changes.append(generate_change()) self.stubber.add_response( "describe_change_set", generate_change_set_response( status="CREATE_COMPLETE", execution_status="AVAILABLE", changes=changes, ) ) self.stubber.add_response("delete_change_set", {}) self.stubber.add_response( 'describe_stacks', {'Stacks': [generate_describe_stacks_stack(stack_name)]} ) with self.stubber: self.provider.get_stack_changes( stack=mock_stack, template=Template( url="http://fake.template.url.com/" ), parameters=[], tags=[]) mock_output_summary.assert_called_with(stack_name, 'changes', changes, [], replacements_only=False) mock_output_full_cs.assert_called_with(full_changeset=changes, params_diff=[], fqn=stack_name) ================================================ FILE: stacker/tests/test_config.py ================================================ import sys import unittest import yaml from stacker.config import ( render_parse_load, load, render, parse, dump, 
process_remote_sources ) from stacker.config import Config, Stack from stacker.environment import ( parse_environment, parse_yaml_environment ) from stacker import exceptions from stacker.lookups.registry import LOOKUP_HANDLERS from yaml.constructor import ConstructorError config = """a: $a b: $b c: $c""" class TestConfig(unittest.TestCase): def test_render_missing_env(self): env = {"a": "A"} with self.assertRaises(exceptions.MissingEnvironment) as expected: render(config, env) self.assertEqual(expected.exception.key, "b") def test_render_no_variable_config(self): c = render("namespace: prod", {}) self.assertEqual("namespace: prod", c) def test_render_valid_env_substitution(self): c = render("namespace: $namespace", {"namespace": "prod"}) self.assertEqual("namespace: prod", c) def test_render_blank_env_values(self): conf = """namespace: ${namespace}""" e = parse_environment("""namespace:""") c = render(conf, e) self.assertEqual("namespace: ", c) e = parse_environment("""namespace: !!str""") c = render(conf, e) self.assertEqual("namespace: !!str", c) def test_render_yaml(self): conf = """ namespace: ${namespace} list_var: ${env_list} dict_var: ${env_dict} str_var: ${env_str} nested_list: - ${list_1} - ${dict_1} - ${str_1} nested_dict: a: ${list_1} b: ${dict_1} c: ${str_1} empty: ${empty_string} substr: prefix-${str_1}-suffix multiple: ${str_1}-${str_2} dont_match_this: ${output something} """ env = """ namespace: test env_list: &listAnchor - a - b - c env_dict: &dictAnchor a: 1 b: 2 c: 3 env_str: Hello World! 
list_1: *listAnchor dict_1: *dictAnchor str_1: another str str_2: hello empty_string: "" """ e = parse_yaml_environment(env) c = render(conf, e) # Parse the YAML again, so that we can check structure pc = yaml.safe_load(c) exp_dict = {'a': 1, 'b': 2, 'c': 3} exp_list = ['a', 'b', 'c'] self.assertEquals(pc['namespace'], 'test') self.assertEquals(pc['list_var'], exp_list) self.assertEquals(pc['dict_var'], exp_dict) self.assertEquals(pc['str_var'], 'Hello World!') self.assertEquals(pc['nested_list'][0], exp_list) self.assertEquals(pc['nested_list'][1], exp_dict) self.assertEquals(pc['nested_list'][2], 'another str') self.assertEquals(pc['nested_dict']['a'], exp_list) self.assertEquals(pc['nested_dict']['b'], exp_dict) self.assertEquals(pc['nested_dict']['c'], 'another str') self.assertEquals(pc['empty'], '') self.assertEquals(pc['substr'], 'prefix-another str-suffix') self.assertEquals(pc['multiple'], 'another str-hello') self.assertEquals(pc['dont_match_this'], '${output something}') def test_render_yaml_errors(self): # We shouldn't be able to substitute an object into a string conf = "something: prefix-${var_name}" env = """ var_name: foo: bar """ e = parse_yaml_environment(env) with self.assertRaises(exceptions.WrongEnvironmentType): render(conf, e) # Missing keys need to raise errors too conf = "something: ${variable}" env = "some_other_variable: 5" e = parse_yaml_environment(env) with self.assertRaises(exceptions.MissingEnvironment): render(conf, e) def test_config_validate_missing_stack_source(self): config = Config({ "namespace": "prod", "stacks": [ { "name": "bastion"}]}) with self.assertRaises(exceptions.InvalidConfig) as ex: config.validate() stack_errors = ex.exception.errors['stacks'][0] self.assertEquals( stack_errors['template_path'][0].__str__(), "class_path or template_path is required.") self.assertEquals( stack_errors['class_path'][0].__str__(), "class_path or template_path is required.") def 
test_config_validate_missing_stack_source_when_locked(self): config = Config({ "namespace": "prod", "stacks": [ { "name": "bastion", "locked": True}]}) config.validate() def test_config_validate_stack_class_and_template_paths(self): config = Config({ "namespace": "prod", "stacks": [ { "name": "bastion", "class_path": "foo", "template_path": "bar"}]}) with self.assertRaises(exceptions.InvalidConfig) as ex: config.validate() stack_errors = ex.exception.errors['stacks'][0] self.assertEquals( stack_errors['template_path'][0].__str__(), "class_path cannot be present when template_path is provided.") self.assertEquals( stack_errors['class_path'][0].__str__(), "template_path cannot be present when class_path is provided.") def test_config_validate_missing_name(self): config = Config({ "namespace": "prod", "stacks": [ { "class_path": "blueprints.Bastion"}]}) with self.assertRaises(exceptions.InvalidConfig) as ex: config.validate() error = ex.exception.errors['stacks'][0]['name'].errors[0] self.assertEquals( error.__str__(), "This field is required.") def test_config_validate_duplicate_stack_names(self): config = Config({ "namespace": "prod", "stacks": [ { "name": "bastion", "class_path": "blueprints.Bastion"}, { "name": "bastion", "class_path": "blueprints.BastionV2"}]}) with self.assertRaises(exceptions.InvalidConfig) as ex: config.validate() error = ex.exception.errors['stacks'][0] self.assertEquals( error.__str__(), "Duplicate stack bastion found at index 0.") def test_dump_unicode(self): config = Config() config.namespace = "test" self.assertEquals(dump(config), b"""namespace: test stacks: [] """) config = Config({"namespace": "test"}) # Ensure that we're producing standard yaml, that doesn't include # python specific objects. 
self.assertNotEquals( dump(config), b"namespace: !!python/unicode 'test'\n") self.assertEquals(dump(config), b"""namespace: test stacks: [] """) def test_parse_tags(self): config = parse(""" namespace: prod tags: "a:b": "c" "hello": 1 simple_tag: simple value """) self.assertEquals(config.tags, { "a:b": "c", "hello": "1", "simple_tag": "simple value"}) def test_parse_with_arbitrary_anchors(self): config = parse(""" namespace: prod common_variables: &common_variables Foo: bar stacks: - name: vpc class_path: blueprints.VPC variables: << : *common_variables """) stack = config.stacks[0] self.assertEquals(stack.variables, {"Foo": "bar"}) def test_parse_with_deprecated_parameters(self): config = parse(""" namespace: prod stacks: - name: vpc class_path: blueprints.VPC parameters: Foo: bar """) with self.assertRaises(exceptions.InvalidConfig) as ex: config.validate() error = ex.exception.errors['stacks'][0]['parameters'][0] self.assertEquals( error.__str__(), "DEPRECATION: Stack definition vpc contains deprecated " "'parameters', rather than 'variables'. You are required to update" " your config. 
See https://stacker.readthedocs.io/en/latest/c" "onfig.html#variables for additional information.") def test_config_build(self): vpc = Stack({"name": "vpc", "class_path": "blueprints.VPC"}) config = Config({"namespace": "prod", "stacks": [vpc]}) self.assertEquals(config.namespace, "prod") self.assertEquals(config.stacks[0].name, "vpc") self.assertEquals(config["namespace"], "prod") config.validate() def test_parse(self): config_with_lists = """ namespace: prod stacker_bucket: stacker-prod pre_build: - path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com post_build: - path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com pre_destroy: - path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com post_destroy: - path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com package_sources: s3: - bucket: acmecorpbucket key: public/acmecorp-blueprints-v1.zip - bucket: examplecorpbucket key: public/examplecorp-blueprints-v2.tar.gz requester_pays: true - bucket: anotherexamplebucket key: example-blueprints-v3.tar.gz use_latest: false paths: - foo configs: - foo/config.yml git: - uri: git@github.com:acmecorp/stacker_blueprints.git - uri: git@github.com:remind101/stacker_blueprints.git tag: 1.0.0 paths: - stacker_blueprints - uri: git@github.com:contoso/webapp.git branch: staging - uri: git@github.com:contoso/foo.git commit: 12345678 paths: - bar configs: - bar/moreconfig.yml tags: environment: production stacks: - name: vpc class_path: blueprints.VPC variables: PrivateSubnets: - 10.0.0.0/24 - name: bastion class_path: blueprints.Bastion requires: ['vpc'] variables: VpcId: ${output vpc::VpcId} """ config_with_dicts = """ namespace: prod stacker_bucket: stacker-prod pre_build: prebuild_createdomain: path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com post_build: 
postbuild_createdomain: path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com pre_destroy: predestroy_createdomain: path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com post_destroy: postdestroy_createdomain: path: stacker.hooks.route53.create_domain required: true enabled: true args: domain: mydomain.com package_sources: s3: - bucket: acmecorpbucket key: public/acmecorp-blueprints-v1.zip - bucket: examplecorpbucket key: public/examplecorp-blueprints-v2.tar.gz requester_pays: true - bucket: anotherexamplebucket key: example-blueprints-v3.tar.gz use_latest: false paths: - foo configs: - foo/config.yml git: - uri: git@github.com:acmecorp/stacker_blueprints.git - uri: git@github.com:remind101/stacker_blueprints.git tag: 1.0.0 paths: - stacker_blueprints - uri: git@github.com:contoso/webapp.git branch: staging - uri: git@github.com:contoso/foo.git commit: 12345678 paths: - bar configs: - bar/moreconfig.yml tags: environment: production stacks: vpc: class_path: blueprints.VPC variables: PrivateSubnets: - 10.0.0.0/24 bastion: class_path: blueprints.Bastion requires: ['vpc'] variables: VpcId: ${output vpc::VpcId} """ for raw_config in [config_with_lists, config_with_dicts]: config = parse(raw_config) config.validate() self.assertEqual(config.namespace, "prod") self.assertEqual(config.stacker_bucket, "stacker-prod") for hooks in [config.pre_build, config.post_build, config.pre_destroy, config.post_destroy]: self.assertEqual( hooks[0].path, "stacker.hooks.route53.create_domain") self.assertEqual( hooks[0].required, True) self.assertEqual( hooks[0].args, {"domain": "mydomain.com"}) self.assertEqual( config.package_sources.s3[0].bucket, "acmecorpbucket") self.assertEqual( config.package_sources.s3[0].key, "public/acmecorp-blueprints-v1.zip") self.assertEqual( config.package_sources.s3[1].bucket, "examplecorpbucket") self.assertEqual( config.package_sources.s3[1].key, 
"public/examplecorp-blueprints-v2.tar.gz") self.assertEqual( config.package_sources.s3[1].requester_pays, True) self.assertEqual( config.package_sources.s3[2].use_latest, False) self.assertEqual( config.package_sources.git[0].uri, "git@github.com:acmecorp/stacker_blueprints.git") self.assertEqual( config.package_sources.git[1].uri, "git@github.com:remind101/stacker_blueprints.git") self.assertEqual( config.package_sources.git[1].tag, "1.0.0") self.assertEqual( config.package_sources.git[1].paths, ["stacker_blueprints"]) self.assertEqual( config.package_sources.git[2].branch, "staging") self.assertEqual(config.tags, {"environment": "production"}) self.assertEqual(len(config.stacks), 2) vpc_index = next( i for (i, d) in enumerate(config.stacks) if d.name == "vpc" ) vpc = config.stacks[vpc_index] self.assertEqual(vpc.name, "vpc") self.assertEqual(vpc.class_path, "blueprints.VPC") self.assertEqual(vpc.requires, None) self.assertEqual(vpc.variables, {"PrivateSubnets": ["10.0.0.0/24"]}) bastion_index = next( i for (i, d) in enumerate(config.stacks) if d.name == "bastion" ) bastion = config.stacks[bastion_index] self.assertEqual(bastion.name, "bastion") self.assertEqual(bastion.class_path, "blueprints.Bastion") self.assertEqual(bastion.requires, ["vpc"]) self.assertEqual(bastion.variables, {"VpcId": "${output vpc::VpcId}"}) def test_dump_complex(self): config = Config({ "namespace": "prod", "stacks": [ Stack({ "name": "vpc", "class_path": "blueprints.VPC"}), Stack({ "name": "bastion", "class_path": "blueprints.Bastion", "requires": ["vpc"]})]}) self.assertEqual(dump(config), b"""namespace: prod stacks: - class_path: blueprints.VPC enabled: true locked: false name: vpc protected: false - class_path: blueprints.Bastion enabled: true locked: false name: bastion protected: false requires: - vpc """) def test_load_register_custom_lookups(self): config = Config({ "lookups": { "custom": "importlib.import_module"}}) load(config) 
self.assertTrue(callable(LOOKUP_HANDLERS["custom"])) def test_load_adds_sys_path(self): config = Config({"sys_path": "/foo/bar"}) load(config) self.assertIn("/foo/bar", sys.path) def test_process_empty_remote_sources(self): config = """ namespace: prod stacks: - name: vpc class_path: blueprints.VPC """ self.assertEqual(config, process_remote_sources(config)) def test_lookup_with_sys_path(self): config = Config({ "sys_path": "stacker/tests", "lookups": { "custom": "fixtures.mock_lookups.handler"}}) load(config) self.assertTrue(callable(LOOKUP_HANDLERS["custom"])) def test_render_parse_load_namespace_fallback(self): conf = """ stacks: - name: vpc class_path: blueprints.VPC """ config = render_parse_load( conf, environment={"namespace": "prod"}, validate=False) config.validate() self.assertEquals(config.namespace, "prod") def test_allow_most_keys_to_be_duplicates_for_overrides(self): yaml_config = """ namespace: prod stacks: - name: vpc class_path: blueprints.VPC variables: CIDR: 192.168.1.0/24 CIDR: 192.168.2.0/24 """ doc = parse(yaml_config) self.assertEqual( doc["stacks"][0]["variables"]["CIDR"], "192.168.2.0/24" ) yaml_config = """ default_variables: &default_variables CIDR: 192.168.1.0/24 namespace: prod stacks: - name: vpc class_path: blueprints.VPC variables: << : *default_variables CIDR: 192.168.2.0/24 """ doc = parse(yaml_config) self.assertEqual( doc["stacks"][0]["variables"]["CIDR"], "192.168.2.0/24" ) def test_raise_constructor_error_on_keyword_duplicate_key(self): """Some keys should never have a duplicate sibling. For example we treat `class_path` as a special "keyword" and disallow dupes.""" yaml_config = """ namespace: prod stacks: - name: vpc class_path: blueprints.VPC class_path: blueprints.Fake """ with self.assertRaises(ConstructorError): parse(yaml_config) def test_raise_construct_error_on_duplicate_stack_name_dict(self): """Some mappings should never have a duplicate children. 
For example we treat `stacks` as a special mapping and disallow dupe children keys.""" yaml_config = """ namespace: prod stacks: my_vpc: class_path: blueprints.VPC1 my_vpc: class_path: blueprints.VPC2 """ with self.assertRaises(ConstructorError): parse(yaml_config) def test_parse_invalid_inner_keys(self): yaml_config = """ namespace: prod stacks: - name: vpc class_path: blueprints.VPC garbage: yes variables: Foo: bar """ with self.assertRaises(exceptions.InvalidConfig): parse(yaml_config) if __name__ == '__main__': unittest.main() ================================================ FILE: stacker/tests/test_context.py ================================================ import unittest from stacker.context import Context, get_fqn from stacker.config import load, Config from stacker.hooks.utils import handle_hooks class TestContext(unittest.TestCase): def setUp(self): self.config = Config({ "namespace": "namespace", "stacks": [ {"name": "stack1"}, {"name": "stack2"}]}) def test_context_optional_keys_set(self): context = Context( config=Config({}), stack_names=["stack"], ) self.assertEqual(context.mappings, {}) self.assertEqual(context.stack_names, ["stack"]) def test_context_get_stacks(self): context = Context(config=self.config) self.assertEqual(len(context.get_stacks()), 2) def test_context_get_stacks_dict_use_fqn(self): context = Context(config=self.config) stacks_dict = context.get_stacks_dict() stack_names = sorted(stacks_dict.keys()) self.assertEqual(stack_names[0], "namespace-stack1") self.assertEqual(stack_names[1], "namespace-stack2") def test_context_get_fqn(self): context = Context(config=self.config) fqn = context.get_fqn() self.assertEqual(fqn, "namespace") def test_context_get_fqn_replace_dot(self): context = Context(config=Config({"namespace": "my.namespace"})) fqn = context.get_fqn() self.assertEqual(fqn, "my-namespace") def test_context_get_fqn_empty_namespace(self): context = Context(config=Config({"namespace": ""})) fqn = context.get_fqn("vpc") 
        self.assertEqual(fqn, "vpc")
        self.assertEqual(context.tags, {})

    def test_context_namespace(self):
        context = Context(config=Config({"namespace": "namespace"}))
        self.assertEqual(context.namespace, "namespace")

    def test_context_get_fqn_stack_name(self):
        context = Context(config=self.config)
        fqn = context.get_fqn("stack1")
        self.assertEqual(fqn, "namespace-stack1")

    def test_context_default_bucket_name(self):
        # Default bucket name is derived from the namespace.
        context = Context(config=Config({"namespace": "test"}))
        self.assertEqual(context.bucket_name, "stacker-test")

    def test_context_bucket_name_is_overriden_but_is_none(self):
        # An explicit empty string disables the bucket entirely...
        config = Config({"namespace": "test", "stacker_bucket": ""})
        context = Context(config=config)
        self.assertEqual(context.bucket_name, None)

        # ...while an explicit None falls back to the derived default.
        config = Config({"namespace": "test", "stacker_bucket": None})
        context = Context(config=config)
        self.assertEqual(context.bucket_name, "stacker-test")

    def test_context_bucket_name_is_overriden(self):
        config = Config({"namespace": "test", "stacker_bucket": "bucket123"})
        context = Context(config=config)
        self.assertEqual(context.bucket_name, "bucket123")

    def test_context_default_bucket_no_namespace(self):
        # Without a namespace there is no default bucket.
        context = Context(config=Config({"namespace": ""}))
        self.assertEqual(context.bucket_name, None)

        context = Context(config=Config({"namespace": None}))
        self.assertEqual(context.bucket_name, None)

        context = Context(
            config=Config({"namespace": None, "stacker_bucket": ""}))
        self.assertEqual(context.bucket_name, None)

    def test_context_namespace_delimiter_is_overriden_and_not_none(self):
        config = Config({"namespace": "namespace", "namespace_delimiter": "_"})
        context = Context(config=config)
        fqn = context.get_fqn("stack1")
        self.assertEqual(fqn, "namespace_stack1")

    def test_context_namespace_delimiter_is_overriden_and_is_empty(self):
        config = Config({"namespace": "namespace", "namespace_delimiter": ""})
        context = Context(config=config)
        fqn = context.get_fqn("stack1")
        self.assertEqual(fqn, "namespacestack1")

    def test_context_tags_with_empty_map(self):
        config = Config({"namespace": "test", "tags": {}})
        context = Context(config=config)
        self.assertEqual(context.tags, {})

    def test_context_no_tags_specified(self):
        # When no tags are given, the namespace is recorded as a tag.
        config = Config({"namespace": "test"})
        context = Context(config=config)
        self.assertEqual(context.tags, {"stacker_namespace": "test"})

    def test_hook_with_sys_path(self):
        # sys_path in the config makes the fixtures package importable, so
        # the hook path can be resolved and executed.
        config = Config({
            "namespace": "test",
            "sys_path": "stacker/tests",
            "pre_build": [
                {
                    "data_key": "myHook",
                    "path": "fixtures.mock_hooks.mock_hook",
                    "required": True,
                    "args": {
                        "value": "mockResult"}}]})
        load(config)
        context = Context(config=config)
        stage = "pre_build"
        handle_hooks(stage, context.config[stage], "mock-region-1", context)
        self.assertEqual("mockResult", context.hook_data["myHook"]["result"])


class TestFunctions(unittest.TestCase):
    """ Test the module level functions """

    def test_get_fqn_redundant_base(self):
        # A name already prefixed with "<base><delimiter>" is returned as-is.
        base = "woot"
        name = "woot-blah"
        self.assertEqual(get_fqn(base, '-', name), name)
        self.assertEqual(get_fqn(base, '', name), name)
        self.assertEqual(get_fqn(base, '_', name), "woot_woot-blah")

    def test_get_fqn_only_base(self):
        base = "woot"
        self.assertEqual(get_fqn(base, '-'), base)
        self.assertEqual(get_fqn(base, ''), base)
        self.assertEqual(get_fqn(base, '_'), base)

    def test_get_fqn_full(self):
        base = "woot"
        name = "blah"
        self.assertEqual(get_fqn(base, '-', name), "%s-%s" % (base, name))
        self.assertEqual(get_fqn(base, '', name), "%s%s" % (base, name))
        self.assertEqual(get_fqn(base, '_', name), "%s_%s" % (base, name))


if __name__ == '__main__':
    unittest.main()


================================================
FILE: stacker/tests/test_dag.py
================================================
""" Tests on the DAG implementation """
import threading

import pytest

from stacker.dag import (
    DAG, DAGValidationError, ThreadedWalker, UnlimitedSemaphore
)


@pytest.fixture
def empty_dag():
    return DAG()


@pytest.fixture
def basic_dag():
    dag = DAG()
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
    return dag


def test_add_node(empty_dag):
    dag = empty_dag
    dag.add_node('a')
    assert dag.graph == {'a': set()}


def test_transpose(basic_dag):
    # Transposing reverses every edge of the graph.
    dag = basic_dag
    transposed = dag.transpose()
    assert transposed.graph == {'d': set(['c', 'b']),
                                'c': set(['a']),
                                'b': set(['a']),
                                'a': set([])}


def test_add_edge(empty_dag):
    dag = empty_dag
    dag.add_node('a')
    dag.add_node('b')
    dag.add_edge('a', 'b')
    assert dag.graph == {'a': set('b'), 'b': set()}


def test_from_dict(empty_dag):
    dag = empty_dag
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
    assert dag.graph == {'a': set(['b', 'c']),
                         'b': set('d'),
                         'c': set('d'),
                         'd': set()}


def test_reset_graph(empty_dag):
    dag = empty_dag
    dag.add_node('a')
    assert dag.graph == {'a': set()}
    dag.reset_graph()
    assert dag.graph == {}


def test_walk(empty_dag):
    dag = empty_dag

    # b and c should be executed at the same time.
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})

    nodes = []

    def walk_func(n):
        nodes.append(n)
        return True

    dag.walk(walk_func)
    # Walk order is dependency-first; b/c may come in either order.
    assert nodes == ['d', 'c', 'b', 'a'] or nodes == ['d', 'b', 'c', 'a']


def test_ind_nodes(basic_dag):
    dag = basic_dag
    assert dag.ind_nodes() == ['a']


def test_topological_sort(empty_dag):
    dag = empty_dag
    dag.from_dict({'a': [],
                   'b': ['a'],
                   'c': ['b']})
    assert dag.topological_sort() == ['c', 'b', 'a']


def test_successful_validation(basic_dag):
    dag = basic_dag
    assert dag.validate()[0] == True  # noqa: E712


def test_failed_validation(empty_dag):
    # A cycle must be rejected at construction time.
    dag = empty_dag
    with pytest.raises(DAGValidationError):
        dag.from_dict({'a': ['b'],
                       'b': ['a']})


def test_downstream(basic_dag):
    dag = basic_dag
    assert set(dag.downstream('a')) == set(['b', 'c'])


def test_all_downstreams(basic_dag):
    dag = basic_dag
    assert dag.all_downstreams('a') == ['b', 'c', 'd']
    assert dag.all_downstreams('b') == ['d']
    assert dag.all_downstreams('d') == []


def test_all_downstreams_pass_graph(empty_dag):
    dag = empty_dag
    dag.from_dict({'a': ['c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
    assert dag.all_downstreams('a') == ['c', 'd']
    assert dag.all_downstreams('b') == ['d']
    assert dag.all_downstreams('d') == []


def test_predecessors(basic_dag):
    dag = basic_dag
    assert set(dag.predecessors('a')) == set([])
    assert set(dag.predecessors('b')) == set(['a'])
    assert set(dag.predecessors('c')) == set(['a'])
    assert set(dag.predecessors('d')) == set(['b', 'c'])


def test_filter(basic_dag):
    # Filtering keeps the given nodes plus everything downstream of them.
    dag = basic_dag
    dag2 = dag.filter(['b', 'c'])
    assert dag2.graph == {'b': set('d'),
                          'c': set('d'),
                          'd': set()}


def test_all_leaves(basic_dag):
    dag = basic_dag
    assert dag.all_leaves() == ['d']


def test_size(basic_dag):
    dag = basic_dag
    assert dag.size() == 4
    dag.delete_node('a')
    assert dag.size() == 3


def test_transitive_reduction_no_reduction(empty_dag):
    # An already-minimal graph is unchanged by transitive reduction.
    dag = empty_dag
    dag.from_dict({'a': ['b', 'c'],
                   'b': ['d'],
                   'c': ['d'],
                   'd': []})
    dag.transitive_reduction()
    assert dag.graph == {'a': set(['b', 'c']),
                         'b': set('d'),
                         'c': set('d'),
                         'd': set()}


def test_transitive_reduction(empty_dag):
    dag = empty_dag
    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-G.svg
    dag.from_dict({'a': ['b', 'c', 'd', 'e'],
                   'b': ['d'],
                   'c': ['d', 'e'],
                   'd': ['e'],
                   'e': []})
    dag.transitive_reduction()
    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-Gprime.svg
    assert dag.graph == {'a': set(['b', 'c']),
                         'b': set('d'),
                         'c': set('d'),
                         'd': set('e'),
                         'e': set()}


def test_transitive_deep_reduction(empty_dag):
    dag = empty_dag
    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-G.svg
    dag.from_dict({
        'a': ['b', 'd'],
        'b': ['c'],
        'c': ['d'],
        'd': [],
    })
    dag.transitive_reduction()
    # https://en.wikipedia.org/wiki/Transitive_reduction#/media/File:Tred-Gprime.svg
    assert dag.graph == {'a': set('b'),
                         'b': set('c'),
                         'c': set('d'),
                         'd': set()}


def test_threaded_walker(empty_dag):
    dag = empty_dag
    walker = ThreadedWalker(UnlimitedSemaphore())

    # b and c should be executed at the same time.
dag.from_dict({'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}) lock = threading.Lock() # Protects nodes from concurrent access nodes = [] def walk_func(n): lock.acquire() nodes.append(n) lock.release() return True walker.walk(dag, walk_func) assert nodes == ['d', 'c', 'b', 'a'] or nodes == ['d', 'b', 'c', 'a'] ================================================ FILE: stacker/tests/test_environment.py ================================================ import unittest from stacker.environment import ( DictWithSourceType, parse_environment ) test_env = """key1: value1 # some: comment # here: about # key2 key2: value2 # another comment here key3: some:complex::value # one more here as well key4: :otherValue: key5: @value """ test_error_env = """key1: valu1 error """ class TestEnvironment(unittest.TestCase): def test_simple_key_value_parsing(self): parsed_env = parse_environment(test_env) self.assertTrue(isinstance(parsed_env, DictWithSourceType)) self.assertEqual(parsed_env["key1"], "value1") self.assertEqual(parsed_env["key2"], "value2") self.assertEqual(parsed_env["key3"], "some:complex::value") self.assertEqual(parsed_env["key4"], ":otherValue:") self.assertEqual(parsed_env["key5"], "@value") self.assertEqual(len(parsed_env), 5) def test_simple_key_value_parsing_exception(self): with self.assertRaises(ValueError): parse_environment(test_error_env) def test_blank_value(self): e = """key1:""" parsed = parse_environment(e) self.assertEqual(parsed["key1"], "") ================================================ FILE: stacker/tests/test_lookups.py ================================================ import unittest from stacker.lookups import extract_lookups, extract_lookups_from_string class TestLookupExtraction(unittest.TestCase): def test_no_lookups(self): lookups = extract_lookups("value") self.assertEqual(lookups, set()) def test_single_lookup_string(self): lookups = extract_lookups("${output fakeStack::FakeOutput}") self.assertEqual(len(lookups), 1) def 
test_multiple_lookups_string(self): lookups = extract_lookups( "url://${output fakeStack::FakeOutput}@" "${output fakeStack::FakeOutput2}" ) self.assertEqual(len(lookups), 2) self.assertEqual(list(lookups)[0].type, "output") def test_lookups_list(self): lookups = extract_lookups([ "something", "${output fakeStack::FakeOutput}" ]) self.assertEqual(len(lookups), 1) def test_lookups_dict(self): lookups = extract_lookups({ "something": "${output fakeStack::FakeOutput}", "other": "value", }) self.assertEqual(len(lookups), 1) def test_lookups_mixed(self): lookups = extract_lookups({ "something": "${output fakeStack::FakeOutput}", "list": ["value", "${output fakeStack::FakeOutput2}"], "dict": { "other": "value", "another": "${output fakeStack::FakeOutput3}", }, }) self.assertEqual(len(lookups), 3) def test_nested_lookups_string(self): lookups = extract_lookups( "${noop ${output stack::Output},${output stack::Output2}}" ) self.assertEqual(len(lookups), 2) def test_comma_delimited(self): lookups = extract_lookups("${noop val1,val2}") self.assertEqual(len(lookups), 1) def test_kms_lookup(self): lookups = extract_lookups("${kms CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/}") # NOQA self.assertEqual(len(lookups), 1) lookup = list(lookups)[0] self.assertEqual(lookup.type, "kms") self.assertEqual(lookup.input, "CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/") # NOQA def test_kms_lookup_with_equals(self): lookups = extract_lookups("${kms 
us-east-1@AQECAHjLp186mZ+mgXTQSytth/ibiIdwBm8CZAzZNSaSkSRqswAAAG4wbAYJKoZIhvcNAQcGoF8wXQIBADBYBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDLNmhGU6fe4vp175MAIBEIAr+8tUpi7SDzOZm+FFyYvWXhs4hEEyaazIn2dP8a+yHzZYDSVYGRpfUz34bQ==}") # NOQA self.assertEqual(len(lookups), 1) lookup = list(lookups)[0] self.assertEqual(lookup.type, "kms") self.assertEqual(lookup.input, "us-east-1@AQECAHjLp186mZ+mgXTQSytth/ibiIdwBm8CZAzZNSaSkSRqswAAAG4wbAYJKoZIhvcNAQcGoF8wXQIBADBYBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDLNmhGU6fe4vp175MAIBEIAr+8tUpi7SDzOZm+FFyYvWXhs4hEEyaazIn2dP8a+yHzZYDSVYGRpfUz34bQ==") # NOQA def test_kms_lookup_with_region(self): lookups = extract_lookups("${kms us-west-2@CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/}") # NOQA self.assertEqual(len(lookups), 1) lookup = list(lookups)[0] self.assertEqual(lookup.type, "kms") self.assertEqual(lookup.input, "us-west-2@CiADsGxJp1mCR21fjsVjVxr7RwuO2FE3ZJqC4iG0Lm+HkRKwAQEBAgB4A7BsSadZgkdtX47FY1ca+0cLjthRN2SaguIhtC5vh5EAAACHMIGEBgkqhkiG9w0BBwagdzB1AgEAMHAGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM3IKyEoNEQVxN3BaaAgEQgEOpqa0rcl3WpHOmblAqL1rOPRyokO3YXcJAAB37h/WKLpZZRAWV2h9C67xjlsj3ebg+QIU91T/") # NOQA def test_kms_file_lookup(self): lookups = extract_lookups("${kms file://path/to/some/file.txt}") self.assertEqual(len(lookups), 1) lookup = list(lookups)[0] self.assertEqual(lookup.type, "kms") self.assertEqual(lookup.input, "file://path/to/some/file.txt") def test_valid_extract_lookups_from_string(self): _type = "output" _input = "vpc::PublicSubnets" value = "${%s %s}" % (_type, _input) lookups = extract_lookups_from_string(value) lookup = lookups.pop() assert lookup.type == _type assert lookup.input == _input assert lookup.raw == "%s %s" % (_type, _input) ================================================ FILE: 
stacker/tests/test_parse_user_data.py ================================================ import unittest import yaml from ..tokenize_userdata import cf_tokenize class TestCfTokenize(unittest.TestCase): def test_tokenize(self): user_data = [ "field0", "Ref(\"SshKey\")", "field1", "Fn::GetAtt(\"Blah\", \"Woot\")" ] ud = yaml.dump(user_data) parts = cf_tokenize(ud) self.assertIsInstance(parts[1], dict) self.assertIsInstance(parts[3], dict) self.assertEqual(parts[1]["Ref"], "SshKey") self.assertEqual(parts[3]["Fn::GetAtt"], ["Blah", "Woot"]) self.assertEqual(len(parts), 5) ================================================ FILE: stacker/tests/test_plan.py ================================================ import os import shutil import tempfile import unittest import mock from stacker.context import Context, Config from stacker.dag import walk from stacker.util import stack_template_key_name from stacker.lookups.registry import ( register_lookup_handler, unregister_lookup_handler, ) from stacker.plan import ( Step, build_plan, build_graph, ) from stacker.exceptions import ( CancelExecution, GraphError, PlanFailed, ) from stacker.status import ( SUBMITTED, COMPLETE, SKIPPED, FAILED, ) from stacker.stack import Stack from .factories import generate_definition count = 0 class TestStep(unittest.TestCase): def setUp(self): stack = mock.MagicMock() stack.name = "stack" stack.fqn = "namespace-stack" self.step = Step(stack=stack, fn=None) def test_status(self): self.assertFalse(self.step.submitted) self.assertFalse(self.step.completed) self.step.submit() self.assertEqual(self.step.status, SUBMITTED) self.assertTrue(self.step.submitted) self.assertFalse(self.step.completed) self.step.complete() self.assertEqual(self.step.status, COMPLETE) self.assertNotEqual(self.step.status, SUBMITTED) self.assertTrue(self.step.submitted) self.assertTrue(self.step.completed) self.assertNotEqual(self.step.status, True) self.assertNotEqual(self.step.status, False) self.assertNotEqual(self.step.status, 
'banana') class TestPlan(unittest.TestCase): def setUp(self): self.count = 0 self.config = Config({"namespace": "namespace"}) self.context = Context(config=self.config) register_lookup_handler("noop", lambda **kwargs: "test") def tearDown(self): unregister_lookup_handler("noop") def test_plan(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) graph = build_graph([ Step(vpc, fn=None), Step(bastion, fn=None)]) plan = build_plan(description="Test", graph=graph) self.assertEqual(plan.graph.to_dict(), { 'bastion.1': set(['vpc.1']), 'vpc.1': set([])}) def test_execute_plan(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) return COMPLETE graph = build_graph([Step(vpc, fn), Step(bastion, fn)]) plan = build_plan( description="Test", graph=graph) plan.execute(walk) self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1']) def test_execute_plan_locked(self): # Locked stacks still need to have their requires evaluated when # they're being created. 
vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), locked=True, context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) return COMPLETE graph = build_graph([Step(vpc, fn), Step(bastion, fn)]) plan = build_plan( description="Test", graph=graph) plan.execute(walk) self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1']) def test_execute_plan_filtered(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) db = Stack( definition=generate_definition('db', 1, requires=[vpc.name]), context=self.context) app = Stack( definition=generate_definition('app', 1, requires=[db.name]), context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) return COMPLETE graph = build_graph([ Step(vpc, fn), Step(db, fn), Step(app, fn)]) plan = build_plan( description="Test", graph=graph, targets=['db.1']) plan.execute(walk) self.assertEquals(calls, [ 'namespace-vpc.1', 'namespace-db.1']) def test_execute_plan_exception(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) if stack.name == vpc_step.name: raise ValueError('Boom') return COMPLETE vpc_step = Step(vpc, fn) bastion_step = Step(bastion, fn) graph = build_graph([vpc_step, bastion_step]) plan = build_plan(description="Test", graph=graph) with self.assertRaises(PlanFailed): plan.execute(walk) self.assertEquals(calls, ['namespace-vpc.1']) self.assertEquals(vpc_step.status, FAILED) def test_execute_plan_skipped(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) calls = [] def fn(stack, status=None): 
calls.append(stack.fqn) if stack.fqn == vpc_step.name: return SKIPPED return COMPLETE vpc_step = Step(vpc, fn) bastion_step = Step(bastion, fn) graph = build_graph([vpc_step, bastion_step]) plan = build_plan(description="Test", graph=graph) plan.execute(walk) self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1']) def test_execute_plan_failed(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) db = Stack( definition=generate_definition('db', 1), context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) if stack.name == vpc_step.name: return FAILED return COMPLETE vpc_step = Step(vpc, fn) bastion_step = Step(bastion, fn) db_step = Step(db, fn) graph = build_graph([ vpc_step, bastion_step, db_step]) plan = build_plan(description="Test", graph=graph) with self.assertRaises(PlanFailed): plan.execute(walk) calls.sort() self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1']) def test_execute_plan_cancelled(self): vpc = Stack( definition=generate_definition('vpc', 1), context=self.context) bastion = Stack( definition=generate_definition('bastion', 1, requires=[vpc.name]), context=self.context) calls = [] def fn(stack, status=None): calls.append(stack.fqn) if stack.fqn == vpc_step.name: raise CancelExecution return COMPLETE vpc_step = Step(vpc, fn) bastion_step = Step(bastion, fn) graph = build_graph([vpc_step, bastion_step]) plan = build_plan(description="Test", graph=graph) plan.execute(walk) self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1']) def test_build_graph_missing_dependency(self): bastion = Stack( definition=generate_definition( 'bastion', 1, requires=['vpc.1']), context=self.context) with self.assertRaises(GraphError) as expected: build_graph([Step(bastion, None)]) message_starts = ( "Error detected when adding 'vpc.1' " "as a dependency of 'bastion.1':" ) 
message_contains = "dependent node vpc.1 does not exist" self.assertTrue(str(expected.exception).startswith(message_starts)) self.assertTrue(message_contains in str(expected.exception)) def test_build_graph_cyclic_dependencies(self): vpc = Stack( definition=generate_definition( 'vpc', 1), context=self.context) db = Stack( definition=generate_definition( 'db', 1, requires=['app.1']), context=self.context) app = Stack( definition=generate_definition( 'app', 1, requires=['db.1']), context=self.context) with self.assertRaises(GraphError) as expected: build_graph([Step(vpc, None), Step(db, None), Step(app, None)]) message = ("Error detected when adding 'db.1' " "as a dependency of 'app.1': graph is " "not acyclic") self.assertEqual(str(expected.exception), message) def test_dump(self, *args): requires = None steps = [] for i in range(5): overrides = { "variables": { "PublicSubnets": "1", "SshKeyName": "1", "PrivateSubnets": "1", "Random": "${noop something}", }, "requires": requires, } stack = Stack( definition=generate_definition('vpc', i, **overrides), context=self.context) requires = [stack.name] steps += [Step(stack, None)] graph = build_graph(steps) plan = build_plan(description="Test", graph=graph) tmp_dir = tempfile.mkdtemp() try: plan.dump(tmp_dir, context=self.context) for step in plan.steps: template_path = os.path.join( tmp_dir, stack_template_key_name(step.stack.blueprint)) self.assertTrue(os.path.isfile(template_path)) finally: shutil.rmtree(tmp_dir) ================================================ FILE: stacker/tests/test_stack.py ================================================ from mock import MagicMock import unittest from stacker.lookups import register_lookup_handler from stacker.context import Context from stacker.config import Config from stacker.stack import Stack from .factories import generate_definition class TestStack(unittest.TestCase): def setUp(self): self.sd = {"name": "test"} self.config = Config({"namespace": "namespace"}) self.context = 
Context(config=self.config) self.stack = Stack( definition=generate_definition("vpc", 1), context=self.context, ) register_lookup_handler("noop", lambda **kwargs: "test") def test_stack_requires(self): definition = generate_definition( base_name="vpc", stack_id=1, variables={ "Var1": "${noop fakeStack3::FakeOutput}", "Var2": ( "some.template.value:${output fakeStack2::FakeOutput}:" "${output fakeStack::FakeOutput}" ), "Var3": "${output fakeStack::FakeOutput}," "${output fakeStack2::FakeOutput}", }, requires=["fakeStack"], ) stack = Stack(definition=definition, context=self.context) self.assertEqual(len(stack.requires), 2) self.assertIn( "fakeStack", stack.requires, ) self.assertIn( "fakeStack2", stack.requires, ) def test_stack_requires_circular_ref(self): definition = generate_definition( base_name="vpc", stack_id=1, variables={ "Var1": "${output vpc.1::FakeOutput}", }, ) stack = Stack(definition=definition, context=self.context) with self.assertRaises(ValueError): stack.requires def test_stack_cfn_parameters(self): definition = generate_definition( base_name="vpc", stack_id=1, variables={ "Param1": "${output fakeStack::FakeOutput}", }, ) stack = Stack(definition=definition, context=self.context) stack._blueprint = MagicMock() stack._blueprint.get_parameter_values.return_value = { "Param2": "Some Resolved Value", } self.assertEqual(len(stack.parameter_values), 1) param = stack.parameter_values["Param2"] self.assertEqual(param, "Some Resolved Value") def test_stack_tags_default(self): self.config.tags = {"environment": "prod"} definition = generate_definition( base_name="vpc", stack_id=1 ) stack = Stack(definition=definition, context=self.context) self.assertEquals(stack.tags, {"environment": "prod"}) def test_stack_tags_override(self): self.config.tags = {"environment": "prod"} definition = generate_definition( base_name="vpc", stack_id=1, tags={"environment": "stage"} ) stack = Stack(definition=definition, context=self.context) self.assertEquals(stack.tags, 
{"environment": "stage"}) def test_stack_tags_extra(self): self.config.tags = {"environment": "prod"} definition = generate_definition( base_name="vpc", stack_id=1, tags={"app": "graph"} ) stack = Stack(definition=definition, context=self.context) self.assertEquals(stack.tags, {"environment": "prod", "app": "graph"}) if __name__ == '__main__': unittest.main() ================================================ FILE: stacker/tests/test_stacker.py ================================================ import unittest from stacker.commands import Stacker from stacker.exceptions import InvalidConfig class TestStacker(unittest.TestCase): def test_stacker_build_parse_args(self): stacker = Stacker() args = stacker.parse_args( ["build", "-r", "us-west-2", "-e", "namespace=test.override", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web.yaml"] ) self.assertEqual(args.region, "us-west-2") self.assertFalse(args.outline) # verify namespace was modified self.assertEqual(args.environment["namespace"], "test.override") def test_stacker_build_parse_args_region_from_env(self): stacker = Stacker() args = stacker.parse_args( ["build", "-e", "namespace=test.override", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web.yaml"] ) self.assertEqual(args.region, None) def test_stacker_build_context_passed_to_blueprint(self): stacker = Stacker() args = stacker.parse_args( ["build", "-r", "us-west-2", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web.yaml"] ) stacker.configure(args) stacks_dict = args.context.get_stacks_dict() blueprint = stacks_dict[args.context.get_fqn("bastion")].blueprint self.assertTrue(hasattr(blueprint, "context")) blueprint.render_template() # verify that the bastion blueprint only contains blueprint variables, # not BaseDomain, AZCount or CidrBlock. 
Any variables that get passed # in from the command line shouldn't be resovled at the blueprint level self.assertNotIn("BaseDomain", blueprint.template.parameters) self.assertNotIn("AZCount", blueprint.template.parameters) self.assertNotIn("CidrBlock", blueprint.template.parameters) def test_stacker_blueprint_property_access_does_not_reset_blueprint(self): stacker = Stacker() args = stacker.parse_args( ["build", "-r", "us-west-2", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web.yaml"] ) stacker.configure(args) stacks_dict = args.context.get_stacks_dict() bastion_stack = stacks_dict[args.context.get_fqn("bastion")] bastion_stack.blueprint.render_template() self.assertIn("DefaultSG", bastion_stack.blueprint.template.parameters) def test_stacker_build_context_stack_names_specified(self): stacker = Stacker() args = stacker.parse_args( ["build", "-r", "us-west-2", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web.yaml", "--stacks", "vpc", "--stacks", "bastion"] ) stacker.configure(args) stacks = args.context.get_stacks() self.assertEqual(len(stacks), 2) def test_stacker_build_fail_when_parameters_in_stack_def(self): stacker = Stacker() args = stacker.parse_args( ["build", "-r", "us-west-2", "stacker/tests/fixtures/basic.env", "stacker/tests/fixtures/vpc-bastion-db-web-pre-1.0.yaml"] ) with self.assertRaises(InvalidConfig): stacker.configure(args) def test_stacker_build_custom_info_log_format(self): stacker = Stacker() args = stacker.parse_args( [ "build", "-r", "us-west-2", "stacker/tests/fixtures/not-basic.env", "stacker/tests/fixtures/vpc-custom-log-format-info.yaml" ] ) stacker.configure(args) self.assertEqual( stacker.config.log_formats["info"], '[%(asctime)s] test custom log format - %(message)s' ) self.assertIsNone( stacker.config.log_formats.get("color") ) self.assertIsNone( stacker.config.log_formats.get("debug") ) if __name__ == '__main__': unittest.main() 
================================================
FILE: stacker/tests/test_util.py
================================================
import unittest

import string
import os
import queue

import mock
import boto3

from stacker.config import Hook, GitPackageSource
from stacker.util import (
    cf_safe_name,
    load_object_from_string,
    camel_to_snake,
    merge_map,
    yaml_to_ordered_dict,
    get_client_region,
    get_s3_endpoint,
    s3_bucket_location_constraint,
    parse_cloudformation_template,
    Extractor,
    TarExtractor,
    TarGzipExtractor,
    ZipExtractor,
    SourceProcessor
)

from stacker.hooks.utils import handle_hooks

from .factories import (
    mock_context,
    mock_provider,
)

regions = ["us-east-1", "cn-north-1", "ap-northeast-1", "eu-west-1",
           "ap-southeast-1", "ap-southeast-2", "us-west-2", "us-gov-west-1",
           "us-west-1", "eu-central-1", "sa-east-1"]


def mock_create_cache_directories(self, **kwargs):
    # Don't actually need the directories created in testing
    return 1


class TestUtil(unittest.TestCase):
    """Tests for the assorted helpers in stacker.util."""

    def test_cf_safe_name(self):
        # Names are CamelCased and stripped of CFN-unsafe characters.
        tests = (
            ("abc-def", "AbcDef"),
            ("GhI", "GhI"),
            ("jKlm.noP", "JKlmNoP")
        )
        for t in tests:
            self.assertEqual(cf_safe_name(t[0]), t[1])

    def test_load_object_from_string(self):
        tests = (
            ("string.Template", string.Template),
            ("os.path.basename", os.path.basename),
            ("string.ascii_letters", string.ascii_letters)
        )
        for t in tests:
            self.assertIs(load_object_from_string(t[0]), t[1])

    def test_camel_to_snake(self):
        tests = (
            ("TestTemplate", "test_template"),
            ("testTemplate", "test_template"),
            # An existing underscore before a capital yields a double "_".
            ("test_Template", "test__template"),
            ("testtemplate", "testtemplate"),
        )
        for t in tests:
            self.assertEqual(camel_to_snake(t[0]), t[1])

    def test_merge_map(self):
        tests = [
            # 2 lists of stacks defined
            [{'stacks': [{'stack1': {'variables': {'a': 'b'}}}]},
             {'stacks': [{'stack2': {'variables': {'c': 'd'}}}]},
             {'stacks': [
                 {'stack1': {
                     'variables': {
                         'a': 'b'}}},
                 {'stack2': {
                     'variables': {
                         'c': 'd'}}}]}],
            # A list of stacks combined with a higher precedence dict of
            # stacks
            [{'stacks': [{'stack1': {'variables': {'a': 'b'}}}]},
             {'stacks': {'stack2': {'variables': {'c': 'd'}}}},
             {'stacks': {'stack2': {'variables': {'c': 'd'}}}}],
            # 2 dicts of stacks with non-overlapping variables merged
            [{'stacks': {'stack1': {'variables': {'a': 'b'}}}},
             {'stacks': {'stack1': {'variables': {'c': 'd'}}}},
             {'stacks': {
                 'stack1': {
                     'variables': {
                         'a': 'b',
                         'c': 'd'}}}}],
            # 2 dicts of stacks with overlapping variables merged
            [{'stacks': {'stack1': {'variables': {'a': 'b'}}}},
             {'stacks': {'stack1': {'variables': {'a': 'c'}}}},
             {'stacks': {'stack1': {'variables': {'a': 'c'}}}}],
        ]
        for t in tests:
            self.assertEqual(merge_map(t[0], t[1]), t[2])

    def test_yaml_to_ordered_dict(self):
        # NOTE(review): indentation inside this YAML string was lost by the
        # extraction and reconstructed to the nesting the assertions require.
        raw_config = """
pre_build:
  hook2:
    path: foo.bar
  hook1:
    path: foo1.bar1
"""
        config = yaml_to_ordered_dict(raw_config)
        # Source ordering (hook2 before hook1) must be preserved.
        self.assertEqual(list(config['pre_build'].keys())[0], 'hook2')
        self.assertEqual(config['pre_build']['hook2']['path'], 'foo.bar')

    def test_get_client_region(self):
        regions = ["us-east-1", "us-west-1", "eu-west-1", "sa-east-1"]
        for region in regions:
            client = boto3.client("s3", region_name=region)
            self.assertEqual(get_client_region(client), region)

    def test_get_s3_endpoint(self):
        endpoint_url = "https://example.com"
        client = boto3.client("s3", region_name="us-east-1",
                              endpoint_url=endpoint_url)
        self.assertEqual(get_s3_endpoint(client), endpoint_url)

    def test_s3_bucket_location_constraint(self):
        # us-east-1 is special-cased to an empty LocationConstraint.
        tests = (
            ("us-east-1", ""),
            ("us-west-1", "us-west-1")
        )
        for region, result in tests:
            self.assertEqual(
                s3_bucket_location_constraint(region),
                result
            )

    def test_parse_cloudformation_template(self):
        # NOTE(review): YAML indentation reconstructed (see note above);
        # exercises the CFN short-hand tags (!Join, !Ref).
        template = """AWSTemplateFormatVersion: "2010-09-09"
Parameters:
  Param1:
    Type: String
Resources:
  Bucket:
    Type: AWS::S3::Bucket
    Properties:
      BucketName: !Join
        - "-"
        - - !Ref "AWS::StackName"
          - !Ref "AWS::Region"
Outputs:
  DummyId:
    Value: dummy-1234"""
        parsed_template = {
            'AWSTemplateFormatVersion': '2010-09-09',
            'Outputs': {'DummyId': {'Value': 'dummy-1234'}},
            'Parameters': {'Param1': {'Type': 'String'}},
            'Resources': {
                'Bucket': {'Type': 'AWS::S3::Bucket',
                           'Properties': {
                               'BucketName': {
                                   u'Fn::Join': [
                                       '-',
                                       [{u'Ref': u'AWS::StackName'},
                                        {u'Ref': u'AWS::Region'}]
                                   ]
                               }
                           }}
            }
        }
        self.assertEqual(
            parse_cloudformation_template(template),
            parsed_template
        )

    def test_extractors(self):
        self.assertEqual(Extractor('test.zip').archive, 'test.zip')
        self.assertEqual(TarExtractor().extension(), '.tar')
        self.assertEqual(TarGzipExtractor().extension(), '.tar.gz')
        self.assertEqual(ZipExtractor().extension(), '.zip')
        # set_archive appends the extractor's own extension.
        for i in [TarExtractor(), ZipExtractor(), ZipExtractor()]:
            i.set_archive('/tmp/foo')
            self.assertEqual(i.archive.endswith(i.extension()), True)

    def test_SourceProcessor_helpers(self):
        with mock.patch.object(SourceProcessor, 'create_cache_directories',
                               new=mock_create_cache_directories):
            sp = SourceProcessor(sources={})
            # Path sanitizers replace URI punctuation with underscores.
            self.assertEqual(
                sp.sanitize_git_path('git@github.com:foo/bar.git'),
                'git_github.com_foo_bar'
            )
            self.assertEqual(
                sp.sanitize_uri_path('http://example.com/foo/bar.gz@1'),
                'http___example.com_foo_bar.gz_1'
            )
            self.assertEqual(
                sp.sanitize_git_path('git@github.com:foo/bar.git', 'v1'),
                'git_github.com_foo_bar-v1'
            )

            # Both GitPackageSource objects and plain dicts are accepted.
            for i in [GitPackageSource({'branch': 'foo'}), {'branch': 'foo'}]:
                self.assertEqual(
                    sp.determine_git_ls_remote_ref(i),
                    'refs/heads/foo'
                )
            # Anything without a branch resolves to HEAD.
            for i in [{'uri': 'git@foo'}, {'tag': 'foo'}, {'commit': '1234'}]:
                self.assertEqual(
                    sp.determine_git_ls_remote_ref(GitPackageSource(i)),
                    'HEAD'
                )
                self.assertEqual(
                    sp.determine_git_ls_remote_ref(i),
                    'HEAD'
                )

            # NOTE(review): requires network access to github.com.
            self.assertEqual(
                sp.git_ls_remote('https://github.com/remind101/stacker.git',
                                 'refs/heads/release-1.0'),
                b'857b4834980e582874d70feef77bb064b60762d1'
            )

            # More than one of commit/tag/branch at once is ambiguous.
            bad_configs = [{'uri': 'x', 'commit': '1234', 'tag': 'v1',
                            'branch': 'x'},
                           {'uri': 'x', 'commit': '1234', 'tag': 'v1'},
                           {'uri': 'x', 'commit': '1234', 'branch': 'x'},
                           {'uri': 'x', 'tag': 'v1', 'branch': 'x'},
                           {'uri': 'x', 'commit': '1234', 'branch': 'x'}]
            for i in bad_configs:
                with self.assertRaises(ImportError):
                    sp.determine_git_ref(GitPackageSource(i))
                with self.assertRaises(ImportError):
                    sp.determine_git_ref(i)

            # A branch is resolved to its remote commit hash...
            self.assertEqual(
                sp.determine_git_ref(
                    GitPackageSource({'uri': 'https://github.com/remind101/'
                                             'stacker.git',
                                      'branch': 'release-1.0'})),
                '857b4834980e582874d70feef77bb064b60762d1'
            )
            # ...while explicit commits and tags are used verbatim.
            self.assertEqual(
                sp.determine_git_ref(
                    GitPackageSource({'uri': 'git@foo', 'commit': '1234'})),
                '1234'
            )
            self.assertEqual(
                sp.determine_git_ref({'uri': 'git@foo', 'commit': '1234'}),
                '1234'
            )
            self.assertEqual(
                sp.determine_git_ref(
                    GitPackageSource({'uri': 'git@foo', 'tag': 'v1.0.0'})),
                'v1.0.0'
            )
            self.assertEqual(
                sp.determine_git_ref({'uri': 'git@foo', 'tag': 'v1.0.0'}),
                'v1.0.0'
            )


# Shared queue that hooks under test push their kwargs into, so the tests
# can inspect exactly what handle_hooks passed them.
hook_queue = queue.Queue()


def mock_hook(*args, **kwargs):
    hook_queue.put(kwargs)
    return True


def fail_hook(*args, **kwargs):
    # None signals hook failure to handle_hooks.
    return None


def exception_hook(*args, **kwargs):
    raise Exception


def context_hook(*args, **kwargs):
    # True only if handle_hooks forwarded the context.
    return "context" in kwargs


def result_hook(*args, **kwargs):
    return {"foo": "bar"}


class TestHooks(unittest.TestCase):
    """Tests for pre/post-build hook dispatch via handle_hooks."""

    def setUp(self):
        self.context = mock_context(namespace="namespace")
        self.provider = mock_provider(region="us-east-1")

    def test_empty_hook_stage(self):
        hooks = []
        handle_hooks("fake", hooks, self.provider, self.context)
        self.assertTrue(hook_queue.empty())

    def test_missing_required_hook(self):
        hooks = [Hook({"path": "not.a.real.path", "required": True})]
        with self.assertRaises(ImportError):
            handle_hooks("missing", hooks, self.provider, self.context)

    def test_missing_required_hook_method(self):
        hooks = [{"path": "stacker.hooks.blah", "required": True}]
        with self.assertRaises(AttributeError):
            handle_hooks("missing", hooks, self.provider, self.context)

    def test_missing_non_required_hook_method(self):
        # Optional hooks that can't be resolved are silently skipped.
        hooks = [Hook({"path": "stacker.hooks.blah", "required": False})]
        handle_hooks("missing", hooks, self.provider, self.context)
        self.assertTrue(hook_queue.empty())

    def test_default_required_hook(self):
        # Hooks are required by default.
        hooks = [Hook({"path": "stacker.hooks.blah"})]
        with self.assertRaises(AttributeError):
            handle_hooks("missing", hooks, self.provider, self.context)

    def test_valid_hook(self):
        hooks = [
            Hook({"path": "stacker.tests.test_util.mock_hook",
                  "required": True})]
        handle_hooks("missing", hooks, self.provider, self.context)
        good = hook_queue.get_nowait()
        self.assertEqual(good["provider"].region, "us-east-1")
        with self.assertRaises(queue.Empty):
            hook_queue.get_nowait()

    def test_valid_enabled_hook(self):
        hooks = [
            Hook({"path": "stacker.tests.test_util.mock_hook",
                  "required": True, "enabled": True})]
        handle_hooks("missing", hooks, self.provider, self.context)
        good = hook_queue.get_nowait()
        self.assertEqual(good["provider"].region, "us-east-1")
        with self.assertRaises(queue.Empty):
            hook_queue.get_nowait()

    def test_valid_enabled_false_hook(self):
        # enabled=False disables a hook even when it is required.
        hooks = [
            Hook({"path": "stacker.tests.test_util.mock_hook",
                  "required": True, "enabled": False})]
        handle_hooks("missing", hooks, self.provider, self.context)
        self.assertTrue(hook_queue.empty())

    def test_context_provided_to_hook(self):
        hooks = [
            Hook({"path": "stacker.tests.test_util.context_hook",
                  "required": True})]
        handle_hooks("missing", hooks, "us-east-1", self.context)

    def test_hook_failure(self):
        # A required hook returning a falsy value aborts the run.
        hooks = [
            Hook({"path": "stacker.tests.test_util.fail_hook",
                  "required": True})]
        with self.assertRaises(SystemExit):
            handle_hooks("fail", hooks, self.provider, self.context)
        # Exceptions from required hooks propagate.
        hooks = [{"path": "stacker.tests.test_util.exception_hook",
                  "required": True}]
        with self.assertRaises(Exception):
            handle_hooks("fail", hooks, self.provider, self.context)
        hooks = [
            Hook({"path": "stacker.tests.test_util.exception_hook",
                  "required": False})]
        # Should pass
        handle_hooks("ignore_exception", hooks, self.provider, self.context)

    def test_return_data_hook(self):
        hooks = [
            Hook({
                "path": "stacker.tests.test_util.result_hook",
                "data_key": "my_hook_results"
            }),
            # Shouldn't return data
            Hook({
                "path": "stacker.tests.test_util.context_hook"
            })
        ]
        handle_hooks("result", hooks, "us-east-1", self.context)

        self.assertEqual(
            self.context.hook_data["my_hook_results"]["foo"],
            "bar"
        )
        # Verify only the first hook resulted in stored data
        self.assertEqual(
            list(self.context.hook_data.keys()), ["my_hook_results"]
        )

    def test_return_data_hook_duplicate_key(self):
        # Two hooks writing the same data_key is an error.
        hooks = [
            Hook({
                "path": "stacker.tests.test_util.result_hook",
                "data_key": "my_hook_results"
            }),
            Hook({
                "path": "stacker.tests.test_util.result_hook",
                "data_key": "my_hook_results"
            })
        ]

        with self.assertRaises(KeyError):
            handle_hooks("result", hooks, "us-east-1", self.context)


class TestException1(Exception):
    pass


class TestException2(Exception):
    pass


class TestExceptionRetries(unittest.TestCase):
    """Helper callables with controlled failure patterns for retry tests."""

    def setUp(self):
        self.counter = 0

    def _works_immediately(self, a, b, x=None, y=None):
        self.counter += 1
        return [a, b, x, y]

    def _works_second_attempt(self, a, b, x=None, y=None):
        self.counter += 1
        if self.counter == 2:
            return [a, b, x, y]
        raise Exception("Broke.")

    def _second_raises_exception2(self, a, b, x=None, y=None):
        self.counter += 1
        if self.counter == 2:
            return [a, b, x, y]
        raise TestException2("Broke.")

    def _throws_exception2(self, a, b, x=None, y=None):
        self.counter += 1
        raise TestException2("Broke.")


================================================
FILE: stacker/tests/test_variables.py
================================================
import unittest

from mock import MagicMock
from troposphere import s3

from stacker.blueprints.variables.types import TroposphereType
from stacker.variables import Variable
from stacker.lookups import register_lookup_handler
from stacker.stack import Stack
from .factories import generate_definition


class TestVariables(unittest.TestCase):
    """Tests for Variable lookup resolution."""

    def setUp(self):
        self.provider = MagicMock()
        self.context = MagicMock()

    def test_variable_replace_no_lookups(self):
        var = Variable("Param1", "2")
        self.assertEqual(var.value, "2")

    def test_variable_replace_simple_lookup(self):
        var = Variable("Param1", "${output fakeStack::FakeOutput}")
        var._value._resolve("resolved")
        self.assertEqual(var.value, "resolved")

    # NOTE(review): this method is truncated here — its body continues
    # beyond the end of this chunk.
    def test_variable_resolve_simple_lookup(self):
        stack = Stack(
definition=generate_definition("vpc", 1), context=self.context) stack.set_outputs({ "FakeOutput": "resolved", "FakeOutput2": "resolved2", }) self.context.get_stack.return_value = stack var = Variable("Param1", "${output fakeStack::FakeOutput}") var.resolve(self.context, self.provider) self.assertTrue(var.resolved) self.assertEqual(var.value, "resolved") def test_variable_resolve_default_lookup_empty(self): var = Variable("Param1", "${default fakeStack::}") var.resolve(self.context, self.provider) self.assertTrue(var.resolved) self.assertEqual(var.value, "") def test_variable_replace_multiple_lookups_string(self): var = Variable( "Param1", "url://" # 0 "${output fakeStack::FakeOutput}" # 1 "@" # 2 "${output fakeStack::FakeOutput2}", # 3 ) var._value[1]._resolve("resolved") var._value[3]._resolve("resolved2") self.assertEqual(var.value, "url://resolved@resolved2") def test_variable_resolve_multiple_lookups_string(self): var = Variable( "Param1", "url://${output fakeStack::FakeOutput}@" "${output fakeStack::FakeOutput2}", ) stack = Stack( definition=generate_definition("vpc", 1), context=self.context) stack.set_outputs({ "FakeOutput": "resolved", "FakeOutput2": "resolved2", }) self.context.get_stack.return_value = stack var.resolve(self.context, self.provider) self.assertTrue(var.resolved) self.assertEqual(var.value, "url://resolved@resolved2") def test_variable_replace_no_lookups_list(self): var = Variable("Param1", ["something", "here"]) self.assertEqual(var.value, ["something", "here"]) def test_variable_replace_lookups_list(self): value = ["something", # 0 "${output fakeStack::FakeOutput}", # 1 "${output fakeStack::FakeOutput2}" # 2 ] var = Variable("Param1", value) var._value[1]._resolve("resolved") var._value[2]._resolve("resolved2") self.assertEqual(var.value, ["something", "resolved", "resolved2"]) def test_variable_replace_lookups_dict(self): value = { "something": "${output fakeStack::FakeOutput}", "other": "${output fakeStack::FakeOutput2}", } var = 
Variable("Param1", value) var._value["something"]._resolve("resolved") var._value["other"]._resolve("resolved2") self.assertEqual(var.value, {"something": "resolved", "other": "resolved2"}) def test_variable_replace_lookups_mixed(self): value = { "something": [ "${output fakeStack::FakeOutput}", "other", ], "here": { "other": "${output fakeStack::FakeOutput2}", "same": "${output fakeStack::FakeOutput}", "mixed": "something:${output fakeStack::FakeOutput3}", }, } var = Variable("Param1", value) var._value["something"][0]._resolve("resolved") var._value["here"]["other"]._resolve("resolved2") var._value["here"]["same"]._resolve("resolved") var._value["here"]["mixed"][1]._resolve("resolved3") self.assertEqual(var.value, { "something": [ "resolved", "other", ], "here": { "other": "resolved2", "same": "resolved", "mixed": "something:resolved3", }, }) def test_variable_resolve_nested_lookup(self): stack = Stack( definition=generate_definition("vpc", 1), context=self.context) stack.set_outputs({ "FakeOutput": "resolved", "FakeOutput2": "resolved2", }) def mock_handler(value, context, provider, **kwargs): return "looked up: {}".format(value) register_lookup_handler("lookup", mock_handler) self.context.get_stack.return_value = stack var = Variable( "Param1", "${lookup ${lookup ${output fakeStack::FakeOutput}}}", ) var.resolve(self.context, self.provider) self.assertTrue(var.resolved) self.assertEqual(var.value, "looked up: looked up: resolved") def test_troposphere_type_no_from_dict(self): with self.assertRaises(ValueError): TroposphereType(object) with self.assertRaises(ValueError): TroposphereType(object, many=True) def test_troposphere_type_create(self): troposphere_type = TroposphereType(s3.Bucket) created = troposphere_type.create( {"MyBucket": {"BucketName": "test-bucket"}}) self.assertTrue(isinstance(created, s3.Bucket)) self.assertTrue(created.properties["BucketName"], "test-bucket") def test_troposphere_type_create_multiple(self): troposphere_type = 
TroposphereType(s3.Bucket, many=True) created = troposphere_type.create({ "FirstBucket": {"BucketName": "test-bucket"}, "SecondBucket": {"BucketName": "other-test-bucket"}, }) self.assertTrue(isinstance(created, list)) ================================================ FILE: stacker/tokenize_userdata.py ================================================ import re from troposphere import Ref, GetAtt HELPERS = { "Ref": Ref, "Fn::GetAtt": GetAtt } split_string = "(" + "|".join([r"%s\([^)]+\)" % h for h in HELPERS]) + ")" replace_string = \ r"(?P%s)\((?P['\"]?[^)]+['\"]?)+\)" % '|'.join(HELPERS) split_re = re.compile(split_string) replace_re = re.compile(replace_string) def cf_tokenize(s): """ Parses UserData for Cloudformation helper functions. http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-cloudformation.html#scenario-userdata-base64 It breaks apart the given string at each recognized function (see HELPERS) and instantiates the helper function objects in place of those. Returns a list of parts as a result. Useful when used with Join() and Base64() CloudFormation functions to produce user data. ie: Base64(Join('', cf_tokenize(userdata_string))) """ t = [] parts = split_re.split(s) for part in parts: cf_func = replace_re.search(part) if cf_func: args = [a.strip("'\" ") for a in cf_func.group("args").split(",")] t.append(HELPERS[cf_func.group("helper")](*args).data) else: t.append(part) return t ================================================ FILE: stacker/ui.py ================================================ import threading import logging from getpass import getpass logger = logging.getLogger(__name__) def get_raw_input(message): """ Just a wrapper for raw_input for testing purposes. 
""" return input(message) class UI(object): """ This class is used internally by stacker to perform I/O with the terminal in a multithreaded environment. It ensures that two threads don't write over each other while asking a user for input (e.g. in interactive mode). """ def __init__(self): self._lock = threading.RLock() def lock(self, *args, **kwargs): """Obtains an exclusive lock on the UI for the currently executing thread.""" return self._lock.acquire() def unlock(self, *args, **kwargs): return self._lock.release() def info(self, *args, **kwargs): """Logs the line of the current thread owns the underlying lock, or blocks.""" self.lock() try: return logger.info(*args, **kwargs) finally: self.unlock() def ask(self, message): """This wraps the built-in raw_input function to ensure that only 1 thread is asking for input from the user at a give time. Any process that tries to log output to the terminal will block while the user is being prompted.""" self.lock() try: return get_raw_input(message) finally: self.unlock() def getpass(self, *args): """Wraps getpass to lock the UI.""" try: self.lock() return getpass(*args) finally: self.unlock() # Global UI object for other modules to use. ui = UI() ================================================ FILE: stacker/util.py ================================================ import copy import uuid import importlib import logging import os import re import shutil import subprocess import sys import tarfile import tempfile import zipfile from collections import OrderedDict import botocore.client import botocore.exceptions import dateutil import yaml from yaml.constructor import ConstructorError from yaml.nodes import MappingNode from .awscli_yamlhelper import yaml_parse from stacker.session_cache import get_session logger = logging.getLogger(__name__) def camel_to_snake(name): """Converts CamelCase to snake_case. Args: name (string): The name to convert from CamelCase to snake_case. Returns: string: Converted string. 
""" s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() def convert_class_name(kls): """Gets a string that represents a given class. Args: kls (class): The class being analyzed for its name. Returns: string: The name of the given kls. """ return camel_to_snake(kls.__name__) def parse_zone_id(full_zone_id): """Parses the returned hosted zone id and returns only the ID itself.""" return full_zone_id.split("/")[2] def get_hosted_zone_by_name(client, zone_name): """Get the zone id of an existing zone by name. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The Id of the Hosted Zone. """ p = client.get_paginator("list_hosted_zones") for i in p.paginate(): for zone in i["HostedZones"]: if zone["Name"] == zone_name: return parse_zone_id(zone["Id"]) return None def get_or_create_hosted_zone(client, zone_name): """Get the Id of an existing zone, or create it. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The Id of the Hosted Zone. """ zone_id = get_hosted_zone_by_name(client, zone_name) if zone_id: return zone_id logger.debug("Zone %s does not exist, creating.", zone_name) reference = uuid.uuid4().hex response = client.create_hosted_zone(Name=zone_name, CallerReference=reference) return parse_zone_id(response["HostedZone"]["Id"]) class SOARecordText(object): """Represents the actual body of an SOARecord. """ def __init__(self, record_text): (self.nameserver, self.contact, self.serial, self.refresh, self.retry, self.expire, self.min_ttl) = record_text.split() def __str__(self): return "%s %s %s %s %s %s %s" % ( self.nameserver, self.contact, self.serial, self.refresh, self.retry, self.expire, self.min_ttl ) class SOARecord(object): """Represents an SOA record. 
""" def __init__(self, record): self.name = record["Name"] self.text = SOARecordText(record["ResourceRecords"][0]["Value"]) self.ttl = record["TTL"] def get_soa_record(client, zone_id, zone_name): """Gets the SOA record for zone_name from zone_id. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_id (string): The AWS Route53 zone id of the hosted zone to query. zone_name (string): The name of the DNS hosted zone to create. Returns: :class:`stacker.util.SOARecord`: An object representing the parsed SOA record returned from AWS Route53. """ response = client.list_resource_record_sets(HostedZoneId=zone_id, StartRecordName=zone_name, StartRecordType="SOA", MaxItems="1") return SOARecord(response["ResourceRecordSets"][0]) def create_route53_zone(client, zone_name): """Creates the given zone_name if it doesn't already exists. Also sets the SOA negative caching TTL to something short (300 seconds). Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_name (string): The name of the DNS hosted zone to create. Returns: string: The zone id returned from AWS for the existing, or newly created zone. """ if not zone_name.endswith("."): zone_name += "." zone_id = get_or_create_hosted_zone(client, zone_name) old_soa = get_soa_record(client, zone_id, zone_name) # If the negative cache value is already 300, don't update it. if old_soa.text.min_ttl == "300": return zone_id new_soa = copy.deepcopy(old_soa) logger.debug("Updating negative caching value on zone %s to 300.", zone_name) new_soa.text.min_ttl = "300" client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch={ "Comment": "Update SOA min_ttl to 300.", "Changes": [ { "Action": "UPSERT", "ResourceRecordSet": { "Name": zone_name, "Type": "SOA", "TTL": old_soa.ttl, "ResourceRecords": [ { "Value": str(new_soa.text) } ] } }, ] } ) return zone_id def load_object_from_string(fqcn): """Converts "." 
delimited strings to a python object. Given a "." delimited string representing the full path to an object (function, class, variable) inside a module, return that object. Example: load_object_from_string("os.path.basename") load_object_from_string("logging.Logger") load_object_from_string("LocalClassName") """ module_path = "__main__" object_name = fqcn if "." in fqcn: module_path, object_name = fqcn.rsplit(".", 1) importlib.import_module(module_path) return getattr(sys.modules[module_path], object_name) def merge_map(a, b): """Recursively merge elements of argument b into argument a. Primarly used for merging two dictionaries together, where dict b takes precedence over dict a. If 2 lists are provided, they are concatenated. """ if isinstance(a, list) and isinstance(b, list): return a + b if not isinstance(a, dict) or not isinstance(b, dict): return b for key in b: a[key] = merge_map(a[key], b[key]) if key in a else b[key] return a def yaml_to_ordered_dict(stream, loader=yaml.SafeLoader): """Provides yaml.load alternative with preserved dictionary order. Args: stream (string): YAML string to load. loader (:class:`yaml.loader`): PyYAML loader class. Defaults to safe load. Returns: OrderedDict: Parsed YAML. """ class OrderedUniqueLoader(loader): """ Subclasses the given pyYAML `loader` class. Validates all sibling keys to insure no duplicates. Returns an OrderedDict instead of a Dict. """ # keys which require no duplicate siblings. NO_DUPE_SIBLINGS = ["stacks", "class_path"] # keys which require no duplicate children keys. 
NO_DUPE_CHILDREN = ["stacks"] def _error_mapping_on_dupe(self, node, node_name): """check mapping node for dupe children keys.""" if isinstance(node, MappingNode): mapping = {} for n in node.value: a = n[0] b = mapping.get(a.value, None) if b: msg = "{} mapping cannot have duplicate keys {} {}" raise ConstructorError( msg.format(node_name, b.start_mark, a.start_mark) ) mapping[a.value] = a def _validate_mapping(self, node, deep=False): if not isinstance(node, MappingNode): raise ConstructorError( None, None, "expected a mapping node, but found %s" % node.id, node.start_mark) mapping = OrderedDict() for key_node, value_node in node.value: key = self.construct_object(key_node, deep=deep) try: hash(key) except TypeError as exc: raise ConstructorError( "while constructing a mapping", node.start_mark, "found unhashable key (%s)" % exc, key_node.start_mark ) # prevent duplicate sibling keys for certain "keywords". if key in mapping and key in self.NO_DUPE_SIBLINGS: msg = "{} key cannot have duplicate siblings {} {}" raise ConstructorError( msg.format(key, node.start_mark, key_node.start_mark) ) if key in self.NO_DUPE_CHILDREN: # prevent duplicate children keys for this mapping. 
self._error_mapping_on_dupe(value_node, key_node.value) value = self.construct_object(value_node, deep=deep) mapping[key] = value return mapping def construct_mapping(self, node, deep=False): """Override parent method to use OrderedDict.""" if isinstance(node, MappingNode): self.flatten_mapping(node) return self._validate_mapping(node, deep=deep) def construct_yaml_map(self, node): data = OrderedDict() yield data value = self.construct_mapping(node) data.update(value) OrderedUniqueLoader.add_constructor( u'tag:yaml.org,2002:map', OrderedUniqueLoader.construct_yaml_map, ) return yaml.load(stream, OrderedUniqueLoader) def uppercase_first_letter(s): """Return string "s" with first character upper case.""" return s[0].upper() + s[1:] def cf_safe_name(name): """Converts a name to a safe string for a Cloudformation resource. Given a string, returns a name that is safe for use as a CloudFormation Resource. (ie: Only alphanumeric characters) """ alphanumeric = r"[a-zA-Z0-9]+" parts = re.findall(alphanumeric, name) return "".join([uppercase_first_letter(part) for part in parts]) def get_config_directory(): """Return the directory the config file is located in. This enables us to use relative paths in config values. """ # avoid circular import from .commands.stacker import Stacker command = Stacker() namespace = command.parse_args() return os.path.dirname(namespace.config.name) def read_value_from_path(value): """Enables translators to read values from files. The value can be referred to with the `file://` prefix. ie: conf_key: ${kms file://kms_value.txt} """ if value.startswith('file://'): path = value.split('file://', 1)[1] config_directory = get_config_directory() relative_path = os.path.join(config_directory, path) with open(relative_path) as read_file: value = read_file.read() return value def get_client_region(client): """Gets the region from a :class:`boto3.client.Client` object. Args: client (:class:`boto3.client.Client`): The client to get the region from. 
Returns: string: AWS region string. """ return client._client_config.region_name def get_s3_endpoint(client): """Gets the s3 endpoint for the given :class:`boto3.client.Client` object. Args: client (:class:`boto3.client.Client`): The client to get the endpoint from. Returns: string: The AWS endpoint for the client. """ return client._endpoint.host def s3_bucket_location_constraint(region): """Returns the appropriate LocationConstraint info for a new S3 bucket. When creating a bucket in a region OTHER than us-east-1, you need to specify a LocationConstraint inside the CreateBucketConfiguration argument. This function helps you determine the right value given a given client. Args: region (str): The region where the bucket will be created in. Returns: string: The string to use with the given client for creating a bucket. """ if region == "us-east-1": return "" return region def ensure_s3_bucket(s3_client, bucket_name, bucket_region): """Ensure an s3 bucket exists, if it does not then create it. Args: s3_client (:class:`botocore.client.Client`): An s3 client used to verify and create the bucket. bucket_name (str): The bucket being checked/created. bucket_region (str, optional): The region to create the bucket in. If not provided, will be determined by s3_client's region. """ try: s3_client.head_bucket(Bucket=bucket_name) except botocore.exceptions.ClientError as e: if e.response['Error']['Message'] == "Not Found": logger.debug("Creating bucket %s.", bucket_name) create_args = {"Bucket": bucket_name} location_constraint = s3_bucket_location_constraint( bucket_region ) if location_constraint: create_args["CreateBucketConfiguration"] = { "LocationConstraint": location_constraint } s3_client.create_bucket(**create_args) elif e.response['Error']['Message'] == "Forbidden": logger.exception("Access denied for bucket %s. Did " + "you remember to use a globally unique name?", bucket_name) raise else: logger.exception("Error creating bucket %s. 
Error %s", bucket_name, e.response) raise def parse_cloudformation_template(template): """Parse CFN template string. Leverages the vendored aws-cli yamlhelper to handle JSON or YAML templates. Args: template (str): The template body. """ return yaml_parse(template) class Extractor(object): """Base class for extractors.""" def __init__(self, archive=None): """ Create extractor object with the archive path. Args: archive (string): Archive path """ self.archive = archive def set_archive(self, dir_name): """ Update archive filename to match directory name & extension. Args: dir_name (string): Archive directory name """ self.archive = dir_name + self.extension() @staticmethod def extension(): """Serve as placeholder; override this in subclasses.""" return '' class TarExtractor(Extractor): """Extracts tar archives.""" def extract(self, destination): """Extract the archive.""" with tarfile.open(self.archive, 'r:') as tar: tar.extractall(path=destination) @staticmethod def extension(): """Return archive extension.""" return '.tar' class TarGzipExtractor(Extractor): """Extracts compressed tar archives.""" def extract(self, destination): """Extract the archive.""" with tarfile.open(self.archive, 'r:gz') as tar: tar.extractall(path=destination) @staticmethod def extension(): """Return archive extension.""" return '.tar.gz' class ZipExtractor(Extractor): """Extracts zip archives.""" def extract(self, destination): """Extract the archive.""" with zipfile.ZipFile(self.archive, 'r') as zip_ref: zip_ref.extractall(destination) @staticmethod def extension(): """Return archive extension.""" return '.zip' class SourceProcessor(object): """Makes remote python package sources available in current environment.""" ISO8601_FORMAT = '%Y%m%dT%H%M%SZ' def __init__(self, sources, stacker_cache_dir=None): """ Process a config's defined package sources. Args: sources (dict): Package sources from Stacker config dictionary stacker_cache_dir (string): Path where remote sources will be cached. 
""" if not stacker_cache_dir: stacker_cache_dir = os.path.expanduser("~/.stacker") package_cache_dir = os.path.join(stacker_cache_dir, 'packages') self.stacker_cache_dir = stacker_cache_dir self.package_cache_dir = package_cache_dir self.sources = sources self.configs_to_merge = [] self.create_cache_directories() def create_cache_directories(self): """Ensure that SourceProcessor cache directories exist.""" if not os.path.isdir(self.package_cache_dir): if not os.path.isdir(self.stacker_cache_dir): os.mkdir(self.stacker_cache_dir) os.mkdir(self.package_cache_dir) def get_package_sources(self): """Make remote python packages available for local use.""" # Checkout local modules for config in self.sources.get('local', []): self.fetch_local_package(config=config) # Checkout S3 repositories specified in config for config in self.sources.get('s3', []): self.fetch_s3_package(config=config) # Checkout git repositories specified in config for config in self.sources.get('git', []): self.fetch_git_package(config=config) def fetch_local_package(self, config): """Make a local path available to current stacker config. Args: config (dict): 'local' path config dictionary """ # Update sys.path & merge in remote configs (if necessary) self.update_paths_and_config(config=config, pkg_dir_name=config['source'], pkg_cache_dir=os.getcwd()) def fetch_s3_package(self, config): """Make a remote S3 archive available for local use. 
Args: config (dict): git config dictionary """ extractor_map = {'.tar.gz': TarGzipExtractor, '.tar': TarExtractor, '.zip': ZipExtractor} extractor = None for suffix, klass in extractor_map.items(): if config['key'].endswith(suffix): extractor = klass() logger.debug("Using extractor %s for S3 object \"%s\" in " "bucket %s.", klass.__name__, config['key'], config['bucket']) dir_name = self.sanitize_uri_path( "s3-%s-%s" % (config['bucket'], config['key'][:-len(suffix)]) ) break if extractor is None: raise ValueError( "Archive type could not be determined for S3 object \"%s\" " "in bucket %s." % (config['key'], config['bucket']) ) session = get_session(region=None) extra_s3_args = {} if config.get('requester_pays', False): extra_s3_args['RequestPayer'] = 'requester' # We can skip downloading the archive if it's already been cached if config.get('use_latest', True): try: # LastModified should always be returned in UTC, but it doesn't # hurt to explicitly convert it to UTC again just in case modified_date = session.client('s3').head_object( Bucket=config['bucket'], Key=config['key'], **extra_s3_args )['LastModified'].astimezone(dateutil.tz.tzutc()) except botocore.exceptions.ClientError as client_error: logger.error("Error checking modified date of " "s3://%s/%s : %s", config['bucket'], config['key'], client_error) sys.exit(1) dir_name += "-%s" % modified_date.strftime(self.ISO8601_FORMAT) cached_dir_path = os.path.join(self.package_cache_dir, dir_name) if not os.path.isdir(cached_dir_path): logger.debug("Remote package s3://%s/%s does not appear to have " "been previously downloaded - starting download and " "extraction to %s", config['bucket'], config['key'], cached_dir_path) tmp_dir = tempfile.mkdtemp(prefix='stacker') tmp_package_path = os.path.join(tmp_dir, dir_name) try: extractor.set_archive(os.path.join(tmp_dir, dir_name)) logger.debug("Starting remote package download from S3 to %s " "with extra S3 options \"%s\"", extractor.archive, str(extra_s3_args)) 
session.resource('s3').Bucket(config['bucket']).download_file( config['key'], extractor.archive, ExtraArgs=extra_s3_args ) logger.debug("Download complete; extracting downloaded " "package to %s", tmp_package_path) extractor.extract(tmp_package_path) logger.debug("Moving extracted package directory %s to the " "Stacker cache at %s", dir_name, self.package_cache_dir) shutil.move(tmp_package_path, self.package_cache_dir) finally: shutil.rmtree(tmp_dir) else: logger.debug("Remote package s3://%s/%s appears to have " "been previously downloaded to %s -- bypassing " "download", config['bucket'], config['key'], cached_dir_path) # Update sys.path & merge in remote configs (if necessary) self.update_paths_and_config(config=config, pkg_dir_name=dir_name) def fetch_git_package(self, config): """Make a remote git repository available for local use. Args: config (dict): git config dictionary """ # only loading git here when needed to avoid load errors on systems # without git installed from git import Repo ref = self.determine_git_ref(config) dir_name = self.sanitize_git_path(uri=config['uri'], ref=ref) cached_dir_path = os.path.join(self.package_cache_dir, dir_name) # We can skip cloning the repo if it's already been cached if not os.path.isdir(cached_dir_path): logger.debug("Remote repo %s does not appear to have been " "previously downloaded - starting clone to %s", config['uri'], cached_dir_path) tmp_dir = tempfile.mkdtemp(prefix='stacker') try: tmp_repo_path = os.path.join(tmp_dir, dir_name) with Repo.clone_from(config['uri'], tmp_repo_path) as repo: repo.head.reference = ref repo.head.reset(index=True, working_tree=True) shutil.move(tmp_repo_path, self.package_cache_dir) finally: shutil.rmtree(tmp_dir) else: logger.debug("Remote repo %s appears to have been previously " "cloned to %s -- bypassing download", config['uri'], cached_dir_path) # Update sys.path & merge in remote configs (if necessary) self.update_paths_and_config(config=config, pkg_dir_name=dir_name) def 
update_paths_and_config(self, config, pkg_dir_name, pkg_cache_dir=None): """Handle remote source defined sys.paths & configs. Args: config (dict): git config dictionary pkg_dir_name (string): directory name of the stacker archive pkg_cache_dir (string): fully qualified path to stacker cache cache directory """ if pkg_cache_dir is None: pkg_cache_dir = self.package_cache_dir cached_dir_path = os.path.join(pkg_cache_dir, pkg_dir_name) # Add the appropriate directory (or directories) to sys.path if config.get('paths'): for path in config['paths']: path_to_append = os.path.join(cached_dir_path, path) logger.debug("Appending \"%s\" to python sys.path", path_to_append) sys.path.append(path_to_append) else: sys.path.append(cached_dir_path) # If the configuration defines a set of remote config yamls to # include, add them to the list for merging if config.get('configs'): for config_filename in config['configs']: self.configs_to_merge.append(os.path.join(cached_dir_path, config_filename)) def git_ls_remote(self, uri, ref): """Determine the latest commit id for a given ref. Args: uri (string): git URI ref (string): git ref Returns: str: A commit id """ logger.debug("Invoking git to retrieve commit id for repo %s...", uri) lsremote_output = subprocess.check_output(['git', 'ls-remote', uri, ref]) if b"\t" in lsremote_output: commit_id = lsremote_output.split(b"\t")[0] logger.debug("Matching commit id found: %s", commit_id) return commit_id else: raise ValueError("Ref \"%s\" not found for repo %s." % (ref, uri)) def determine_git_ls_remote_ref(self, config): """Determine the ref to be used with the "git ls-remote" command. Args: config (:class:`stacker.config.GitPackageSource`): git config dictionary; 'branch' key is optional Returns: str: A branch reference or "HEAD" """ if config.get('branch'): ref = "refs/heads/%s" % config['branch'] else: ref = "HEAD" return ref def determine_git_ref(self, config): """Determine the ref to be used for 'git checkout'. 
Args: config (dict): git config dictionary Returns: str: A commit id or tag name """ # First ensure redundant config keys aren't specified (which could # cause confusion as to which take precedence) ref_config_keys = 0 for i in ['commit', 'tag', 'branch']: if config.get(i): ref_config_keys += 1 if ref_config_keys > 1: raise ImportError("Fetching remote git sources failed: " "conflicting revisions (e.g. 'commit', 'tag', " "'branch') specified for a package source") # Now check for a specific point in time referenced and return it if # present if config.get('commit'): ref = config['commit'] elif config.get('tag'): ref = config['tag'] else: # Since a specific commit/tag point in time has not been specified, # check the remote repo for the commit id to use ref = self.git_ls_remote( config['uri'], self.determine_git_ls_remote_ref(config) ) if sys.version_info[0] > 2 and isinstance(ref, bytes): return ref.decode() return ref def sanitize_uri_path(self, uri): """Take a URI and converts it to a directory safe path. Args: uri (string): URI (e.g. http://example.com/cats) Returns: str: Directory name for the supplied uri """ for i in ['@', '/', ':']: uri = uri.replace(i, '_') return uri def sanitize_git_path(self, uri, ref=None): """Take a git URI and ref and converts it to a directory safe path. Args: uri (string): git URI (e.g. git@github.com:foo/bar.git) ref (string): optional git ref to be appended to the path Returns: str: Directory name for the supplied uri """ if uri.endswith('.git'): dir_name = uri[:-4] # drop .git else: dir_name = uri dir_name = self.sanitize_uri_path(dir_name) if ref is not None: dir_name += "-%s" % ref return dir_name def stack_template_key_name(blueprint): """Given a blueprint, produce an appropriate key name. Args: blueprint (:class:`stacker.blueprints.base.Blueprint`): The blueprint object to create the key from. Returns: string: Key name resulting from blueprint. 
""" name = blueprint.name return "stack_templates/%s/%s-%s.json" % (blueprint.context.get_fqn(name), name, blueprint.version) ================================================ FILE: stacker/variables.py ================================================ import re from past.builtins import basestring from string import Template from .exceptions import InvalidLookupCombination, UnresolvedVariable, \ UnknownLookupType, FailedVariableLookup, FailedLookup, \ UnresolvedVariableValue, InvalidLookupConcatenation from .lookups.registry import LOOKUP_HANDLERS class LookupTemplate(Template): """A custom string template we use to replace lookup values""" idpattern = r'[_a-z][^\$\{\}]*' def resolve_variables(variables, context, provider): """Given a list of variables, resolve all of them. Args: variables (list of :class:`stacker.variables.Variable`): list of variables context (:class:`stacker.context.Context`): stacker context provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider """ for variable in variables: variable.resolve(context, provider) class Variable(object): """Represents a variable passed to a stack. Args: name (str): Name of the variable value (any): Initial value of the variable from the config (str, list, dict) """ def __init__(self, name, value): self.name = name self._raw_value = value self._value = VariableValue.parse(value) @property def value(self): """Return the current value of the Variable. """ try: return self._value.value() except UnresolvedVariableValue: raise UnresolvedVariable("", self) except InvalidLookupConcatenation as e: raise InvalidLookupCombination(e.lookup, e.lookups, self) @property def resolved(self): """Boolean for whether the Variable has been resolved. Variables only need to be resolved if they contain lookups. """ return self._value.resolved() def resolve(self, context, provider): """Recursively resolve any lookups with the Variable. 
Args: context (:class:`stacker.context.Context`): Current context for building the stack provider (:class:`stacker.provider.base.BaseProvider`): subclass of the base provider """ try: self._value.resolve(context, provider) except FailedLookup as e: raise FailedVariableLookup(self.name, e.lookup, e.error) def dependencies(self): """ Returns: Set[str]: Stack names that this variable depends on """ return self._value.dependencies() class VariableValue(object): """ Abstract Syntax Tree base object to parse the value for a variable """ def value(self): return NotImplementedError() def __iter__(self): return NotImplementedError() def resolved(self): """ Returns: bool: Whether value() will not raise an error """ return NotImplementedError() def resolve(self, context, provider): pass def dependencies(self): return set() def simplified(self): """ Return a simplified version of the Value. This can be used to e.g. concatenate two literals in to one literal, or to flatten nested Concatenations Returns: VariableValue """ return self @classmethod def parse(cls, input_object): if isinstance(input_object, list): return VariableValueList.parse(input_object) elif isinstance(input_object, dict): return VariableValueDict.parse(input_object) elif not isinstance(input_object, basestring): return VariableValueLiteral(input_object) # else: # str tokens = VariableValueConcatenation([ VariableValueLiteral(t) for t in re.split(r'(\$\{|\}|\s+)', input_object) ]) opener = '${' closer = '}' while True: last_open = None next_close = None for i, t in enumerate(tokens): if not isinstance(t, VariableValueLiteral): continue if t.value() == opener: last_open = i next_close = None if last_open is not None and \ t.value() == closer and \ next_close is None: next_close = i if next_close is not None: lookup_data = VariableValueConcatenation( tokens[(last_open + len(opener) + 1):next_close] ) lookup = VariableValueLookup( lookup_name=tokens[last_open + 1], lookup_data=lookup_data, ) 
tokens[last_open:(next_close + 1)] = [lookup] else: break tokens = tokens.simplified() return tokens class VariableValueLiteral(VariableValue): def __init__(self, value): self._value = value def value(self): return self._value def __iter__(self): yield self def resolved(self): return True def __repr__(self): return "Literal<{}>".format(repr(self._value)) class VariableValueList(VariableValue, list): @classmethod def parse(cls, input_object): acc = [ VariableValue.parse(obj) for obj in input_object ] return cls(acc) def value(self): return [ item.value() for item in self ] def resolved(self): accumulator = True for item in self: accumulator = accumulator and item.resolved() return accumulator def __repr__(self): return "List[{}]".format(', '.join([repr(value) for value in self])) def __iter__(self): return list.__iter__(self) def resolve(self, context, provider): for item in self: item.resolve(context, provider) def dependencies(self): deps = set() for item in self: deps.update(item.dependencies()) return deps def simplified(self): return [ item.simplified() for item in self ] class VariableValueDict(VariableValue, dict): @classmethod def parse(cls, input_object): acc = { k: VariableValue.parse(v) for k, v in input_object.items() } return cls(acc) def value(self): return { k: v.value() for k, v in self.items() } def resolved(self): accumulator = True for item in self.values(): accumulator = accumulator and item.resolved() return accumulator def __repr__(self): return "Dict[{}]".format(', '.join([ "{}={}".format(k, repr(v)) for k, v in self.items() ])) def __iter__(self): return dict.__iter__(self) def resolve(self, context, provider): for item in self.values(): item.resolve(context, provider) def dependencies(self): deps = set() for item in self.values(): deps.update(item.dependencies()) return deps def simplified(self): return { k: v.simplified() for k, v in self.items() } class VariableValueConcatenation(VariableValue, list): def value(self): if len(self) == 1: 
return self[0].value()

        values = []
        for value in self:
            resolved_value = value.value()
            if not isinstance(resolved_value, basestring):
                # Only strings can be concatenated; a lookup returning e.g.
                # a list inside "${...}text" is an error for the caller.
                raise InvalidLookupConcatenation(value, self)
            values.append(resolved_value)
        return ''.join(values)

    def __iter__(self):
        return list.__iter__(self)

    def resolved(self):
        accumulator = True
        for item in self:
            accumulator = accumulator and item.resolved()
        return accumulator

    def __repr__(self):
        return "Concat[{}]".format(', '.join([repr(value) for value in self]))

    def resolve(self, context, provider):
        for value in self:
            value.resolve(context, provider)

    def dependencies(self):
        deps = set()
        for item in self:
            deps.update(item.dependencies())
        return deps

    def simplified(self):
        """Drop empty literals, merge adjacent literals and flatten nested
        concatenations; may return a single VariableValue instead of a
        concatenation."""
        concat = []
        for item in self:
            if isinstance(item, VariableValueLiteral) and \
                    item.value() == '':
                # Drop empty literals entirely.
                pass
            elif isinstance(item, VariableValueLiteral) and \
                    len(concat) > 0 and \
                    isinstance(concat[-1], VariableValueLiteral):
                # Join the literals together
                concat[-1] = VariableValueLiteral(
                    concat[-1].value() + item.value()
                )
            elif isinstance(item, VariableValueConcatenation):
                # Flatten concatenations.  NOTE: item.simplified() may
                # return a single VariableValue here rather than a list;
                # extend() still works because every VariableValue is
                # iterable (yielding itself).
                concat.extend(item.simplified())
            else:
                concat.append(item.simplified())

        if len(concat) == 0:
            return VariableValueLiteral('')
        elif len(concat) == 1:
            return concat[0]
        else:
            return VariableValueConcatenation(concat)


class VariableValueLookup(VariableValue):
    """AST node for a single "${name data}" lookup expression."""

    def __init__(self, lookup_name, lookup_data, handler=None):
        """
        Args:
            lookup_name (basestring): Name of the invoked lookup
            lookup_data (VariableValue): Data portion of the lookup
            handler: optional pre-resolved lookup handler; when omitted it
                is found in LOOKUP_HANDLERS by the resolved lookup name

        Raises:
            UnknownLookupType: if no handler is registered for the name
        """
        self._resolved = False
        self._value = None

        self.lookup_name = lookup_name

        # Accept plain strings for convenience; normalize to a literal node.
        if isinstance(lookup_data, basestring):
            lookup_data = VariableValueLiteral(lookup_data)
        self.lookup_data = lookup_data

        if handler is None:
            # NOTE(review): lookup_name.value() assumes lookup_name is a
            # VariableValue even though a plain str is normalized for
            # lookup_data above; a str lookup_name would fail here --
            # confirm against callers.
            lookup_name_resolved = lookup_name.value()
            try:
                handler = LOOKUP_HANDLERS[lookup_name_resolved]
            except KeyError:
                raise UnknownLookupType(lookup_name_resolved)
        self.handler = handler

    def resolve(self, context, provider):
self.lookup_data.resolve(context, provider)
        try:
            if type(self.handler) == type:
                # Handler is a new-style (class-based) handler
                result = self.handler.handle(
                    value=self.lookup_data.value(),
                    context=context,
                    provider=provider
                )
            else:
                # Old-style handler: a bare callable
                result = self.handler(
                    value=self.lookup_data.value(),
                    context=context,
                    provider=provider
                )
            self._resolve(result)
        except Exception as e:
            # Wrap any handler failure so callers can attach variable
            # context (see Variable.resolve).
            raise FailedLookup(self, e)

    def _resolve(self, value):
        # Record the handler result and mark this node as resolved.
        self._value = value
        self._resolved = True

    def dependencies(self):
        if type(self.handler) == type:
            # Only class-based handlers can declare dependencies.
            return self.handler.dependencies(self.lookup_data)
        else:
            return set()

    def value(self):
        if self._resolved:
            return self._value
        else:
            raise UnresolvedVariableValue(self)

    def __iter__(self):
        yield self

    def resolved(self):
        return self._resolved

    def __repr__(self):
        if self._resolved:
            return "Lookup<{r} ({t} {d})>".format(
                r=self._value,
                t=self.lookup_name,
                d=repr(self.lookup_data),
            )
        else:
            return "Lookup<{t} {d}>".format(
                t=self.lookup_name,
                d=repr(self.lookup_data),
            )

    def __str__(self):
        return "${{{type} {data}}}".format(
            type=self.lookup_name.value(),
            data=self.lookup_data.value(),
        )

    def simplified(self):
        return VariableValueLookup(
            lookup_name=self.lookup_name,
            lookup_data=self.lookup_data.simplified(),
        )


================================================
FILE: test-requirements.in
================================================
pytest~=6.0
pytest-cov~=2.6
mock~=2.0
moto[awslambda,ec2]~=3.0.0
testfixtures~=6.18.3
flake8
pep8-naming


================================================
FILE: tests/Makefile
================================================
permissions:
	./stacker.yaml.sh | stacker build -

test: permissions
	$(eval AWS_ACCESS_KEY_ID := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/AccessKeyId/ {print $$3}'))
	$(eval AWS_SECRET_ACCESS_KEY := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/SecretAccessKey/ {print $$3}'))
	$(eval STACKER_ROLE := $(shell ./stacker.yaml.sh | stacker info - 2>&1 | awk '/FunctionalTestRole/ {print $$3}'))
@STACKER_ROLE=$(STACKER_ROLE) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) ./run_test_suite.sh ${TESTS} ================================================ FILE: tests/README.md ================================================ This directory contains the functional testing suite for stacker. It exercises all of stacker against a real AWS account. Make sure you have the AWS credentials loaded into your environment when you run these steps. ## Setup 1. First, ensure that you're inside a virtualenv: ```console $ source venv/bin/activate ``` 2. Set a stacker namespace & the AWS region for the test suite to use: ```console $ export STACKER_NAMESPACE=my-stacker-test-namespace $ export AWS_DEFAULT_REGION=us-east-1 ``` 3. Ensure that bats is installed: ```console # On MacOS if brew is installed $ brew install bats-core ``` 4. Setup functional test environment & run tests: ```console # To run all the tests $ make -C tests test # To run specific tests (ie: tests 1, 2 and 3) $ TESTS="1 2 3" make -C tests test ``` ================================================ FILE: tests/cleanup_functional_test_buckets.sh ================================================ #!/usr/bin/env bash if [ -z "$AWS_ACCESS_KEY_ID" ] then echo "AWS_ACCESS_KEY_ID not set, skipping bucket cleanup." 
exit 0 fi sudo pip install awscli ALL_BUT_LAST_6_BUCKETS=$(aws s3 ls | grep stacker-cloudtools-functional-tests- | sort -r | tail -n +7 | awk '{print $3}') for bucket in ${ALL_BUT_LAST_6_BUCKETS} do echo "## Deleting bucket: 's3://$bucket'" aws --region us-east-1 s3 rm --recursive s3://$bucket/ aws --region us-east-1 s3 rb s3://$bucket done ================================================ FILE: tests/fixtures/blueprints/test_repo.json ================================================ { "Resources": { "repo1Repository": { "Properties": { "RepositoryName": "repo1" }, "Type": "AWS::ECR::Repository" }, "repo2Repository": { "Properties": { "RepositoryName": "repo2" }, "Type": "AWS::ECR::Repository" } } } ================================================ FILE: tests/fixtures/stack_policies/default.json ================================================ { "Statement" : [ { "Effect" : "Allow", "Action" : "Update:*", "Principal": "*", "Resource" : "*" } ] } ================================================ FILE: tests/fixtures/stack_policies/none.json ================================================ { "Statement" : [ { "Effect" : "Deny", "Action" : "Update:*", "Principal": "*", "Resource" : "*" } ] } ================================================ FILE: tests/run_test_suite.sh ================================================ #!/bin/sh TEST_ARGS=$* if [ -z "$TEST_ARGS" ] then _TESTS="test_suite" else for T in ${TEST_ARGS} do _TESTS="${_TESTS} test_suite/$(printf %02d ${T})_*" done fi echo "bats ${_TESTS}" bats ${_TESTS} ================================================ FILE: tests/stacker.yaml.sh ================================================ #!/bin/bash cat - <&2 echo "To run these tests, you must set a STACKER_NAMESPACE environment variable" exit 1 fi if [ -z "$STACKER_ROLE" ]; then >&2 echo "To run these tests, you must set a STACKER_ROLE environment variable" exit 1 fi # Setup a base .aws/config that can be use to test stack configurations that # require stacker to assume a 
role. export AWS_CONFIG_DIR=$(mktemp -d) export AWS_CONFIG_FILE="$AWS_CONFIG_DIR/config" cat < "$AWS_CONFIG_FILE" [default] region = us-east-1 [profile stacker] region = us-east-1 role_arn = ${STACKER_ROLE} credential_source = Environment EOF # Simple wrapper around the builtin bash `test` command. assert() { builtin test "$@" } # Checks that the given line is in $output. assert_has_line() { echo "$output" | grep "$@" 1>/dev/null } # This helper wraps "stacker" with bats' "run" and also outputs debug # information. If you need to execute the stacker binary _without_ calling # "run", you can use "command stacker". stacker() { # Sleep between runs of stacker to try and avoid rate limiting issues. sleep 2 echo "$ stacker $@" run command stacker "$@" echo "$output" echo } # A helper to tag a test as requiring access to AWS. If no credentials are set, # then the tests will be skipped. needs_aws() { if [ -z "$AWS_ACCESS_KEY_ID" ]; then skip "aws credentials not set" fi } ================================================ FILE: tests/test_suite/01_stacker_build_no_config.bats ================================================ #!/usr/bin/env bats load ../test_helper @test "stacker build - no config" { stacker build assert ! "$status" -eq 0 assert_has_line -E "too few arguments|the following arguments are required: config" } ================================================ FILE: tests/test_suite/02_stacker_build_empty_config.bats ================================================ #!/usr/bin/env bats # load ../test_helper @test "stacker build - empty config" { stacker build <(echo "") assert ! 
"$status" -eq 0 assert_has_line 'stacker.exceptions.InvalidConfig:' } ================================================ FILE: tests/test_suite/03_stacker_build-config_with_no_stacks.bats ================================================ #!/usr/bin/env bats load ../test_helper @test "stacker build - config with no stacks" { needs_aws stacker build - < "vpc";' assert_has_line '"bastion2" -> "vpc";' assert_has_line '"app1" -> "bastion1";' assert_has_line '"app2" -> "bastion2";' assert $(echo "$output" | grep -A 2 vpc | tail -n 2 | grep -c vpc) = '0' } ================================================ FILE: tests/test_suite/09_stacker_build-missing_variable.bats ================================================ #!/usr/bin/env bats load ../test_helper @test "stacker build - missing variable" { needs_aws stacker build - <