[
  {
    "path": ".gitignore",
    "content": "ansible4aws-book.*\n.DS_Store\n"
  },
  {
    "path": "ami_create.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    ins_name: wordpress_master\n    ami_name: wordpress\n  tasks:\n    - name: get instance id\n      command: \"aws ec2 describe-instances \n               --filters Name=tag:Name,Values={{ ins_name }}\n               --query 'Reservations[0].Instances[0].InstanceId' --output text\"\n      register: instanceid\n\n    - name: create ami\n      ec2_ami:\n        instance_id: \"{{ instanceid.stdout }}\"\n        region: \"{{ region }}\"\n        wait: yes\n        name: \"{{ ami_name }}\"\n      register: ami\n      when: instanceid.stdout!=\"None\"\n\n    - debug: var=ami\n\n\n"
  },
  {
    "path": "ami_delete.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    ami_name: wordpress\n  tasks:\n    - name: get ami id\n      command: \"aws ec2 describe-images\n               --filters Name=name,Values={{ ami_name }}\n               --query 'Images[0].ImageId' --output text\"\n      register: imageid\n\n    - name: delete ami\n      ec2_ami:\n        region: \"{{ region }}\"\n        image_id: \"{{ imageid.stdout }}\"\n        delete_snapshot: yes\n        state: absent\n      when: imageid.stdout!=\"None\"\n"
  },
  {
    "path": "dbsg_create.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n  vars_files:\n    - staging_vpc_info\n  tasks:\n    - name: create Multi-AZ DB subnet group\n      rds_subnet_group:\n        name: dbsg2\n        state: present\n        region: \"{{ region }}\"\n        description: DB Subnet Group 2\n        subnets: \n          - \"{{ staging_subnet_private_0 }}\"\n          - \"{{ staging_subnet_private_1 }}\"\n"
  },
  {
    "path": "dbsg_delete.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: delete DB subnet group\n      rds_subnet_group:\n        name: dbsg2\n        state: absent\n        region: \"{{ region }}\"\n"
  },
  {
    "path": "dhcp_options.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    name: test-vpc\n  tasks:\n    - name: create dhcp options set\n      command: aws ec2 create-dhcp-options --dhcp-configuration \n               \"Key=domain-name,Values=example.com\" \"Key=domain-name-servers,Values=10.0.0.7,10.0.0.8\"\n               --query 'DhcpOptions.DhcpOptionsId' --output text\n      register: dopt\n\n    - name: get vpc id\n      command: \"aws ec2 describe-vpcs --filters Name=tag:Name,Values={{ name }} \n               --query 'Vpcs[0].VpcId' --output text\"\n      register: vpcid\n\n    - name: associate vpc with dhcp options set\n      command: aws ec2 associate-dhcp-options --dhcp-options-id {{ dopt.stdout }} \n               --vpc-id {{ vpcid.stdout }}\n"
  },
  {
    "path": "ec2.ini",
    "content": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these lines\n# and edit edit eucalyptus_host to be the host name of your cloud controller\n#eucalyptus = True\n#eucalyptus_host = clc.cloud.domain.org\n\n# AWS regions to make calls to. Set this to 'all' to make request to all regions\n# in AWS and merge the results together. Alternatively, set this to a comma\n# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'\nregions = all\nregions_exclude = us-gov-west-1,cn-north-1\n\n# When generating inventory, Ansible needs to know how to address a server.\n# Each EC2 instance has a lot of variables associated with it. Here is the list:\n#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance\n# Below are 2 variables that are used as the address of a server:\n#   - destination_variable\n#   - vpc_destination_variable\n\n# This is the normal destination variable to use. If you are running Ansible\n# from outside EC2, then 'public_dns_name' makes the most sense. If you are\n# running Ansible from within EC2, then perhaps you want to use the internal\n# address, and should set this to 'private_dns_name'.\ndestination_variable = public_dns_name\n\n# For server inside a VPC, using DNS names may not make sense. When an instance\n# has 'subnet_id' set, this variable is used. If the subnet is public, setting\n# this to 'ip_address' will return the public IP address. 
For instances in a\n# private subnet, this should be set to 'private_ip_address', and Ansible must\n# be run from with EC2.\nvpc_destination_variable = ip_address\n\n# To tag instances on EC2 with the resource records that point to them from\n# Route53, uncomment and set 'route53' to True.\nroute53 = False\n\n# Additionally, you can specify the list of zones to exclude looking up in\n# 'route53_excluded_zones' as a comma-separated list.\n# route53_excluded_zones = samplezone1.com, samplezone2.com\n\n# API calls to EC2 are slow. For this reason, we cache the results of an API\n# call. Set this to the path you want cache files to be written to. Two files\n# will be written to this directory:\n#   - ansible-ec2.cache\n#   - ansible-ec2.index\ncache_path = ~/.ansible/tmp\n\n# The number of seconds a cache file is considered valid. After this many\n# seconds, a new API call will be made, and the cache file will be updated.\n# To disable the cache, set this value to 0\ncache_max_age = 300\n"
  },
  {
    "path": "ec2.py",
    "content": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ansible can understand by making API request to\nAWS EC2 using the Boto library.\n\nNOTE: This script assumes Ansible is being executed where the environment\nvariables needed for Boto have already been set:\n    export AWS_ACCESS_KEY_ID='AK123'\n    export AWS_SECRET_ACCESS_KEY='abc123'\n\nThis script also assumes there is an ec2.ini file alongside it.  To specify a\ndifferent path to ec2.ini, define the EC2_INI_PATH environment variable:\n\n    export EC2_INI_PATH=/path/to/my_ec2.ini\n\nIf you're using eucalyptus you need to set the above variables and\nyou need to define:\n\n    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus\n\nFor more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html\n\nWhen run against a specific host, this script returns the following variables:\n - ec2_ami_launch_index\n - ec2_architecture\n - ec2_association\n - ec2_attachTime\n - ec2_attachment\n - ec2_attachmentId\n - ec2_client_token\n - ec2_deleteOnTermination\n - ec2_description\n - ec2_deviceIndex\n - ec2_dns_name\n - ec2_eventsSet\n - ec2_group_name\n - ec2_hypervisor\n - ec2_id\n - ec2_image_id\n - ec2_instanceState\n - ec2_instance_type\n - ec2_ipOwnerId\n - ec2_ip_address\n - ec2_item\n - ec2_kernel\n - ec2_key_name\n - ec2_launch_time\n - ec2_monitored\n - ec2_monitoring\n - ec2_networkInterfaceId\n - ec2_ownerId\n - ec2_persistent\n - ec2_placement\n - ec2_platform\n - ec2_previous_state\n - ec2_private_dns_name\n - ec2_private_ip_address\n - ec2_publicIp\n - ec2_public_dns_name\n - ec2_ramdisk\n - ec2_reason\n - ec2_region\n - ec2_requester_id\n - ec2_root_device_name\n - ec2_root_device_type\n - ec2_security_group_ids\n - ec2_security_group_names\n - ec2_shutdown_state\n - ec2_sourceDestCheck\n - ec2_spot_instance_request_id\n - ec2_state\n - ec2_state_code\n - ec2_state_reason\n - ec2_status\n - 
ec2_subnet_id\n - ec2_tenancy\n - ec2_virtualization_type\n - ec2_vpc_id\n\nThese variables are pulled out of a boto.ec2.instance object. There is a lack of\nconsistency with variable spellings (camelCase and underscores) since this\njust loops through all variables the object exposes. It is preferred to use the\nones with underscores when multiple exist.\n\nIn addition, if an instance has AWS Tags associated with it, each tag is a new\nvariable named:\n - ec2_tag_[Key] = [Value]\n\nSecurity groups are comma-separated in 'ec2_security_group_ids' and\n'ec2_security_group_names'.\n'''\n\n# (c) 2012, Peter Sankauskas\n#\n# This file is part of Ansible,\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  
If not, see <http://www.gnu.org/licenses/>.\n\n######################################################################\n\nimport sys\nimport os\nimport argparse\nimport re\nfrom time import time\nimport boto\nfrom boto import ec2\nfrom boto import rds\nfrom boto import route53\nimport ConfigParser\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n\nclass Ec2Inventory(object):\n    def _empty_inventory(self):\n        return {\"_meta\" : {\"hostvars\" : {}}}\n\n    def __init__(self):\n        ''' Main execution path '''\n\n        # Inventory grouped by instance IDs, tags, security groups, regions,\n        # and availability zones\n        self.inventory = self._empty_inventory()\n\n        # Index of hostname (address) to instance ID\n        self.index = {}\n\n        # Read settings and parse CLI arguments\n        self.read_settings()\n        self.parse_cli_args()\n\n        # Cache\n        if self.args.refresh_cache:\n            self.do_api_calls_update_cache()\n        elif not self.is_cache_valid():\n            self.do_api_calls_update_cache()\n\n        # Data to print\n        if self.args.host:\n            data_to_print = self.get_host_info()\n\n        elif self.args.list:\n            # Display list of instances for inventory\n            if self.inventory == self._empty_inventory():\n                data_to_print = self.get_inventory_from_cache()\n            else:\n                data_to_print = self.json_format_dict(self.inventory, True)\n\n        print data_to_print\n\n\n    def is_cache_valid(self):\n        ''' Determines if the cache files have expired, or if it is still valid '''\n\n        if os.path.isfile(self.cache_path_cache):\n            mod_time = os.path.getmtime(self.cache_path_cache)\n            current_time = time()\n            if (mod_time + self.cache_max_age) > current_time:\n                if os.path.isfile(self.cache_path_index):\n                    return True\n\n        return 
False\n\n\n    def read_settings(self):\n        ''' Reads the settings from the ec2.ini file '''\n\n        config = ConfigParser.SafeConfigParser()\n        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')\n        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)\n        config.read(ec2_ini_path)\n\n        # is eucalyptus?\n        self.eucalyptus_host = None\n        self.eucalyptus = False\n        if config.has_option('ec2', 'eucalyptus'):\n            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')\n        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):\n            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')\n\n        # Regions\n        self.regions = []\n        configRegions = config.get('ec2', 'regions')\n        configRegions_exclude = config.get('ec2', 'regions_exclude')\n        if (configRegions == 'all'):\n            if self.eucalyptus_host:\n                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)\n            else:\n                for regionInfo in ec2.regions():\n                    if regionInfo.name not in configRegions_exclude:\n                        self.regions.append(regionInfo.name)\n        else:\n            self.regions = configRegions.split(\",\")\n\n        # Destination addresses\n        self.destination_variable = config.get('ec2', 'destination_variable')\n        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')\n\n        # Route53\n        self.route53_enabled = config.getboolean('ec2', 'route53')\n        self.route53_excluded_zones = []\n        if config.has_option('ec2', 'route53_excluded_zones'):\n            self.route53_excluded_zones.extend(\n                config.get('ec2', 'route53_excluded_zones', '').split(','))\n\n        # Cache related\n        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))\n        if not 
os.path.exists(cache_dir):\n            os.makedirs(cache_dir)\n\n        self.cache_path_cache = cache_dir + \"/ansible-ec2.cache\"\n        self.cache_path_index = cache_dir + \"/ansible-ec2.index\"\n        self.cache_max_age = config.getint('ec2', 'cache_max_age')\n        \n\n\n    def parse_cli_args(self):\n        ''' Command line argument processing '''\n\n        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')\n        parser.add_argument('--list', action='store_true', default=True,\n                           help='List instances (default: True)')\n        parser.add_argument('--host', action='store',\n                           help='Get all the variables about a specific instance')\n        parser.add_argument('--refresh-cache', action='store_true', default=False,\n                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')\n        self.args = parser.parse_args()\n\n\n    def do_api_calls_update_cache(self):\n        ''' Do API calls to each region, and save data in cache files '''\n\n        if self.route53_enabled:\n            self.get_route53_records()\n\n        for region in self.regions:\n            self.get_instances_by_region(region)\n            self.get_rds_instances_by_region(region)\n\n        self.write_to_cache(self.inventory, self.cache_path_cache)\n        self.write_to_cache(self.index, self.cache_path_index)\n\n\n    def get_instances_by_region(self, region):\n        ''' Makes an AWS EC2 API call to the list of instances in a particular\n        region '''\n\n        try:\n            if self.eucalyptus:\n                conn = boto.connect_euca(host=self.eucalyptus_host)\n                conn.APIVersion = '2010-08-31'\n            else:\n                conn = ec2.connect_to_region(region)\n\n            # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n            
if conn is None:\n                print(\"region name: %s likely not supported, or AWS is down.  connection to region failed.\" % region)\n                sys.exit(1)\n \n            reservations = conn.get_all_instances()\n            for reservation in reservations:\n                for instance in reservation.instances:\n                    self.add_instance(instance, region)\n        \n        except boto.exception.BotoServerError, e:\n            if  not self.eucalyptus:\n                print \"Looks like AWS is down again:\"\n            print e\n            sys.exit(1)\n\n    def get_rds_instances_by_region(self, region):\n\t''' Makes an AWS API call to the list of RDS instances in a particular\n        region '''\n\n        try:\n            conn = rds.connect_to_region(region)\n            if conn:\n                instances = conn.get_all_dbinstances()\n                for instance in instances:\n                    self.add_rds_instance(instance, region)\n        except boto.exception.BotoServerError, e:\n            if not e.reason == \"Forbidden\":\n                print \"Looks like AWS RDS is down: \"\n                print e\n                sys.exit(1)\n\n    def get_instance(self, region, instance_id):\n        ''' Gets details about a specific instance '''\n        if self.eucalyptus:\n            conn = boto.connect_euca(self.eucalyptus_host)\n            conn.APIVersion = '2010-08-31'\n        else:\n            conn = ec2.connect_to_region(region)\n\n        # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n        if conn is None:\n            print(\"region name: %s likely not supported, or AWS is down.  
connection to region failed.\" % region)\n            sys.exit(1)\n\n        reservations = conn.get_all_instances([instance_id])\n        for reservation in reservations:\n            for instance in reservation.instances:\n                return instance\n\n\n    def add_instance(self, instance, region):\n        ''' Adds an instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want running instances\n        if instance.state != 'running':\n            return\n\n        # Select the best destination address\n        if instance.subnet_id:\n            dest = getattr(instance, self.vpc_destination_variable)\n        else:\n            dest =  getattr(instance, self.destination_variable)\n\n        if not dest:\n            # Skip instances we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.placement, dest)\n\n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)\n\n        # Inventory: Group by key pair\n        if instance.key_name:\n            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)\n        \n        # Inventory: Group by security group\n        try:\n            for group in instance.groups:\n                key = self.to_safe(\"security_group_\" + group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by tag keys\n        for 
k, v in instance.tags.iteritems():\n            key = self.to_safe(\"tag_\" + k + \"=\" + v)\n            self.push(self.inventory, key, dest)\n\n        # Inventory: Group by Route53 domain names if enabled\n        if self.route53_enabled:\n            route53_names = self.get_instance_route53_names(instance)\n            for name in route53_names:\n                self.push(self.inventory, name, dest)\n\n        # Global Tag: tag all EC2 instances\n        self.push(self.inventory, 'ec2', dest)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = self.get_host_info_dict_from_instance(instance)\n\n\n    def add_rds_instance(self, instance, region):\n        ''' Adds an RDS instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want available instances\n        if instance.status != 'available':\n            return\n\n        # Select the best destination address\n        #if instance.subnet_id:\n            #dest = getattr(instance, self.vpc_destination_variable)\n        #else:\n            #dest =  getattr(instance, self.destination_variable)\n        dest = instance.endpoint[0]\n\n        if not dest:\n            # Skip instances we cannot address (e.g. 
private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.availability_zone, dest)\n        \n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)\n        \n        # Inventory: Group by security group\n        try:\n            if instance.security_group:\n                key = self.to_safe(\"security_group_\" + instance.security_group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by engine\n        self.push(self.inventory, self.to_safe(\"rds_\" + instance.engine), dest)\n\n        # Inventory: Group by parameter group\n        self.push(self.inventory, self.to_safe(\"rds_parameter_group_\" + instance.parameter_group.name), dest)\n\n        # Global Tag: all RDS instances\n        self.push(self.inventory, 'rds', dest)\n\n\n    def get_route53_records(self):\n        ''' Get and store the map of resource records to domain names that\n        point to them. 
'''\n\n        r53_conn = route53.Route53Connection()\n        all_zones = r53_conn.get_zones()\n\n        route53_zones = [ zone for zone in all_zones if zone.name[:-1]\n                          not in self.route53_excluded_zones ]\n\n        self.route53_records = {}\n\n        for zone in route53_zones:\n            rrsets = r53_conn.get_all_rrsets(zone.id)\n\n            for record_set in rrsets:\n                record_name = record_set.name\n\n                if record_name.endswith('.'):\n                    record_name = record_name[:-1]\n\n                for resource in record_set.resource_records:\n                    self.route53_records.setdefault(resource, set())\n                    self.route53_records[resource].add(record_name)\n\n\n    def get_instance_route53_names(self, instance):\n        ''' Check if an instance is referenced in the records we have from\n        Route53. If it is, return the list of domain names pointing to said\n        instance. If nothing points to it, return an empty list. 
'''\n\n        instance_attributes = [ 'public_dns_name', 'private_dns_name',\n                                'ip_address', 'private_ip_address' ]\n\n        name_list = set()\n\n        for attrib in instance_attributes:\n            try:\n                value = getattr(instance, attrib)\n            except AttributeError:\n                continue\n\n            if value in self.route53_records:\n                name_list.update(self.route53_records[value])\n\n        return list(name_list)\n\n\n    def get_host_info_dict_from_instance(self, instance):\n        instance_vars = {}\n        for key in vars(instance):\n            value = getattr(instance, key)\n            key = self.to_safe('ec2_' + key)\n\n            # Handle complex types\n            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518\n            if key == 'ec2__state':\n                instance_vars['ec2_state'] = instance.state or ''\n                instance_vars['ec2_state_code'] = instance.state_code\n            elif key == 'ec2__previous_state':\n                instance_vars['ec2_previous_state'] = instance.previous_state or ''\n                instance_vars['ec2_previous_state_code'] = instance.previous_state_code\n            elif type(value) in [int, bool]:\n                instance_vars[key] = value\n            elif type(value) in [str, unicode]:\n                instance_vars[key] = value.strip()\n            elif type(value) == type(None):\n                instance_vars[key] = ''\n            elif key == 'ec2_region':\n                instance_vars[key] = value.name\n            elif key == 'ec2__placement':\n                instance_vars['ec2_placement'] = value.zone\n            elif key == 'ec2_tags':\n                for k, v in value.iteritems():\n                    key = self.to_safe('ec2_tag_' + k)\n                    instance_vars[key] = v\n            elif key == 'ec2_groups':\n        
        group_ids = []\n                group_names = []\n                for group in value:\n                    group_ids.append(group.id)\n                    group_names.append(group.name)\n                instance_vars[\"ec2_security_group_ids\"] = ','.join(group_ids)\n                instance_vars[\"ec2_security_group_names\"] = ','.join(group_names)\n            else:\n                pass\n                # TODO Product codes if someone finds them useful\n                #print key\n                #print type(value)\n                #print value\n\n        return instance_vars\n\n    def get_host_info(self):\n        ''' Get variables about a specific host '''\n\n        if len(self.index) == 0:\n            # Need to load index from cache\n            self.load_index_from_cache()\n\n        if not self.args.host in self.index:\n            # try updating the cache\n            self.do_api_calls_update_cache()\n            if not self.args.host in self.index:\n                # host migh not exist anymore\n                return self.json_format_dict({}, True)\n\n        (region, instance_id) = self.index[self.args.host]\n\n        instance = self.get_instance(region, instance_id)\n        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)\n\n    def push(self, my_dict, key, element):\n        ''' Pushed an element onto an array that may not have been defined in\n        the dict '''\n\n        if key in my_dict:\n            my_dict[key].append(element);\n        else:\n            my_dict[key] = [element]\n\n\n    def get_inventory_from_cache(self):\n        ''' Reads the inventory from the cache file and returns it as a JSON\n        object '''\n\n        cache = open(self.cache_path_cache, 'r')\n        json_inventory = cache.read()\n        return json_inventory\n\n\n    def load_index_from_cache(self):\n        ''' Reads the index from the cache file sets self.index '''\n\n        cache = open(self.cache_path_index, 
'r')\n        json_index = cache.read()\n        self.index = json.loads(json_index)\n\n\n    def write_to_cache(self, data, filename):\n        ''' Writes data in JSON format to a file '''\n\n        json_data = self.json_format_dict(data, True)\n        cache = open(filename, 'w')\n        cache.write(json_data)\n        cache.close()\n\n\n    def to_safe(self, word):\n        ''' Converts 'bad' characters in a string to underscores so they can be\n        used as Ansible groups '''\n\n        return re.sub(\"[^A-Za-z0-9\\-]\", \"_\", word)\n\n\n    def json_format_dict(self, data, pretty=False):\n        ''' Converts a dict to a JSON object and dumps it as a formatted\n        string '''\n\n        if pretty:\n            return json.dumps(data, sort_keys=True, indent=2)\n        else:\n            return json.dumps(data)\n\n\n# Run the script\nEc2Inventory()\n\n"
  },
  {
    "path": "ec2_check_name.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  vars:  \n    region: ap-southeast-2    \n    key: yan-key-pair-apsydney\n    type: t2.micro\n    image: ami-d9fe9be3\n    sg: sg_webserver_apsydney\n    name: test-01\n  tasks:  \n    - name: check if instance with name tag exists\n      command: aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }} \n               --query 'Reservations[0].Instances[0].InstanceId' --output text\n      register: instanceid\n\n    - name: create EC2 if not exists \n      ec2:  \n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"  \n        instance_type: \"{{ type }}\"  \n        image: \"{{ image }}\"\n        group: \"{{ sg }}\"\n        instance_tags:\n          Name: \"{{ name }}\"\n        wait: yes\n      when: instanceid.stdout==\"None\" \n"
  },
  {
    "path": "ec2_profile.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no  \n  connection: local   \n  vars:  \n    #your region  \n    region: ap-southeast-2    \n  tasks:  \n    - name: EC2 provisioning with instance profile  \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: yan-key-pair-apsydney  \n        instance_type: t2.micro  \n        image: ami-dc361ebf      \n        group: sg_webserver_apsydney\n        instance_profile_name: app1\n"
  },
  {
    "path": "ec2_start.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  vars:  \n    region: ap-southeast-2    \n    name: test-01\n  tasks:  \n    - name: get instance id\n      command: aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }} \n               --query 'Reservations[0].Instances[0].InstanceId' --output text\n      register: instanceid\n\n    - name: start instance \n      ec2:  \n        region: \"{{ region }}\"\n        instance_ids: \"{{ instanceid.stdout }}\"  \n        state: running\n        wait: yes\n      when: instanceid.stdout!=\"None\"\n"
  },
  {
    "path": "ec2_start_1.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    region: ap-southeast-2\n    name: bamboo-1\n  tasks:\n    - name: get instance id\n      instance_lookup:\n        region: \"{{ region }}\"\n        tags:\n          Name: \"{{ name }}\"\n      register: instanceid\n\n    - debug: var=instanceid.instance_ids\n\n    - name: start instance\n      ec2:\n        region: \"{{ region }}\"\n        instance_ids: \"{{ instanceid.instance_ids }}\"\n        state: running\n        wait: yes\n      when: instanceid is defined\n\n"
  },
  {
    "path": "ec2_stop.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no   \n  connection: local  \n  vars:  \n    region: ap-southeast-2    \n    name: test-01\n  tasks:  \n    - name: get instance id\n      command: aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }} \n               --query 'Reservations[0].Instances[0].InstanceId' --output text\n      register: instanceid\n\n    - name: stop instance \n      ec2:  \n        region: \"{{ region }}\"\n        instance_ids: \"{{ instanceid.stdout }}\"  \n        state: stopped\n        wait: yes\n      when: instanceid.stdout!=\"None\"\n"
  },
  {
    "path": "ec2_vol_1.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no  \n  connection: local   \n  vars:  \n    #your region  \n    region: ap-southeast-2    \n  tasks:  \n    - name: EC2 provisioning with general purpose EBS volume \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: yan-key-pair-apsydney  \n        instance_type: t2.micro  \n        image: ami-dc361ebf      \n        group: sg_webserver_apsydney\n        volumes:\n          - device_name: /dev/sda1\n            device_type: gp2\n            volume_size: 100\n            delete_on_termination: true\n"
  },
  {
    "path": "ec2_vol_2.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    #your region\n    region: ap-southeast-2\n  tasks:\n    - name: EC2 provisioning with provisioned IOPS EBS volume\n      ec2:\n        region: \"{{ region }}\"\n        key_name: yan-key-pair-apsydney\n        instance_type: t2.micro\n        image: ami-dc361ebf\n        group: sg_webserver_apsydney\n        volumes:\n          - device_name: /dev/sda1\n            device_type: io1\n            iops: 1000\n            volume_size: 500\n            delete_on_termination: true\n"
  },
  {
    "path": "ec2_vpc_db_create.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars_files:\n    - staging_vpc_info\n  vars:\n    region: ap-southeast-2\n    key: yan-key-pair-apsydney\n    instance_type: t2.micro\n    image: ami-dc361ebf\n    prefix: staging\n  tasks:\n    - name: database instance provisioning\n      ec2:\n        region: \"{{ region }}\"\n        key_name: \"{{ key }}\"\n        instance_type: \"{{ instance_type }}\"\n        image: \"{{ image }}\"\n        wait: yes\n        group: \"{{ prefix }}_sg_database\"\n        instance_tags:\n          Name: \"{{ prefix }}_database\"\n          class: database\n          environment: staging\n        vpc_subnet_id: \"{{ staging_subnet_private }}\"\n        assign_public_ip: no\n"
  },
  {
    "path": "ec2_vpc_jumpbox.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars_files:\n    - staging_vpc_info\n  vars:\n    region: ap-southeast-2\n    key: yan-key-pair-apsydney\n    instance_type: t2.micro\n    image: ami-dc361ebf\n    prefix: staging\n    vpc_subnet_id: \"{{ staging_subnet_public_0 }}\"\n  tasks:\n    - name: jump box instance provisioning\n      ec2:\n        region: \"{{ region }}\"\n        key_name: \"{{ key }}\"\n        instance_type: \"{{ instance_type }}\"\n        image: \"{{ image }}\"\n        wait: yes\n        group: \"{{ prefix }}_sg_jumpbox\"\n        instance_tags:\n          Name: \"{{ prefix }}_jumpbox\"\n          class: jumpbox\n          environment: \"{{ prefix }}\"\n        vpc_subnet_id: \"{{ vpc_subnet_id }}\"\n      register: ec2\n    - name: associate new EIP for the instance\n      ec2_eip:\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances\n\n"
  },
  {
    "path": "ec2_vpc_openvpn.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars_files:\n    - staging_vpc_info\n  vars:\n    region: ap-southeast-2\n    key: yan-key-pair-apsydney\n    instance_type: t2.micro\n    image: ami-a17f199b\n    prefix: staging\n    vpc_subnet_id: \"{{ staging_subnet_public_0 }}\"\n  tasks:\n    - name: openvpn server instance provisioning\n      ec2:\n        region: \"{{ region }}\"\n        key_name: \"{{ key }}\"\n        instance_type: \"{{ instance_type }}\"\n        image: \"{{ image }}\"\n        source_dest_check: no\n        wait: yes\n        group: \"{{ prefix }}_sg_openvpn\"\n        instance_tags:\n          Name: \"{{ prefix }}_openvpn\"\n          class: openvpn\n          environment: \"{{ prefix }}\"\n        vpc_subnet_id: \"{{ vpc_subnet_id }}\"\n      register: ec2\n    - name: associate new EIP for the instance\n      ec2_eip:\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances\n\n"
  },
  {
    "path": "ec2_vpc_web_create.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars_files:\n    - staging_vpc_info\n  vars:  \n    region: ap-southeast-2    \n    key: yan-key-pair-apsydney\n    instance_type: t2.micro\n    image: ami-dc361ebf\n    prefix: staging\n  tasks:  \n    - name: web instance provisioning\n      ec2:\n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"\n        instance_type: \"{{ instance_type }}\"  \n        image: \"{{ image }}\"\n        wait: yes    \n        group: \"{{ prefix }}_sg_web\"\n        instance_tags:\n          Name: \"{{ prefix }}_web\"\n          class: web\n          environment: staging\n        vpc_subnet_id: \"{{ staging_subnet_public }}\"\n      register: ec2  \n    - name: associate new EIP for the instance    \n      ec2_eip:\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances\n"
  },
  {
    "path": "group_vars/all",
    "content": "---\n# Variables here are applicable to all host groups\n\nntpserver: 0.au.pool.ntp.org\n\nansible_user: ec2-user \nansible_ssh_private_key_file: ~/.ssh/yan-key-pair-apsydney.pem\n\n"
  },
  {
    "path": "group_vars/tag_class_wordpress",
    "content": "ansible_ssh_user: ec2-user\nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n"
  },
  {
    "path": "host_vars/localhost",
    "content": "ansible_ssh_user: ec2-user\nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n"
  },
  {
    "path": "hosts",
    "content": "[local]\nlocalhost\n\n#[webservers]\n#54.79.109.14 ansible_ssh_user=ec2-user ansible_ssh_private_key_file=~/.ssh/yan-key-pair-apsydney.pem \n\n"
  },
  {
    "path": "iam_group.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  tasks:\n    - name: create IAM group admin \n      iam:\n        iam_type: group\n        name: admin \n        state: present\n\n"
  },
  {
    "path": "iam_policy.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  tasks:\n    - name: Assign a policy called Administrator to the admin group\n      iam_policy:\n        iam_type: group\n        iam_name: admin\n        policy_name: Administrator\n        state: present\n        policy_document: iam_policy_admin.json\n\n"
  },
  {
    "path": "iam_policy_admin.json",
    "content": "{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": \"*\",\n      \"Resource\": \"*\"\n    }\n  ]\n}"
  },
  {
    "path": "iam_policy_app1.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  tasks:\n    - name: Assign a policy called S3ReadOnly to the app1 role\n      iam_policy:\n        iam_type: role\n        iam_name: app1\n        policy_name: S3ReadOnly\n        state: present\n        policy_document: iam_policy_s3_read.json\n\n"
  },
  {
    "path": "iam_policy_s3_read.json",
    "content": "{\n  \"Version\": \"2012-10-17\",\n  \"Statement\": [\n    {\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"s3:Get*\",\n        \"s3:List*\"\n      ],\n      \"Resource\": \"*\"\n    }\n  ]\n}"
  },
  {
    "path": "iam_role.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  tasks:\n    - name: create IAM role app1 \n      iam:\n        iam_type: role\n        name: app1\n        state: present\n\n"
  },
  {
    "path": "iam_user.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  connection: local \n  tasks:\n    - name: create IAM user yan \n      iam:\n        iam_type: user\n        name: yan\n        state: present\n        groups: admin\n\n"
  },
  {
    "path": "install_ansible.yml",
    "content": "---\n- hosts: tag_class_jumpbox\n  become: yes\n  roles:\n    - ansible\n"
  },
  {
    "path": "keypair.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no  \n  vars:\n    region: ap-southeast-2\n    keyname: yan1\n  tasks:  \n    - name: create key pair  \n      ec2_key:\n        region: \"{{ region }}\"\n        name: \"{{ keyname }}\"\n      register: mykey\n\n    - name: write to file\n      copy: content=\"{{ mykey.key.private_key }}\" dest=\"~/.ssh/{{ keyname }}.pem\" mode=0600\n      when: mykey.changed\n   \n"
  },
  {
    "path": "launch_ec2.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2    \n  tasks:  \n    - name: EC2 basic provisioning  \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: yan-key-pair-apsydney  \n        instance_type: t1.micro  \n        image: ami-6bf99c51  \n        wait: yes    \n        group: sg_webserver_apsydney\n        instance_tags:  \n          group: webserver  \n        exact_count: 3  \n        count_tag:  \n          group: webserver\n"
  },
  {
    "path": "launch_ec2_eip.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    region: ap-southeast-2    \n  tasks:  \n    - name: launch instance\n      ec2:\n        region: \"{{ region }}\"  \n        key_name: yan-key-pair-apsydney  \n        instance_type: t1.micro  \n        image: ami-6bf99c51  \n        wait: yes    \n        group: sg_webserver_apsydney\n      register: ec2  \n    - name: associate new EIP for the instance    \n      ec2_eip:\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances\n"
  },
  {
    "path": "launch_ec2_iteration.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2    \n  tasks:  \n    - name: EC2 basic provisioning  \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: yan-key-pair-apsydney  \n        instance_type: t1.micro  \n        image: ami-6bf99c51      \n        group: sg_webserver_apsydney\n        instance_tags:  \n          Name: \"web{{ item }}\"  \n      with_sequence: count=5\n"
  },
  {
    "path": "launch_ec2_tags.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    region: ap-southeast-2  \n    instance_type: t1.micro     \n    image: ami-6bf99c51    \n    key: yan-key-pair-apsydney  \n  tasks:  \n    - name: launch ec2 with tags webserver staging \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"  \n        instance_type: \"{{ instance_type }}\"    \n        image: \"{{ image }}\"     \n        wait: yes    \n        group: sg_webserver_apsydney  \n        instance_tags:  \n          Name: staging-webserver-1  \n          class: webserver  \n          environment: staging  \n    - name: launch ec2 with tags webserver production \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"  \n        instance_type: \"{{ instance_type }}\"    \n        image: \"{{ image }}\"     \n        wait: yes    \n        group: sg_webserver_apsydney  \n        instance_tags:  \n          Name: production-webserver-1  \n          class: webserver  \n          environment: production\n    - name: launch ec2 with tags database staging \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"  \n        instance_type: \"{{ instance_type }}\"    \n        image: \"{{ image }}\"    \n        wait: yes    \n        group: sg_database_apsydney  \n        instance_tags:  \n          Name: staging-database-1  \n          class: database  \n          environment: staging  \n"
  },
  {
    "path": "library/instance_lookup",
    "content": "#!/usr/bin/python\n\n#author: Yan Kurniawan <yan.kurniawan@gmail.com>\n\nimport sys\n\nAWS_REGIONS = ['ap-northeast-1',\n               'ap-southeast-1',\n               'ap-southeast-2',\n               'eu-west-1',\n               'sa-east-1',\n               'us-east-1',\n               'us-west-1',\n               'us-west-2']\n\ntry:\n    from boto.ec2 import connect_to_region\nexcept ImportError:\n    print \"failed=True msg='boto required for this module'\"\n    sys.exit(1)\n\ndef main():\n\n    module=AnsibleModule(\n        argument_spec=dict(\n            region=dict(choices=AWS_REGIONS),\n            tags=dict(default=None, type='dict'),\n        )\n    )\n\n    params = module.params\n    tags = params['tags']\n    region = params['region']\n\n    if region:\n\ttry:\n            ec2 = connect_to_region(region)\n        except boto.exception.NoAuthHandlerFound, e:\n            module.fail_json(msg=str(e))\n    else:\n\tmodule.fail_json(msg=\"region must be specified\")\n\n    instance_ids = []\n    for tag, value in tags.iteritems():\n        for instance in ec2.get_only_instances(filters={\"tag:\" + tag: value}):\n            instance_ids.append(instance.id)\n\n    module.exit_json(changed=False, instance_ids=instance_ids)\n\nfrom ansible.module_utils.basic import *\n\nmain()\n"
  },
  {
    "path": "library/vpc_lookup",
    "content": "#!/usr/bin/python\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: vpc_lookup\nshort_description: returns a list of subnet Ids using tags as criteria\ndescription:\n     - Returns a list of subnet Ids for a given set of tags that identify one or more VPCs\nversion_added: \"1.5\"\noptions:\n  region:\n    description:\n      - The AWS region to use.  Must be specified if ec2_url\n        is not used.  If not specified then the value of the\n        EC2_REGION environment variable, if any, is used.\n    required: false\n    default: null\n    aliases: [ 'aws_region', 'ec2_region' ]\n  aws_secret_key:\n    description:\n      - AWS secret key. If not set then the value of\n        the AWS_SECRET_KEY environment variable is used.\n    required: false\n    default: null\n    aliases: [ 'ec2_secret_key', 'secret_key' ]\n  aws_access_key:\n    description:\n      - AWS access key. 
If not set then the value of the\n        AWS_ACCESS_KEY environment variable is used.\n    required: false\n    default: null\n    aliases: [ 'ec2_access_key', 'access_key' ]\n  tags:\n    desription:\n      - tags to lookup\n    required: false\n    default: null\n    type: dict\n    aliases: []\n\nrequirements: [ \"boto\" ]\nauthor: John Jarvis\n'''\n\nEXAMPLES = '''\n# Note: None of these examples set aws_access_key, aws_secret_key, or region.\n# It is assumed that their matching environment variables are set.\n\n# Return all instances that match the tag \"Name: foo\"\n- local_action:\n    module: vpc_lookup\n    tags:\n        Name: foo\n'''\n\nimport sys\n\nAWS_REGIONS = ['ap-northeast-1',\n               'ap-southeast-1',\n               'ap-southeast-2',\n               'eu-west-1',\n               'sa-east-1',\n               'us-east-1',\n               'us-west-1',\n               'us-west-2']\n\ntry:\n    from boto.vpc import VPCConnection\n    from boto.vpc import connect_to_region\nexcept ImportError:\n    print \"failed=True msg='boto required for this module'\"\n    sys.exit(1)\n\ndef main():\n\n    module=AnsibleModule(\n        argument_spec=dict(\n            region=dict(choices=AWS_REGIONS),\n            aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],\n                                no_log=True),\n            aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),\n            tags=dict(default=None, type='dict'),\n        )\n    )\n\n    tags = module.params.get('tags')\n    aws_secret_key = module.params.get('aws_secret_key')\n    aws_access_key = module.params.get('aws_access_key')\n    region = module.params.get('region')\n\n    # If we have a region specified, connect to its endpoint.\n    if region:\n        try:\n            vpc = connect_to_region(region, aws_access_key_id=aws_access_key,\n                                    aws_secret_access_key=aws_secret_key)\n        except boto.exception.NoAuthHandlerFound, 
e:\n            module.fail_json(msg=str(e))\n    else:\n        module.fail_json(msg=\"region must be specified\")\n    subnet_ids = []\n    for tag, value in tags.iteritems():\n       for subnet in vpc.get_all_subnets(filters={\"tag:\" + tag: value}):\n         subnet_ids.append(subnet.id)\n    vpc_ids = []\n    for tag, value in tags.iteritems():\n       for vpc in vpc.get_all_vpcs(filters={\"tag:\" + tag: value}):\n         vpc_ids.append(vpc.id)\n\n    module.exit_json(changed=False, vpc_ids=vpc_ids, subnet_ids=subnet_ids)\n\n\n# this is magic, see lib/ansible/module_common.py\n#<<INCLUDE_ANSIBLE_MODULE_COMMON>>\n\nmain()\n"
  },
  {
    "path": "mysql_pg_create.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: create mysql parameter group\n      rds_param_group:\n        name: mysqlpg1\n        state: present\n        region: \"{{ region }}\"\n        description: MySQL Parameter Group 1\n        engine: mysql5.6\n        params: \n          innodb_lock_wait_timeout: 3600\n          max_allowed_packet: 512M\n          net_write_timeout: 300\n\n\n"
  },
  {
    "path": "mysql_pg_delete.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: delete mysql parameter group\n      rds_param_group:\n        name: mysqlpg1\n        state: absent\n        region: \"{{ region }}\"\n\n"
  },
  {
    "path": "mysql_rds_create.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    region: ap-southeast-2\n    size: 100\n    instance_type: db.m1.small\n    db_engine: MySQL\n    engine_version: 5.6.22\n    subnet: dbsg2\n    parameter_group: dbpg1\n    # staging_sg_database security group ID\n    security_groups: sg-xxxxxxxx\n    iops: 1000\n    db_name: mydb\n    username: dbadmin\n    password: mypassword    \n  tasks:\n    - name: create mysql RDS instance\n      rds:\n        command: create\n        instance_name: staging-mysql-1\n        region: \"{{ region }}\"\n        size: \"{{ size }}\"\n        instance_type: \"{{ instance_type }}\"\n        db_engine: \"{{ db_engine }}\"\n        engine_version: \"{{ engine_version }}\"\n        subnet: \"{{ subnet }}\"\n        parameter_group: \"{{ parameter_group }}\"\n        multi_zone: yes\n        db_name: \"{{ db_name }}\"\n        username: \"{{ username }}\"\n        password: \"{{ password }}\"\n        vpc_security_groups: \"{{ security_groups }}\"\n        iops: \"{{ iops }}\"\n"
  },
  {
    "path": "mysql_rds_delete.yml",
    "content": "- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: delete mysql RDS instance\n      rds:\n        command: delete\n        region: \"{{ region }}\"\n        instance_name: staging-mysql-1 \n"
  },
  {
    "path": "nat_launch.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars_files:\n    - staging_vpc_info\n  vars:   \n    region: ap-southeast-2    \n    key: yan-key-pair-apsydney\n    instance_type: t1.micro  \n    image: ami-3bae3201\n    prefix: staging\n  tasks:  \n    - name: NAT instance provisioning  \n      ec2:\n        region: \"{{ region }}\"  \n        key_name: \"{{ key }}\"  \n        instance_type: \"{{ instance_type }}\"    \n        image: \"{{ image }}\"    \n        wait: yes    \n        group: \"{{ prefix }}_sg_nat\"  \n        instance_tags:  \n          Name: \"{{ prefix }}_nat\"  \n          class: nat \n          environment: staging  \n        id: nat_launch_02\n        vpc_subnet_id: \"{{ staging_subnet_public }}\"\n        source_dest_check: no\n        wait: yes\n      register: ec2\n    - name: associate new EIP for the instance  \n      tags: eip         \n      ec2_eip:\n        region: \"{{ region }}\"  \n        instance_id: \"{{ item.id }}\"  \n      with_items: ec2.instances   \n      when: item.id is defined   \n"
  },
  {
    "path": "roles/ansible/tasks/main.yml",
    "content": "---\n- name: upgrade all packages\n  yum: name=* state=latest\n- name: install the 'Development tools' package group\n  yum: name=\"@Development tools\" state=present\n- name: install required packages\n  yum: name={{ item }} state=present\n  with_items:\n    - epel-release.noarch\n    - python-pip\n    - python-devel\n- name: install setuptools\n  pip: name=setuptools extra_args='--upgrade'\n- name: install ansible\n  pip: name=ansible\n"
  },
  {
    "path": "roles/apache/tasks/main.yml",
    "content": "---  \n- name: install apache  \n  yum: name=httpd state=present  \n  tags: apache  \n\n- name: start the httpd service  \n  service: name=httpd state=started enabled=true  \n  tags: apache  \n"
  },
  {
    "path": "roles/common/handlers/main.yml",
    "content": "---  \n- name: restart ntp  \n  service: name=ntpd state=restarted  \n"
  },
  {
    "path": "roles/common/tasks/main.yml",
    "content": "---  \n- name: install ntp  \n  yum: name=ntp state=present  \n  tags: ntp  \n\n- name: configure ntp file  \n  template: src=ntp.conf.j2 dest=/etc/ntp.conf  \n  tags: ntp  \n  notify: restart ntp  \n\n- name: start the ntp service  \n  service: name=ntpd state=started enabled=true  \n  tags: ntp  \n\n"
  },
  {
    "path": "roles/common/templates/ntp.conf.j2",
    "content": "driftfile /var/lib/ntp/drift\n\nrestrict 127.0.0.1 \nrestrict -6 ::1\n\nserver {{ ntpserver }}\n\nincludefile /etc/ntp/crypto/pw\n\nkeys /etc/ntp/keys\n"
  },
  {
    "path": "roles/mysql/tasks/main.yml",
    "content": "---  \n- name: install mysql server \n  yum: name=mysql-server state=present  \n  tags: mysql  \n\n- name: start the mysql service  \n  service: name=mysqld state=started enabled=true  \n  tags: mysql  \n"
  },
  {
    "path": "route53.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no   \n  tasks:\n    - name: create record \n      route53:\n        command: create\n        zone: yankurniawan.com.\n        record: yankurniawan.com.\n        type: A\n        value: 54.79.34.239\n\n"
  },
  {
    "path": "s3_create_bucket.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    bucketname: yan001\n  tasks:\n    - name: create an S3 bucket\n      s3:\n        bucket: \"{{ bucketname }}\"\n        mode: create\n"
  },
  {
    "path": "s3_create_dir.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    bucketname: yan001\n  tasks:\n    - name: create virtual directory\n      s3:\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/\n        mode: create\n"
  },
  {
    "path": "s3_delete_bucket.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    bucketname: yan001\n  tasks:\n    - name: delete an S3 bucket and all of its contents\n      s3:\n        bucket: \"{{ bucketname }}\"\n        mode: delete\n"
  },
  {
    "path": "s3_download_file.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  sudo: yes\n  vars:\n    bucketname: yan001\n  tasks:\n    - name: download file\n      s3:\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/test.txt\n        dest: test.txt\n        mode: get\n"
  },
  {
    "path": "s3_share_file.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    bucketname: yan001\n  tasks:\n    - name: share file\n      s3:\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/test.txt\n        expiration: 3600\n        mode: geturl\n"
  },
  {
    "path": "s3_upload_file.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars:\n    bucketname: yan001\n  tasks:\n    - name: upload file\n      s3:\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/test.txt\n        src: test.txt\n        overwrite: no\n        mode: put\n"
  },
  {
    "path": "sg_database.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #your ip address\n    allowed_ip: 123.243.16.53/32\n  tasks:  \n    - name: create database security group  \n      ec2_group:\n        region: \"{{ region }}\"\n        name: sg_database_apsydney    \n        description: security group for apsydney database host\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n          # allow mysql access from webserver group  \n          - proto: tcp  \n            from_port: 3306  \n            to_port: 3306    \n            group_name: sg_webserver_apsydney  \n        rules_egress:\n          - proto: all\n            cidr_ip: 0.0.0.0/0\n"
  },
  {
    "path": "sg_delete.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars_files:\n    - staging_vpc_info\n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #prefix for naming\n    prefix: staging\n    vpc_id: \"{{ staging_vpc }}\"\n  tasks:  \n    - name: delete {{ prefix }}_sg_web\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_web\"    \n        description: security group for webservers\n        state: absent\n    - name: delete {{ prefix }}_sg_database\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_database\"    \n        description: security group for databases\n        state: absent\n    - name: delete {{ prefix }}_sg_nat\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_nat\"    \n        description: security group for nat\n        state: absent\n"
  },
  {
    "path": "sg_empty.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars_files:\n    - staging_vpc_info\n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #prefix for naming\n    prefix: staging\n    vpc_id: \"{{ staging_vpc }}\"\n  tasks:  \n    - name: create empty security group for webservers \n      local_action:  \n        module: ec2_group    \n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_web\"    \n        description: security group for webservers\n    - name: create empty security group for databases \n      local_action:  \n        module: ec2_group    \n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_database\"    \n        description: security group for databases\n    - name: create empty security group for nat\n      local_action:  \n        module: ec2_group    \n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_nat\"    \n        description: security group for nat\n"
  },
  {
    "path": "sg_jumpbox.yml",
    "content": "---  \n- hosts: localhost    \n  gather_facts: no   \n  connection: local  \n  vars_files:\n    - staging_vpc_info\n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #your ip address\n    allowed_ip: 123.243.16.53/32\n    #prefix for naming\n    prefix: staging\n    vpc_id: \"{{ staging_vpc }}\"\n  tasks:  \n    - name: create security group for jump box instance\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        #your security group name  \n        name: \"{{ prefix }}_sg_jumpbox\"    \n        description: security group for jump box\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n        rules_egress:  \n          - proto: all\n            cidr_ip: 0.0.0.0/0  \n"
  },
  {
    "path": "sg_modify.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars_files:\n    - staging_vpc_info\n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #your ip address\n    allowed_ip: 54.79.34.239/32\n    #prefix for naming\n    prefix: staging\n    vpc_id: \"{{ staging_vpc }}\"\n    private_subnet: 10.0.1.0/24\n  tasks:  \n    - name: modify sg_web rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        #your security group name  \n        name: \"{{ prefix }}_sg_web\"    \n        description: security group for webservers\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n          # allow http access from anywhere  \n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: 0.0.0.0/0    \n          # allow https access from anywhere  \n          - proto: tcp  \n            from_port: 443    \n            to_port: 443    \n            cidr_ip: 0.0.0.0/0\n        rules_egress:  \n          - proto: tcp\n            from_port: 3306\n            to_port: 3306  \n            group_name: \"{{ prefix }}_sg_database\"   \n          # allow http outbound\n          - proto: tcp\n            from_port: 80\n            to_port: 80\n            cidr_ip: 0.0.0.0/0\n          # allow https outbound\n          - proto: tcp\n            from_port: 443\n            to_port: 443\n            cidr_ip: 0.0.0.0/0\n    - name: modify sg_database rules \n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_database\"    \n        description: security group for databases\n        rules:\n          - proto: tcp\n            from_port: 3306\n            to_port: 3306  \n            group_name: \"{{ prefix }}_sg_web\"\n        rules_egress:  
\n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: 0.0.0.0/0\n          - proto: tcp  \n            from_port: 443    \n            to_port: 443    \n            cidr_ip: 0.0.0.0/0\n    - name: modify sg_nat rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        name: \"{{ prefix }}_sg_nat\"    \n        description: security group for nat\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n          # allow http access from private subnet  \n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: \"{{ private_subnet }}\"    \n          # allow https access from private subnet  \n          - proto: tcp  \n            from_port: 443    \n            to_port: 443    \n            cidr_ip: \"{{ private_subnet }}\"\n        rules_egress:  \n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: 0.0.0.0/0\n          - proto: tcp  \n            from_port: 443    \n            to_port: 443    \n            cidr_ip: 0.0.0.0/0\n"
  },
  {
    "path": "sg_openvpn.yml",
    "content": "---\n- hosts: localhost\n  gather_facts: no\n  connection: local \n  vars_files:\n    - staging_vpc_info\n  vars:\n    #your region\n    region: ap-southeast-2\n    #your ip address\n    allowed_ip: 123.243.16.53/32\n    #prefix for naming\n    prefix: staging\n    vpc_id: \"{{ staging_vpc }}\"\n  tasks:\n    - name: create security group for openvpn instance\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpc_id }}\"\n        #your security group name\n        name: \"{{ prefix }}_sg_openvpn\"\n        description: security group for openvpn\n        rules:\n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"\n          - proto: tcp\n            from_port: 443\n            to_port: 443\n            cidr_ip: 0.0.0.0/0\n          - proto: tcp\n            from_port: 943\n            to_port: 943\n            cidr_ip: 0.0.0.0/0\n          - proto: udp\n            from_port: 1194\n            to_port: 1194\n            cidr_ip: 0.0.0.0/0\n        rules_egress:\n          - proto: all\n            cidr_ip: 0.0.0.0/0\n\n"
  },
  {
    "path": "sg_webserver.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2  \n    #your ip address\n    allowed_ip: 123.243.16.53/32\n  tasks:  \n    - name: create security group  \n      ec2_group:\n        region: \"{{ region }}\"\n        name: sg_webserver_apsydney    \n        description: security group for apsydney webserver host\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n          # allow http access from anywhere  \n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: 0.0.0.0/0  \n        rules_egress:\n          - proto: all\n            cidr_ip: 0.0.0.0/0\n"
  },
  {
    "path": "site.yml",
    "content": "---\n# install, configure, and start ntp on all ec2 instances\n- hosts: ec2  \n  sudo: yes  \n  roles:  \n    - common  \n\n# install and start mysql server on instance with tags class=database\n- hosts: tag_class_database  \n  sudo: yes  \n  roles:  \n    - mysql  \n     \n# install and start apache on instance with tags class=webserver and environment=staging\n- hosts: tag_class_webserver:&tag_environment_staging  \n  sudo: yes  \n  roles:\n    - apache\n"
  },
  {
    "path": "terminate_ec2.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2    \n  tasks:  \n    - name: terminate instances\n      ec2:\n        region: \"{{ region }}\"  \n        wait: yes  \n        instance_ids: ['i-9e1e18a1', 'i-f61b1dc9', 'i-36272109', 'i-0b272134', 'i-0a272135']  \n        state: absent  \n"
  },
  {
    "path": "test.txt",
    "content": "Hello World. This is a test.\n"
  },
  {
    "path": "test1.txt",
    "content": "Hello World. This is a test.\n"
  },
  {
    "path": "vpc_create.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    region: ap-southeast-2\n    # prefix for naming  \n    prefix: staging\n    # availability zone\n    az: ap-southeast-2a\n  tasks:  \n    - name: create vpc   \n      local_action:  \n        module: ec2_vpc     \n        region: \"{{ region }}\"\n        cidr_block: 10.0.0.0/16\n        resource_tags: '{\"Name\":\"{{ prefix }}_vpc\"}'\n        subnets:\n          - cidr: 10.0.0.0/24\n            az: \"{{ az }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_public\"}'\n          - cidr: 10.0.1.0/24\n            az: \"{{ az }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_private\"}'\n        internet_gateway: yes\n        route_tables:\n          - subnets:\n              - 10.0.0.0/24\n            routes:\n              - dest: 0.0.0.0/0\n                gw: igw\n      register: vpc\n    - name: write vpc id to {{ prefix }}_vpc_info file\n      sudo: yes\n      local_action: shell echo \"{{ prefix }}\"_vpc\":\" \"{{ vpc.vpc_id }}\" \n                      > \"{{ prefix }}\"_vpc_info\n    - name: write subnets id to {{ prefix }}_vpc_info file\n      sudo: yes\n      local_action: shell echo \"{{ item.resource_tags.Name }}\"\":\" \"{{ item.id }}\" \n                      >> \"{{ prefix }}\"_vpc_info\n      with_items: vpc.subnets\n"
  },
  {
    "path": "vpc_create_multi_az.yml",
    "content": "---  \n- hosts: localhost  \n  gather_facts: no    \n  vars:  \n    region: ap-southeast-2\n    # prefix for naming  \n    prefix: staging\n    # availability zones\n    az0: ap-southeast-2a\n    az1: ap-southeast-2b\n  tasks:  \n    - name: create vpc with multi-az subnets   \n      local_action:  \n        module: ec2_vpc     \n        region: \"{{ region }}\"\n        cidr_block: 10.0.0.0/16\n        resource_tags: '{\"Name\":\"{{ prefix }}_vpc\"}'\n        subnets:\n          - cidr: 10.0.0.0/24\n            az: \"{{ az0 }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_public_0\"}'\n          - cidr: 10.0.1.0/24\n            az: \"{{ az0 }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_private_0\"}'\n          - cidr: 10.0.2.0/24\n            az: \"{{ az1 }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_public_1\"}'\n          - cidr: 10.0.3.0/24\n            az: \"{{ az1 }}\"\n            resource_tags: '{\"Name\":\"{{ prefix }}_subnet_private_1\"}'\n        internet_gateway: yes\n        route_tables:\n          - subnets:\n              - 10.0.0.0/24\n              - 10.0.2.0/24\n            routes:\n              - dest: 0.0.0.0/0\n                gw: igw\n      register: vpc\n    - name: write vpc id to {{ prefix }}_vpc_info file\n      sudo: yes\n      local_action: shell echo \"{{ prefix }}\"_vpc\":\" \"{{ vpc.vpc_id }}\" \n                      > \"{{ prefix }}\"_vpc_info\n    - name: write subnets id to {{ prefix }}_vpc_info file\n      sudo: yes\n      local_action: shell echo \"{{ item.resource_tags.Name }}\"\":\" \"{{ item.id }}\" \n                      >> \"{{ prefix }}\"_vpc_info\n      with_items: vpc.subnets\n"
  },
  {
    "path": "vpc_delete.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: get vpc id\n      local_action:\n        module: vpc_lookup\n        region: \"{{ region }}\"\n        tags:\n          Name: test-vpc\n      register: vpc\n\n    - name: delete vpc\n      local_action:\n        module: ec2_vpc\n        region: \"{{ region }}\"\n        state: absent\n        vpc_id: \"{{ item }}\"\n        wait: yes\n      with_items: vpc.vpc_ids\n"
  },
  {
    "path": "vpc_delete1.yml",
    "content": "- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    name: test-vpc\n  tasks:\n    - name: get vpc id\n      command: \"aws ec2 describe-vpcs --filters Name=tag:Name,Values={{ name }}\n               --query 'Vpcs[0].VpcId' --output text\"\n      register: vpcid\n\n    - debug: var=vpcid.stdout\n\n    - name: delete vpc\n      local_action:\n        module: ec2_vpc\n        region: \"{{ region }}\"\n        state: absent\n        vpc_id: \"{{ vpcid.stdout }}\"\n        wait: yes\n"
  },
  {
    "path": "vpc_info.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n  tasks:\n    - name: get subnet\n      local_action:\n        module: vpc_lookup\n        region: \"{{ region }}\"\n        tags:\n          Name: staging_vpc\n      register: vpc_subnet\n\n"
  },
  {
    "path": "wordpress/backup.yml",
    "content": "---\n- name: backup database and store in S3\n  hosts: tag_class_wordpress\n  gather_facts: no\n  vars:\n    bucketname: yan_wordpress\n  tasks:\n    - name: get date\n      shell: date +%Y%m%d\n      register: date\n    - name: create mysql backup\n      shell: mysqldump -u {{ wp_db_user }} -p{{ wp_db_password }} {{ wp_db_name }} > /tmp/{{ date.stdout }}_backup.sql\n    - name: archive backup\n      shell: tar -czf {{ date.stdout }}_backup.tar.gz {{ date.stdout }}_backup.sql && rm -f {{ date.stdout }}_backup.sql chdir=/tmp\n    - name: create s3 bucket\n      local_action:\n        module: s3\n        bucket: \"{{ bucketname }}\"\n        mode: create\n    - name: upload backup archive\n      local_action:\n        module: s3\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/{{ date.stdout }}_backup.tar.gz\n        src: /tmp/{{ date.stdout }}_backup.tar.gz\n        mode: put\n     \n\n\n"
  },
  {
    "path": "wordpress/delete_backup.yml",
    "content": "---\n- name: delete object from S3 bucket\n  hosts: localhost\n  gather_facts: no\n  vars:\n    bucketname: yan_wordpress\n    date: 20141031\n  tasks:\n    - name: delete backup file\n      command: s3cmd del s3://{{ bucketname }}/backup/database/{{ date }}_backup.tar.gz\n      ignore_errors: yes\n\n"
  },
  {
    "path": "wordpress/ec2.ini",
    "content": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these lines\n# and edit edit eucalyptus_host to be the host name of your cloud controller\n#eucalyptus = True\n#eucalyptus_host = clc.cloud.domain.org\n\n# AWS regions to make calls to. Set this to 'all' to make request to all regions\n# in AWS and merge the results together. Alternatively, set this to a comma\n# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'\n#regions = all\nregions = ap-southeast-2\nregions_exclude = us-gov-west-1,cn-north-1\n\n# When generating inventory, Ansible needs to know how to address a server.\n# Each EC2 instance has a lot of variables associated with it. Here is the list:\n#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance\n# Below are 2 variables that are used as the address of a server:\n#   - destination_variable\n#   - vpc_destination_variable\n\n# This is the normal destination variable to use. If you are running Ansible\n# from outside EC2, then 'public_dns_name' makes the most sense. If you are\n# running Ansible from within EC2, then perhaps you want to use the internal\n# address, and should set this to 'private_dns_name'.\ndestination_variable = public_dns_name\n\n# For server inside a VPC, using DNS names may not make sense. When an instance\n# has 'subnet_id' set, this variable is used. If the subnet is public, setting\n# this to 'ip_address' will return the public IP address. 
For instances in a\n# private subnet, this should be set to 'private_ip_address', and Ansible must\n# be run from with EC2.\nvpc_destination_variable = ip_address\n\n# To tag instances on EC2 with the resource records that point to them from\n# Route53, uncomment and set 'route53' to True.\nroute53 = False\n\n# Additionally, you can specify the list of zones to exclude looking up in\n# 'route53_excluded_zones' as a comma-separated list.\n# route53_excluded_zones = samplezone1.com, samplezone2.com\n\n# API calls to EC2 are slow. For this reason, we cache the results of an API\n# call. Set this to the path you want cache files to be written to. Two files\n# will be written to this directory:\n#   - ansible-ec2.cache\n#   - ansible-ec2.index\ncache_path = ~/.ansible/tmp\n\n# The number of seconds a cache file is considered valid. After this many\n# seconds, a new API call will be made, and the cache file will be updated.\n# To disable the cache, set this value to 0\ncache_max_age = 300\n"
  },
  {
    "path": "wordpress/ec2.py",
    "content": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ansible can understand by making API request to\nAWS EC2 using the Boto library.\n\nNOTE: This script assumes Ansible is being executed where the environment\nvariables needed for Boto have already been set:\n    export AWS_ACCESS_KEY_ID='AK123'\n    export AWS_SECRET_ACCESS_KEY='abc123'\n\nThis script also assumes there is an ec2.ini file alongside it.  To specify a\ndifferent path to ec2.ini, define the EC2_INI_PATH environment variable:\n\n    export EC2_INI_PATH=/path/to/my_ec2.ini\n\nIf you're using eucalyptus you need to set the above variables and\nyou need to define:\n\n    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus\n\nFor more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html\n\nWhen run against a specific host, this script returns the following variables:\n - ec2_ami_launch_index\n - ec2_architecture\n - ec2_association\n - ec2_attachTime\n - ec2_attachment\n - ec2_attachmentId\n - ec2_client_token\n - ec2_deleteOnTermination\n - ec2_description\n - ec2_deviceIndex\n - ec2_dns_name\n - ec2_eventsSet\n - ec2_group_name\n - ec2_hypervisor\n - ec2_id\n - ec2_image_id\n - ec2_instanceState\n - ec2_instance_type\n - ec2_ipOwnerId\n - ec2_ip_address\n - ec2_item\n - ec2_kernel\n - ec2_key_name\n - ec2_launch_time\n - ec2_monitored\n - ec2_monitoring\n - ec2_networkInterfaceId\n - ec2_ownerId\n - ec2_persistent\n - ec2_placement\n - ec2_platform\n - ec2_previous_state\n - ec2_private_dns_name\n - ec2_private_ip_address\n - ec2_publicIp\n - ec2_public_dns_name\n - ec2_ramdisk\n - ec2_reason\n - ec2_region\n - ec2_requester_id\n - ec2_root_device_name\n - ec2_root_device_type\n - ec2_security_group_ids\n - ec2_security_group_names\n - ec2_shutdown_state\n - ec2_sourceDestCheck\n - ec2_spot_instance_request_id\n - ec2_state\n - ec2_state_code\n - ec2_state_reason\n - ec2_status\n - 
ec2_subnet_id\n - ec2_tenancy\n - ec2_virtualization_type\n - ec2_vpc_id\n\nThese variables are pulled out of a boto.ec2.instance object. There is a lack of\nconsistency with variable spellings (camelCase and underscores) since this\njust loops through all variables the object exposes. It is preferred to use the\nones with underscores when multiple exist.\n\nIn addition, if an instance has AWS Tags associated with it, each tag is a new\nvariable named:\n - ec2_tag_[Key] = [Value]\n\nSecurity groups are comma-separated in 'ec2_security_group_ids' and\n'ec2_security_group_names'.\n'''\n\n# (c) 2012, Peter Sankauskas\n#\n# This file is part of Ansible,\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  
If not, see <http://www.gnu.org/licenses/>.\n\n######################################################################\n\nimport sys\nimport os\nimport argparse\nimport re\nfrom time import time\nimport boto\nfrom boto import ec2\nfrom boto import rds\nfrom boto import route53\nimport ConfigParser\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n\nclass Ec2Inventory(object):\n    def _empty_inventory(self):\n        return {\"_meta\" : {\"hostvars\" : {}}}\n\n    def __init__(self):\n        ''' Main execution path '''\n\n        # Inventory grouped by instance IDs, tags, security groups, regions,\n        # and availability zones\n        self.inventory = self._empty_inventory()\n\n        # Index of hostname (address) to instance ID\n        self.index = {}\n\n        # Read settings and parse CLI arguments\n        self.read_settings()\n        self.parse_cli_args()\n\n        # Cache\n        if self.args.refresh_cache:\n            self.do_api_calls_update_cache()\n        elif not self.is_cache_valid():\n            self.do_api_calls_update_cache()\n\n        # Data to print\n        if self.args.host:\n            data_to_print = self.get_host_info()\n\n        elif self.args.list:\n            # Display list of instances for inventory\n            if self.inventory == self._empty_inventory():\n                data_to_print = self.get_inventory_from_cache()\n            else:\n                data_to_print = self.json_format_dict(self.inventory, True)\n\n        print data_to_print\n\n\n    def is_cache_valid(self):\n        ''' Determines if the cache files have expired, or if it is still valid '''\n\n        if os.path.isfile(self.cache_path_cache):\n            mod_time = os.path.getmtime(self.cache_path_cache)\n            current_time = time()\n            if (mod_time + self.cache_max_age) > current_time:\n                if os.path.isfile(self.cache_path_index):\n                    return True\n\n        return 
False\n\n\n    def read_settings(self):\n        ''' Reads the settings from the ec2.ini file '''\n\n        config = ConfigParser.SafeConfigParser()\n        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')\n        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)\n        config.read(ec2_ini_path)\n\n        # is eucalyptus?\n        self.eucalyptus_host = None\n        self.eucalyptus = False\n        if config.has_option('ec2', 'eucalyptus'):\n            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')\n        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):\n            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')\n\n        # Regions\n        self.regions = []\n        configRegions = config.get('ec2', 'regions')\n        configRegions_exclude = config.get('ec2', 'regions_exclude')\n        if (configRegions == 'all'):\n            if self.eucalyptus_host:\n                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)\n            else:\n                for regionInfo in ec2.regions():\n                    if regionInfo.name not in configRegions_exclude:\n                        self.regions.append(regionInfo.name)\n        else:\n            self.regions = configRegions.split(\",\")\n\n        # Destination addresses\n        self.destination_variable = config.get('ec2', 'destination_variable')\n        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')\n\n        # Route53\n        self.route53_enabled = config.getboolean('ec2', 'route53')\n        self.route53_excluded_zones = []\n        if config.has_option('ec2', 'route53_excluded_zones'):\n            self.route53_excluded_zones.extend(\n                config.get('ec2', 'route53_excluded_zones', '').split(','))\n\n        # Cache related\n        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))\n        if not 
os.path.exists(cache_dir):\n            os.makedirs(cache_dir)\n\n        self.cache_path_cache = cache_dir + \"/ansible-ec2.cache\"\n        self.cache_path_index = cache_dir + \"/ansible-ec2.index\"\n        self.cache_max_age = config.getint('ec2', 'cache_max_age')\n        \n\n\n    def parse_cli_args(self):\n        ''' Command line argument processing '''\n\n        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')\n        parser.add_argument('--list', action='store_true', default=True,\n                           help='List instances (default: True)')\n        parser.add_argument('--host', action='store',\n                           help='Get all the variables about a specific instance')\n        parser.add_argument('--refresh-cache', action='store_true', default=False,\n                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')\n        self.args = parser.parse_args()\n\n\n    def do_api_calls_update_cache(self):\n        ''' Do API calls to each region, and save data in cache files '''\n\n        if self.route53_enabled:\n            self.get_route53_records()\n\n        for region in self.regions:\n            self.get_instances_by_region(region)\n            self.get_rds_instances_by_region(region)\n\n        self.write_to_cache(self.inventory, self.cache_path_cache)\n        self.write_to_cache(self.index, self.cache_path_index)\n\n\n    def get_instances_by_region(self, region):\n        ''' Makes an AWS EC2 API call to the list of instances in a particular\n        region '''\n\n        try:\n            if self.eucalyptus:\n                conn = boto.connect_euca(host=self.eucalyptus_host)\n                conn.APIVersion = '2010-08-31'\n            else:\n                conn = ec2.connect_to_region(region)\n\n            # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n            
if conn is None:\n                print(\"region name: %s likely not supported, or AWS is down.  connection to region failed.\" % region)\n                sys.exit(1)\n \n            reservations = conn.get_all_instances()\n            for reservation in reservations:\n                for instance in reservation.instances:\n                    self.add_instance(instance, region)\n        \n        except boto.exception.BotoServerError, e:\n            if  not self.eucalyptus:\n                print \"Looks like AWS is down again:\"\n            print e\n            sys.exit(1)\n\n    def get_rds_instances_by_region(self, region):\n\t''' Makes an AWS API call to the list of RDS instances in a particular\n        region '''\n\n        try:\n            conn = rds.connect_to_region(region)\n            if conn:\n                instances = conn.get_all_dbinstances()\n                for instance in instances:\n                    self.add_rds_instance(instance, region)\n        except boto.exception.BotoServerError, e:\n            if not e.reason == \"Forbidden\":\n                print \"Looks like AWS RDS is down: \"\n                print e\n                sys.exit(1)\n\n    def get_instance(self, region, instance_id):\n        ''' Gets details about a specific instance '''\n        if self.eucalyptus:\n            conn = boto.connect_euca(self.eucalyptus_host)\n            conn.APIVersion = '2010-08-31'\n        else:\n            conn = ec2.connect_to_region(region)\n\n        # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n        if conn is None:\n            print(\"region name: %s likely not supported, or AWS is down.  
connection to region failed.\" % region)\n            sys.exit(1)\n\n        reservations = conn.get_all_instances([instance_id])\n        for reservation in reservations:\n            for instance in reservation.instances:\n                return instance\n\n\n    def add_instance(self, instance, region):\n        ''' Adds an instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want running instances\n        if instance.state != 'running':\n            return\n\n        # Select the best destination address\n        if instance.subnet_id:\n            dest = getattr(instance, self.vpc_destination_variable)\n        else:\n            dest =  getattr(instance, self.destination_variable)\n\n        if not dest:\n            # Skip instances we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.placement, dest)\n\n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)\n\n        # Inventory: Group by key pair\n        if instance.key_name:\n            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)\n        \n        # Inventory: Group by security group\n        try:\n            for group in instance.groups:\n                key = self.to_safe(\"security_group_\" + group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by tag keys\n        for 
k, v in instance.tags.iteritems():\n            key = self.to_safe(\"tag_\" + k + \"=\" + v)\n            self.push(self.inventory, key, dest)\n\n        # Inventory: Group by Route53 domain names if enabled\n        if self.route53_enabled:\n            route53_names = self.get_instance_route53_names(instance)\n            for name in route53_names:\n                self.push(self.inventory, name, dest)\n\n        # Global Tag: tag all EC2 instances\n        self.push(self.inventory, 'ec2', dest)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = self.get_host_info_dict_from_instance(instance)\n\n\n    def add_rds_instance(self, instance, region):\n        ''' Adds an RDS instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want available instances\n        if instance.status != 'available':\n            return\n\n        # Select the best destination address\n        #if instance.subnet_id:\n            #dest = getattr(instance, self.vpc_destination_variable)\n        #else:\n            #dest =  getattr(instance, self.destination_variable)\n        dest = instance.endpoint[0]\n\n        if not dest:\n            # Skip instances we cannot address (e.g. 
private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.availability_zone, dest)\n        \n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)\n        \n        # Inventory: Group by security group\n        try:\n            if instance.security_group:\n                key = self.to_safe(\"security_group_\" + instance.security_group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by engine\n        self.push(self.inventory, self.to_safe(\"rds_\" + instance.engine), dest)\n\n        # Inventory: Group by parameter group\n        self.push(self.inventory, self.to_safe(\"rds_parameter_group_\" + instance.parameter_group.name), dest)\n\n        # Global Tag: all RDS instances\n        self.push(self.inventory, 'rds', dest)\n\n\n    def get_route53_records(self):\n        ''' Get and store the map of resource records to domain names that\n        point to them. 
'''\n\n        r53_conn = route53.Route53Connection()\n        all_zones = r53_conn.get_zones()\n\n        route53_zones = [ zone for zone in all_zones if zone.name[:-1]\n                          not in self.route53_excluded_zones ]\n\n        self.route53_records = {}\n\n        for zone in route53_zones:\n            rrsets = r53_conn.get_all_rrsets(zone.id)\n\n            for record_set in rrsets:\n                record_name = record_set.name\n\n                if record_name.endswith('.'):\n                    record_name = record_name[:-1]\n\n                for resource in record_set.resource_records:\n                    self.route53_records.setdefault(resource, set())\n                    self.route53_records[resource].add(record_name)\n\n\n    def get_instance_route53_names(self, instance):\n        ''' Check if an instance is referenced in the records we have from\n        Route53. If it is, return the list of domain names pointing to said\n        instance. If nothing points to it, return an empty list. 
'''\n\n        instance_attributes = [ 'public_dns_name', 'private_dns_name',\n                                'ip_address', 'private_ip_address' ]\n\n        name_list = set()\n\n        for attrib in instance_attributes:\n            try:\n                value = getattr(instance, attrib)\n            except AttributeError:\n                continue\n\n            if value in self.route53_records:\n                name_list.update(self.route53_records[value])\n\n        return list(name_list)\n\n\n    def get_host_info_dict_from_instance(self, instance):\n        instance_vars = {}\n        for key in vars(instance):\n            value = getattr(instance, key)\n            key = self.to_safe('ec2_' + key)\n\n            # Handle complex types\n            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518\n            if key == 'ec2__state':\n                instance_vars['ec2_state'] = instance.state or ''\n                instance_vars['ec2_state_code'] = instance.state_code\n            elif key == 'ec2__previous_state':\n                instance_vars['ec2_previous_state'] = instance.previous_state or ''\n                instance_vars['ec2_previous_state_code'] = instance.previous_state_code\n            elif type(value) in [int, bool]:\n                instance_vars[key] = value\n            elif type(value) in [str, unicode]:\n                instance_vars[key] = value.strip()\n            elif type(value) == type(None):\n                instance_vars[key] = ''\n            elif key == 'ec2_region':\n                instance_vars[key] = value.name\n            elif key == 'ec2__placement':\n                instance_vars['ec2_placement'] = value.zone\n            elif key == 'ec2_tags':\n                for k, v in value.iteritems():\n                    key = self.to_safe('ec2_tag_' + k)\n                    instance_vars[key] = v\n            elif key == 'ec2_groups':\n        
        group_ids = []\n                group_names = []\n                for group in value:\n                    group_ids.append(group.id)\n                    group_names.append(group.name)\n                instance_vars[\"ec2_security_group_ids\"] = ','.join(group_ids)\n                instance_vars[\"ec2_security_group_names\"] = ','.join(group_names)\n            else:\n                pass\n                # TODO Product codes if someone finds them useful\n                #print key\n                #print type(value)\n                #print value\n\n        return instance_vars\n\n    def get_host_info(self):\n        ''' Get variables about a specific host '''\n\n        if len(self.index) == 0:\n            # Need to load index from cache\n            self.load_index_from_cache()\n\n        if not self.args.host in self.index:\n            # try updating the cache\n            self.do_api_calls_update_cache()\n            if not self.args.host in self.index:\n                # host migh not exist anymore\n                return self.json_format_dict({}, True)\n\n        (region, instance_id) = self.index[self.args.host]\n\n        instance = self.get_instance(region, instance_id)\n        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)\n\n    def push(self, my_dict, key, element):\n        ''' Pushed an element onto an array that may not have been defined in\n        the dict '''\n\n        if key in my_dict:\n            my_dict[key].append(element);\n        else:\n            my_dict[key] = [element]\n\n\n    def get_inventory_from_cache(self):\n        ''' Reads the inventory from the cache file and returns it as a JSON\n        object '''\n\n        cache = open(self.cache_path_cache, 'r')\n        json_inventory = cache.read()\n        return json_inventory\n\n\n    def load_index_from_cache(self):\n        ''' Reads the index from the cache file sets self.index '''\n\n        cache = open(self.cache_path_index, 
'r')\n        json_index = cache.read()\n        self.index = json.loads(json_index)\n\n\n    def write_to_cache(self, data, filename):\n        ''' Writes data in JSON format to a file '''\n\n        json_data = self.json_format_dict(data, True)\n        cache = open(filename, 'w')\n        cache.write(json_data)\n        cache.close()\n\n\n    def to_safe(self, word):\n        ''' Converts 'bad' characters in a string to underscores so they can be\n        used as Ansible groups '''\n\n        return re.sub(\"[^A-Za-z0-9\\-]\", \"_\", word)\n\n\n    def json_format_dict(self, data, pretty=False):\n        ''' Converts a dict to a JSON object and dumps it as a formatted\n        string '''\n\n        if pretty:\n            return json.dumps(data, sort_keys=True, indent=2)\n        else:\n            return json.dumps(data)\n\n\n# Run the script\nEc2Inventory()\n\n"
  },
  {
    "path": "wordpress/group_vars/all",
    "content": "ansible_ssh_user: ec2-user  \nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n\n# Which version of WordPress to deploy\nwp_version: 4.5.3\n#wp_sha256sum: 73c21224d42156150b948ca645a296a2431f1dd6a19350e0d8a72e465adde56d\n\n# These are the WordPress database settings\nwp_db_name: wordpress \nwp_db_user: wordpress\n# Set your database password here \nwp_db_password: secret\n\n# WordPress settings\n\n# Disable All Updates\n# By default automatic updates are enabled, set this value to true to disable all automatic updates\nauto_up_disable: false\n\n#Define Core Update Level\n#true  = Development, minor, and major updates are all enabled\n#false = Development, minor, and major updates are all disabled\n#minor = Minor updates are enabled, development, and major updates are disabled\ncore_update_level: true \n"
  },
  {
    "path": "wordpress/hosts",
    "content": "[local]\nlocalhost\n"
  },
  {
    "path": "wordpress/provisioning.yml",
    "content": "---  \n- hosts: localhost  \n  connection: local  \n  gather_facts: no    \n  vars:  \n    #your region  \n    region: ap-southeast-2\n    keyname: wordpress-apsydney\n    #your ip address\n    allowed_ip: 203.87.79.2/32\n    instance_type: t2.micro     \n    image: ami-dc361ebf\n  tasks:  \n    - name: create key pair  \n      tags: keypair  \n      ec2_key:  \n        region: \"{{ region }}\"  \n        name: \"{{ keyname }}\"      \n      register: mykey  \n\n    - name: write the private key to file\n      copy: content=\"{{ mykey.key.private_key }}\" dest=\"~/.ssh/{{ keyname }}.pem\" mode=0600\n      when: mykey.changed\n\n    - name: create security group  \n      tags: sg\n      ec2_group:    \n        region: \"{{ region }}\"\n        #your security group name  \n        name: sg_wordpress_apsydney    \n        description: security group for apsydney webserver host\n        rules:\n          # allow ssh access from your ip address  \n          - proto: tcp  \n            from_port: 22  \n            to_port: 22  \n            cidr_ip: \"{{ allowed_ip }}\"  \n          # allow http access from anywhere  \n          - proto: tcp  \n            from_port: 80    \n            to_port: 80    \n            cidr_ip: 0.0.0.0/0    \n          # allow https access from anywhere  \n          - proto: tcp  \n            from_port: 443    \n            to_port: 443    \n            cidr_ip: 0.0.0.0/0\n        rules_egress:  \n          - proto: all  \n            cidr_ip: 0.0.0.0/0    \n\n    - name: launch ec2 instance  \n      tags: ec2\n      local_action:  \n        module: ec2  \n        region: \"{{ region }}\"  \n        key_name: \"{{ keyname }}\"\n        instance_type: \"{{ instance_type }}\"    \n        image: \"{{ image }}\"  \n        wait: yes    \n        group: sg_wordpress_apsydney  \n        instance_tags:  \n          Name: wordpress-1  \n          class: wordpress\n      register: ec2    \n    - name: associate new EIP for the 
instance  \n      tags: eip      \n      local_action:\n        module: ec2_eip\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances   \n      when: item.id is defined   \n"
  },
  {
    "path": "wordpress/restore.yml",
    "content": "---\n- name: download backup from S3 and restore\n  hosts: tag_class_wordpress\n  gather_facts: no\n  vars:\n    bucketname: yan_wordpress\n    date: 20140920\n  tasks:\n    - name: download backup archive\n      local_action:\n        module: s3\n        bucket: \"{{ bucketname }}\"\n        object: /backup/database/{{ date }}_backup.tar.gz\n        dest: /tmp/{{ date }}_backup.tar.gz\n        mode: get\n    - name: extract archive and restore mysql backup\n      shell: tar -xzf {{ date }}_backup.tar.gz &&\n             mysql -u {{ wp_db_user }} -p{{ wp_db_password }}\n             {{ wp_db_name }} < {{ date }}_backup.sql\n             chdir=/tmp\n"
  },
  {
    "path": "wordpress/roles/common/tasks/main.yml",
    "content": "---\n- name: install the 'Development tools' package group\n  yum: name=\"@Development tools\" state=present update_cache=yes"
  },
  {
    "path": "wordpress/roles/mysql/handlers/main.yml",
    "content": "---  \n- name: restart mysql  \n  service: name=mysqld state=restarted  \n"
  },
  {
    "path": "wordpress/roles/mysql/tasks/main.yml",
    "content": "---\n- name: install mysql server\n  yum: name={{ item }} state=present\n  with_items:\n   - mysql-devel\n   - mysql-server\n\n- name: install mysql-python \n  pip: name=mysql-python state=present\n\n- name: create mysql configuration file\n  template: src=my.cnf.j2 dest=/etc/my.cnf\n  notify: restart mysql\n  \n- name: start mysql service\n  service: name=mysqld state=started enabled=true\n"
  },
  {
    "path": "wordpress/roles/mysql/templates/my.cnf.j2",
    "content": "# You can customize your mysql server configuration here\n[mysqld]\ndatadir=/var/lib/mysql\nsocket=/var/lib/mysql/mysql.sock\nuser=mysql\n# Disabling symbolic-links is recommended to prevent assorted security risks\nsymbolic-links=0\nport=3306\n\n[mysqld_safe]\nlog-error=/var/log/mysqld.log\npid-file=/var/run/mysqld/mysqld.pid\n"
  },
  {
    "path": "wordpress/roles/web/tasks/main.yml",
    "content": "---\n- name: install apache, php, and php-mysql\n  yum: name={{ item }} state=present\n  with_items:\n   - httpd\n   - php\n   - php-mysql\n- name: start and enable httpd\n  service: name=httpd state=started enabled=yes\n\n\n"
  },
  {
    "path": "wordpress/roles/wordpress/tasks/main.yml",
    "content": "---\n- name: download wordpress\n  get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz dest=~/wordpress-{{ wp_version }}.tar.gz\n#           sha256sum=\"{{ wp_sha256sum }}\"\n\n- name: extract wordpress archive\n  command: chdir=~ /bin/tar xvf wordpress-{{ wp_version }}.tar.gz creates=~/wordpress\n\n- name: copy wordpress to apache root directory\n  shell: cp -r ~/wordpress/* /var/www/html  \n\n- name: fetch random salts for wordpress config\n  command: curl https://api.wordpress.org/secret-key/1.1/salt/\n  register: \"wp_salt\"\n\n- name: create wordpress database\n  mysql_db: name={{ wp_db_name }} state=present\n\n- name: create wordpress database user\n  mysql_user: name={{ wp_db_user }} password={{ wp_db_password }} priv={{ wp_db_name }}.*:ALL host='localhost' state=present\n\n- name: copy wordpress config file\n  template: src=wp-config.php dest=/var/www/html/\n\n- name: change ownership of wordpress installation\n  file: path=/var/www/html/ owner=apache group=apache state=directory recurse=yes\n"
  },
  {
    "path": "wordpress/roles/wordpress/templates/wp-config.php",
    "content": "<?php\n/**\n * The base configurations of the WordPress.\n *\n * This file has the following configurations: MySQL settings, Table Prefix,\n * Secret Keys, WordPress Language, and ABSPATH. You can find more information\n * by visiting {@link http://codex.wordpress.org/Editing_wp-config.php Editing\n * wp-config.php} Codex page. You can get the MySQL settings from your web host.\n *\n * This file is used by the wp-config.php creation script during the\n * installation. You don't have to use the web site, you can just copy this file\n * to \"wp-config.php\" and fill in the values.\n *\n * @package WordPress\n */\n\n// ** MySQL settings - You can get this info from your web host ** //\n/** The name of the database for WordPress */\ndefine('DB_NAME', '{{ wp_db_name }}');\n\n/** MySQL database username */\ndefine('DB_USER', '{{ wp_db_user }}');\n\n/** MySQL database password */\ndefine('DB_PASSWORD', '{{ wp_db_password }}');\n\n/** MySQL hostname */\ndefine('DB_HOST', 'localhost');\n\n/** Database Charset to use in creating database tables. */\ndefine('DB_CHARSET', 'utf8');\n\n/** The Database Collate type. Don't change this if in doubt. */\ndefine('DB_COLLATE', '');\n\n/**#@+\n * Authentication Unique Keys and Salts.\n *\n * Change these to different unique phrases!\n * You can generate these using the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}\n * You can change these at any point in time to invalidate all existing cookies. This will force all users to have to log in again.\n *\n * @since 2.6.0\n */\n\n{{ wp_salt.stdout }}\n\n/**#@-*/\n\n/**\n * WordPress Database Table prefix.\n *\n * You can have multiple installations in one database if you give each a unique\n * prefix. Only numbers, letters, and underscores please!\n */\n$table_prefix  = 'wp_';\n\n/**\n * WordPress Localized Language, defaults to English.\n *\n * Change this to localize WordPress. 
A corresponding MO file for the chosen\n * language must be installed to wp-content/languages. For example, install\n * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German\n * language support.\n */\ndefine('WPLANG', '');\n\n/**\n * For developers: WordPress debugging mode.\n *\n * Change this to true to enable the display of notices during development.\n * It is strongly recommended that plugin and theme developers use WP_DEBUG\n * in their development environments.\n */\ndefine('WP_DEBUG', false);\n\n/* That's all, stop editing! Happy blogging. */\n\n/** Absolute path to the WordPress directory. */\nif ( !defined('ABSPATH') )\n\tdefine('ABSPATH', dirname(__FILE__) . '/');\n\n/** Sets up WordPress vars and included files. */\nrequire_once(ABSPATH . 'wp-settings.php');\n\n/** Disable Automatic Updates Completely */\ndefine( 'AUTOMATIC_UPDATER_DISABLED', {{auto_up_disable}} );\n\n/** Define AUTOMATIC Updates for Components. */\ndefine( 'WP_AUTO_UPDATE_CORE', {{core_update_level}} );\n"
  },
  {
    "path": "wordpress/site.yml",
    "content": "---\n- name: install apache, php, mysql server, wordpress\n  hosts: tag_class_wordpress\n  become: yes\n\n  roles:\n#    - common\n    - web\n    - mysql\n    - wordpress\n\n"
  },
  {
    "path": "wordpress_ha/ec2.ini",
    "content": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these lines\n# and edit edit eucalyptus_host to be the host name of your cloud controller\n#eucalyptus = True\n#eucalyptus_host = clc.cloud.domain.org\n\n# AWS regions to make calls to. Set this to 'all' to make request to all regions\n# in AWS and merge the results together. Alternatively, set this to a comma\n# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'\nregions = all\nregions_exclude = us-gov-west-1,cn-north-1\n\n# When generating inventory, Ansible needs to know how to address a server.\n# Each EC2 instance has a lot of variables associated with it. Here is the list:\n#   http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance\n# Below are 2 variables that are used as the address of a server:\n#   - destination_variable\n#   - vpc_destination_variable\n\n# This is the normal destination variable to use. If you are running Ansible\n# from outside EC2, then 'public_dns_name' makes the most sense. If you are\n# running Ansible from within EC2, then perhaps you want to use the internal\n# address, and should set this to 'private_dns_name'.\ndestination_variable = public_dns_name\n\n# For server inside a VPC, using DNS names may not make sense. When an instance\n# has 'subnet_id' set, this variable is used. If the subnet is public, setting\n# this to 'ip_address' will return the public IP address. 
For instances in a\n# private subnet, this should be set to 'private_ip_address', and Ansible must\n# be run from with EC2.\nvpc_destination_variable = ip_address\n\n# To tag instances on EC2 with the resource records that point to them from\n# Route53, uncomment and set 'route53' to True.\nroute53 = False\n\n# Additionally, you can specify the list of zones to exclude looking up in\n# 'route53_excluded_zones' as a comma-separated list.\n# route53_excluded_zones = samplezone1.com, samplezone2.com\n\n# API calls to EC2 are slow. For this reason, we cache the results of an API\n# call. Set this to the path you want cache files to be written to. Two files\n# will be written to this directory:\n#   - ansible-ec2.cache\n#   - ansible-ec2.index\ncache_path = ~/.ansible/tmp\n\n# The number of seconds a cache file is considered valid. After this many\n# seconds, a new API call will be made, and the cache file will be updated.\n# To disable the cache, set this value to 0\ncache_max_age = 300\n"
  },
  {
    "path": "wordpress_ha/ec2.py",
    "content": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ansible can understand by making API request to\nAWS EC2 using the Boto library.\n\nNOTE: This script assumes Ansible is being executed where the environment\nvariables needed for Boto have already been set:\n    export AWS_ACCESS_KEY_ID='AK123'\n    export AWS_SECRET_ACCESS_KEY='abc123'\n\nThis script also assumes there is an ec2.ini file alongside it.  To specify a\ndifferent path to ec2.ini, define the EC2_INI_PATH environment variable:\n\n    export EC2_INI_PATH=/path/to/my_ec2.ini\n\nIf you're using eucalyptus you need to set the above variables and\nyou need to define:\n\n    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus\n\nFor more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html\n\nWhen run against a specific host, this script returns the following variables:\n - ec2_ami_launch_index\n - ec2_architecture\n - ec2_association\n - ec2_attachTime\n - ec2_attachment\n - ec2_attachmentId\n - ec2_client_token\n - ec2_deleteOnTermination\n - ec2_description\n - ec2_deviceIndex\n - ec2_dns_name\n - ec2_eventsSet\n - ec2_group_name\n - ec2_hypervisor\n - ec2_id\n - ec2_image_id\n - ec2_instanceState\n - ec2_instance_type\n - ec2_ipOwnerId\n - ec2_ip_address\n - ec2_item\n - ec2_kernel\n - ec2_key_name\n - ec2_launch_time\n - ec2_monitored\n - ec2_monitoring\n - ec2_networkInterfaceId\n - ec2_ownerId\n - ec2_persistent\n - ec2_placement\n - ec2_platform\n - ec2_previous_state\n - ec2_private_dns_name\n - ec2_private_ip_address\n - ec2_publicIp\n - ec2_public_dns_name\n - ec2_ramdisk\n - ec2_reason\n - ec2_region\n - ec2_requester_id\n - ec2_root_device_name\n - ec2_root_device_type\n - ec2_security_group_ids\n - ec2_security_group_names\n - ec2_shutdown_state\n - ec2_sourceDestCheck\n - ec2_spot_instance_request_id\n - ec2_state\n - ec2_state_code\n - ec2_state_reason\n - ec2_status\n - 
ec2_subnet_id\n - ec2_tenancy\n - ec2_virtualization_type\n - ec2_vpc_id\n\nThese variables are pulled out of a boto.ec2.instance object. There is a lack of\nconsistency with variable spellings (camelCase and underscores) since this\njust loops through all variables the object exposes. It is preferred to use the\nones with underscores when multiple exist.\n\nIn addition, if an instance has AWS Tags associated with it, each tag is a new\nvariable named:\n - ec2_tag_[Key] = [Value]\n\nSecurity groups are comma-separated in 'ec2_security_group_ids' and\n'ec2_security_group_names'.\n'''\n\n# (c) 2012, Peter Sankauskas\n#\n# This file is part of Ansible,\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  
If not, see <http://www.gnu.org/licenses/>.\n\n######################################################################\n\nimport sys\nimport os\nimport argparse\nimport re\nfrom time import time\nimport boto\nfrom boto import ec2\nfrom boto import rds\nfrom boto import route53\nimport ConfigParser\n\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\n\nclass Ec2Inventory(object):\n    def _empty_inventory(self):\n        return {\"_meta\" : {\"hostvars\" : {}}}\n\n    def __init__(self):\n        ''' Main execution path '''\n\n        # Inventory grouped by instance IDs, tags, security groups, regions,\n        # and availability zones\n        self.inventory = self._empty_inventory()\n\n        # Index of hostname (address) to instance ID\n        self.index = {}\n\n        # Read settings and parse CLI arguments\n        self.read_settings()\n        self.parse_cli_args()\n\n        # Cache\n        if self.args.refresh_cache:\n            self.do_api_calls_update_cache()\n        elif not self.is_cache_valid():\n            self.do_api_calls_update_cache()\n\n        # Data to print\n        if self.args.host:\n            data_to_print = self.get_host_info()\n\n        elif self.args.list:\n            # Display list of instances for inventory\n            if self.inventory == self._empty_inventory():\n                data_to_print = self.get_inventory_from_cache()\n            else:\n                data_to_print = self.json_format_dict(self.inventory, True)\n\n        print data_to_print\n\n\n    def is_cache_valid(self):\n        ''' Determines if the cache files have expired, or if it is still valid '''\n\n        if os.path.isfile(self.cache_path_cache):\n            mod_time = os.path.getmtime(self.cache_path_cache)\n            current_time = time()\n            if (mod_time + self.cache_max_age) > current_time:\n                if os.path.isfile(self.cache_path_index):\n                    return True\n\n        return 
False\n\n\n    def read_settings(self):\n        ''' Reads the settings from the ec2.ini file '''\n\n        config = ConfigParser.SafeConfigParser()\n        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')\n        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)\n        config.read(ec2_ini_path)\n\n        # is eucalyptus?\n        self.eucalyptus_host = None\n        self.eucalyptus = False\n        if config.has_option('ec2', 'eucalyptus'):\n            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')\n        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):\n            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')\n\n        # Regions\n        self.regions = []\n        configRegions = config.get('ec2', 'regions')\n        configRegions_exclude = config.get('ec2', 'regions_exclude')\n        if (configRegions == 'all'):\n            if self.eucalyptus_host:\n                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)\n            else:\n                for regionInfo in ec2.regions():\n                    if regionInfo.name not in configRegions_exclude:\n                        self.regions.append(regionInfo.name)\n        else:\n            self.regions = configRegions.split(\",\")\n\n        # Destination addresses\n        self.destination_variable = config.get('ec2', 'destination_variable')\n        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')\n\n        # Route53\n        self.route53_enabled = config.getboolean('ec2', 'route53')\n        self.route53_excluded_zones = []\n        if config.has_option('ec2', 'route53_excluded_zones'):\n            self.route53_excluded_zones.extend(\n                config.get('ec2', 'route53_excluded_zones', '').split(','))\n\n        # Cache related\n        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))\n        if not 
os.path.exists(cache_dir):\n            os.makedirs(cache_dir)\n\n        self.cache_path_cache = cache_dir + \"/ansible-ec2.cache\"\n        self.cache_path_index = cache_dir + \"/ansible-ec2.index\"\n        self.cache_max_age = config.getint('ec2', 'cache_max_age')\n        \n\n\n    def parse_cli_args(self):\n        ''' Command line argument processing '''\n\n        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')\n        parser.add_argument('--list', action='store_true', default=True,\n                           help='List instances (default: True)')\n        parser.add_argument('--host', action='store',\n                           help='Get all the variables about a specific instance')\n        parser.add_argument('--refresh-cache', action='store_true', default=False,\n                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')\n        self.args = parser.parse_args()\n\n\n    def do_api_calls_update_cache(self):\n        ''' Do API calls to each region, and save data in cache files '''\n\n        if self.route53_enabled:\n            self.get_route53_records()\n\n        for region in self.regions:\n            self.get_instances_by_region(region)\n            self.get_rds_instances_by_region(region)\n\n        self.write_to_cache(self.inventory, self.cache_path_cache)\n        self.write_to_cache(self.index, self.cache_path_index)\n\n\n    def get_instances_by_region(self, region):\n        ''' Makes an AWS EC2 API call to the list of instances in a particular\n        region '''\n\n        try:\n            if self.eucalyptus:\n                conn = boto.connect_euca(host=self.eucalyptus_host)\n                conn.APIVersion = '2010-08-31'\n            else:\n                conn = ec2.connect_to_region(region)\n\n            # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n            
if conn is None:\n                print(\"region name: %s likely not supported, or AWS is down.  connection to region failed.\" % region)\n                sys.exit(1)\n \n            reservations = conn.get_all_instances()\n            for reservation in reservations:\n                for instance in reservation.instances:\n                    self.add_instance(instance, region)\n        \n        except boto.exception.BotoServerError, e:\n            if  not self.eucalyptus:\n                print \"Looks like AWS is down again:\"\n            print e\n            sys.exit(1)\n\n    def get_rds_instances_by_region(self, region):\n\t''' Makes an AWS API call to the list of RDS instances in a particular\n        region '''\n\n        try:\n            conn = rds.connect_to_region(region)\n            if conn:\n                instances = conn.get_all_dbinstances()\n                for instance in instances:\n                    self.add_rds_instance(instance, region)\n        except boto.exception.BotoServerError, e:\n            if not e.reason == \"Forbidden\":\n                print \"Looks like AWS RDS is down: \"\n                print e\n                sys.exit(1)\n\n    def get_instance(self, region, instance_id):\n        ''' Gets details about a specific instance '''\n        if self.eucalyptus:\n            conn = boto.connect_euca(self.eucalyptus_host)\n            conn.APIVersion = '2010-08-31'\n        else:\n            conn = ec2.connect_to_region(region)\n\n        # connect_to_region will fail \"silently\" by returning None if the region name is wrong or not supported\n        if conn is None:\n            print(\"region name: %s likely not supported, or AWS is down.  
connection to region failed.\" % region)\n            sys.exit(1)\n\n        reservations = conn.get_all_instances([instance_id])\n        for reservation in reservations:\n            for instance in reservation.instances:\n                return instance\n\n\n    def add_instance(self, instance, region):\n        ''' Adds an instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want running instances\n        if instance.state != 'running':\n            return\n\n        # Select the best destination address\n        if instance.subnet_id:\n            dest = getattr(instance, self.vpc_destination_variable)\n        else:\n            dest =  getattr(instance, self.destination_variable)\n\n        if not dest:\n            # Skip instances we cannot address (e.g. private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.placement, dest)\n\n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)\n\n        # Inventory: Group by key pair\n        if instance.key_name:\n            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)\n        \n        # Inventory: Group by security group\n        try:\n            for group in instance.groups:\n                key = self.to_safe(\"security_group_\" + group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by tag keys\n        for 
k, v in instance.tags.iteritems():\n            key = self.to_safe(\"tag_\" + k + \"=\" + v)\n            self.push(self.inventory, key, dest)\n\n        # Inventory: Group by Route53 domain names if enabled\n        if self.route53_enabled:\n            route53_names = self.get_instance_route53_names(instance)\n            for name in route53_names:\n                self.push(self.inventory, name, dest)\n\n        # Global Tag: tag all EC2 instances\n        self.push(self.inventory, 'ec2', dest)\n\n        self.inventory[\"_meta\"][\"hostvars\"][dest] = self.get_host_info_dict_from_instance(instance)\n\n\n    def add_rds_instance(self, instance, region):\n        ''' Adds an RDS instance to the inventory and index, as long as it is\n        addressable '''\n\n        # Only want available instances\n        if instance.status != 'available':\n            return\n\n        # Select the best destination address\n        #if instance.subnet_id:\n            #dest = getattr(instance, self.vpc_destination_variable)\n        #else:\n            #dest =  getattr(instance, self.destination_variable)\n        dest = instance.endpoint[0]\n\n        if not dest:\n            # Skip instances we cannot address (e.g. 
private VPC subnet)\n            return\n\n        # Add to index\n        self.index[dest] = [region, instance.id]\n\n        # Inventory: Group by instance ID (always a group of 1)\n        self.inventory[instance.id] = [dest]\n\n        # Inventory: Group by region\n        self.push(self.inventory, region, dest)\n\n        # Inventory: Group by availability zone\n        self.push(self.inventory, instance.availability_zone, dest)\n        \n        # Inventory: Group by instance type\n        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)\n        \n        # Inventory: Group by security group\n        try:\n            if instance.security_group:\n                key = self.to_safe(\"security_group_\" + instance.security_group.name)\n                self.push(self.inventory, key, dest)\n        except AttributeError:\n            print 'Package boto seems a bit older.'\n            print 'Please upgrade boto >= 2.3.0.'\n            sys.exit(1)\n\n        # Inventory: Group by engine\n        self.push(self.inventory, self.to_safe(\"rds_\" + instance.engine), dest)\n\n        # Inventory: Group by parameter group\n        self.push(self.inventory, self.to_safe(\"rds_parameter_group_\" + instance.parameter_group.name), dest)\n\n        # Global Tag: all RDS instances\n        self.push(self.inventory, 'rds', dest)\n\n\n    def get_route53_records(self):\n        ''' Get and store the map of resource records to domain names that\n        point to them. 
'''\n\n        r53_conn = route53.Route53Connection()\n        all_zones = r53_conn.get_zones()\n\n        route53_zones = [ zone for zone in all_zones if zone.name[:-1]\n                          not in self.route53_excluded_zones ]\n\n        self.route53_records = {}\n\n        for zone in route53_zones:\n            rrsets = r53_conn.get_all_rrsets(zone.id)\n\n            for record_set in rrsets:\n                record_name = record_set.name\n\n                if record_name.endswith('.'):\n                    record_name = record_name[:-1]\n\n                for resource in record_set.resource_records:\n                    self.route53_records.setdefault(resource, set())\n                    self.route53_records[resource].add(record_name)\n\n\n    def get_instance_route53_names(self, instance):\n        ''' Check if an instance is referenced in the records we have from\n        Route53. If it is, return the list of domain names pointing to said\n        instance. If nothing points to it, return an empty list. 
'''\n\n        instance_attributes = [ 'public_dns_name', 'private_dns_name',\n                                'ip_address', 'private_ip_address' ]\n\n        name_list = set()\n\n        for attrib in instance_attributes:\n            try:\n                value = getattr(instance, attrib)\n            except AttributeError:\n                continue\n\n            if value in self.route53_records:\n                name_list.update(self.route53_records[value])\n\n        return list(name_list)\n\n\n    def get_host_info_dict_from_instance(self, instance):\n        instance_vars = {}\n        for key in vars(instance):\n            value = getattr(instance, key)\n            key = self.to_safe('ec2_' + key)\n\n            # Handle complex types\n            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518\n            if key == 'ec2__state':\n                instance_vars['ec2_state'] = instance.state or ''\n                instance_vars['ec2_state_code'] = instance.state_code\n            elif key == 'ec2__previous_state':\n                instance_vars['ec2_previous_state'] = instance.previous_state or ''\n                instance_vars['ec2_previous_state_code'] = instance.previous_state_code\n            elif type(value) in [int, bool]:\n                instance_vars[key] = value\n            elif type(value) in [str, unicode]:\n                instance_vars[key] = value.strip()\n            elif type(value) == type(None):\n                instance_vars[key] = ''\n            elif key == 'ec2_region':\n                instance_vars[key] = value.name\n            elif key == 'ec2__placement':\n                instance_vars['ec2_placement'] = value.zone\n            elif key == 'ec2_tags':\n                for k, v in value.iteritems():\n                    key = self.to_safe('ec2_tag_' + k)\n                    instance_vars[key] = v\n            elif key == 'ec2_groups':\n        
        group_ids = []\n                group_names = []\n                for group in value:\n                    group_ids.append(group.id)\n                    group_names.append(group.name)\n                instance_vars[\"ec2_security_group_ids\"] = ','.join(group_ids)\n                instance_vars[\"ec2_security_group_names\"] = ','.join(group_names)\n            else:\n                pass\n                # TODO Product codes if someone finds them useful\n                #print key\n                #print type(value)\n                #print value\n\n        return instance_vars\n\n    def get_host_info(self):\n        ''' Get variables about a specific host '''\n\n        if len(self.index) == 0:\n            # Need to load index from cache\n            self.load_index_from_cache()\n\n        if not self.args.host in self.index:\n            # try updating the cache\n            self.do_api_calls_update_cache()\n            if not self.args.host in self.index:\n                # host migh not exist anymore\n                return self.json_format_dict({}, True)\n\n        (region, instance_id) = self.index[self.args.host]\n\n        instance = self.get_instance(region, instance_id)\n        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)\n\n    def push(self, my_dict, key, element):\n        ''' Pushed an element onto an array that may not have been defined in\n        the dict '''\n\n        if key in my_dict:\n            my_dict[key].append(element);\n        else:\n            my_dict[key] = [element]\n\n\n    def get_inventory_from_cache(self):\n        ''' Reads the inventory from the cache file and returns it as a JSON\n        object '''\n\n        cache = open(self.cache_path_cache, 'r')\n        json_inventory = cache.read()\n        return json_inventory\n\n\n    def load_index_from_cache(self):\n        ''' Reads the index from the cache file sets self.index '''\n\n        cache = open(self.cache_path_index, 
'r')\n        json_index = cache.read()\n        self.index = json.loads(json_index)\n\n\n    def write_to_cache(self, data, filename):\n        ''' Writes data in JSON format to a file '''\n\n        json_data = self.json_format_dict(data, True)\n        cache = open(filename, 'w')\n        cache.write(json_data)\n        cache.close()\n\n\n    def to_safe(self, word):\n        ''' Converts 'bad' characters in a string to underscores so they can be\n        used as Ansible groups '''\n\n        return re.sub(\"[^A-Za-z0-9\\-]\", \"_\", word)\n\n\n    def json_format_dict(self, data, pretty=False):\n        ''' Converts a dict to a JSON object and dumps it as a formatted\n        string '''\n\n        if pretty:\n            return json.dumps(data, sort_keys=True, indent=2)\n        else:\n            return json.dumps(data)\n\n\n# Run the script\nEc2Inventory()\n\n"
  },
  {
    "path": "wordpress_ha/group_vars/all",
    "content": "---\ndb_name: wordpress\nusername: dbadmin\npassword: mypassword\ndbhost: staging-wordpress-rds.cxzxl961nonk.ap-southeast-2.rds.amazonaws.com\n\nansible_ssh_user: ec2-user\nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n\nwp_version: 4.1\n\n# WordPress settings\n\n# Disable All Updates\n# By default automatic updates are enabled, set this value to true to disable all automatic updates\nauto_up_disable: false\n\n#Define Core Update Level\n#true  = Development, minor, and major updates are all enabled\n#false = Development, minor, and major updates are all disabled\n#minor = Minor updates are enabled, development, and major updates are disabled\ncore_update_level: true\n"
  },
  {
    "path": "wordpress_ha/hosts",
    "content": "[local]\nlocalhost\n"
  },
  {
    "path": "wordpress_ha/provisioning_asg.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  tasks:\n    - include_vars: \"{{ env }}.yml\"\n\n    - set_fact:\n        timestamp: \"{{ lookup('pipe', 'date +%g%m%d%H%M%S') }}\"\n\n    - name: Create public ELB\n      ec2_elb_lb:\n        region: \"{{ region }}\"\n        name: \"{{ asg_name }}-{{ env }}\"\n        state: present\n        cross_az_load_balancing: yes\n        security_group_ids: \"{{ elb_group_ids }}\"\n        subnets: \"{{ elb_subnets }}\"\n        listeners:\n          - protocol: http\n            load_balancer_port: 80\n            instance_port: 80\n        health_check:\n          ping_protocol: http\n          ping_port: 80\n          ping_path: \"/index.php\"\n          response_timeout: 2\n          interval: 10\n          unhealthy_threshold: 2\n          healthy_threshold: 2\n        connection_draining_timeout: 60\n      register: elb\n    - debug: var=elb\n\n    - name: Create Launch Configuration\n      ec2_lc:\n        region: \"{{ region }}\"\n        name: \"{{ asg_name }}-{{ env }}-{{ timestamp }}\"\n        image_id: \"{{ image_id }}\"\n        key_name: \"{{ keypair }}\"\n        instance_type: \"{{ instance_type }}\"\n        security_groups: \"{{ security_groups }}\"\n        instance_monitoring: yes\n      register: lc\n    - debug: var=lc\n\n    - name: Configure Auto Scaling Group\n      ec2_asg:\n        region: \"{{ region }}\"\n        name: \"{{ asg_name }}-{{ env }}-{{ timestamp }}\"\n        vpc_zone_identifier: \"{{ asg_subnet_ids }}\"\n        launch_config_name: \"{{ lc.name }}\"\n        availability_zones: \"{{ zones }}\"\n        health_check_type: EC2\n        health_check_period: 300\n        desired_capacity: \"{{ asg_min }}\"\n        min_size: \"{{ asg_min }}\"\n        max_size: \"{{ asg_max }}\"\n        tags:\n         - Name: \"{{ asg_name }}-{{ env }}\"\n        load_balancers: \"{{ elb.elb.name }}\"\n        state: present\n      register: asg\n    - debug: 
var=asg\n\n    - name: Configure Scaling Policies\n      ec2_scaling_policy:\n        region: \"{{ region }}\"\n        name: \"{{ item.name }}\"\n        asg_name: \"{{ asg_name }}-{{ env }}-{{ timestamp }}\"\n        state: present\n        adjustment_type: \"{{ item.adjustment_type }}\"\n        min_adjustment_step: \"{{ item.min_adjustment_step }}\"\n        scaling_adjustment: \"{{ item.scaling_adjustment }}\"\n        cooldown: \"{{ item.cooldown }}\"\n      with_items:\n        - name: \"Increase Group Size\"\n          adjustment_type: \"ChangeInCapacity\"\n          scaling_adjustment: +1\n          min_adjustment_step: 1\n          cooldown: 180\n        - name: \"Decrease Group Size\"\n          adjustment_type: \"ChangeInCapacity\"\n          scaling_adjustment: -1\n          min_adjustment_step: 1\n          cooldown: 300\n      register: scaling_policy\n    - debug: var=scaling_policy\n\n    - name: Define Metric Alarms configuration\n      set_fact:\n        metric_alarms:\n          - name: \"{{ asg.name }}-ScaleUp\"\n            comparison: \">=\"\n            threshold: 70.0\n            alarm_actions:\n              - \"{{ scaling_policy.results[0].arn }}\"\n          - name: \"{{ asg.name }}-ScaleDown\"\n            comparison: \"<=\"\n            threshold: 30.0\n            alarm_actions:\n              - \"{{ scaling_policy.results[1].arn }}\"\n\n    - name: Configure Metric Alarms\n      ec2_metric_alarm:\n        region: \"{{ region }}\"\n        name: \"{{ item.name }}\"\n        state: present\n        metric: \"CPUUtilization\"\n        namespace: \"AWS/EC2\"\n        statistic: \"Average\"\n        comparison: \"{{ item.comparison }}\"\n        threshold: \"{{ item.threshold }}\"\n        period: 60\n        evaluation_periods: 5\n        unit: \"Percent\"\n        dimensions:\n          AutoScalingGroupName: \"{{ asg.name }}\"\n        alarm_actions: \"{{ item.alarm_actions }}\"\n      with_items: \"{{ metric_alarms }}\"\n      when: 
asg.max_size > 1\n      register: alarms\n    - debug: var=alarms\n"
  },
  {
    "path": "wordpress_ha/provisioning_rds.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    env: staging\n    size: 5\n    instance_type: db.t2.micro\n    db_engine: MySQL\n    engine_version: 5.6.23\n    subnet_group: dbsg_wordpress\n    param_group: wordpress\n    # staging_sg_database security group ID\n    security_groups: sg-eeb14e8b\n\n  tasks:\n    - name: \"get {{ env }}_subnet_private_0 subnet id\"\n      command: \"aws ec2 describe-subnets --filters Name=tag:Name,Values={{ env }}_subnet_private_0\n               --region {{ region }} --query 'Subnets[0].SubnetId' --output text\"\n      register: subnet0\n\n    - debug: var=subnet0.stdout\n    \n    - name: \"get {{ env }}_subnet_private_1 subnet id\"\n      command: \"aws ec2 describe-subnets --filters Name=tag:Name,Values={{ env }}_subnet_private_1\n               --region {{ region }} --query 'Subnets[0].SubnetId' --output text\"\n      register: subnet1\n\n    - debug: var=subnet1.stdout\n\n    - name: create Multi-AZ DB subnet group\n      rds_subnet_group:\n        name: \"{{ subnet_group }}\"\n        state: present\n        region: \"{{ region }}\"\n        description: DB Subnet Group for WordPress HA\n        subnets:\n          - \"{{ subnet0.stdout }}\"\n          - \"{{ subnet1.stdout }}\"\n\n    - name: create mysql parameter group\n      rds_param_group:\n        name: \"{{ param_group }}\"\n        state: present\n        region: \"{{ region }}\"\n        description: MySQL Parameter Group for WordPress HA\n        engine: mysql5.6\n        params:\n          innodb_lock_wait_timeout: 3600\n          max_allowed_packet: 512M\n          net_write_timeout: 300\n\n    - name: create mysql RDS instance\n      rds:\n        command: create\n        instance_name: \"{{ env }}-wordpress-rds\"\n        region: \"{{ region }}\"\n        size: \"{{ size }}\"\n        instance_type: \"{{ instance_type }}\"\n        db_engine: \"{{ db_engine }}\"\n        engine_version: 
\"{{ engine_version }}\"\n        subnet: \"{{ subnet_group }}\"\n        parameter_group: \"{{ param_group }}\"\n        multi_zone: yes\n        db_name: \"{{ db_name }}\"\n        username: \"{{ username }}\"\n        password: \"{{ password }}\"\n        vpc_security_groups: \"{{ security_groups }}\"\n\n\n\n    \n"
  },
  {
    "path": "wordpress_ha/provisioning_sg.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    allowed_ip: xx.xx.xx.xx/32\n    vpc_cidr: 10.0.0.0/16\n    env: staging\n  tasks:\n    - name: get vpc id\n      command: \"aws ec2 describe-vpcs --filters Name=tag:Name,Values={{ env }}_vpc\n               --query 'Vpcs[0].VpcId' --output text\"\n      register: vpcid\n\n    - name: create sg_web rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpcid.stdout }}\"\n        name: \"{{ env }}_sg_web\"\n        description: security group for public web\n        rules:\n          # allow ssh access from ansible group\n          - proto: tcp\n            from_port: 22\n            to_port: 22\n            group_name: \"{{ env }}_sg_ansible\"\n            group_desc: security group for ansible\n          # allow http access from anywhere\n          - proto: tcp\n            from_port: 80\n            to_port: 80\n            cidr_ip: 0.0.0.0/0\n          # allow https access from anywhere\n          - proto: tcp\n            from_port: 443\n            to_port: 443\n            cidr_ip: 0.0.0.0/0\n\n    - name: create sg_wordpress rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpcid.stdout }}\"\n        name: \"{{ env }}_sg_wordpress\"\n        description: security group for wordpress servers\n        rules:\n          # allow ssh access from ansible group\n          - proto: tcp\n            from_port: 22\n            to_port: 22\n            group_name: \"{{ env }}_sg_ansible\"\n            group_desc: security group for ansible\n          # allow http access from vpc cidr\n          - proto: tcp\n            from_port: 80\n            to_port: 80\n            group_name: \"{{ env }}_sg_wordpress_lb\"\n            group_desc: security group for wordpress load balancer\n          # allow https access from vpc cidr\n          - proto: tcp\n            from_port: 443\n            
to_port: 443\n            group_name: \"{{ env }}_sg_wordpress_lb\"\n            group_desc: security group for wordpress load balancer\n\n    - name: create sg_database rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpcid.stdout }}\"\n        name: \"{{ env }}_sg_database\"\n        description: security group for database\n        rules:\n          - proto: tcp\n            from_port: 3306\n            to_port: 3306\n            group_name: \"{{ env }}_sg_web\"\n          - proto: tcp\n            from_port: 3306\n            to_port: 3306\n            group_name: \"{{ env }}_sg_wordpress\"\n\n    - name: create sg_ansible rules\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpcid.stdout }}\"\n        name: \"{{ env }}_sg_ansible\"\n        description: security group for ansible \n        rules:\n          - proto: tcp\n            from_port: 22\n            to_port: 22\n            cidr_ip: \"{{ allowed_ip }}\"\n\n    - name: create sg_wordpress_lb\n      ec2_group:\n        region: \"{{ region }}\"\n        vpc_id: \"{{ vpcid.stdout }}\"\n        name: \"{{ env }}_sg_wordpress_lb\"\n        description: security group for wordpress load balancer\n        rules:\n          # allow http access from anywhere\n          - proto: tcp\n            from_port: 80\n            to_port: 80\n            cidr_ip: 0.0.0.0/0\n          # allow https access from anywhere\n          - proto: tcp\n            from_port: 443\n            to_port: 443\n            cidr_ip: 0.0.0.0/0\n"
  },
  {
    "path": "wordpress_ha/provisioning_vpc.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    region: ap-southeast-2\n    env: staging\n    az0: ap-southeast-2a\n    az1: ap-southeast-2b\n  tasks:\n    - name: create vpc with multi-az subnets\n      ec2_vpc:\n        region: \"{{ region }}\"\n        cidr_block: 10.0.0.0/16\n        resource_tags: '{\"Name\":\"{{ env }}_vpc\"}'\n        subnets:\n          - cidr: 10.0.0.0/24\n            az: \"{{ az0 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_public_0\"}'\n          - cidr: 10.0.1.0/24\n            az: \"{{ az0 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_private_0\"}'\n          - cidr: 10.0.2.0/24\n            az: \"{{ az1 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_public_1\"}'\n          - cidr: 10.0.3.0/24\n            az: \"{{ az1 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_private_1\"}'\n          - cidr: 10.0.4.0/24\n            az: \"{{ az0 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_private_2\"}'\n          - cidr: 10.0.5.0/24\n            az: \"{{ az1 }}\"\n            resource_tags: '{\"Name\":\"{{ env }}_subnet_private_3\"}'\n        internet_gateway: yes\n        route_tables:\n          - subnets:\n              - 10.0.0.0/24\n              - 10.0.2.0/24\n            routes:\n              - dest: 0.0.0.0/0\n                gw: igw\n      \n"
  },
  {
    "path": "wordpress_ha/provisioning_wp.yml",
    "content": "---\n- hosts: localhost\n  connection: local\n  gather_facts: no\n  vars:\n    #your region\n    region: ap-southeast-2\n    keyname: wordpress-apsydney\n    instance_type: t2.micro\n    env: staging\n    image: ami-d9fe9be3\n    ins_name: wordpress_master \n  tasks:\n    - name: get {{ env }}_subnet_public_0 subnet id\n      command: \"aws ec2 describe-subnets \n               --region {{ region }}\n               --filters Name=tag:Name,Values={{ env }}_subnet_public_0\n               --query 'Subnets[0].SubnetId' --output text\"\n      register: subnet0\n\n    - name: launch ec2 instance\n      ec2:\n        region: \"{{ region }}\"\n        key_name: \"{{ keyname }}\"\n        instance_type: \"{{ instance_type }}\"\n        image: \"{{ image }}\"\n        wait: yes\n        group: \"{{ env }}_sg_web\"\n        id: wordpress_ha_1\n        instance_tags:\n          Name: \"{{ ins_name }}\"\n          class: wordpress_ha\n        vpc_subnet_id: \"{{ subnet0.stdout }}\"\n      register: ec2\n      when: subnet0.stdout!=\"None\"\n\n    - name: check EIP association\n      command: \"aws ec2 describe-instances\n               --region {{ region }}\n               --filters Name=tag:Name,Values={{ ins_name }}\n               --query 'Reservations[0].Instances[0].NetworkInterfaces[0].Association' \n               --output text\"\n      register: eip\n\n    - name: associate new EIP for the instance\n      ec2_eip:\n        region: \"{{ region }}\"\n        instance_id: \"{{ item.id }}\"\n      with_items: ec2.instances\n      when: item.id is defined and eip.stdout==\"None\"\n\n"
  },
  {
    "path": "wordpress_ha/roles/web/tasks/main.yml",
    "content": "---\n- name: install apache, php, and php-mysql\n  yum: name={{ item }} state=present\n  with_items:\n   - httpd\n   - php\n   - php-mysql\n- name: start and enable httpd\n  service: name=httpd state=started enabled=yes\n"
  },
  {
    "path": "wordpress_ha/roles/wordpress/tasks/main.yml",
    "content": "---\n- name: download wordpress\n  get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz \n           dest=~/wordpress-{{ wp_version }}.tar.gz\n\n- name: extract wordpress archive\n  command: chdir=~ /bin/tar xvf wordpress-{{ wp_version }}.tar.gz \n           creates=~/wordpress\n\n- name: copy wordpress to apache root directory\n  shell: cp -r ~/wordpress/* /var/www/html\n\n- name: fetch random salts for wordpress config\n  local_action: command curl https://api.wordpress.org/secret-key/1.1/salt/\n  register: \"wp_salt\"\n\n- name: copy wordpress config file\n  template: src=wp-config.php dest=/var/www/html/\n\n- name: change ownership of wordpress installation\n  file: path=/var/www/html/ owner=apache group=apache state=directory recurse=yes\n"
  },
  {
    "path": "wordpress_ha/roles/wordpress/templates/wp-config.php",
    "content": "<?php\n/**\n * The base configurations of the WordPress.\n *\n * This file has the following configurations: MySQL settings, Table Prefix,\n * Secret Keys, WordPress Language, and ABSPATH. You can find more information\n * by visiting {@link http://codex.wordpress.org/Editing_wp-config.php Editing\n * wp-config.php} Codex page. You can get the MySQL settings from your web host.\n *\n * This file is used by the wp-config.php creation script during the\n * installation. You don't have to use the web site, you can just copy this file\n * to \"wp-config.php\" and fill in the values.\n *\n * @package WordPress\n */\n\n// ** MySQL settings - You can get this info from your web host ** //\n/** The name of the database for WordPress */\ndefine('DB_NAME', '{{ db_name }}');\n\n/** MySQL database username */\ndefine('DB_USER', '{{ username }}');\n\n/** MySQL database password */\ndefine('DB_PASSWORD', '{{ password }}');\n\n/** MySQL hostname */\ndefine('DB_HOST', '{{ dbhost }}');\n\n/** Database Charset to use in creating database tables. */\ndefine('DB_CHARSET', 'utf8');\n\n/** The Database Collate type. Don't change this if in doubt. */\ndefine('DB_COLLATE', '');\n\n/**#@+\n * Authentication Unique Keys and Salts.\n *\n * Change these to different unique phrases!\n * You can generate these using the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}\n * You can change these at any point in time to invalidate all existing cookies. This will force all users to have to log in again.\n *\n * @since 2.6.0\n */\n\n{{ wp_salt.stdout }}\n\n/**#@-*/\n\n/**\n * WordPress Database Table prefix.\n *\n * You can have multiple installations in one database if you give each a unique\n * prefix. Only numbers, letters, and underscores please!\n */\n$table_prefix  = 'wp_';\n\n/**\n * WordPress Localized Language, defaults to English.\n *\n * Change this to localize WordPress. 
A corresponding MO file for the chosen\n * language must be installed to wp-content/languages. For example, install\n * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German\n * language support.\n */\ndefine('WPLANG', '');\n\n/**\n * For developers: WordPress debugging mode.\n *\n * Change this to true to enable the display of notices during development.\n * It is strongly recommended that plugin and theme developers use WP_DEBUG\n * in their development environments.\n */\ndefine('WP_DEBUG', false);\n\n/* That's all, stop editing! Happy blogging. */\n\n/** Absolute path to the WordPress directory. */\nif ( !defined('ABSPATH') )\n\tdefine('ABSPATH', dirname(__FILE__) . '/');\n\n/** Sets up WordPress vars and included files. */\nrequire_once(ABSPATH . 'wp-settings.php');\n\n/** Disable Automatic Updates Completely */\ndefine( 'AUTOMATIC_UPDATER_DISABLED', {{auto_up_disable}} );\n\n/** Define AUTOMATIC Updates for Components. */\ndefine( 'WP_AUTO_UPDATE_CORE', {{core_update_level}} );\n"
  },
  {
    "path": "wordpress_ha/site.yml",
    "content": "---\n- name: install apache, php, wordpress\n  hosts: tag_class_wordpress_ha\n  sudo: yes\n\n  roles:\n    - web\n    - wordpress\n"
  },
  {
    "path": "wordpress_ha/vars/staging.yml",
    "content": "---\nregion: ap-southeast-2\nzones: ['ap-southeast-2a','ap-southeast-2b']\nasg_subnet_ids: ['subnet-0da97768','subnet-1f33f768'] # staging_subnet_private_2 and staging_subnet_private_3\nelb_subnets: ['subnet-8ceb18e9','subnet-08ccd27c'] # staging_subnet_public_0 and staging_subnet_public_1\nasg_name: wordpress\nimage_id: ami-6d7c590e # ami created from the master wordpress instance\ninstance_type: t2.micro\nsecurity_groups: ['sg-85426fe0'] # staging_sg_wordpress\nelb_group_ids: ['sg-c4507da1'] # staging_sg_wordpress_lb\nkeypair: wordpress-apsydney\nasg_min: 1\nasg_max: 8\n"
  }
]