Repository: yankurniawan/ansible-for-aws
Branch: master
Commit: 99b6d82fbbd4
Files: 102
Total size: 134.4 KB
Directory structure:
gitextract_brojytnn/
├── .gitignore
├── ami_create.yml
├── ami_delete.yml
├── dbsg_create.yml
├── dbsg_delete.yml
├── dhcp_options.yml
├── ec2.ini
├── ec2.py
├── ec2_check_name.yml
├── ec2_profile.yml
├── ec2_start.yml
├── ec2_start_1.yml
├── ec2_stop.yml
├── ec2_vol_1.yml
├── ec2_vol_2.yml
├── ec2_vpc_db_create.yml
├── ec2_vpc_jumpbox.yml
├── ec2_vpc_openvpn.yml
├── ec2_vpc_web_create.yml
├── group_vars/
│ ├── all
│ └── tag_class_wordpress
├── host_vars/
│ └── localhost
├── hosts
├── iam_group.yml
├── iam_policy.yml
├── iam_policy_admin.json
├── iam_policy_app1.yml
├── iam_policy_s3_read.json
├── iam_role.yml
├── iam_user.yml
├── install_ansible.yml
├── keypair.yml
├── launch_ec2.yml
├── launch_ec2_eip.yml
├── launch_ec2_iteration.yml
├── launch_ec2_tags.yml
├── library/
│ ├── instance_lookup
│ └── vpc_lookup
├── mysql_pg_create.yml
├── mysql_pg_delete.yml
├── mysql_rds_create.yml
├── mysql_rds_delete.yml
├── nat_launch.yml
├── roles/
│ ├── ansible/
│ │ └── tasks/
│ │ └── main.yml
│ ├── apache/
│ │ └── tasks/
│ │ └── main.yml
│ ├── common/
│ │ ├── handlers/
│ │ │ └── main.yml
│ │ ├── tasks/
│ │ │ └── main.yml
│ │ └── templates/
│ │ └── ntp.conf.j2
│ └── mysql/
│ └── tasks/
│ └── main.yml
├── route53.yml
├── s3_create_bucket.yml
├── s3_create_dir.yml
├── s3_delete_bucket.yml
├── s3_download_file.yml
├── s3_share_file.yml
├── s3_upload_file.yml
├── sg_database.yml
├── sg_delete.yml
├── sg_empty.yml
├── sg_jumpbox.yml
├── sg_modify.yml
├── sg_openvpn.yml
├── sg_webserver.yml
├── site.yml
├── terminate_ec2.yml
├── test.txt
├── test1.txt
├── vpc_create.yml
├── vpc_create_multi_az.yml
├── vpc_delete.yml
├── vpc_delete1.yml
├── vpc_info.yml
├── wordpress/
│ ├── backup.yml
│ ├── delete_backup.yml
│ ├── ec2.ini
│ ├── ec2.py
│ ├── group_vars/
│ │ └── all
│ ├── hosts
│ ├── provisioning.yml
│ ├── restore.yml
│ ├── roles/
│ │ ├── common/
│ │ │ └── tasks/
│ │ │ └── main.yml
│ │ ├── mysql/
│ │ │ ├── handlers/
│ │ │ │ └── main.yml
│ │ │ ├── tasks/
│ │ │ │ └── main.yml
│ │ │ └── templates/
│ │ │ └── my.cnf.j2
│ │ ├── web/
│ │ │ └── tasks/
│ │ │ └── main.yml
│ │ └── wordpress/
│ │ ├── tasks/
│ │ │ └── main.yml
│ │ └── templates/
│ │ └── wp-config.php
│ └── site.yml
└── wordpress_ha/
├── ec2.ini
├── ec2.py
├── group_vars/
│ └── all
├── hosts
├── provisioning_asg.yml
├── provisioning_rds.yml
├── provisioning_sg.yml
├── provisioning_vpc.yml
├── provisioning_wp.yml
├── roles/
│ ├── web/
│ │ └── tasks/
│ │ └── main.yml
│ └── wordpress/
│ ├── tasks/
│ │ └── main.yml
│ └── templates/
│ └── wp-config.php
├── site.yml
└── vars/
└── staging.yml
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
ansible4aws-book.*
.DS_Store
================================================
FILE: ami_create.yml
================================================
---
# Create an AMI from the running instance tagged Name={{ ins_name }}.
# Looks the instance up with the AWS CLI, then bakes the image with ec2_ami.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    ins_name: wordpress_master
    ami_name: wordpress
  tasks:
    - name: get instance id
      command: >
        aws ec2 describe-instances
        --filters Name=tag:Name,Values={{ ins_name }}
        --query 'Reservations[0].Instances[0].InstanceId' --output text
      register: instanceid

    - name: create ami
      ec2_ami:
        instance_id: "{{ instanceid.stdout }}"
        region: "{{ region }}"
        wait: true
        name: "{{ ami_name }}"
      register: ami
      # The CLI prints the literal string "None" when no instance matched.
      when: instanceid.stdout != "None"

    - debug: var=ami
================================================
FILE: ami_delete.yml
================================================
---
# Deregister the AMI named {{ ami_name }} and delete its backing snapshot.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    ami_name: wordpress
  tasks:
    - name: get ami id
      command: >
        aws ec2 describe-images
        --filters Name=name,Values={{ ami_name }}
        --query 'Images[0].ImageId' --output text
      register: imageid

    - name: delete ami
      ec2_ami:
        region: "{{ region }}"
        image_id: "{{ imageid.stdout }}"
        delete_snapshot: true
        state: absent
      # The CLI prints the literal string "None" when no image matched.
      when: imageid.stdout != "None"
================================================
FILE: dbsg_create.yml
================================================
---
# Create a Multi-AZ RDS DB subnet group spanning the two private staging
# subnets (subnet ids come from the staging_vpc_info vars file).
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
  vars_files:
    - staging_vpc_info
  tasks:
    - name: create Multi-AZ DB subnet group
      rds_subnet_group:
        name: dbsg2
        state: present
        region: "{{ region }}"
        description: DB Subnet Group 2
        subnets:
          - "{{ staging_subnet_private_0 }}"
          - "{{ staging_subnet_private_1 }}"
================================================
FILE: dbsg_delete.yml
================================================
---
# Remove the dbsg2 DB subnet group created by dbsg_create.yml.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
  tasks:
    - name: delete DB subnet group
      rds_subnet_group:
        name: dbsg2
        state: absent
        region: "{{ region }}"
================================================
FILE: dhcp_options.yml
================================================
---
# Create a DHCP options set (domain name + name servers) and associate it
# with the VPC tagged Name={{ name }}, all via the AWS CLI.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    name: test-vpc
  tasks:
    - name: create dhcp options set
      command: >
        aws ec2 create-dhcp-options --dhcp-configuration
        "Key=domain-name,Values=example.com"
        "Key=domain-name-servers,Values=10.0.0.7,10.0.0.8"
        --query 'DhcpOptions.DhcpOptionsId' --output text
      register: dopt

    - name: get vpc id
      command: >
        aws ec2 describe-vpcs --filters Name=tag:Name,Values={{ name }}
        --query 'Vpcs[0].VpcId' --output text
      register: vpcid

    - name: associate vpc with dhcp options set
      command: >
        aws ec2 associate-dhcp-options --dhcp-options-id {{ dopt.stdout }}
        --vpc-id {{ vpcid.stdout }}
================================================
FILE: ec2.ini
================================================
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1,cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2.
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
================================================
FILE: ec2.py
================================================
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
    ''' Ansible dynamic inventory source for AWS EC2 and RDS, backed by boto.

    Reads settings from ec2.ini, queries each configured region, groups
    hosts by instance id, region, availability zone, type, key pair,
    security group, tags and (optionally) Route53 names, and caches the
    result as JSON on disk.
    '''

    def _empty_inventory(self):
        # '_meta.hostvars' must always be present, even for an empty inventory.
        return {"_meta": {"hostvars": {}}}

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print(data_to_print)

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both cache files must exist for the cache to be usable.
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''

        config = ConfigParser.SafeConfigParser()
        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)

        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                for regionInfo in ec2.regions():
                    # NOTE: substring match against the comma-separated
                    # exclude string, as in the upstream script.
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''

        if self.route53_enabled:
            self.get_route53_records()

        for region in self.regions:
            self.get_instances_by_region(region)
            self.get_rds_instances_by_region(region)

        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''

        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)

            # connect_to_region will fail "silently" by returning None if the
            # region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)

            reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)

        except boto.exception.BotoServerError as e:
            if not self.eucalyptus:
                print("Looks like AWS is down again:")
                print(e)
                sys.exit(1)

    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''

        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError as e:
            # "Forbidden" simply means the account has no RDS access here.
            if not e.reason == "Forbidden":
                print("Looks like AWS RDS is down: ")
                print(e)
                sys.exit(1)

    def get_instance(self, region, instance_id):
        ''' Gets details about a specific instance '''

        if self.eucalyptus:
            conn = boto.connect_euca(self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        else:
            conn = ec2.connect_to_region(region)

        # connect_to_region will fail "silently" by returning None if the
        # region name is wrong or not supported
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)

        reservations = conn.get_all_instances([instance_id])
        for reservation in reservations:
            for instance in reservation.instances:
                return instance

    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''

        # Only want running instances
        if instance.state != 'running':
            return

        # Select the best destination address
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable)
        else:
            dest = getattr(instance, self.destination_variable)

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)

        # Inventory: Group by key pair
        if instance.key_name:
            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)

        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            print('Package boto seems a bit older.')
            print('Please upgrade boto >= 2.3.0.')
            sys.exit(1)

        # Inventory: Group by tag keys
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)

        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)

    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''

        # Only want available instances
        if instance.status != 'available':
            return

        # Select the best destination address
        #if instance.subnet_id:
            #dest = getattr(instance, self.vpc_destination_variable)
        #else:
            #dest = getattr(instance, self.destination_variable)
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)

        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            print('Package boto seems a bit older.')
            print('Please upgrade boto >= 2.3.0.')
            sys.exit(1)

        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)

        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)

    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''

        r53_conn = route53.Route53Connection()
        all_zones = r53_conn.get_zones()

        # zone.name carries a trailing dot; strip it before comparing.
        route53_zones = [zone for zone in all_zones if zone.name[:-1]
                         not in self.route53_excluded_zones]

        self.route53_records = {}

        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone.id)

            for record_set in rrsets:
                record_name = record_set.name

                if record_name.endswith('.'):
                    record_name = record_name[:-1]

                for resource in record_set.resource_records:
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)

    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''

        instance_attributes = ['public_dns_name', 'private_dns_name',
                               'ip_address', 'private_ip_address']

        name_list = set()

        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue

            if value in self.route53_records:
                name_list.update(self.route53_records[value])

        return list(name_list)

    def get_host_info_dict_from_instance(self, instance):
        ''' Flattens a boto instance object into a dict of ec2_* hostvars. '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        return instance_vars

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)

    def push(self, my_dict, key, element):
        ''' Pushed an element onto an array that may not have been defined in
        the dict '''

        if key in my_dict:
            my_dict[key].append(element)
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''

        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''

        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script only when executed directly; importing the module for
# inspection/testing no longer triggers API calls as a side effect.
if __name__ == '__main__':
    Ec2Inventory()
================================================
FILE: ec2_check_name.yml
================================================
---
# Idempotent launch: create an EC2 instance tagged Name={{ name }} only when
# no instance with that tag already exists.
- hosts: localhost
  gather_facts: false
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    type: t2.micro
    image: ami-d9fe9be3
    sg: sg_webserver_apsydney
    name: test-01
  tasks:
    - name: check if instance with name tag exists
      command: >
        aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }}
        --query 'Reservations[0].Instances[0].InstanceId' --output text
      register: instanceid

    - name: create EC2 if not exists
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ type }}"
        image: "{{ image }}"
        group: "{{ sg }}"
        instance_tags:
          Name: "{{ name }}"
        wait: true
      # The CLI prints the literal string "None" when no instance matched.
      when: instanceid.stdout == "None"
================================================
FILE: ec2_profile.yml
================================================
---
# Launch an EC2 instance with the app1 IAM instance profile attached.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    # your region
    region: ap-southeast-2
  tasks:
    - name: EC2 provisioning with instance profile
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t2.micro
        image: ami-dc361ebf
        group: sg_webserver_apsydney
        instance_profile_name: app1
================================================
FILE: ec2_start.yml
================================================
---
# Start the stopped EC2 instance tagged Name={{ name }}.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    region: ap-southeast-2
    name: test-01
  tasks:
    - name: get instance id
      command: >
        aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }}
        --query 'Reservations[0].Instances[0].InstanceId' --output text
      register: instanceid

    - name: start instance
      ec2:
        region: "{{ region }}"
        instance_ids: "{{ instanceid.stdout }}"
        state: running
        wait: true
      # The CLI prints the literal string "None" when no instance matched.
      when: instanceid.stdout != "None"
================================================
FILE: ec2_start_1.yml
================================================
---
# Start the EC2 instance tagged Name={{ name }}, using the custom
# instance_lookup module (library/instance_lookup) instead of the AWS CLI.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    region: ap-southeast-2
    name: bamboo-1
  tasks:
    - name: get instance id
      instance_lookup:
        region: "{{ region }}"
        tags:
          Name: "{{ name }}"
      register: instanceid

    - debug: var=instanceid.instance_ids

    - name: start instance
      ec2:
        region: "{{ region }}"
        instance_ids: "{{ instanceid.instance_ids }}"
        state: running
        wait: true
      # A registered variable is always defined, so the original
      # `instanceid is defined` guard never skipped anything; skip instead
      # when the lookup returned no matching instance ids.
      when: instanceid.instance_ids
================================================
FILE: ec2_stop.yml
================================================
---
# Stop the running EC2 instance tagged Name={{ name }}.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    region: ap-southeast-2
    name: test-01
  tasks:
    - name: get instance id
      command: >
        aws ec2 describe-instances --filter Name=tag:Name,Values={{ name }}
        --query 'Reservations[0].Instances[0].InstanceId' --output text
      register: instanceid

    - name: stop instance
      ec2:
        region: "{{ region }}"
        instance_ids: "{{ instanceid.stdout }}"
        state: stopped
        wait: true
      # The CLI prints the literal string "None" when no instance matched.
      when: instanceid.stdout != "None"
================================================
FILE: ec2_vol_1.yml
================================================
---
# Launch an EC2 instance with a 100 GB general-purpose (gp2) root EBS volume
# that is deleted when the instance terminates.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    # your region
    region: ap-southeast-2
  tasks:
    - name: EC2 provisioning with general purpose EBS volume
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t2.micro
        image: ami-dc361ebf
        group: sg_webserver_apsydney
        volumes:
          - device_name: /dev/sda1
            device_type: gp2
            volume_size: 100
            delete_on_termination: true
================================================
FILE: ec2_vol_2.yml
================================================
---
# Launch an EC2 instance with a 500 GB provisioned-IOPS (io1, 1000 IOPS)
# root EBS volume, deleted on termination.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    # your region
    region: ap-southeast-2
  tasks:
    - name: EC2 provisioning with provisioned IOPS EBS volume
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t2.micro
        image: ami-dc361ebf
        group: sg_webserver_apsydney
        volumes:
          - device_name: /dev/sda1
            device_type: io1
            iops: 1000
            volume_size: 500
            delete_on_termination: true
================================================
FILE: ec2_vpc_db_create.yml
================================================
---
# Launch a database instance in the private staging subnet (no public IP);
# the subnet id comes from the staging_vpc_info vars file.
- hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - staging_vpc_info
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    instance_type: t2.micro
    image: ami-dc361ebf
    prefix: staging
  tasks:
    - name: database instance provisioning
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: "{{ prefix }}_sg_database"
        instance_tags:
          Name: "{{ prefix }}_database"
          class: database
          environment: staging
        vpc_subnet_id: "{{ staging_subnet_private }}"
        assign_public_ip: false
================================================
FILE: ec2_vpc_jumpbox.yml
================================================
---
# Launch a jump box in the first public staging subnet and attach a new
# Elastic IP to it.
- hosts: localhost
  gather_facts: false
  connection: local
  vars_files:
    - staging_vpc_info
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    instance_type: t2.micro
    image: ami-dc361ebf
    prefix: staging
    vpc_subnet_id: "{{ staging_subnet_public_0 }}"
  tasks:
    - name: jump box instance provisioning
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: "{{ prefix }}_sg_jumpbox"
        instance_tags:
          Name: "{{ prefix }}_jumpbox"
          class: jumpbox
          environment: "{{ prefix }}"
        vpc_subnet_id: "{{ vpc_subnet_id }}"
      register: ec2

    - name: associate new EIP for the instance
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; use a Jinja expression.
      with_items: "{{ ec2.instances }}"
================================================
FILE: ec2_vpc_openvpn.yml
================================================
---
# Launch an OpenVPN server in the first public staging subnet (source/dest
# check disabled so it can route traffic) and attach a new Elastic IP.
- hosts: localhost
  gather_facts: false
  connection: local
  vars_files:
    - staging_vpc_info
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    instance_type: t2.micro
    image: ami-a17f199b
    prefix: staging
    vpc_subnet_id: "{{ staging_subnet_public_0 }}"
  tasks:
    - name: openvpn server instance provisioning
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        source_dest_check: false
        wait: true
        group: "{{ prefix }}_sg_openvpn"
        instance_tags:
          Name: "{{ prefix }}_openvpn"
          class: openvpn
          environment: "{{ prefix }}"
        vpc_subnet_id: "{{ vpc_subnet_id }}"
      register: ec2

    - name: associate new EIP for the instance
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; use a Jinja expression.
      with_items: "{{ ec2.instances }}"
================================================
FILE: ec2_vpc_web_create.yml
================================================
---
# Launch a web instance in the public staging subnet and attach a new
# Elastic IP to it.
- hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - staging_vpc_info
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    instance_type: t2.micro
    image: ami-dc361ebf
    prefix: staging
  tasks:
    - name: web instance provisioning
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: "{{ prefix }}_sg_web"
        instance_tags:
          Name: "{{ prefix }}_web"
          class: web
          environment: staging
        vpc_subnet_id: "{{ staging_subnet_public }}"
      register: ec2

    - name: associate new EIP for the instance
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; use a Jinja expression.
      with_items: "{{ ec2.instances }}"
================================================
FILE: group_vars/all
================================================
---
# Variables here are applicable to all host groups
ntpserver: 0.au.pool.ntp.org
ansible_user: ec2-user
ansible_ssh_private_key_file: ~/.ssh/yan-key-pair-apsydney.pem
================================================
FILE: group_vars/tag_class_wordpress
================================================
ansible_ssh_user: ec2-user
ansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem
================================================
FILE: host_vars/localhost
================================================
ansible_ssh_user: ec2-user
ansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem
================================================
FILE: hosts
================================================
# Static inventory: only localhost. EC2 hosts are discovered at runtime by
# the ec2.py dynamic inventory script.
[local]
localhost
#[webservers]
#54.79.109.14 ansible_ssh_user=ec2-user ansible_ssh_private_key_file=~/.ssh/yan-key-pair-apsydney.pem
================================================
FILE: iam_group.yml
================================================
---
# Create an (initially empty) IAM group named "admin".
# iam_policy.yml attaches the Administrator policy to it.
- hosts: localhost
  gather_facts: no
  connection: local
  tasks:
    - name: create IAM group admin
      iam:
        iam_type: group
        name: admin
        state: present
================================================
FILE: iam_policy.yml
================================================
---
# Attach the policy defined in iam_policy_admin.json (Allow * on *) to the
# "admin" IAM group created by iam_group.yml.
- hosts: localhost
  gather_facts: no
  connection: local
  tasks:
    - name: Assign a policy called Administrator to the admin group
      iam_policy:
        iam_type: group
        iam_name: admin
        policy_name: Administrator
        state: present
        # Path to a local JSON policy document.
        policy_document: iam_policy_admin.json
================================================
FILE: iam_policy_admin.json
================================================
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
================================================
FILE: iam_policy_app1.yml
================================================
---
# Attach the read-only S3 policy from iam_policy_s3_read.json to the "app1"
# IAM role created by iam_role.yml.
- hosts: localhost
  gather_facts: no
  connection: local
  tasks:
    - name: Assign a policy called S3ReadOnly to the app1 role
      iam_policy:
        iam_type: role
        iam_name: app1
        policy_name: S3ReadOnly
        state: present
        policy_document: iam_policy_s3_read.json
================================================
FILE: iam_policy_s3_read.json
================================================
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*"
],
"Resource": "*"
}
]
}
================================================
FILE: iam_role.yml
================================================
---
# Create an IAM role named "app1"; iam_policy_app1.yml attaches its policy.
- hosts: localhost
  gather_facts: no
  connection: local
  tasks:
    - name: create IAM role app1
      iam:
        iam_type: role
        name: app1
        state: present
================================================
FILE: iam_user.yml
================================================
---
# Create IAM user "yan" and place it in the admin group (see iam_group.yml).
- hosts: localhost
  gather_facts: no
  connection: local
  tasks:
    - name: create IAM user yan
      iam:
        iam_type: user
        name: yan
        state: present
        groups: admin
================================================
FILE: install_ansible.yml
================================================
---
# Install Ansible on the jump box (hosts tagged class=jumpbox) through the
# "ansible" role.
- hosts: tag_class_jumpbox
  # Canonical boolean spelling ("yes" is a YAML-1.1 truthy alias).
  become: true
  roles:
    - ansible
================================================
FILE: keypair.yml
================================================
---
# Create an EC2 key pair and save the returned private key material locally.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    keyname: yan1
  tasks:
    - name: create key pair
      ec2_key:
        region: "{{ region }}"
        name: "{{ keyname }}"
      register: mykey
    - name: write to file
      # AWS only returns the private key on creation, so only write the
      # file when the previous task actually created the pair.
      copy:
        content: "{{ mykey.key.private_key }}"
        dest: "~/.ssh/{{ keyname }}.pem"
        # Quoted so YAML does not parse the mode as an octal integer.
        mode: "0600"
      when: mykey.changed
================================================
FILE: launch_ec2.yml
================================================
---
# Idempotent provisioning: converge on exactly three instances carrying the
# tag group=webserver.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    # your region
    region: ap-southeast-2
  tasks:
    - name: EC2 basic provisioning
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t1.micro
        image: ami-6bf99c51
        wait: true
        group: sg_webserver_apsydney
        instance_tags:
          group: webserver
        # exact_count/count_tag launch or leave instances until exactly 3
        # match the tag, making repeated runs safe.
        exact_count: 3
        count_tag:
          group: webserver
================================================
FILE: launch_ec2_eip.yml
================================================
---
# Launch a single instance and attach a new Elastic IP to it.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
  tasks:
    - name: launch instance
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t1.micro
        image: ami-6bf99c51
        wait: true
        group: sg_webserver_apsydney
      register: ec2
    - name: associate new EIP for the instance
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; use full Jinja2 syntax.
      with_items: "{{ ec2.instances }}"
================================================
FILE: launch_ec2_iteration.yml
================================================
---
# Launch five instances named web1..web5 by iterating with with_sequence.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # your region
    region: ap-southeast-2
  tasks:
    - name: EC2 basic provisioning
      ec2:
        region: "{{ region }}"
        key_name: yan-key-pair-apsydney
        instance_type: t1.micro
        image: ami-6bf99c51
        group: sg_webserver_apsydney
        instance_tags:
          Name: "web{{ item }}"
      # Runs the task once per item, item = "1".."5".
      with_sequence: count=5
================================================
FILE: launch_ec2_tags.yml
================================================
---
# Launch one instance per class/environment combination, tagged so the EC2
# dynamic inventory can group them (tag_class_*, tag_environment_*).
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    instance_type: t1.micro
    image: ami-6bf99c51
    key: yan-key-pair-apsydney
  tasks:
    - name: launch ec2 with tags webserver staging
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: sg_webserver_apsydney
        instance_tags:
          Name: staging-webserver-1
          class: webserver
          environment: staging
    - name: launch ec2 with tags webserver production
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: sg_webserver_apsydney
        instance_tags:
          Name: production-webserver-1
          class: webserver
          environment: production
    - name: launch ec2 with tags database staging
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: sg_database_apsydney
        instance_tags:
          Name: staging-database-1
          class: database
          environment: staging
================================================
FILE: library/instance_lookup
================================================
#!/usr/bin/python
# author: Yan Kurniawan <yan.kurniawan@gmail.com>
#
# instance_lookup: custom Ansible module returning the IDs of all EC2
# instances in a region whose tags match each key/value pair in "tags".

import sys

AWS_REGIONS = ['ap-northeast-1',
               'ap-southeast-1',
               'ap-southeast-2',
               'eu-west-1',
               'sa-east-1',
               'us-east-1',
               'us-west-1',
               'us-west-2']

try:
    # "from boto.ec2 import ..." alone does not bind the name "boto", so the
    # original "except boto.exception.NoAuthHandlerFound" would have raised a
    # NameError; import the exception module explicitly.
    import boto.exception
    from boto.ec2 import connect_to_region
except ImportError:
    # print() works on both Python 2 and 3 (original used a py2-only statement).
    print("failed=True msg='boto required for this module'")
    sys.exit(1)


def main():
    """Look up EC2 instance IDs by tag and exit via Ansible's JSON protocol."""
    module = AnsibleModule(
        argument_spec=dict(
            region=dict(choices=AWS_REGIONS),
            tags=dict(default=None, type='dict'),
        )
    )
    params = module.params
    # Default to {} so a missing "tags" option yields an empty result instead
    # of crashing on iteration (tags defaults to None).
    tags = params['tags'] or {}
    region = params['region']
    if region:
        try:
            ec2 = connect_to_region(region)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    instance_ids = []
    # items() instead of iteritems() for Python 3 compatibility.
    for tag, value in tags.items():
        for instance in ec2.get_only_instances(filters={"tag:" + tag: value}):
            instance_ids.append(instance.id)
    module.exit_json(changed=False, instance_ids=instance_ids)


from ansible.module_utils.basic import *
main()
================================================
FILE: library/vpc_lookup
================================================
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vpc_lookup
short_description: returns a list of subnet Ids using tags as criteria
description:
- Returns a list of subnet Ids for a given set of tags that identify one or more VPCs
version_added: "1.5"
options:
region:
description:
- The AWS region to use. Must be specified if ec2_url
is not used. If not specified then the value of the
EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of
the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the
AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
tags:
description:
- tags to lookup
required: false
default: null
type: dict
aliases: []
requirements: [ "boto" ]
author: John Jarvis
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Return all instances that match the tag "Name: foo"
- local_action:
module: vpc_lookup
tags:
Name: foo
'''
import sys
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
try:
from boto.vpc import VPCConnection
from boto.vpc import connect_to_region
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
    """Look up VPC and subnet IDs by tag and exit via Ansible's JSON protocol."""
    # "from boto.vpc import ..." does not bind the name "boto"; import the
    # exception module explicitly so the except clause below can resolve it.
    import boto.exception

    module = AnsibleModule(
        argument_spec=dict(
            region=dict(choices=AWS_REGIONS),
            aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'],
                                no_log=True),
            aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
            tags=dict(default=None, type='dict'),
        )
    )
    # Default to {} so a missing "tags" option returns empty lists instead of
    # crashing on iteration (tags defaults to None).
    tags = module.params.get('tags') or {}
    aws_secret_key = module.params.get('aws_secret_key')
    aws_access_key = module.params.get('aws_access_key')
    region = module.params.get('region')
    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            connection = connect_to_region(
                region,
                aws_access_key_id=aws_access_key,
                aws_secret_access_key=aws_secret_key)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    subnet_ids = []
    # items() instead of iteritems() for Python 3 compatibility.
    for tag, value in tags.items():
        for subnet in connection.get_all_subnets(filters={"tag:" + tag: value}):
            subnet_ids.append(subnet.id)
    vpc_ids = []
    for tag, value in tags.items():
        # The original wrote "for vpc in vpc.get_all_vpcs(...)", clobbering
        # the connection object with a VPC instance and crashing on any
        # second tag; use a distinct loop variable.
        for matched_vpc in connection.get_all_vpcs(filters={"tag:" + tag: value}):
            vpc_ids.append(matched_vpc.id)
    module.exit_json(changed=False, vpc_ids=vpc_ids, subnet_ids=subnet_ids)


# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
================================================
FILE: mysql_pg_create.yml
================================================
---
# Create an RDS parameter group for MySQL 5.6 with a few tuned parameters.
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    region: ap-southeast-2
  tasks:
    - name: create mysql parameter group
      rds_param_group:
        name: mysqlpg1
        state: present
        region: "{{ region }}"
        description: MySQL Parameter Group 1
        engine: mysql5.6
        params:
          innodb_lock_wait_timeout: 3600
          max_allowed_packet: 512M
          net_write_timeout: 300
================================================
FILE: mysql_pg_delete.yml
================================================
---
# Remove the mysqlpg1 RDS parameter group created by mysql_pg_create.yml.
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    region: ap-southeast-2
  tasks:
    - name: delete mysql parameter group
      rds_param_group:
        name: mysqlpg1
        state: absent
        region: "{{ region }}"
================================================
FILE: mysql_rds_create.yml
================================================
---
# Create a Multi-AZ MySQL RDS instance attached to the staging DB subnet
# group and security group.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    region: ap-southeast-2
    size: 100
    instance_type: db.m1.small
    db_engine: MySQL
    engine_version: 5.6.22
    subnet: dbsg2
    parameter_group: dbpg1
    # staging_sg_database security group ID
    security_groups: sg-xxxxxxxx
    # Provisioned IOPS for the allocated storage.
    iops: 1000
    db_name: mydb
    username: dbadmin
    # NOTE(review): plaintext credential committed with the playbook —
    # consider ansible-vault or vars_prompt instead.
    password: mypassword
  tasks:
    - name: create mysql RDS instance
      rds:
        command: create
        instance_name: staging-mysql-1
        region: "{{ region }}"
        size: "{{ size }}"
        instance_type: "{{ instance_type }}"
        db_engine: "{{ db_engine }}"
        engine_version: "{{ engine_version }}"
        subnet: "{{ subnet }}"
        parameter_group: "{{ parameter_group }}"
        multi_zone: true
        db_name: "{{ db_name }}"
        username: "{{ username }}"
        password: "{{ password }}"
        vpc_security_groups: "{{ security_groups }}"
        iops: "{{ iops }}"
================================================
FILE: mysql_rds_delete.yml
================================================
---
# Delete the staging MySQL RDS instance created by mysql_rds_create.yml.
# (Adds the document-start marker the original file was missing.)
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    region: ap-southeast-2
  tasks:
    - name: delete mysql RDS instance
      rds:
        command: delete
        region: "{{ region }}"
        instance_name: staging-mysql-1
================================================
FILE: nat_launch.yml
================================================
---
# Launch a NAT instance in the staging public subnet and attach an EIP.
- hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - staging_vpc_info
  vars:
    region: ap-southeast-2
    key: yan-key-pair-apsydney
    instance_type: t1.micro
    image: ami-3bae3201
    prefix: staging
  tasks:
    - name: NAT instance provisioning
      ec2:
        region: "{{ region }}"
        key_name: "{{ key }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        # The original declared "wait" twice in this mapping (duplicate key;
        # most parsers silently keep the last one) — declared once here.
        wait: true
        group: "{{ prefix }}_sg_nat"
        instance_tags:
          Name: "{{ prefix }}_nat"
          class: nat
          # Derived from prefix (was hard-coded "staging") for consistency.
          environment: "{{ prefix }}"
        # Idempotency token: rerunning with the same id will not launch a
        # duplicate instance.
        id: nat_launch_02
        vpc_subnet_id: "{{ staging_subnet_public }}"
        # A NAT instance must forward traffic not addressed to itself.
        source_dest_check: false
      register: ec2
    - name: associate new EIP for the instance
      tags: eip
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; use full Jinja2 syntax.
      with_items: "{{ ec2.instances }}"
      when: item.id is defined
================================================
FILE: roles/ansible/tasks/main.yml
================================================
---
# Prepare the jump box and install Ansible from PyPI.
- name: upgrade all packages
  yum:
    name: '*'
    state: latest

- name: install the 'Development tools' package group
  yum:
    name: "@Development tools"
    state: present

- name: install required packages
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - epel-release.noarch
    - python-pip
    - python-devel

- name: install setuptools
  pip:
    name: setuptools
    extra_args: '--upgrade'

- name: install ansible
  pip:
    name: ansible
================================================
FILE: roles/apache/tasks/main.yml
================================================
---
# Install the Apache web server and ensure it is running and enabled on boot.
- name: install apache
  yum:
    name: httpd
    state: present
  tags: apache

- name: start the httpd service
  service:
    name: httpd
    state: started
    enabled: true
  tags: apache
================================================
FILE: roles/common/handlers/main.yml
================================================
---
# Handlers for the common role.
- name: restart ntp
  # Notified by the "configure ntp file" task when ntp.conf changes.
  service: name=ntpd state=restarted
================================================
FILE: roles/common/tasks/main.yml
================================================
---
# Install, configure and enable NTP on every host.
- name: install ntp
  yum:
    name: ntp
    state: present
  tags: ntp

- name: configure ntp file
  template:
    src: ntp.conf.j2
    dest: /etc/ntp.conf
  tags: ntp
  notify: restart ntp

- name: start the ntp service
  service:
    name: ntpd
    state: started
    enabled: true
  tags: ntp
================================================
FILE: roles/common/templates/ntp.conf.j2
================================================
# NTP configuration template managed by Ansible (roles/common).
driftfile /var/lib/ntp/drift
# Restrict daemon access to localhost (IPv4 and IPv6).
restrict 127.0.0.1
restrict -6 ::1
# Upstream server; set per group via the ntpserver variable (group_vars/all).
server {{ ntpserver }}
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys
================================================
FILE: roles/mysql/tasks/main.yml
================================================
---
# Install the MySQL server and ensure it is running and enabled on boot.
- name: install mysql server
  yum:
    name: mysql-server
    state: present
  tags: mysql

- name: start the mysql service
  service:
    name: mysqld
    state: started
    enabled: true
  tags: mysql
================================================
FILE: route53.yml
================================================
---
# Create a Route53 A record for the zone apex pointing at a fixed IP.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: create record
      route53:
        command: create
        # Zone and record are fully qualified (note the trailing dot).
        zone: yankurniawan.com.
        record: yankurniawan.com.
        type: A
        value: 54.79.34.239
================================================
FILE: s3_create_bucket.yml
================================================
---
# Create an S3 bucket (bucket names are globally unique).
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    bucketname: yan001
  tasks:
    - name: create an S3 bucket
      s3:
        bucket: "{{ bucketname }}"
        mode: create
================================================
FILE: s3_create_dir.yml
================================================
---
# Create a "virtual directory" (a key ending in /) inside the bucket.
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    bucketname: yan001
  tasks:
    - name: create virtual directory
      s3:
        bucket: "{{ bucketname }}"
        object: /backup/database/
        mode: create
================================================
FILE: s3_delete_bucket.yml
================================================
---
# Delete an S3 bucket and all of its contents — destructive, no confirmation.
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    bucketname: yan001
  tasks:
    - name: delete an S3 bucket and all of its contents
      s3:
        bucket: "{{ bucketname }}"
        mode: delete
================================================
FILE: s3_download_file.yml
================================================
---
# Download an object from S3 to the local working directory.
- hosts: localhost
  gather_facts: false
  connection: local
  # "sudo" is deprecated since Ansible 1.9; "become" is the replacement
  # (install_ansible.yml in this repo already uses become).
  become: true
  vars:
    bucketname: yan001
  tasks:
    - name: download file
      s3:
        bucket: "{{ bucketname }}"
        object: /backup/database/test.txt
        dest: test.txt
        mode: get
================================================
FILE: s3_share_file.yml
================================================
---
# Generate a pre-signed download URL for an S3 object.
- hosts: localhost
  gather_facts: no
  connection: local
  vars:
    bucketname: yan001
  tasks:
    - name: share file
      s3:
        bucket: "{{ bucketname }}"
        object: /backup/database/test.txt
        # URL validity period in seconds (one hour).
        expiration: 3600
        mode: geturl
================================================
FILE: s3_upload_file.yml
================================================
---
# Upload a local file to S3, skipping the upload if the object already exists.
- hosts: localhost
  gather_facts: false
  connection: local
  vars:
    bucketname: yan001
  tasks:
    - name: upload file
      s3:
        bucket: "{{ bucketname }}"
        object: /backup/database/test.txt
        src: test.txt
        # Canonical boolean ("no" is a YAML-1.1 truthy alias); existing
        # objects are left untouched.
        overwrite: false
        mode: put
================================================
FILE: sg_database.yml
================================================
---
# EC2-Classic security group for database hosts: SSH from the admin IP and
# MySQL from the webserver group only; all egress allowed.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    #your region
    region: ap-southeast-2
    #your ip address
    allowed_ip: 123.243.16.53/32
  tasks:
    - name: create database security group
      ec2_group:
        region: "{{ region }}"
        name: sg_database_apsydney
        description: security group for apsydney database host
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          # allow mysql access from webserver group
          - proto: tcp
            from_port: 3306
            to_port: 3306
            group_name: sg_webserver_apsydney
        rules_egress:
          # allow all outbound traffic
          - proto: all
            cidr_ip: 0.0.0.0/0
================================================
FILE: sg_delete.yml
================================================
---
# Delete the three staging security groups (web, database, nat). The VPC id
# comes from the staging_vpc_info file written by vpc_create.yml.
- hosts: localhost
  connection: local
  gather_facts: no
  vars_files:
    - staging_vpc_info
  vars:
    #your region
    region: ap-southeast-2
    #prefix for naming
    prefix: staging
    vpc_id: "{{ staging_vpc }}"
  tasks:
    - name: delete {{ prefix }}_sg_web
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_web"
        description: security group for webservers
        state: absent
    - name: delete {{ prefix }}_sg_database
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_database"
        description: security group for databases
        state: absent
    - name: delete {{ prefix }}_sg_nat
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_nat"
        description: security group for nat
        state: absent
================================================
FILE: sg_empty.yml
================================================
---
# Create the three staging security groups with no rules. They are created
# empty first because sg_web and sg_database reference each other by group
# name once rules are added (see sg_modify.yml).
- hosts: localhost
  connection: local
  gather_facts: no
  vars_files:
    - staging_vpc_info
  vars:
    #your region
    region: ap-southeast-2
    #prefix for naming
    prefix: staging
    vpc_id: "{{ staging_vpc }}"
  tasks:
    - name: create empty security group for webservers
      local_action:
        module: ec2_group
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_web"
        description: security group for webservers
    - name: create empty security group for databases
      local_action:
        module: ec2_group
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_database"
        description: security group for databases
    - name: create empty security group for nat
      local_action:
        module: ec2_group
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_nat"
        description: security group for nat
================================================
FILE: sg_jumpbox.yml
================================================
---
# Security group for the jump box: SSH from the admin IP only, all egress.
- hosts: localhost
  gather_facts: no
  connection: local
  vars_files:
    - staging_vpc_info
  vars:
    #your region
    region: ap-southeast-2
    #your ip address
    allowed_ip: 123.243.16.53/32
    #prefix for naming
    prefix: staging
    vpc_id: "{{ staging_vpc }}"
  tasks:
    - name: create security group for jump box instance
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        #your security group name
        name: "{{ prefix }}_sg_jumpbox"
        description: security group for jump box
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
        rules_egress:
          # allow all outbound
          - proto: all
            cidr_ip: 0.0.0.0/0
================================================
FILE: sg_modify.yml
================================================
---
# Populate rules on the three (initially empty) staging security groups.
# sg_web and sg_database reference each other by group name, which is why
# sg_empty.yml creates them without rules first.
- hosts: localhost
  connection: local
  gather_facts: no
  vars_files:
    - staging_vpc_info
  vars:
    #your region
    region: ap-southeast-2
    #your ip address
    allowed_ip: 54.79.34.239/32
    #prefix for naming
    prefix: staging
    vpc_id: "{{ staging_vpc }}"
    private_subnet: 10.0.1.0/24
  tasks:
    - name: modify sg_web rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        #your security group name
        name: "{{ prefix }}_sg_web"
        description: security group for webservers
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          # allow http access from anywhere
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          # allow https access from anywhere
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
        rules_egress:
          # allow mysql outbound, to the database group only
          - proto: tcp
            from_port: 3306
            to_port: 3306
            group_name: "{{ prefix }}_sg_database"
          # allow http outbound
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          # allow https outbound
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
    - name: modify sg_database rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_database"
        description: security group for databases
        rules:
          # allow mysql from the webserver group only
          - proto: tcp
            from_port: 3306
            to_port: 3306
            group_name: "{{ prefix }}_sg_web"
        rules_egress:
          # allow http/https outbound (routed via the NAT instance)
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
    - name: modify sg_nat rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        name: "{{ prefix }}_sg_nat"
        description: security group for nat
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          # allow http access from private subnet
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: "{{ private_subnet }}"
          # allow https access from private subnet
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: "{{ private_subnet }}"
        rules_egress:
          # allow http/https outbound to the internet
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
================================================
FILE: sg_openvpn.yml
================================================
---
# Security group for the OpenVPN instance.
# NOTE(review): the ports below match OpenVPN Access Server defaults —
# presumably 443 = client web UI/TCP VPN, 943 = admin web UI, 1194/udp =
# VPN tunnel; confirm against the deployed server's configuration.
- hosts: localhost
  gather_facts: no
  connection: local
  vars_files:
    - staging_vpc_info
  vars:
    #your region
    region: ap-southeast-2
    #your ip address
    allowed_ip: 123.243.16.53/32
    #prefix for naming
    prefix: staging
    vpc_id: "{{ staging_vpc }}"
  tasks:
    - name: create security group for openvpn instance
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpc_id }}"
        #your security group name
        name: "{{ prefix }}_sg_openvpn"
        description: security group for openvpn
        rules:
          # ssh from the admin ip only
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
          - proto: tcp
            from_port: 943
            to_port: 943
            cidr_ip: 0.0.0.0/0
          - proto: udp
            from_port: 1194
            to_port: 1194
            cidr_ip: 0.0.0.0/0
        rules_egress:
          # allow all outbound
          - proto: all
            cidr_ip: 0.0.0.0/0
================================================
FILE: sg_webserver.yml
================================================
---
# EC2-Classic security group for webservers: SSH from the admin IP, HTTP
# from anywhere; all egress allowed.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    #your region
    region: ap-southeast-2
    #your ip address
    allowed_ip: 123.243.16.53/32
  tasks:
    - name: create security group
      ec2_group:
        region: "{{ region }}"
        name: sg_webserver_apsydney
        description: security group for apsydney webserver host
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          # allow http access from anywhere
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
        rules_egress:
          # allow all outbound
          - proto: all
            cidr_ip: 0.0.0.0/0
================================================
FILE: site.yml
================================================
---
# Top-level site playbook. "sudo" is deprecated since Ansible 1.9; "become"
# is the replacement (install_ansible.yml in this repo already uses it).
# install, configure, and start ntp on all ec2 instances
- hosts: ec2
  become: true
  roles:
    - common
# install and start mysql server on instance with tags class=database
- hosts: tag_class_database
  become: true
  roles:
    - mysql
# install and start apache on instance with tags class=webserver and environment=staging
- hosts: tag_class_webserver:&tag_environment_staging
  become: true
  roles:
    - apache
================================================
FILE: terminate_ec2.yml
================================================
---
# Terminate a fixed list of instances by ID.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    #your region
    region: ap-southeast-2
  tasks:
    - name: terminate instances
      ec2:
        region: "{{ region }}"
        wait: yes
        # NOTE(review): hard-coded instance ids — update the list before
        # each run; these refer to instances that no longer exist.
        instance_ids: ['i-9e1e18a1', 'i-f61b1dc9', 'i-36272109', 'i-0b272134', 'i-0a272135']
        state: absent
================================================
FILE: test.txt
================================================
Hello World. This is a test.
================================================
FILE: test1.txt
================================================
Hello World. This is a test.
================================================
FILE: vpc_create.yml
================================================
---
# Create the staging VPC (public + private subnet in one AZ) and persist the
# generated resource IDs into <prefix>_vpc_info for the other playbooks.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    # prefix for naming
    prefix: staging
    # availability zone
    az: ap-southeast-2a
  tasks:
    - name: create vpc
      local_action:
        module: ec2_vpc
        region: "{{ region }}"
        cidr_block: 10.0.0.0/16
        resource_tags: '{"Name":"{{ prefix }}_vpc"}'
        subnets:
          - cidr: 10.0.0.0/24
            az: "{{ az }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_public"}'
          - cidr: 10.0.1.0/24
            az: "{{ az }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_private"}'
        internet_gateway: true
        route_tables:
          # Give the public subnet a default route through the IGW.
          - subnets:
              - 10.0.0.0/24
            routes:
              - dest: 0.0.0.0/0
                gw: igw
      register: vpc
    - name: write vpc id to {{ prefix }}_vpc_info file
      # "sudo" is deprecated since Ansible 1.9; "become" is the replacement.
      become: true
      local_action: >
        shell echo "{{ prefix }}"_vpc":" "{{ vpc.vpc_id }}"
        > "{{ prefix }}"_vpc_info
    - name: write subnets id to {{ prefix }}_vpc_info file
      become: true
      local_action: >
        shell echo "{{ item.resource_tags.Name }}"":" "{{ item.id }}"
        >> "{{ prefix }}"_vpc_info
      # Bare variables in with_items are deprecated; use full Jinja2 syntax.
      with_items: "{{ vpc.subnets }}"
================================================
FILE: vpc_create_multi_az.yml
================================================
---
# Create the staging VPC with public/private subnet pairs in two AZs and
# persist the generated resource IDs into <prefix>_vpc_info.
- hosts: localhost
  # Added for consistency with vpc_create.yml (all tasks are local anyway).
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    # prefix for naming
    prefix: staging
    # availability zones
    az0: ap-southeast-2a
    az1: ap-southeast-2b
  tasks:
    - name: create vpc with multi-az subnets
      local_action:
        module: ec2_vpc
        region: "{{ region }}"
        cidr_block: 10.0.0.0/16
        resource_tags: '{"Name":"{{ prefix }}_vpc"}'
        subnets:
          - cidr: 10.0.0.0/24
            az: "{{ az0 }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_public_0"}'
          - cidr: 10.0.1.0/24
            az: "{{ az0 }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_private_0"}'
          - cidr: 10.0.2.0/24
            az: "{{ az1 }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_public_1"}'
          - cidr: 10.0.3.0/24
            az: "{{ az1 }}"
            resource_tags: '{"Name":"{{ prefix }}_subnet_private_1"}'
        internet_gateway: true
        route_tables:
          # Default route through the IGW for both public subnets.
          - subnets:
              - 10.0.0.0/24
              - 10.0.2.0/24
            routes:
              - dest: 0.0.0.0/0
                gw: igw
      register: vpc
    - name: write vpc id to {{ prefix }}_vpc_info file
      # "sudo" is deprecated since Ansible 1.9; "become" is the replacement.
      become: true
      local_action: >
        shell echo "{{ prefix }}"_vpc":" "{{ vpc.vpc_id }}"
        > "{{ prefix }}"_vpc_info
    - name: write subnets id to {{ prefix }}_vpc_info file
      become: true
      local_action: >
        shell echo "{{ item.resource_tags.Name }}"":" "{{ item.id }}"
        >> "{{ prefix }}"_vpc_info
      # Bare variables in with_items are deprecated; use full Jinja2 syntax.
      with_items: "{{ vpc.subnets }}"
================================================
FILE: vpc_delete.yml
================================================
---
# Look up the VPC tagged Name=test-vpc via the custom vpc_lookup module
# (./library) and delete every matching VPC.
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
  tasks:
    - name: get vpc id
      local_action:
        module: vpc_lookup
        region: "{{ region }}"
        tags:
          Name: test-vpc
      register: vpc
    - name: delete vpc
      local_action:
        module: ec2_vpc
        region: "{{ region }}"
        state: absent
        vpc_id: "{{ item }}"
        wait: true
      # Bare variables in with_items are deprecated; use full Jinja2 syntax.
      with_items: "{{ vpc.vpc_ids }}"
================================================
FILE: vpc_delete1.yml
================================================
---
# Delete a VPC by name, using the AWS CLI for the lookup instead of the
# custom vpc_lookup module. (Adds the missing document-start marker.)
- hosts: localhost
  connection: local
  gather_facts: false
  vars:
    region: ap-southeast-2
    name: test-vpc
  tasks:
    - name: get vpc id
      command: "aws ec2 describe-vpcs --filters Name=tag:Name,Values={{ name }}
        --query 'Vpcs[0].VpcId' --output text"
      register: vpcid
    - debug: var=vpcid.stdout
    - name: delete vpc
      local_action:
        module: ec2_vpc
        region: "{{ region }}"
        state: absent
        vpc_id: "{{ vpcid.stdout }}"
        wait: true
================================================
FILE: vpc_info.yml
================================================
---
# Query subnet/VPC ids for the VPC tagged Name=staging_vpc using the custom
# vpc_lookup module in ./library.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    region: ap-southeast-2
  tasks:
    - name: get subnet
      local_action:
        module: vpc_lookup
        region: "{{ region }}"
        tags:
          Name: staging_vpc
      register: vpc_subnet
================================================
FILE: wordpress/backup.yml
================================================
---
# Dump the WordPress database on each tagged host, archive it, and upload the
# archive to S3 from the control machine.
- name: backup database and store in S3
  hosts: tag_class_wordpress
  gather_facts: no
  vars:
    # NOTE(review): underscores in S3 bucket names are only accepted under
    # legacy us-east-1 naming rules — confirm this bucket creates successfully.
    bucketname: yan_wordpress
  tasks:
    - name: get date
      # Datestamp (YYYYMMDD) used to name the backup artifacts.
      shell: date +%Y%m%d
      register: date
    - name: create mysql backup
      # NOTE(review): the password is passed on the mysqldump command line and
      # is visible in the process list while the dump runs.
      shell: mysqldump -u {{ wp_db_user }} -p{{ wp_db_password }} {{ wp_db_name }} > /tmp/{{ date.stdout }}_backup.sql
    - name: archive backup
      shell: tar -czf {{ date.stdout }}_backup.tar.gz {{ date.stdout }}_backup.sql && rm -f {{ date.stdout }}_backup.sql chdir=/tmp
    - name: create s3 bucket
      # Runs on the control machine, not the wordpress host.
      local_action:
        module: s3
        bucket: "{{ bucketname }}"
        mode: create
    - name: upload backup archive
      # NOTE(review): local_action reads src on the control machine, but the
      # archive was created on the wordpress host — verify it exists locally.
      local_action:
        module: s3
        bucket: "{{ bucketname }}"
        object: /backup/database/{{ date.stdout }}_backup.tar.gz
        src: /tmp/{{ date.stdout }}_backup.tar.gz
        mode: put
================================================
FILE: wordpress/delete_backup.yml
================================================
---
# Delete a dated backup archive from the S3 bucket using the s3cmd CLI.
- name: delete object from S3 bucket
  hosts: localhost
  gather_facts: no
  vars:
    bucketname: yan_wordpress
    # Datestamp (YYYYMMDD) of the backup to delete.
    date: 20141031
  tasks:
    - name: delete backup file
      command: s3cmd del s3://{{ bucketname }}/backup/database/{{ date }}_backup.tar.gz
      # Keep going if the object is already gone.
      ignore_errors: yes
================================================
FILE: wordpress/ec2.ini
================================================
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
#regions = all
regions = ap-southeast-2
regions_exclude = us-gov-west-1,cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2.
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
================================================
FILE: wordpress/ec2.py
================================================
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
    '''Dynamic Ansible inventory built from AWS EC2/RDS via boto.

    Results are cached on disk (paths and TTL come from ec2.ini) to avoid
    slow API calls on every inventory invocation.  Note: this is Python 2
    code (print statements, iteritems, unicode) against boto 2.x.
    '''

    def _empty_inventory(self):
        # "_meta.hostvars" is the structure Ansible expects a dynamic
        # inventory to use for per-host variables.
        return {"_meta" : {"hostvars" : {}}}

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Cache: refresh on demand or when the on-disk cache has expired.
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()

        elif self.args.list:
            # Display list of instances for inventory.  An empty in-memory
            # inventory means the API was not called this run, so serve the
            # cached JSON instead.
            if self.inventory == self._empty_inventory():
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print data_to_print

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both files must exist for the cache to be usable.
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''

        config = ConfigParser.SafeConfigParser()
        # EC2_INI_PATH overrides the ec2.ini that sits next to this script.
        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)

        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')

        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                # NOTE(review): this is a substring test against the raw
                # comma-separated exclude string, not a list-membership
                # test — a region whose name is a substring of another
                # excluded region would also be skipped.
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            # NOTE(review): the third positional argument of
            # ConfigParser.get() is the 'raw' flag, not a default value;
            # harmless here because has_option() guards the call.
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''

        # Route53 records are fetched once, up front, so add_instance can
        # group hosts by domain name.
        if self.route53_enabled:
            self.get_route53_records()

        for region in self.regions:
            self.get_instances_by_region(region)
            self.get_rds_instances_by_region(region)

        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''

        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)

            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)

            reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)

        except boto.exception.BotoServerError, e:
            if not self.eucalyptus:
                print "Looks like AWS is down again:"
                print e
                sys.exit(1)

    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''

        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError, e:
            # "Forbidden" is swallowed deliberately: the account may simply
            # lack RDS permissions in this region.
            if not e.reason == "Forbidden":
                print "Looks like AWS RDS is down: "
                print e
                sys.exit(1)

    def get_instance(self, region, instance_id):
        ''' Gets details about a specific instance '''
        if self.eucalyptus:
            conn = boto.connect_euca(self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        else:
            conn = ec2.connect_to_region(region)

        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)

        # A single instance ID yields one reservation with one instance;
        # return the first (and only) match.
        reservations = conn.get_all_instances([instance_id])
        for reservation in reservations:
            for instance in reservation.instances:
                return instance

    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''

        # Only want running instances
        if instance.state != 'running':
            return

        # Select the best destination address
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable)
        else:
            dest = getattr(instance, self.destination_variable)

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)

        # Inventory: Group by key pair
        if instance.key_name:
            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)

        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            # instance.groups only exists in boto >= 2.3.0
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by tag keys
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)

        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)

    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''

        # Only want available instances
        if instance.status != 'available':
            return

        # Select the best destination address
        #if instance.subnet_id:
            #dest = getattr(instance, self.vpc_destination_variable)
        #else:
            #dest = getattr(instance, self.destination_variable)
        # RDS endpoints are (hostname, port) pairs; address by hostname.
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)

        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            # instance.security_group only exists in boto >= 2.3.0
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)

        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)

    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''

        r53_conn = route53.Route53Connection()
        all_zones = r53_conn.get_zones()

        # zone.name carries a trailing dot; strip it before comparing with
        # the (dot-less) excluded-zone names from ec2.ini.
        route53_zones = [ zone for zone in all_zones if zone.name[:-1]
                          not in self.route53_excluded_zones ]

        self.route53_records = {}

        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone.id)

            for record_set in rrsets:
                record_name = record_set.name

                if record_name.endswith('.'):
                    record_name = record_name[:-1]

                # Map each record value (IP or DNS name) to the set of
                # domain names that resolve to it.
                for resource in record_set.resource_records:
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)

    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''

        instance_attributes = [ 'public_dns_name', 'private_dns_name',
                                'ip_address', 'private_ip_address' ]

        name_list = set()

        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue

            if value in self.route53_records:
                name_list.update(self.route53_records[value])

        return list(name_list)

    def get_host_info_dict_from_instance(self, instance):
        # Flatten a boto instance object into a plain dict of ec2_-prefixed
        # host variables.  Branch order matters: the str/unicode/int checks
        # run BEFORE the ec2_region/ec2__placement branches, so those later
        # branches only ever see the complex boto objects they expect.
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                # Unhandled complex types are silently dropped.
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        return instance_vars

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)

    def push(self, my_dict, key, element):
        ''' Pushed an element onto an array that may not have been defined in
        the dict '''

        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''

        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''

        # Anything outside [A-Za-z0-9-] becomes '_' so the result is a
        # valid Ansible group name.
        return re.sub("[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


# Run the script
Ec2Inventory()
================================================
FILE: wordpress/group_vars/all
================================================
ansible_ssh_user: ec2-user
ansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem
# Which version of WordPress to deploy
wp_version: 4.5.3
#wp_sha256sum: 73c21224d42156150b948ca645a296a2431f1dd6a19350e0d8a72e465adde56d
# These are the WordPress database settings
wp_db_name: wordpress
wp_db_user: wordpress
# Set your database password here
wp_db_password: secret
# WordPress settings
# Disable All Updates
# By default automatic updates are enabled, set this value to true to disable all automatic updates
auto_up_disable: false
# Define Core Update Level
# true  = Development, minor, and major updates are all enabled
# false = Development, minor, and major updates are all disabled
# minor = Minor updates are enabled; development and major updates are disabled
core_update_level: true
================================================
FILE: wordpress/hosts
================================================
[local]
localhost
================================================
FILE: wordpress/provisioning.yml
================================================
---
# Provision the WordPress host: key pair, security group, EC2 instance,
# and an Elastic IP.  Runs entirely on the control machine.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # your region
    region: ap-southeast-2
    keyname: wordpress-apsydney
    # your ip address
    allowed_ip: 203.87.79.2/32
    instance_type: t2.micro
    image: ami-dc361ebf
  tasks:
    - name: create key pair
      tags: keypair
      ec2_key:
        region: "{{ region }}"
        name: "{{ keyname }}"
      register: mykey

    # AWS returns the private key material only when the key pair is
    # first created, so this only runs on the creating invocation.
    - name: write the private key to file
      copy:
        content: "{{ mykey.key.private_key }}"
        dest: "~/.ssh/{{ keyname }}.pem"
        # Quoted so the octal mode is not parsed as an integer.
        mode: "0600"
      when: mykey.changed

    - name: create security group
      tags: sg
      ec2_group:
        region: "{{ region }}"
        # your security group name
        name: sg_wordpress_apsydney
        description: security group for apsydney webserver host
        rules:
          # allow ssh access from your ip address
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"
          # allow http access from anywhere
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          # allow https access from anywhere
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
        rules_egress:
          - proto: all
            cidr_ip: 0.0.0.0/0

    # The play already runs on localhost with a local connection, so the
    # modules are invoked directly instead of through local_action.
    - name: launch ec2 instance
      tags: ec2
      ec2:
        region: "{{ region }}"
        key_name: "{{ keyname }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: true
        group: sg_wordpress_apsydney
        instance_tags:
          Name: wordpress-1
          class: wordpress
      register: ec2

    - name: associate new EIP for the instance
      tags: eip
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # Bare variables in with_items are deprecated; template explicitly.
      with_items: "{{ ec2.instances }}"
      when: item.id is defined
================================================
FILE: wordpress/restore.yml
================================================
---
# Fetch a dated database backup archive from S3 and restore it into the
# WordPress MySQL database.
- name: download backup from S3 and restore
  hosts: tag_class_wordpress
  gather_facts: no
  vars:
    bucketname: yan_wordpress
    # Date stamp (YYYYMMDD) of the backup to restore.
    date: 20140920
  tasks:
    # NOTE(review): local_action downloads to /tmp on the CONTROL machine,
    # while the extract/restore task below runs in the REMOTE host's /tmp —
    # confirm both are the same machine, or copy the archive across first.
    - name: download backup archive
      local_action:
        module: s3
        bucket: "{{ bucketname }}"
        object: /backup/database/{{ date }}_backup.tar.gz
        dest: /tmp/{{ date }}_backup.tar.gz
        mode: get
    # NOTE(review): the -p<password> flag exposes the DB password in the
    # process list while the restore runs.
    - name: extract archive and restore mysql backup
      shell: tar -xzf {{ date }}_backup.tar.gz &&
             mysql -u {{ wp_db_user }} -p{{ wp_db_password }}
             {{ wp_db_name }} < {{ date }}_backup.sql
             chdir=/tmp
================================================
FILE: wordpress/roles/common/tasks/main.yml
================================================
---
# Common role: toolchain packages every managed host needs.
- name: install the 'Development tools' package group
  yum:
    name: "@Development tools"
    state: present
    update_cache: true
================================================
FILE: wordpress/roles/mysql/handlers/main.yml
================================================
---
# Handlers for the mysql role, notified by configuration changes.
- name: restart mysql
  service:
    name: mysqld
    state: restarted
================================================
FILE: wordpress/roles/mysql/tasks/main.yml
================================================
---
# Install, configure, and start the MySQL server used by WordPress.
- name: install mysql server
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - mysql-devel
    - mysql-server

# Python bindings required by the mysql_db / mysql_user modules.
- name: install mysql-python
  pip:
    name: mysql-python
    state: present

# Deploy /etc/my.cnf; a change triggers the 'restart mysql' handler.
- name: create mysql configuration file
  template:
    src: my.cnf.j2
    dest: /etc/my.cnf
  notify: restart mysql

- name: start mysql service
  service:
    name: mysqld
    state: started
    enabled: true
================================================
FILE: wordpress/roles/mysql/templates/my.cnf.j2
================================================
# You can customize your mysql server configuration here
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
user=mysql
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
port=3306
[mysqld_safe]
log-error=/var/log/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid
================================================
FILE: wordpress/roles/web/tasks/main.yml
================================================
---
# Web role: Apache with PHP and the PHP MySQL driver.
- name: install apache, php, and php-mysql
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - httpd
    - php
    - php-mysql

- name: start and enable httpd
  service:
    name: httpd
    state: started
    enabled: true
================================================
FILE: wordpress/roles/wordpress/tasks/main.yml
================================================
---
# Download, install, and configure WordPress against the local MySQL server.
- name: download wordpress
  # NOTE(review): fetched over plain http and the checksum verification is
  # commented out — consider https and re-enabling sha256sum.
  get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz dest=~/wordpress-{{ wp_version }}.tar.gz
# sha256sum="{{ wp_sha256sum }}"
- name: extract wordpress archive
  # 'creates' makes this idempotent: skipped once ~/wordpress exists.
  command: chdir=~ /bin/tar xvf wordpress-{{ wp_version }}.tar.gz creates=~/wordpress
- name: copy wordpress to apache root directory
  shell: cp -r ~/wordpress/* /var/www/html
- name: fetch random salts for wordpress config
  # NOTE(review): not idempotent — fresh salts are fetched on every run,
  # so wp-config.php is rewritten and existing login cookies invalidated.
  command: curl https://api.wordpress.org/secret-key/1.1/salt/
  register: "wp_salt"
- name: create wordpress database
  mysql_db: name={{ wp_db_name }} state=present
- name: create wordpress database user
  mysql_user: name={{ wp_db_user }} password={{ wp_db_password }} priv={{ wp_db_name }}.*:ALL host='localhost' state=present
- name: copy wordpress config file
  # Renders templates/wp-config.php with the DB settings and the salts
  # registered above.
  template: src=wp-config.php dest=/var/www/html/
- name: change ownership of wordpress installation
  file: path=/var/www/html/ owner=apache group=apache state=directory recurse=yes
================================================
FILE: wordpress/roles/wordpress/templates/wp-config.php
================================================
<?php
/**
 * The base configurations of the WordPress.
 *
 * This file has the following configurations: MySQL settings, Table Prefix,
 * Secret Keys, WordPress Language, and ABSPATH. You can find more information
 * by visiting {@link http://codex.wordpress.org/Editing_wp-config.php Editing
 * wp-config.php} Codex page. You can get the MySQL settings from your web host.
 *
 * This file is used by the wp-config.php creation script during the
 * installation. You don't have to use the web site, you can just copy this file
 * to "wp-config.php" and fill in the values.
 *
 * NOTE: this copy is an Ansible Jinja2 template; the {{ ... }} expressions
 * are substituted from group_vars and registered task results at deploy time.
 *
 * @package WordPress
 */

// ** MySQL settings - You can get this info from your web host ** //
/** The name of the database for WordPress */
define('DB_NAME', '{{ wp_db_name }}');

/** MySQL database username */
define('DB_USER', '{{ wp_db_user }}');

/** MySQL database password */
define('DB_PASSWORD', '{{ wp_db_password }}');

/** MySQL hostname */
define('DB_HOST', 'localhost');

/** Database Charset to use in creating database tables. */
define('DB_CHARSET', 'utf8');

/** The Database Collate type. Don't change this if in doubt. */
define('DB_COLLATE', '');

/**#@+
 * Authentication Unique Keys and Salts.
 *
 * Change these to different unique phrases!
 * You can generate these using the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}
 * You can change these at any point in time to invalidate all existing cookies. This will force all users to have to log in again.
 *
 * @since 2.6.0
 */
/* Rendered from the output of the 'fetch random salts' task; regenerated
 * on every playbook run, which logs out all existing sessions. */
{{ wp_salt.stdout }}
/**#@-*/

/**
 * WordPress Database Table prefix.
 *
 * You can have multiple installations in one database if you give each a unique
 * prefix. Only numbers, letters, and underscores please!
 */
$table_prefix = 'wp_';

/**
 * WordPress Localized Language, defaults to English.
 *
 * Change this to localize WordPress. A corresponding MO file for the chosen
 * language must be installed to wp-content/languages. For example, install
 * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German
 * language support.
 */
define('WPLANG', '');

/**
 * For developers: WordPress debugging mode.
 *
 * Change this to true to enable the display of notices during development.
 * It is strongly recommended that plugin and theme developers use WP_DEBUG
 * in their development environments.
 */
define('WP_DEBUG', false);

/* That's all, stop editing! Happy blogging. */

/** Absolute path to the WordPress directory. */
if ( !defined('ABSPATH') )
	define('ABSPATH', dirname(__FILE__) . '/');

/** Sets up WordPress vars and included files. */
require_once(ABSPATH . 'wp-settings.php');

/* NOTE(review): Jinja renders boolean vars as True/False (PHP accepts these
 * case-insensitively), but a core_update_level of 'minor' would render as an
 * unquoted bare word — confirm that value is handled as intended. */
/** Disable Automatic Updates Completely */
define( 'AUTOMATIC_UPDATER_DISABLED', {{auto_up_disable}} );

/** Define AUTOMATIC Updates for Components. */
define( 'WP_AUTO_UPDATE_CORE', {{core_update_level}} );
================================================
FILE: wordpress/site.yml
================================================
---
# Top-level play: apply the full WordPress stack to every tagged host.
- name: install apache, php, mysql server, wordpress
  hosts: tag_class_wordpress
  become: true
  roles:
    # - common
    - web
    - mysql
    - wordpress
================================================
FILE: wordpress_ha/ec2.ini
================================================
# Ansible EC2 external inventory script settings
#
[ec2]
# to talk to a private eucalyptus instance uncomment these lines
# and edit eucalyptus_host to be the host name of your cloud controller
#eucalyptus = True
#eucalyptus_host = clc.cloud.domain.org
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
regions = all
regions_exclude = us-gov-west-1,cn-north-1
# When generating inventory, Ansible needs to know how to address a server.
# Each EC2 instance has a lot of variables associated with it. Here is the list:
# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
# Below are 2 variables that are used as the address of a server:
# - destination_variable
# - vpc_destination_variable
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
# address, and should set this to 'private_dns_name'.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2.
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
# - ansible-ec2.cache
# - ansible-ec2.index
cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
================================================
FILE: wordpress_ha/ec2.py
================================================
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
    ''' Ansible dynamic-inventory provider for AWS.

    Queries EC2 (and RDS) via boto, groups running instances by id, region,
    availability zone, type, key pair, security group, tags and (optionally)
    Route53 names, caches the result on disk, and prints JSON for Ansible's
    --list / --host inventory protocol. Configuration comes from ec2.ini.
    '''

    def _empty_inventory(self):
        # Skeleton inventory: only the _meta/hostvars container Ansible expects.
        return {"_meta" : {"hostvars" : {}}}

    def __init__(self):
        ''' Main execution path '''

        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()

        # Index of hostname (address) to instance ID
        self.index = {}

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # Cache: refresh on demand (--refresh-cache) or when expired.
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()

        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                # Inventory was not rebuilt this run, so serve the cached JSON.
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print data_to_print

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both cache files must exist for the cache to be usable.
                if os.path.isfile(self.cache_path_index):
                    return True

        return False

    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''

        config = ConfigParser.SafeConfigParser()
        # ec2.ini next to this script unless EC2_INI_PATH overrides it.
        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)

        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')

        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                # NOTE(review): this is a substring test against the raw
                # exclude string, not a list membership test — 'us-east'
                # would exclude 'us-east-1' as well. Confirm intended.
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")

        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')

        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            # NOTE(review): the third positional argument of
            # SafeConfigParser.get() is the 'raw' flag, not a default value;
            # this only works because '' is falsy. Also, zone names are not
            # stripped after split(','), so "a.com, b.com" keeps ' b.com'.
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))

        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''

        # Route53 records are fetched once, up-front, so add_instance can
        # map addresses to DNS names without further API calls.
        if self.route53_enabled:
            self.get_route53_records()

        for region in self.regions:
            self.get_instances_by_region(region)
            self.get_rds_instances_by_region(region)

        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''

        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)

            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)

            reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)

        except boto.exception.BotoServerError, e:
            if not self.eucalyptus:
                print "Looks like AWS is down again:"
                print e
                sys.exit(1)

    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''

        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError, e:
            # "Forbidden" is tolerated so accounts without RDS access can
            # still produce an EC2-only inventory.
            if not e.reason == "Forbidden":
                print "Looks like AWS RDS is down: "
                print e
                sys.exit(1)

    def get_instance(self, region, instance_id):
        ''' Gets details about a specific instance '''
        if self.eucalyptus:
            conn = boto.connect_euca(self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        else:
            conn = ec2.connect_to_region(region)

        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)

        reservations = conn.get_all_instances([instance_id])
        for reservation in reservations:
            for instance in reservation.instances:
                return instance

    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''

        # Only want running instances
        if instance.state != 'running':
            return

        # Select the best destination address
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable)
        else:
            dest = getattr(instance, self.destination_variable)

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)

        # Inventory: Group by key pair
        if instance.key_name:
            self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)

        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            # instance.groups only exists on boto >= 2.3.0
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by tag keys
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)

        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)

    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''

        # Only want available instances
        if instance.status != 'available':
            return

        # Select the best destination address
        #if instance.subnet_id:
            #dest = getattr(instance, self.vpc_destination_variable)
        #else:
            #dest = getattr(instance, self.destination_variable)
        # RDS is always addressed by its endpoint hostname.
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region, dest)

        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)

        # Inventory: Group by instance type
        self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)

        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
        except AttributeError:
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)

        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)

        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)

    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''

        r53_conn = route53.Route53Connection()
        all_zones = r53_conn.get_zones()

        # zone.name carries a trailing dot; strip it before comparing with
        # the excluded-zones list from ec2.ini.
        route53_zones = [ zone for zone in all_zones if zone.name[:-1]
                          not in self.route53_excluded_zones ]

        self.route53_records = {}

        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone.id)

            for record_set in rrsets:
                record_name = record_set.name

                if record_name.endswith('.'):
                    record_name = record_name[:-1]

                for resource in record_set.resource_records:
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)

    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''

        instance_attributes = [ 'public_dns_name', 'private_dns_name',
                                'ip_address', 'private_ip_address' ]

        name_list = set()

        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue

            if value in self.route53_records:
                name_list.update(self.route53_records[value])

        return list(name_list)

    def get_host_info_dict_from_instance(self, instance):
        ''' Flattens a boto instance object into an ec2_*-prefixed hostvars
        dict, expanding tags, groups and a few complex attributes. '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        return instance_vars

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)

    def push(self, my_dict, key, element):
        ''' Pushed an element onto an array that may not have been defined in
        the dict '''

        if key in my_dict:
            my_dict[key].append(element);
        else:
            my_dict[key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''

        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''

        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''

        return re.sub("[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script: instantiating Ec2Inventory does all the work — __init__
# reads settings, queries/caches AWS, and prints the inventory JSON.
Ec2Inventory()
================================================
FILE: wordpress_ha/group_vars/all
================================================
---
# Shared variables for the wordpress_ha playbooks and roles.

# Database connection settings consumed by the wp-config.php template.
db_name: wordpress
username: dbadmin
# NOTE(review): plaintext credential committed to the repository — move to
# ansible-vault or an external secret store.
password: mypassword
dbhost: staging-wordpress-rds.cxzxl961nonk.ap-southeast-2.rds.amazonaws.com

# SSH connection settings for the EC2 instances.
ansible_ssh_user: ec2-user
ansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem

# Quoted so YAML keeps this a string: unquoted 4.1 parses as a float, and a
# future version such as 4.10 would silently become 4.1 in the download URL.
wp_version: "4.1"

# WordPress settings
# Disable All Updates
# By default automatic updates are enabled, set this value to true to disable all automatic updates
auto_up_disable: false

# Define Core Update Level
# true = Development, minor, and major updates are all enabled
# false = Development, minor, and major updates are all disabled
# minor = Minor updates are enabled, development, and major updates are disabled
core_update_level: true
================================================
FILE: wordpress_ha/hosts
================================================
[local]
localhost
================================================
FILE: wordpress_ha/provisioning_asg.yml
================================================
---
# Provision the WordPress auto-scaling stack: public ELB, launch
# configuration, auto scaling group, scaling policies and CloudWatch alarms.
# Expects env-specific vars (region, asg_name, image_id, ...) in {{ env }}.yml.
- hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - include_vars: "{{ env }}.yml"

    # Timestamp suffix makes launch-config/ASG names unique per run.
    - set_fact:
        timestamp: "{{ lookup('pipe', 'date +%g%m%d%H%M%S') }}"

    - name: Create public ELB
      ec2_elb_lb:
        region: "{{ region }}"
        name: "{{ asg_name }}-{{ env }}"
        state: present
        cross_az_load_balancing: yes
        security_group_ids: "{{ elb_group_ids }}"
        subnets: "{{ elb_subnets }}"
        listeners:
          - protocol: http
            load_balancer_port: 80
            instance_port: 80
        health_check:
          ping_protocol: http
          ping_port: 80
          ping_path: "/index.php"
          response_timeout: 2
          interval: 10
          unhealthy_threshold: 2
          healthy_threshold: 2
        connection_draining_timeout: 60
      register: elb

    - debug: var=elb

    - name: Create Launch Configuration
      ec2_lc:
        region: "{{ region }}"
        name: "{{ asg_name }}-{{ env }}-{{ timestamp }}"
        image_id: "{{ image_id }}"
        key_name: "{{ keypair }}"
        instance_type: "{{ instance_type }}"
        security_groups: "{{ security_groups }}"
        instance_monitoring: yes
      register: lc

    - debug: var=lc

    - name: Configure Auto Scaling Group
      ec2_asg:
        region: "{{ region }}"
        name: "{{ asg_name }}-{{ env }}-{{ timestamp }}"
        vpc_zone_identifier: "{{ asg_subnet_ids }}"
        launch_config_name: "{{ lc.name }}"
        availability_zones: "{{ zones }}"
        health_check_type: EC2
        health_check_period: 300
        desired_capacity: "{{ asg_min }}"
        min_size: "{{ asg_min }}"
        max_size: "{{ asg_max }}"
        tags:
          - Name: "{{ asg_name }}-{{ env }}"
        load_balancers: "{{ elb.elb.name }}"
        state: present
      register: asg

    - debug: var=asg

    - name: Configure Scaling Policies
      ec2_scaling_policy:
        region: "{{ region }}"
        name: "{{ item.name }}"
        asg_name: "{{ asg_name }}-{{ env }}-{{ timestamp }}"
        state: present
        adjustment_type: "{{ item.adjustment_type }}"
        min_adjustment_step: "{{ item.min_adjustment_step }}"
        scaling_adjustment: "{{ item.scaling_adjustment }}"
        cooldown: "{{ item.cooldown }}"
      with_items:
        - name: "Increase Group Size"
          adjustment_type: "ChangeInCapacity"
          scaling_adjustment: +1
          min_adjustment_step: 1
          cooldown: 180
        - name: "Decrease Group Size"
          adjustment_type: "ChangeInCapacity"
          scaling_adjustment: -1
          min_adjustment_step: 1
          cooldown: 300
      register: scaling_policy

    - debug: var=scaling_policy

    # results[0] is the scale-up policy, results[1] the scale-down policy,
    # matching the with_items order above.
    - name: Define Metric Alarms configuration
      set_fact:
        metric_alarms:
          - name: "{{ asg.name }}-ScaleUp"
            comparison: ">="
            threshold: 70.0
            alarm_actions:
              - "{{ scaling_policy.results[0].arn }}"
          - name: "{{ asg.name }}-ScaleDown"
            comparison: "<="
            threshold: 30.0
            alarm_actions:
              - "{{ scaling_policy.results[1].arn }}"

    - name: Configure Metric Alarms
      ec2_metric_alarm:
        region: "{{ region }}"
        name: "{{ item.name }}"
        state: present
        metric: "CPUUtilization"
        namespace: "AWS/EC2"
        statistic: "Average"
        comparison: "{{ item.comparison }}"
        threshold: "{{ item.threshold }}"
        period: 60
        evaluation_periods: 5
        unit: "Percent"
        dimensions:
          AutoScalingGroupName: "{{ asg.name }}"
        alarm_actions: "{{ item.alarm_actions }}"
      with_items: "{{ metric_alarms }}"
      # 'when' takes a raw Jinja expression; wrapping it in "{{ }}" is
      # deprecated and warns on modern Ansible.
      when: asg.max_size > 1
      register: alarms

    - debug: var=alarms
================================================
FILE: wordpress_ha/provisioning_rds.yml
================================================
---
# Provision the WordPress Multi-AZ MySQL RDS instance plus its DB subnet
# group and parameter group. db_name/username/password come from group_vars.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    region: ap-southeast-2
    env: staging
    size: 5
    instance_type: db.t2.micro
    db_engine: MySQL
    engine_version: 5.6.23
    subnet_group: dbsg_wordpress
    param_group: wordpress
    # staging_sg_database security group ID
    security_groups: sg-eeb14e8b
  tasks:
    - name: "get {{ env }}_subnet_private_0 subnet id"
      command: >-
        aws ec2 describe-subnets
        --filters Name=tag:Name,Values={{ env }}_subnet_private_0
        --region {{ region }}
        --query 'Subnets[0].SubnetId' --output text
      # read-only lookup — never report "changed"
      changed_when: false
      register: subnet0

    - debug: var=subnet0.stdout

    - name: "get {{ env }}_subnet_private_1 subnet id"
      command: >-
        aws ec2 describe-subnets
        --filters Name=tag:Name,Values={{ env }}_subnet_private_1
        --region {{ region }}
        --query 'Subnets[0].SubnetId' --output text
      changed_when: false
      register: subnet1

    - debug: var=subnet1.stdout

    - name: create Multi-AZ DB subnet group
      rds_subnet_group:
        name: "{{ subnet_group }}"
        state: present
        region: "{{ region }}"
        description: DB Subnet Group for WordPress HA
        subnets:
          - "{{ subnet0.stdout }}"
          - "{{ subnet1.stdout }}"

    - name: create mysql parameter group
      rds_param_group:
        name: "{{ param_group }}"
        state: present
        region: "{{ region }}"
        description: MySQL Parameter Group for WordPress HA
        engine: mysql5.6
        params:
          innodb_lock_wait_timeout: 3600
          max_allowed_packet: 512M
          net_write_timeout: 300

    - name: create mysql RDS instance
      rds:
        command: create
        instance_name: "{{ env }}-wordpress-rds"
        region: "{{ region }}"
        size: "{{ size }}"
        instance_type: "{{ instance_type }}"
        db_engine: "{{ db_engine }}"
        engine_version: "{{ engine_version }}"
        subnet: "{{ subnet_group }}"
        parameter_group: "{{ param_group }}"
        multi_zone: yes
        db_name: "{{ db_name }}"
        username: "{{ username }}"
        password: "{{ password }}"
        vpc_security_groups: "{{ security_groups }}"
================================================
FILE: wordpress_ha/provisioning_sg.yml
================================================
---
# Create the security groups for the WordPress HA stack: web, wordpress,
# database, ansible and wordpress load balancer.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    region: ap-southeast-2
    allowed_ip: xx.xx.xx.xx/32
    vpc_cidr: 10.0.0.0/16
    env: staging
  tasks:
    - name: get vpc id
      # --region added for consistency with the other playbooks; previously
      # this relied on the AWS CLI's ambient default region.
      command: >-
        aws ec2 describe-vpcs
        --region {{ region }}
        --filters Name=tag:Name,Values={{ env }}_vpc
        --query 'Vpcs[0].VpcId' --output text
      # read-only lookup — never report "changed"
      changed_when: false
      register: vpcid

    - name: create sg_web rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpcid.stdout }}"
        name: "{{ env }}_sg_web"
        description: security group for public web
        rules:
          # allow ssh access from ansible group
          - proto: tcp
            from_port: 22
            to_port: 22
            group_name: "{{ env }}_sg_ansible"
            group_desc: security group for ansible
          # allow http access from anywhere
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          # allow https access from anywhere
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0

    - name: create sg_wordpress rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpcid.stdout }}"
        name: "{{ env }}_sg_wordpress"
        description: security group for wordpress servers
        rules:
          # allow ssh access from ansible group
          - proto: tcp
            from_port: 22
            to_port: 22
            group_name: "{{ env }}_sg_ansible"
            group_desc: security group for ansible
          # allow http access from the load balancer group
          - proto: tcp
            from_port: 80
            to_port: 80
            group_name: "{{ env }}_sg_wordpress_lb"
            group_desc: security group for wordpress load balancer
          # allow https access from the load balancer group
          - proto: tcp
            from_port: 443
            to_port: 443
            group_name: "{{ env }}_sg_wordpress_lb"
            group_desc: security group for wordpress load balancer

    - name: create sg_database rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpcid.stdout }}"
        name: "{{ env }}_sg_database"
        description: security group for database
        rules:
          # MySQL access from web and wordpress groups only
          - proto: tcp
            from_port: 3306
            to_port: 3306
            group_name: "{{ env }}_sg_web"
          - proto: tcp
            from_port: 3306
            to_port: 3306
            group_name: "{{ env }}_sg_wordpress"

    - name: create sg_ansible rules
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpcid.stdout }}"
        name: "{{ env }}_sg_ansible"
        description: security group for ansible
        rules:
          # ssh only, restricted to the operator's IP
          - proto: tcp
            from_port: 22
            to_port: 22
            cidr_ip: "{{ allowed_ip }}"

    - name: create sg_wordpress_lb
      ec2_group:
        region: "{{ region }}"
        vpc_id: "{{ vpcid.stdout }}"
        name: "{{ env }}_sg_wordpress_lb"
        description: security group for wordpress load balancer
        rules:
          # allow http access from anywhere
          - proto: tcp
            from_port: 80
            to_port: 80
            cidr_ip: 0.0.0.0/0
          # allow https access from anywhere
          - proto: tcp
            from_port: 443
            to_port: 443
            cidr_ip: 0.0.0.0/0
================================================
FILE: wordpress_ha/provisioning_vpc.yml
================================================
---
# Create the staging VPC with public and private subnets spread across two
# availability zones; public subnets get a default route to the IGW.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    region: ap-southeast-2
    env: staging
    az0: ap-southeast-2a
    az1: ap-southeast-2b
  tasks:
    - name: create vpc with multi-az subnets
      ec2_vpc:
        region: "{{ region }}"
        cidr_block: 10.0.0.0/16
        resource_tags: '{"Name":"{{ env }}_vpc"}'
        subnets:
          # public subnets (one per AZ)
          - cidr: 10.0.0.0/24
            az: "{{ az0 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_public_0"}'
          - cidr: 10.0.1.0/24
            az: "{{ az0 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_private_0"}'
          - cidr: 10.0.2.0/24
            az: "{{ az1 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_public_1"}'
          - cidr: 10.0.3.0/24
            az: "{{ az1 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_private_1"}'
          # extra private subnets (e.g. for the DB subnet group)
          - cidr: 10.0.4.0/24
            az: "{{ az0 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_private_2"}'
          - cidr: 10.0.5.0/24
            az: "{{ az1 }}"
            resource_tags: '{"Name":"{{ env }}_subnet_private_3"}'
        internet_gateway: yes
        route_tables:
          # only the public subnets are routed to the internet gateway
          - subnets:
              - 10.0.0.0/24
              - 10.0.2.0/24
            routes:
              - dest: 0.0.0.0/0
                gw: igw
================================================
FILE: wordpress_ha/provisioning_wp.yml
================================================
---
# Launch the WordPress master EC2 instance in the public subnet and attach
# an Elastic IP if it does not already have one.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    # your region
    region: ap-southeast-2
    keyname: wordpress-apsydney
    instance_type: t2.micro
    env: staging
    image: ami-d9fe9be3
    ins_name: wordpress_master
  tasks:
    - name: get {{ env }}_subnet_public_0 subnet id
      command: >-
        aws ec2 describe-subnets
        --region {{ region }}
        --filters Name=tag:Name,Values={{ env }}_subnet_public_0
        --query 'Subnets[0].SubnetId' --output text
      # read-only lookup — never report "changed"
      changed_when: false
      register: subnet0

    - name: launch ec2 instance
      ec2:
        region: "{{ region }}"
        key_name: "{{ keyname }}"
        instance_type: "{{ instance_type }}"
        image: "{{ image }}"
        wait: yes
        group: "{{ env }}_sg_web"
        # idempotency token: re-running the play will not launch a duplicate
        id: wordpress_ha_1
        instance_tags:
          Name: "{{ ins_name }}"
          class: wordpress_ha
        vpc_subnet_id: "{{ subnet0.stdout }}"
      register: ec2
      when: subnet0.stdout != "None"

    - name: check EIP association
      command: >-
        aws ec2 describe-instances
        --region {{ region }}
        --filters Name=tag:Name,Values={{ ins_name }}
        --query 'Reservations[0].Instances[0].NetworkInterfaces[0].Association'
        --output text
      changed_when: false
      register: eip

    - name: associate new EIP for the instance
      ec2_eip:
        region: "{{ region }}"
        instance_id: "{{ item.id }}"
      # bare variables in with_items are deprecated; template explicitly
      with_items: "{{ ec2.instances }}"
      when: item.id is defined and eip.stdout == "None"
================================================
FILE: wordpress_ha/roles/web/tasks/main.yml
================================================
---
# Web tier: install the Apache/PHP stack and make sure httpd is running
# and enabled at boot.
- name: install apache, php, and php-mysql
  yum:
    name: "{{ item }}"
    state: present
  with_items:
    - httpd
    - php
    - php-mysql

- name: start and enable httpd
  service:
    name: httpd
    state: started
    enabled: true
================================================
FILE: wordpress_ha/roles/wordpress/tasks/main.yml
================================================
---
# Download, unpack and configure WordPress under the Apache document root.
- name: download wordpress
  get_url:
    url: "http://wordpress.org/wordpress-{{ wp_version }}.tar.gz"
    dest: "~/wordpress-{{ wp_version }}.tar.gz"

- name: extract wordpress archive
  command: /bin/tar xvf wordpress-{{ wp_version }}.tar.gz
  args:
    # quoted: a bare ~ in block YAML would parse as null, not a path
    chdir: "~"
    creates: "~/wordpress"

- name: copy wordpress to apache root directory
  shell: cp -r ~/wordpress/* /var/www/html

- name: fetch random salts for wordpress config
  local_action: command curl https://api.wordpress.org/secret-key/1.1/salt/
  register: wp_salt

- name: copy wordpress config file
  template:
    src: wp-config.php
    dest: /var/www/html/

- name: change ownership of wordpress installation
  file:
    path: /var/www/html/
    owner: apache
    group: apache
    state: directory
    recurse: true
================================================
FILE: wordpress_ha/roles/wordpress/templates/wp-config.php
================================================
<?php
/**
 * The base configurations of the WordPress.
 *
 * This file has the following configurations: MySQL settings, Table Prefix,
 * Secret Keys, WordPress Language, and ABSPATH. You can find more information
 * by visiting {@link http://codex.wordpress.org/Editing_wp-config.php Editing
 * wp-config.php} Codex page. You can get the MySQL settings from your web host.
 *
 * This file is used by the wp-config.php creation script during the
 * installation. You don't have to use the web site, you can just copy this file
 * to "wp-config.php" and fill in the values.
 *
 * This copy is an Ansible template: the {{ ... }} placeholders are
 * substituted by the `template` task in roles/wordpress/tasks/main.yml.
 *
 * @package WordPress
 */
// ** MySQL settings - You can get this info from your web host ** //
/** The name of the database for WordPress */
define('DB_NAME', '{{ db_name }}');
/** MySQL database username */
define('DB_USER', '{{ username }}');
/** MySQL database password */
define('DB_PASSWORD', '{{ password }}');
/** MySQL hostname */
define('DB_HOST', '{{ dbhost }}');
/** Database Charset to use in creating database tables. */
define('DB_CHARSET', 'utf8');
/** The Database Collate type. Don't change this if in doubt. */
define('DB_COLLATE', '');
/**#@+
 * Authentication Unique Keys and Salts.
 *
 * Change these to different unique phrases!
 * You can generate these using the {@link https://api.wordpress.org/secret-key/1.1/salt/ WordPress.org secret-key service}
 * You can change these at any point in time to invalidate all existing cookies. This will force all users to have to log in again.
 *
 * @since 2.6.0
 */
{{ wp_salt.stdout }}
/**#@-*/
/**
 * WordPress Database Table prefix.
 *
 * You can have multiple installations in one database if you give each a unique
 * prefix. Only numbers, letters, and underscores please!
 */
$table_prefix = 'wp_';
/**
 * WordPress Localized Language, defaults to English.
 *
 * Change this to localize WordPress. A corresponding MO file for the chosen
 * language must be installed to wp-content/languages. For example, install
 * de_DE.mo to wp-content/languages and set WPLANG to 'de_DE' to enable German
 * language support.
 */
define('WPLANG', '');
/**
 * For developers: WordPress debugging mode.
 *
 * Change this to true to enable the display of notices during development.
 * It is strongly recommended that plugin and theme developers use WP_DEBUG
 * in their development environments.
 */
define('WP_DEBUG', false);
/** Disable Automatic Updates Completely */
/* Moved above the wp-settings.php require: configuration constants
 * defined after wp-settings.php is loaded are never seen by WordPress
 * core, so in the original position these two defines had no effect. */
define( 'AUTOMATIC_UPDATER_DISABLED', {{auto_up_disable}} );
/** Define AUTOMATIC Updates for Components. */
define( 'WP_AUTO_UPDATE_CORE', {{core_update_level}} );
/* That's all, stop editing! Happy blogging. */
/** Absolute path to the WordPress directory. */
if ( !defined('ABSPATH') )
define('ABSPATH', dirname(__FILE__) . '/');
/** Sets up WordPress vars and included files. */
require_once(ABSPATH . 'wp-settings.php');
================================================
FILE: wordpress_ha/site.yml
================================================
---
# Apply the web and wordpress roles to every HA WordPress instance
# (hosts discovered by the ec2.py dynamic inventory via the EC2 tag
# class=wordpress_ha).
- name: install apache, php, wordpress
  hosts: tag_class_wordpress_ha
  # `become` replaces the deprecated `sudo` play keyword (removed in
  # Ansible 2.9).
  become: true
  roles:
    - web
    - wordpress
================================================
FILE: wordpress_ha/vars/staging.yml
================================================
---
# Staging-environment settings for the HA WordPress auto-scaling stack
# (consumed by the provisioning_*.yml playbooks via include_vars).
region: ap-southeast-2
# Availability zones the ASG spreads instances across.
zones: ['ap-southeast-2a','ap-southeast-2b']
asg_subnet_ids: ['subnet-0da97768','subnet-1f33f768'] # staging_subnet_private_2 and staging_subnet_private_3
elb_subnets: ['subnet-8ceb18e9','subnet-08ccd27c'] # staging_subnet_public_0 and staging_subnet_public_1
# Name of the auto scaling group and its launch parameters.
asg_name: wordpress
image_id: ami-6d7c590e # ami created from the master wordpress instance
instance_type: t2.micro
security_groups: ['sg-85426fe0'] # staging_sg_wordpress
elb_group_ids: ['sg-c4507da1'] # staging_sg_wordpress_lb
# SSH keypair assigned to launched instances.
keypair: wordpress-apsydney
# ASG capacity bounds.
asg_min: 1
asg_max: 8
gitextract_brojytnn/
├── .gitignore
├── ami_create.yml
├── ami_delete.yml
├── dbsg_create.yml
├── dbsg_delete.yml
├── dhcp_options.yml
├── ec2.ini
├── ec2.py
├── ec2_check_name.yml
├── ec2_profile.yml
├── ec2_start.yml
├── ec2_start_1.yml
├── ec2_stop.yml
├── ec2_vol_1.yml
├── ec2_vol_2.yml
├── ec2_vpc_db_create.yml
├── ec2_vpc_jumpbox.yml
├── ec2_vpc_openvpn.yml
├── ec2_vpc_web_create.yml
├── group_vars/
│ ├── all
│ └── tag_class_wordpress
├── host_vars/
│ └── localhost
├── hosts
├── iam_group.yml
├── iam_policy.yml
├── iam_policy_admin.json
├── iam_policy_app1.yml
├── iam_policy_s3_read.json
├── iam_role.yml
├── iam_user.yml
├── install_ansible.yml
├── keypair.yml
├── launch_ec2.yml
├── launch_ec2_eip.yml
├── launch_ec2_iteration.yml
├── launch_ec2_tags.yml
├── library/
│ ├── instance_lookup
│ └── vpc_lookup
├── mysql_pg_create.yml
├── mysql_pg_delete.yml
├── mysql_rds_create.yml
├── mysql_rds_delete.yml
├── nat_launch.yml
├── roles/
│ ├── ansible/
│ │ └── tasks/
│ │ └── main.yml
│ ├── apache/
│ │ └── tasks/
│ │ └── main.yml
│ ├── common/
│ │ ├── handlers/
│ │ │ └── main.yml
│ │ ├── tasks/
│ │ │ └── main.yml
│ │ └── templates/
│ │ └── ntp.conf.j2
│ └── mysql/
│ └── tasks/
│ └── main.yml
├── route53.yml
├── s3_create_bucket.yml
├── s3_create_dir.yml
├── s3_delete_bucket.yml
├── s3_download_file.yml
├── s3_share_file.yml
├── s3_upload_file.yml
├── sg_database.yml
├── sg_delete.yml
├── sg_empty.yml
├── sg_jumpbox.yml
├── sg_modify.yml
├── sg_openvpn.yml
├── sg_webserver.yml
├── site.yml
├── terminate_ec2.yml
├── test.txt
├── test1.txt
├── vpc_create.yml
├── vpc_create_multi_az.yml
├── vpc_delete.yml
├── vpc_delete1.yml
├── vpc_info.yml
├── wordpress/
│ ├── backup.yml
│ ├── delete_backup.yml
│ ├── ec2.ini
│ ├── ec2.py
│ ├── group_vars/
│ │ └── all
│ ├── hosts
│ ├── provisioning.yml
│ ├── restore.yml
│ ├── roles/
│ │ ├── common/
│ │ │ └── tasks/
│ │ │ └── main.yml
│ │ ├── mysql/
│ │ │ ├── handlers/
│ │ │ │ └── main.yml
│ │ │ ├── tasks/
│ │ │ │ └── main.yml
│ │ │ └── templates/
│ │ │ └── my.cnf.j2
│ │ ├── web/
│ │ │ └── tasks/
│ │ │ └── main.yml
│ │ └── wordpress/
│ │ ├── tasks/
│ │ │ └── main.yml
│ │ └── templates/
│ │ └── wp-config.php
│ └── site.yml
└── wordpress_ha/
├── ec2.ini
├── ec2.py
├── group_vars/
│ └── all
├── hosts
├── provisioning_asg.yml
├── provisioning_rds.yml
├── provisioning_sg.yml
├── provisioning_vpc.yml
├── provisioning_wp.yml
├── roles/
│ ├── web/
│ │ └── tasks/
│ │ └── main.yml
│ └── wordpress/
│ ├── tasks/
│ │ └── main.yml
│ └── templates/
│ └── wp-config.php
├── site.yml
└── vars/
└── staging.yml
SYMBOL INDEX (66 symbols across 3 files)
FILE: ec2.py
class Ec2Inventory (line 133) | class Ec2Inventory(object):
method _empty_inventory (line 134) | def _empty_inventory(self):
method __init__ (line 137) | def __init__(self):
method is_cache_valid (line 171) | def is_cache_valid(self):
method read_settings (line 184) | def read_settings(self):
method parse_cli_args (line 236) | def parse_cli_args(self):
method do_api_calls_update_cache (line 249) | def do_api_calls_update_cache(self):
method get_instances_by_region (line 263) | def get_instances_by_region(self, region):
method get_rds_instances_by_region (line 290) | def get_rds_instances_by_region(self, region):
method get_instance (line 306) | def get_instance(self, region, instance_id):
method add_instance (line 325) | def add_instance(self, instance, region):
method add_rds_instance (line 389) | def add_rds_instance(self, instance, region):
method get_route53_records (line 443) | def get_route53_records(self):
method get_instance_route53_names (line 469) | def get_instance_route53_names(self, instance):
method get_host_info_dict_from_instance (line 491) | def get_host_info_dict_from_instance(self, instance):
method get_host_info (line 536) | def get_host_info(self):
method push (line 555) | def push(self, my_dict, key, element):
method get_inventory_from_cache (line 565) | def get_inventory_from_cache(self):
method load_index_from_cache (line 574) | def load_index_from_cache(self):
method write_to_cache (line 582) | def write_to_cache(self, data, filename):
method to_safe (line 591) | def to_safe(self, word):
method json_format_dict (line 598) | def json_format_dict(self, data, pretty=False):
FILE: wordpress/ec2.py
class Ec2Inventory (line 133) | class Ec2Inventory(object):
method _empty_inventory (line 134) | def _empty_inventory(self):
method __init__ (line 137) | def __init__(self):
method is_cache_valid (line 171) | def is_cache_valid(self):
method read_settings (line 184) | def read_settings(self):
method parse_cli_args (line 236) | def parse_cli_args(self):
method do_api_calls_update_cache (line 249) | def do_api_calls_update_cache(self):
method get_instances_by_region (line 263) | def get_instances_by_region(self, region):
method get_rds_instances_by_region (line 290) | def get_rds_instances_by_region(self, region):
method get_instance (line 306) | def get_instance(self, region, instance_id):
method add_instance (line 325) | def add_instance(self, instance, region):
method add_rds_instance (line 389) | def add_rds_instance(self, instance, region):
method get_route53_records (line 443) | def get_route53_records(self):
method get_instance_route53_names (line 469) | def get_instance_route53_names(self, instance):
method get_host_info_dict_from_instance (line 491) | def get_host_info_dict_from_instance(self, instance):
method get_host_info (line 536) | def get_host_info(self):
method push (line 555) | def push(self, my_dict, key, element):
method get_inventory_from_cache (line 565) | def get_inventory_from_cache(self):
method load_index_from_cache (line 574) | def load_index_from_cache(self):
method write_to_cache (line 582) | def write_to_cache(self, data, filename):
method to_safe (line 591) | def to_safe(self, word):
method json_format_dict (line 598) | def json_format_dict(self, data, pretty=False):
FILE: wordpress_ha/ec2.py
class Ec2Inventory (line 133) | class Ec2Inventory(object):
method _empty_inventory (line 134) | def _empty_inventory(self):
method __init__ (line 137) | def __init__(self):
method is_cache_valid (line 171) | def is_cache_valid(self):
method read_settings (line 184) | def read_settings(self):
method parse_cli_args (line 236) | def parse_cli_args(self):
method do_api_calls_update_cache (line 249) | def do_api_calls_update_cache(self):
method get_instances_by_region (line 263) | def get_instances_by_region(self, region):
method get_rds_instances_by_region (line 290) | def get_rds_instances_by_region(self, region):
method get_instance (line 306) | def get_instance(self, region, instance_id):
method add_instance (line 325) | def add_instance(self, instance, region):
method add_rds_instance (line 389) | def add_rds_instance(self, instance, region):
method get_route53_records (line 443) | def get_route53_records(self):
method get_instance_route53_names (line 469) | def get_instance_route53_names(self, instance):
method get_host_info_dict_from_instance (line 491) | def get_host_info_dict_from_instance(self, instance):
method get_host_info (line 536) | def get_host_info(self):
method push (line 555) | def push(self, my_dict, key, element):
method get_inventory_from_cache (line 565) | def get_inventory_from_cache(self):
method load_index_from_cache (line 574) | def load_index_from_cache(self):
method write_to_cache (line 582) | def write_to_cache(self, data, filename):
method to_safe (line 591) | def to_safe(self, word):
method json_format_dict (line 598) | def json_format_dict(self, data, pretty=False):
Condensed preview — 102 files, each showing path, character count, and a content snippet. Download the .json file, or copy the output, to get the full structured content (148K chars).
[
{
"path": ".gitignore",
"chars": 29,
"preview": "ansible4aws-book.*\n.DS_Store\n"
},
{
"path": "ami_create.yml",
"chars": 650,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n ins_name: wordpress"
},
{
"path": "ami_delete.yml",
"chars": 529,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n ami_name: wordpress"
},
{
"path": "dbsg_create.yml",
"chars": 432,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n vars_files:\n - sta"
},
{
"path": "dbsg_delete.yml",
"chars": 238,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n tasks:\n - name: de"
},
{
"path": "dhcp_options.yml",
"chars": 771,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n name: test-vpc\n ta"
},
{
"path": "ec2.ini",
"chars": 2419,
"preview": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these li"
},
{
"path": "ec2.py",
"chars": 20903,
"preview": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ans"
},
{
"path": "ec2_check_name.yml",
"chars": 784,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n vars: \n region: ap-southeast-2 \n key: yan-key-pair-apsydne"
},
{
"path": "ec2_profile.yml",
"chars": 421,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n vars: \n #your region \n region: ap-south"
},
{
"path": "ec2_start.yml",
"chars": 561,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n vars: \n region: ap-southeast-2 \n name"
},
{
"path": "ec2_start_1.yml",
"chars": 520,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n region: ap-southeast-2\n name: bamboo-1\n t"
},
{
"path": "ec2_stop.yml",
"chars": 560,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n vars: \n region: ap-southeast-2 \n name"
},
{
"path": "ec2_vol_1.yml",
"chars": 545,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n vars: \n #your region \n region: ap-south"
},
{
"path": "ec2_vol_2.yml",
"chars": 538,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n #your region\n region: ap-southeast-2\n tas"
},
{
"path": "ec2_vpc_db_create.yml",
"chars": 698,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars_files:\n - staging_vpc_info\n vars:\n region: a"
},
{
"path": "ec2_vpc_jumpbox.yml",
"chars": 899,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars_files:\n - staging_vpc_info\n vars:\n region: "
},
{
"path": "ec2_vpc_openvpn.yml",
"chars": 935,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars_files:\n - staging_vpc_info\n vars:\n region: "
},
{
"path": "ec2_vpc_web_create.yml",
"chars": 862,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars_files:\n - staging_vpc_info\n vars: \n "
},
{
"path": "group_vars/all",
"chars": 174,
"preview": "---\n# Variables here are applicable to all host groups\n\nntpserver: 0.au.pool.ntp.org\n\nansible_user: ec2-user \nansible_ss"
},
{
"path": "group_vars/tag_class_wordpress",
"chars": 87,
"preview": "ansible_ssh_user: ec2-user\nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n"
},
{
"path": "host_vars/localhost",
"chars": 87,
"preview": "ansible_ssh_user: ec2-user\nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n"
},
{
"path": "hosts",
"chars": 137,
"preview": "[local]\nlocalhost\n\n#[webservers]\n#54.79.109.14 ansible_ssh_user=ec2-user ansible_ssh_private_key_file=~/.ssh/yan-key-pai"
},
{
"path": "iam_group.yml",
"chars": 196,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n tasks:\n - name: create IAM group admin \n "
},
{
"path": "iam_policy.yml",
"chars": 320,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n tasks:\n - name: Assign a policy called Admin"
},
{
"path": "iam_policy_admin.json",
"chars": 131,
"preview": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": \"*\",\n \"Resource\": \"*\"\n"
},
{
"path": "iam_policy_app1.yml",
"chars": 312,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n tasks:\n - name: Assign a policy called S3Rea"
},
{
"path": "iam_policy_s3_read.json",
"chars": 175,
"preview": "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:Get*\",\n "
},
{
"path": "iam_role.yml",
"chars": 191,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n tasks:\n - name: create IAM role app1 \n "
},
{
"path": "iam_user.yml",
"chars": 211,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n tasks:\n - name: create IAM user yan \n i"
},
{
"path": "install_ansible.yml",
"chars": 68,
"preview": "---\n- hosts: tag_class_jumpbox\n become: yes\n roles:\n - ansible\n"
},
{
"path": "keypair.yml",
"chars": 411,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars:\n region: ap-southeast-2\n keyname: ya"
},
{
"path": "launch_ec2.yml",
"chars": 516,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "launch_ec2_eip.yml",
"chars": 549,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n region: ap-southeast-2 \n tasks"
},
{
"path": "launch_ec2_iteration.yml",
"chars": 459,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "launch_ec2_tags.yml",
"chars": 1409,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n region: ap-southeast-2 \n insta"
},
{
"path": "library/instance_lookup",
"chars": 1250,
"preview": "#!/usr/bin/python\n\n#author: Yan Kurniawan <yan.kurniawan@gmail.com>\n\nimport sys\n\nAWS_REGIONS = ['ap-northeast-1',\n "
},
{
"path": "library/vpc_lookup",
"chars": 4030,
"preview": "#!/usr/bin/python\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# i"
},
{
"path": "mysql_pg_create.yml",
"chars": 446,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n region: ap-southeast-2\n tasks:\n - name: c"
},
{
"path": "mysql_pg_delete.yml",
"chars": 248,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n region: ap-southeast-2\n tasks:\n - name: d"
},
{
"path": "mysql_rds_create.yml",
"chars": 992,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n region: ap-southeast-2\n size: 100\n inst"
},
{
"path": "mysql_rds_delete.yml",
"chars": 247,
"preview": "- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n region: ap-southeast-2\n tasks:\n - name: delet"
},
{
"path": "nat_launch.yml",
"chars": 1020,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars_files:\n - staging_vpc_info\n vars: \n"
},
{
"path": "roles/ansible/tasks/main.yml",
"chars": 425,
"preview": "---\n- name: upgrade all packages\n yum: name=* state=latest\n- name: install the 'Development tools' package group\n yum:"
},
{
"path": "roles/apache/tasks/main.yml",
"chars": 185,
"preview": "--- \n- name: install apache \n yum: name=httpd state=present \n tags: apache \n\n- name: start the httpd service \n s"
},
{
"path": "roles/common/handlers/main.yml",
"chars": 67,
"preview": "--- \n- name: restart ntp \n service: name=ntpd state=restarted \n"
},
{
"path": "roles/common/tasks/main.yml",
"chars": 289,
"preview": "--- \n- name: install ntp \n yum: name=ntp state=present \n tags: ntp \n\n- name: configure ntp file \n template: src="
},
{
"path": "roles/common/templates/ntp.conf.j2",
"chars": 142,
"preview": "driftfile /var/lib/ntp/drift\n\nrestrict 127.0.0.1 \nrestrict -6 ::1\n\nserver {{ ntpserver }}\n\nincludefile /etc/ntp/crypto/p"
},
{
"path": "roles/mysql/tasks/main.yml",
"chars": 196,
"preview": "--- \n- name: install mysql server \n yum: name=mysql-server state=present \n tags: mysql \n\n- name: start the mysql se"
},
{
"path": "route53.yml",
"chars": 257,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n tasks:\n - name: create record \n route53"
},
{
"path": "s3_create_bucket.yml",
"chars": 201,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n bucketname: yan001\n tasks:\n - name: creat"
},
{
"path": "s3_create_dir.yml",
"chars": 240,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n bucketname: yan001\n tasks:\n - name: creat"
},
{
"path": "s3_delete_bucket.yml",
"chars": 225,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n bucketname: yan001\n tasks:\n - name: delet"
},
{
"path": "s3_download_file.yml",
"chars": 269,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n sudo: yes\n vars:\n bucketname: yan001\n tasks:\n -"
},
{
"path": "s3_share_file.yml",
"chars": 259,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n bucketname: yan001\n tasks:\n - name: share"
},
{
"path": "s3_upload_file.yml",
"chars": 276,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars:\n bucketname: yan001\n tasks:\n - name: uploa"
},
{
"path": "sg_database.yml",
"chars": 841,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "sg_delete.yml",
"chars": 948,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars_files:\n - staging_vpc_info\n vars: \n "
},
{
"path": "sg_empty.yml",
"chars": 1024,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars_files:\n - staging_vpc_info\n vars: \n "
},
{
"path": "sg_jumpbox.yml",
"chars": 840,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n connection: local \n vars_files:\n - staging_vpc_info\n vars: \n"
},
{
"path": "sg_modify.yml",
"chars": 3131,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars_files:\n - staging_vpc_info\n vars: \n "
},
{
"path": "sg_openvpn.yml",
"chars": 1077,
"preview": "---\n- hosts: localhost\n gather_facts: no\n connection: local \n vars_files:\n - staging_vpc_info\n vars:\n #your re"
},
{
"path": "sg_webserver.yml",
"chars": 809,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "site.yml",
"chars": 443,
"preview": "---\n# install, configure, and start ntp on all ec2 instances\n- hosts: ec2 \n sudo: yes \n roles: \n - common \n\n# i"
},
{
"path": "terminate_ec2.yml",
"chars": 358,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "test.txt",
"chars": 29,
"preview": "Hello World. This is a test.\n"
},
{
"path": "test1.txt",
"chars": 29,
"preview": "Hello World. This is a test.\n"
},
{
"path": "vpc_create.yml",
"chars": 1299,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n region: ap-southeast-2\n # prefi"
},
{
"path": "vpc_create_multi_az.yml",
"chars": 1615,
"preview": "--- \n- hosts: localhost \n gather_facts: no \n vars: \n region: ap-southeast-2\n # prefix for naming \n pre"
},
{
"path": "vpc_delete.yml",
"chars": 464,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n tasks:\n - name: ge"
},
{
"path": "vpc_delete1.yml",
"chars": 507,
"preview": "- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n name: test-vpc\n tasks:"
},
{
"path": "vpc_info.yml",
"chars": 277,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n tasks:\n - name: ge"
},
{
"path": "wordpress/backup.yml",
"chars": 890,
"preview": "---\n- name: backup database and store in S3\n hosts: tag_class_wordpress\n gather_facts: no\n vars:\n bucketname: yan_"
},
{
"path": "wordpress/delete_backup.yml",
"chars": 290,
"preview": "---\n- name: delete object from S3 bucket\n hosts: localhost\n gather_facts: no\n vars:\n bucketname: yan_wordpress\n "
},
{
"path": "wordpress/ec2.ini",
"chars": 2445,
"preview": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these li"
},
{
"path": "wordpress/ec2.py",
"chars": 20903,
"preview": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ans"
},
{
"path": "wordpress/group_vars/all",
"chars": 802,
"preview": "ansible_ssh_user: ec2-user \nansible_ssh_private_key_file: ~/.ssh/wordpress-apsydney.pem\n\n# Which version of WordPress t"
},
{
"path": "wordpress/hosts",
"chars": 18,
"preview": "[local]\nlocalhost\n"
},
{
"path": "wordpress/provisioning.yml",
"chars": 2107,
"preview": "--- \n- hosts: localhost \n connection: local \n gather_facts: no \n vars: \n #your region \n region: ap-sout"
},
{
"path": "wordpress/restore.yml",
"chars": 635,
"preview": "---\n- name: download backup from S3 and restore\n hosts: tag_class_wordpress\n gather_facts: no\n vars:\n bucketname: "
},
{
"path": "wordpress/roles/common/tasks/main.yml",
"chars": 121,
"preview": "---\n- name: install the 'Development tools' package group\n yum: name=\"@Development tools\" state=present update_cache=ye"
},
{
"path": "wordpress/roles/mysql/handlers/main.yml",
"chars": 71,
"preview": "--- \n- name: restart mysql \n service: name=mysqld state=restarted \n"
},
{
"path": "wordpress/roles/mysql/tasks/main.yml",
"chars": 378,
"preview": "---\n- name: install mysql server\n yum: name={{ item }} state=present\n with_items:\n - mysql-devel\n - mysql-server\n\n"
},
{
"path": "wordpress/roles/mysql/templates/my.cnf.j2",
"chars": 318,
"preview": "# You can customize your mysql server configuration here\n[mysqld]\ndatadir=/var/lib/mysql\nsocket=/var/lib/mysql/mysql.soc"
},
{
"path": "wordpress/roles/web/tasks/main.yml",
"chars": 214,
"preview": "---\n- name: install apache, php, and php-mysql\n yum: name={{ item }} state=present\n with_items:\n - httpd\n - php\n "
},
{
"path": "wordpress/roles/wordpress/tasks/main.yml",
"chars": 1002,
"preview": "---\n- name: download wordpress\n get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz dest=~/wordpress-{{"
},
{
"path": "wordpress/roles/wordpress/templates/wp-config.php",
"chars": 2856,
"preview": "<?php\n/**\n * The base configurations of the WordPress.\n *\n * This file has the following configurations: MySQL settings,"
},
{
"path": "wordpress/site.yml",
"chars": 163,
"preview": "---\n- name: install apache, php, mysql server, wordpress\n hosts: tag_class_wordpress\n become: yes\n\n roles:\n# - com"
},
{
"path": "wordpress_ha/ec2.ini",
"chars": 2419,
"preview": "# Ansible EC2 external inventory script settings\n#\n\n[ec2]\n\n# to talk to a private eucalyptus instance uncomment these li"
},
{
"path": "wordpress_ha/ec2.py",
"chars": 20903,
"preview": "#!/usr/bin/env python\n\n'''\nEC2 external inventory script\n=================================\n\nGenerates inventory that Ans"
},
{
"path": "wordpress_ha/group_vars/all",
"chars": 669,
"preview": "---\ndb_name: wordpress\nusername: dbadmin\npassword: mypassword\ndbhost: staging-wordpress-rds.cxzxl961nonk.ap-southeast-2."
},
{
"path": "wordpress_ha/hosts",
"chars": 17,
"preview": "[local]\nlocalhost"
},
{
"path": "wordpress_ha/provisioning_asg.yml",
"chars": 3826,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n tasks:\n - include_vars: \"{{ env }}.yml\"\n\n - set_fa"
},
{
"path": "wordpress_ha/provisioning_rds.yml",
"chars": 2185,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n env: staging\n si"
},
{
"path": "wordpress_ha/provisioning_sg.yml",
"chars": 3375,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n allowed_ip: xx.xx.x"
},
{
"path": "wordpress_ha/provisioning_vpc.yml",
"chars": 1323,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n region: ap-southeast-2\n env: staging\n az"
},
{
"path": "wordpress_ha/provisioning_wp.yml",
"chars": 1524,
"preview": "---\n- hosts: localhost\n connection: local\n gather_facts: no\n vars:\n #your region\n region: ap-southeast-2\n ke"
},
{
"path": "wordpress_ha/roles/web/tasks/main.yml",
"chars": 212,
"preview": "---\n- name: install apache, php, and php-mysql\n yum: name={{ item }} state=present\n with_items:\n - httpd\n - php\n "
},
{
"path": "wordpress_ha/roles/wordpress/tasks/main.yml",
"chars": 746,
"preview": "---\n- name: download wordpress\n get_url: url=http://wordpress.org/wordpress-{{ wp_version }}.tar.gz \n dest=~/"
},
{
"path": "wordpress_ha/roles/wordpress/templates/wp-config.php",
"chars": 2848,
"preview": "<?php\n/**\n * The base configurations of the WordPress.\n *\n * This file has the following configurations: MySQL settings,"
},
{
"path": "wordpress_ha/site.yml",
"chars": 123,
"preview": "---\n- name: install apache, php, wordpress\n hosts: tag_class_wordpress_ha\n sudo: yes\n\n roles:\n - web\n - wordpre"
},
{
"path": "wordpress_ha/vars/staging.yml",
"chars": 566,
"preview": "---\nregion: ap-southeast-2\nzones: ['ap-southeast-2a','ap-southeast-2b']\nasg_subnet_ids: ['subnet-0da97768','subnet-1f33f"
}
]
About this extraction
This page contains the full source code of the yankurniawan/ansible-for-aws GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 102 files (134.4 KB), approximately 35.6k tokens, and a symbol index with 66 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.