Repository: j-mcnally/ansible-rails
Branch: master
Commit: 264d1e1eca03
Files: 66
Total size: 89.5 KB

Directory structure:
gitextract_jlibz5q6/
├── ansible.cfg
├── apps.yml
├── checks.yml
├── digitalocean.ini
├── dohosts
├── extra/
│   └── capistrano/
│       └── deploy.rb
├── group_vars/
│   ├── production.yml
│   └── staging.yml
├── loadbalancers.yml
├── local
├── myapp.yml
├── provisioning/
│   ├── common/
│   │   └── tasks/
│   │       └── main.yml
│   ├── do_creds.yml
│   ├── requirements/
│   │   ├── production.yml
│   │   └── staging.yml
│   └── stack.yml
├── readme.md
├── roles/
│   ├── apps/
│   │   ├── files/
│   │   │   └── nginx.service
│   │   ├── handlers/
│   │   │   ├── main.yml
│   │   │   └── nginx.yml
│   │   ├── tasks/
│   │   │   ├── main.yml
│   │   │   └── nginx.yml
│   │   └── templates/
│   │       ├── app.logrotate.j2
│   │       └── nginx.conf.j2
│   ├── common/
│   │   ├── files/
│   │   │   ├── install-ruby.sh
│   │   │   ├── myapp.github.pem
│   │   │   ├── myapp.github.pem.pub
│   │   │   ├── public_key
│   │   │   └── wheel_extensions.sudoers
│   │   ├── handlers/
│   │   │   ├── firewalld.yml
│   │   │   ├── main.yml
│   │   │   └── services.yml
│   │   ├── tasks/
│   │   │   ├── firewalld.yml
│   │   │   ├── main.yml
│   │   │   ├── ruby.yml
│   │   │   ├── users.yml
│   │   │   └── yum.yml
│   │   └── templates/
│   │       ├── app.env.j2
│   │       └── firewall.xml.j2
│   ├── loadbalancers/
│   │   ├── handlers/
│   │   │   └── main.yml
│   │   ├── tasks/
│   │   │   ├── haproxy.yml
│   │   │   ├── main.yml
│   │   │   └── yum.yml
│   │   └── templates/
│   │       └── haproxy.cfg.j2
│   ├── services/
│   │   ├── files/
│   │   │   ├── postgres-bk.logrotate
│   │   │   ├── postgresql.conf
│   │   │   └── redis.conf
│   │   ├── handlers/
│   │   │   ├── main.yml
│   │   │   ├── postgres.yml
│   │   │   └── redis.yml
│   │   ├── tasks/
│   │   │   ├── main.yml
│   │   │   ├── memcached.yml
│   │   │   ├── postgres.yml
│   │   │   ├── redis.yml
│   │   │   └── yum.yml
│   │   └── templates/
│   │       └── pg_hba.conf.j2
│   └── workers/
│       ├── files/
│       │   └── sidekiq.logrotate
│       ├── handlers/
│       │   ├── main.yml
│       │   └── sidekiq.yml
│       ├── tasks/
│       │   ├── main.yml
│       │   ├── sidekiq.yml
│       │   └── yum.yml
│       └── templates/
│           └── sidekiq.service.j2
├── services.yml
├── test.yml
└── workers.yml

================================================
FILE CONTENTS
================================================

================================================
FILE: ansible.cfg
================================================
[defaults]
private_key_file=~/.ssh/myapp.pem

================================================
FILE: apps.yml
================================================
---
# file: apps.yml
- hosts: "{{ myapp_env }}-myapp-apps"
  remote_user: root
  roles:
    - common
    - apps

================================================
FILE: checks.yml
================================================
- hosts: 127.0.0.1
  connection: local
  tasks:
    - name: "ensure a myapp_env is set"
      fail: msg="You must specify myapp_env as staging or production"
      when: myapp_env is not defined or (myapp_env != "production" and myapp_env != "staging")

================================================
FILE: digitalocean.ini
================================================
[digitalocean]
api_key =
cache_path = /tmp
cache_max_age = 100

================================================
FILE: dohosts
================================================
#!/usr/bin/env python

'''
DigitalOcean external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
DigitalOcean. When run without arguments (or with --list), returns the full
droplet inventory. When run against a specific droplet host (--host), returns
information about that droplet.

Configuration is read from digitalocean.ini, then from environment variables,
and then from command-line arguments.
Most notably, the DigitalOcean API Key must be specified. It can be set in
the INI file or with the following environment variable:

    export DIGITALOCEAN_API_KEY='abc123'

Alternatively, it can be passed on the command-line with --api-key. (A
--client-id option is still accepted for compatibility with the old v1 API,
but this script talks to the v2 API and ignores it.)
'''

# (c) 2013, Evan Wies
#
# Adapted from the EC2 inventory plugin:
# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

######################################################################

import os
import re  # used by to_safe()
import sys
import argparse
from time import time

import ConfigParser

try:
    import json
except ImportError:
    import simplejson as json

try:
    import requests
except ImportError:
    print "failed=True msg='requests library required for this module'"
    sys.exit(1)


class DigitalOceanInventory(object):

    def __init__(self):
        ''' Main execution path '''

        # Inventory of droplets
        self.inventory = {}
        # Index of hostname (address) to droplet ID
        self.index = {}
        # Cache defaults, in case neither the ini file nor the CLI sets them
        self.cache_path = None
        self.cache_max_age = None

        # Read settings, environment variables, and CLI arguments
        self.read_settings()
        self.read_environment()
        self.parse_cli_args()

        # Verify credentials were set
        if not hasattr(self, 'api_key'):
            print '''Could not find DigitalOcean values for api_key.
They must be specified via ini file, command line argument (--api-key),
or environment variable (DIGITALOCEAN_API_KEY)'''
            sys.exit(-1)

        # Check cache
        self.cache_path = self.cache_path or '.'
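        # The inventory and index are cached on disk under cache_path and
        # reused until they are cache_max_age seconds old; --refresh-cache
        # forces a fresh round of API calls regardless of age.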
        self.cache_max_age = self.cache_max_age or 0
        self.cache_path_cache = self.cache_path + "/ansible-digitalocean.cache"
        self.cache_path_index = self.cache_path + "/ansible-digitalocean.index"

        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()

        # Data to print
        if self.args.host:
            data_to_print = self.json_format_dict(self.get_host_info(), True)
        elif self.args.list:
            # Display list of droplets for inventory
            if len(self.inventory) == 0:
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)

        print data_to_print

    def __do_api(self, path, params=dict()):
        request = {
            'page': '1',
            'per_page': '250'
        }
        request.update(params)
        headers = {'Content-Type': 'application/json',
                   "Authorization": "Bearer %s" % self.api_key}
        response = requests.get("https://api.digitalocean.com/v2%s" % path,
                                headers=headers, params=request)
        if response.status_code != requests.codes.ok:
            # Surface the API error body (response.json() has not run yet here)
            raise Exception(response.text)
        data = response.json()
        return data

    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if they are still valid '''
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                if os.path.isfile(self.cache_path_index):
                    return True
        return False

    def read_settings(self):
        ''' Reads the settings from the digitalocean.ini file '''
        config = ConfigParser.SafeConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/digitalocean.ini')

        # Credentials
        if config.has_option('digitalocean', 'api_key'):
            self.api_key = config.get('digitalocean', 'api_key')

        # Cache related
        if config.has_option('digitalocean', 'cache_path'):
            self.cache_path = config.get('digitalocean', 'cache_path')
        if config.has_option('digitalocean', 'cache_max_age'):
            self.cache_max_age = config.getint('digitalocean', 'cache_max_age')

    def read_environment(self):
        ''' Reads the settings from environment variables '''
        # Setup credentials
        if os.getenv("DIGITALOCEAN_API_KEY"):
            self.api_key = os.getenv("DIGITALOCEAN_API_KEY")

    def parse_cli_args(self):
        ''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List droplets (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific droplet')
        parser.add_argument('--cache-path', action='store',
                            help='Path to the cache files (default: .)')
        parser.add_argument('--cache-max-age', action='store', type=int,
                            help='Maximum age of the cached items (default: 0)')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
        parser.add_argument('--client-id', action='store', help='DigitalOcean Client ID (unused, kept for v1 compatibility)')
        parser.add_argument('--api-key', action='store', help='DigitalOcean API Key')
        self.args = parser.parse_args()

        if self.args.api_key:
            self.api_key = self.args.api_key
        if self.args.cache_path:
            self.cache_path = self.args.cache_path
        if self.args.cache_max_age:
            self.cache_max_age = self.args.cache_max_age

    def do_api_calls_update_cache(self):
        ''' Do API calls to get the droplets, and save data in cache files '''
        regions = dict()
        for region in self.__do_api('/regions')['regions']:
            regions[region['slug']] = region['slug']

        droplets = self.__do_api('/droplets')['droplets']
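        # Each droplet gets indexed by its IP address and grouped by droplet
        # id, by region, and by the droplet name prefix (see push()).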
        for droplet in droplets:
            region_name = regions.get(droplet['region']['slug'], 'Unknown Region')
            self.add_droplet(droplet, region_name)

        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)

    def get_droplet(self, droplet_id):
        ''' Get details about a specific droplet '''
        return self.__do_api('/droplets/' + str(droplet_id))['droplet']

    def add_droplet(self, droplet, region_name):
        ''' Adds a droplet to the inventory and index, as long as it is addressable '''
        # Address the droplet by the last IPv4 address in its networks list
        dest = droplet['networks']['v4'][len(droplet['networks']['v4']) - 1]['ip_address']
        if not dest:
            # Skip droplets we cannot address (when would this be on DigitalOcean?)
            return

        # Add to index
        self.index[dest] = [droplet['region']['slug'], droplet['id']]

        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[droplet['id']] = [dest]

        # Inventory: Group by region
        self.push(self.inventory, region_name, dest)

        # Inventory: Group by name
        self.push(self.inventory, droplet['name'], dest)

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if self.args.host not in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, droplet_id) = self.index[self.args.host]

        instance = self.get_droplet(droplet_id)
        return instance

    def push(self, my_dict, key, element):
        ''' Push an element onto an array that may not have been defined in
            the dict. The key is truncated at the first dot, so droplets named
            foo.1, foo.2, ... all land in group "foo". '''
        split_key = key.split('.')[0]
        if split_key in my_dict:
            my_dict[split_key].append(element)
        else:
            my_dict[split_key] = [element]

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON object '''
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file and sets self.index '''
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted string '''
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


# Run the script
DigitalOceanInventory()

================================================
FILE: extra/capistrano/deploy.rb
================================================
# config valid only for Capistrano 3.3.5
require 'json'
lock '3.3.5'

set :application, 'myapp'
set :repo_url, 'git@github.com:myorg/myapp.git'
set :branch, :master
set :deploy_to, '/var/app'
set :scm, :git
set :format, :pretty
set :log_level, :debug
set :pty, true
set :keep_releases, 3
set :ssh_options, {
  keys: ["#{ENV['HOME']}/.ssh/private_key.pem"]
}
set :linked_dirs, %w{bin log tmp/pids tmp/cache tmp/sockets vendor/bundle public/system}

# Look up servers and roles from the Ansible dynamic inventory
do_hosts_script = File.expand_path("../../ansible/dohosts", __FILE__)
do_servers = JSON.parse(`#{do_hosts_script}`)
puts fetch(:stage)
app_servers = do_servers["#{fetch(:stage)}-myapp-apps"]
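# These keys follow the {stage}-myapp-{role} group naming that dohosts
# derives from the droplet names created during provisioning.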
do_servers["#{fetch(:stage)}-myapp-apps"] worker_servers = do_servers["#{fetch(:stage)}-myapp-workers"] app_servers.each_with_index do |s, i| roles = ['web', 'app'] roles << 'db' if i == 0 server s, user: 'webapp', roles: roles end worker_servers.each do |s| server s, user: 'webapp', roles: %w{app sidekiq} end namespace :deploy do desc 'Setup environment' task :setup_env do SSHKit.config.command_map = Hash.new do |hash, command| hash[command] = "#{command}" end SSHKit.config.command_map.prefix[:rake].push("export $(cat /var/app/support/app.env | xargs); /usr/bin/env bundle exec") SSHKit.config.command_map.prefix[:bundle].push("export $(cat /var/app/support/app.env | xargs); /usr/bin/env") end desc 'Restart application' task :restart do on roles(:app), in: :sequence, wait: 5 do execute "mkdir -p #{release_path.join('tmp/')}" end on roles(:web), in: :sequence, wait: 5 do execute :touch, release_path.join('tmp/restart.txt') execute "sudo systemctl restart nginx" end on roles(:sidekiq), in: :sequence, wait: 5 do execute "sudo systemctl restart sidekiq" end end after :updating, :setup_env after :publishing, :restart after :restart, :clear_cache do on roles(:web), in: :groups, limit: 3, wait: 10 do end end end ================================================ FILE: group_vars/production.yml ================================================ --- # file: group_vars/development # App Environment SECRET_KEY_BASE: CHANGEME RAILS_ENV: production # DB Configuration postgres_password: CHANGME ================================================ FILE: group_vars/staging.yml ================================================ --- # file: group_vars/development # App Environment SECRET_KEY_BASE: CHANGEME RAILS_ENV: staging # DB Configuration postgres_password: CHANGME ================================================ FILE: loadbalancers.yml ================================================ --- # file: workers.yml - hosts: "{{ myapp_env }}-myapp-loadbalancers" remote_user: root roles: - common - loadbalancers ================================================ FILE: local ================================================ [local] 127.0.0.1 ================================================ FILE: myapp.yml ================================================ --- # file: myapp.yml - include: checks.yml - include: apps.yml - include: services.yml - include: workers.yml - include: loadbalancers.yml ================================================ FILE: provisioning/common/tasks/main.yml ================================================ --- # file: provisioning/common/tasks/main.yml - name: "ensure an myapp_env is set" fail: msg="You must specify myapp_env as staging or production" when: myapp_env is not defined or myapp_env != "production" and myapp_env != "staging" - include_vars: "../../requirements/{{ myapp_env }}.yml" - include_vars: ../../do_creds.yml - name: "Upload Digital Ocean ssh key" digital_ocean: > state=present command=ssh name="myapp" ssh_pub_key="{{ myapp_ssh_public_key }}" client_id="{{ digital_ocean['client_id'] }}" api_key="{{ digital_ocean['api_key'] }}" register: myapp_ssh_key - name: "Create required servers" digital_ocean: > state=present command=droplet name={{ myapp_env }}-myapp-{{ item.1.internal_id }} unique_name=yes private_networking=yes client_id="{{ digital_ocean.client_id }}" api_key="{{ digital_ocean.api_key }}" size_id={{ item.1.size_id }} region_id={{ item.1.region_id }} image_id={{ item.1.image_id }} ssh_key_ids={{ myapp_ssh_key['ssh_key']['id'] }} wait_timeout=500 with_subelements: - myapp_servers 
    - servers

================================================
FILE: provisioning/do_creds.yml
================================================
---
digital_ocean:
  api_key:
  client_id:

================================================
FILE: provisioning/requirements/production.yml
================================================
---
# file: provisioning/requirements/production.yml
myapp_servers:
  - role: apps
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: apps.1
  - role: workers
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: workers.1
  - role: services
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: services.1
  - role: loadbalancers
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: loadbalancers.1

================================================
FILE: provisioning/requirements/staging.yml
================================================
---
# file: provisioning/requirements/staging.yml
myapp_servers:
  - role: apps
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: apps.1
  - role: workers
    servers:
      - region_id: 8
        size_id: 63
        image_id: 7053293
        internal_id: workers.1
  - role: services
    servers:
      - region_id: 8
        size_id: 66
        image_id: 7053293
        internal_id: services.1
  - role: loadbalancers
    servers:
      - region_id: 8
        size_id: 66
        image_id: 7053293
        internal_id: loadbalancers.1

================================================
FILE: provisioning/stack.yml
================================================
---
# file: provisioning/stack.yml
- hosts: local
  connection: local
  vars:
    myapp_ssh_public_key: "{{ lookup('file', '../roles/common/files/public_key') }}"
  roles:
    - common

================================================
FILE: readme.md
================================================
# Ansible-Rails

Ansible-Rails is a collection of playbooks that bootstraps a full Rails stack, complete with Postgres, Redis, Sidekiq, Nginx and HAProxy, on CentOS 7, targeting DigitalOcean as a host.

The playbooks have four primary roles:

- App Tier (Rails App + Nginx)
- Services Tier (Postgres / Redis / Memcached)
- Worker Tier (Sidekiq)
- Loadbalancer Tier (HAProxy)

In addition to these four tiers there is a fairly extensive core/common role that all tiers/roles include.

Goals of the playbooks are:

- Create a solid foundation for hosting complex Rails applications on CentOS 7.
- Autoconfigure firewall rules, loadbalancer configuration, Postgres access, and the app environment based on inventory.
- Build the current stable Ruby from source; it's easy to do, and package maintainers seem to have a problem getting this right and keeping things current.
- Build Nginx with Phusion Passenger.
- Host Rails configuration as environment variables.
- Ensure Rails logs are properly rotated.
- Keep a decent amount of security through very limited firewall exposure.
- Use systemd for services.
- Limit sudo access for the webapp user to service management only.
- Automatically provision public-key authentication.
- Provision required servers via the DigitalOcean API.
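The naming convention is the glue between provisioning and the playbooks: each entry in `provisioning/requirements/<env>.yml` becomes a droplet named `{{ myapp_env }}-myapp-{{ internal_id }}`, and the `dohosts` inventory script groups droplets by everything before the first dot in their name, which is exactly the group each playbook targets. An illustrative excerpt:

```yaml
# provisioning/requirements/staging.yml (excerpt)
myapp_servers:
  - role: apps
    servers:
      - region_id: 8          # DigitalOcean region
        size_id: 63           # droplet size
        image_id: 7053293     # CentOS 7 image
        internal_id: apps.1   # droplet is named staging-myapp-apps.1

# dohosts then exposes that droplet in the inventory group
# "staging-myapp-apps", which is what apps.yml targets with
# hosts: "{{ myapp_env }}-myapp-apps"
```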
## Getting Started

- Install ansible
- Review configurations
  - digitalocean.ini
  - provisioning/do_creds.yml
  - group_vars/staging.yml
  - group_vars/production.yml
  - roles/common/templates/app.env.j2
- Configure SSH private / public keys
  - ~/.ssh/app_private_key.pem
  - roles/common/files/public_key
  - roles/common/files/myapp.github.pem
  - roles/common/files/myapp.github.pem.pub
- Define your required architecture per environment
  - provisioning/requirements/staging.yml
  - provisioning/requirements/production.yml

## Spin up your instances

Once you've added your DigitalOcean API keys to the required files (*note: `provisioning/do_creds.yml` takes v1-style client ID / API key credentials, while `digitalocean.ini` takes a v2 API token*), you can start your staging environment servers by running the following playbook command:

`ansible-playbook -i local provisioning/stack.yml --extra-vars "myapp_env=staging"`

If you rolled with the defaults, you will have one server for each role.

## Provision your staging environment

Now we run the main playbooks to set up the boxes:

`ansible-playbook -i dohosts myapp.yml --extra-vars="myapp_env=staging"`

With any luck, at this point all the software and services for your stack should be running.

## Deploy your rails application

We won't go into a ton of detail about how to deploy a Rails app. At memms.io we use Capistrano v3, so I've included a few tricks in `extra/capistrano/deploy.rb`. Basically, the hacks there load hosts from the `dohosts` dynamic inventory, so cap deploys to the same servers Ansible manages. Another neat thing we do is make sure the app.env variables are loaded for each cap command.

## Rinse, wash, repeat

Deploying your production environment should be as easy as changing `myapp_env=staging` to `myapp_env=production` in the ansible-playbook commands.

### Troubleshooting / Known-Issues

A few Python packages may be missing for some of the local tasks. The dohosts script requires the requests package, which can be installed with `pip install requests`. To run the DigitalOcean provisioning scripts you may need the DigitalOcean Python library: `pip install dopy`.

Mac OS X users should install Ansible with brew: `brew install ansible`

# Get Help

You can find me in the #ansible room on irc.freenode.net

# Contributing

Any issues / problems / feedback should be filed as GitHub issues. Pull requests welcome!

# Sponsored by

- [memms.io](http://memms.io) - Memories made easy. No signups, no logins, no fuss, just memms.
- [kohactive.com](http://kohactive.com) - Web / Mobile Development Studio

# Made with Love by

- [@j_mcnally](http://www.twitter.com/j_mcnally) in Chicago.
================================================
FILE: roles/apps/files/nginx.service
================================================
[Unit]
Description=The nginx HTTP and reverse proxy server
After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/var/app/support/logs/nginx.pid
EnvironmentFile=/var/app/support/app.env
ExecStartPre=/opt/nginx/sbin/nginx -t
ExecStart=/opt/nginx/sbin/nginx
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true

[Install]
WantedBy=multi-user.target

================================================
FILE: roles/apps/handlers/main.yml
================================================
---
# file: roles/apps/handlers/main.yml
- include: nginx.yml

================================================
FILE: roles/apps/handlers/nginx.yml
================================================
---
# file: roles/apps/handlers/nginx.yml
- name: restart nginx
  service: name=nginx state=restarted

================================================
FILE: roles/apps/tasks/main.yml
================================================
---
# file: roles/apps/tasks/main.yml
- include: nginx.yml

- name: open port 80
  firewalld: service=http
  args:
    permanent: true
    state: enabled
    zone: public
  notify:
    - reload firewall rules

================================================
FILE: roles/apps/tasks/nginx.yml
================================================
---
# file: roles/apps/tasks/nginx.yml
- name: install passenger gem
  command: gem install passenger -v 4.0.56
  args:
    creates: /usr/local/bin/passenger-install-nginx-module

# Compiling the passenger nginx module can exhaust memory on small droplets,
# so temporarily create a 1GB swapfile for the build and remove it afterwards.
- name: install nginx and passenger
  command: /bin/bash -l -c "swapoff /swapfile; rm /swapfile; dd if=/dev/zero of=/swapfile bs=1024 count=1024k; mkswap /swapfile; swapon /swapfile; passenger-install-nginx-module --auto --prefix=/opt/nginx --auto-download --languages ruby; swapoff /swapfile; rm /swapfile"
  args:
    creates: /opt/nginx/sbin/nginx

- name: send nginx conf
  template: src=../templates/nginx.conf.j2 dest=/opt/nginx/conf/nginx.conf
  args:
    mode: 0644
  tags: nginx
  notify:
    - restart nginx

- name: send nginx service
  copy: src=../files/nginx.service dest=/etc/systemd/system/nginx.service
  args:
    mode: 0644
  tags: nginx
  notify:
    - reload services

- name: start nginx service
  service: name=nginx state=started
  args:
    enabled: true
  tags: nginx

- name: logrotate nginx / rails
  template: src=../templates/app.logrotate.j2 dest=/etc/logrotate.d/appnginx
  args:
    mode: 0644
  tags: nginx

================================================
FILE: roles/apps/templates/app.logrotate.j2
================================================
/var/app/shared/log/{{ myapp_env }}.log {
  missingok
  notifempty
  size 20M
  daily
  create 0644 webapp webapp
}
/var/app/support/logs/error.log {
  missingok
  notifempty
  size 20M
  daily
  create 0644 root root
}
/var/app/support/logs/host.access.log {
  missingok
  notifempty
  size 20M
  daily
  create 0644 root root
}

================================================
FILE: roles/apps/templates/nginx.conf.j2
================================================
user webapp;
worker_processes 4;

error_log /var/app/support/logs/error.log;
pid /var/app/support/logs/nginx.pid;

events {
    worker_connections 1024;
}

http {
    passenger_root /usr/local/lib/ruby/gems/2.1.0/gems/passenger-4.0.56;
    passenger_ruby /usr/local/bin/ruby;

    include mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
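    # All nginx logs live under /var/app/support/logs, where the
    # app.logrotate.j2 rules above rotate them.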
    access_log /var/app/support/logs/host.access.log main;

    sendfile on;
    #tcp_nopush on;

    #keepalive_timeout 0;
    keepalive_timeout 65;

    gzip on;

    server {
        listen 80;
        server_name localhost;
        passenger_enabled on;
        rails_env {{ RAILS_ENV }};
        root /var/app/current/public;

        location ~ \.(swf|png|jpg|gif|css|js|ttf|woff)$ {
            root /var/app/current/public;
            if ($request_method = 'OPTIONS') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Credentials' 'true';
                add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
                add_header 'Access-Control-Max-Age' 1728000;
                add_header 'Content-Type' 'text/plain charset=UTF-8';
                add_header 'Content-Length' 0;
                return 204;
            }
            if ($request_method = 'GET') {
                add_header 'Access-Control-Allow-Origin' '*';
                add_header 'Access-Control-Allow-Credentials' 'true';
                add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS';
                add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
            }
        }
    }
}

================================================
FILE: roles/common/files/install-ruby.sh
================================================
#!/bin/sh
# Abort on the first failing step so Ansible sees a non-zero exit code.
set -e
wget http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.5.tar.gz
tar zxvf ruby-2.1.5.tar.gz
cd ruby-2.1.5
./configure
make
make install
exit 0

================================================
FILE: roles/common/files/myapp.github.pem
================================================
# REPLACE THIS FILE WITH YOUR GIT DEPLOY PRIVATE KEY

================================================
FILE: roles/common/files/myapp.github.pem.pub
================================================
# REPLACE THIS FILE WITH YOUR GIT DEPLOY PUBLIC KEY

================================================
FILE: roles/common/files/public_key
================================================
# PUT A PUBLIC KEY HERE, THIS IS THE KEY THAT WILL BE USED TO SSH AS webapp USER

================================================
FILE: roles/common/files/wheel_extensions.sudoers
================================================
Cmnd_Alias SERVICES = /usr/bin/systemctl
webapp ALL = NOPASSWD: SERVICES

================================================
FILE: roles/common/handlers/firewalld.yml
================================================
---
# file: roles/common/handlers/firewalld.yml
- name: reload firewall rules
  command: firewall-cmd --reload

================================================
FILE: roles/common/handlers/main.yml
================================================
---
# file: roles/common/handlers/main.yml
- include: firewalld.yml
- include: services.yml

================================================
FILE: roles/common/handlers/services.yml
================================================
---
# file: roles/common/handlers/services.yml
- name: reload services
  command: systemctl daemon-reload

================================================
FILE: roles/common/tasks/firewalld.yml
================================================
---
# file: roles/common/tasks/firewalld.yml
- name: start firewalld service
  service: name=firewalld state=started
  args:
    enabled: true
  tags: firewalld

- name: send intranet zone
  template: src=../templates/firewall.xml.j2 dest=/etc/firewalld/zones/internal.xml
  args:
    mode: 0644
    owner: root
    group: root
  notify:
    - reload firewall rules

================================================
FILE: roles/common/tasks/main.yml
================================================ --- # file: roles/common/tasks/main.yml - include_vars: "group_vars/{{ myapp_env }}.yml" - include: users.yml - include: yum.yml - include: ruby.yml - include: firewalld.yml - name: make app folder command: "/bin/bash -c 'mkdir -p /var/app; chown webapp:wheel /var/app; chmod 744 /var/app'" args: creates: /var/app - name: make support folder command: "/bin/bash -c 'mkdir -p /var/app/support; chown webapp:wheel /var/app/support; chmod 744 /var/app/support'" args: creates: /var/app/support - name: make logs folder command: "/bin/bash -c 'mkdir -p /var/app/support/logs; chown webapp:wheel /var/app/support/logs; chmod 744 /var/app/support/logs'" args: creates: /var/app/support/logs - name: send app environment template: src=../templates/app.env.j2 dest=/var/app/support/app.env args: owner: webapp group: webapp mode: "u=rw,g=r,o=r" notify: - restart nginx tags: nginx - name: set utc timezone command: "/bin/bash -c 'unlink /etc/localtime; ln -s /usr/share/zoneinfo/UTC /etc/localtime'" ================================================ FILE: roles/common/tasks/ruby.yml ================================================ --- # file: roles/common/tasks/ruby.yml - name: make init directory command: mkdir -p /var/init args: creates: /var/init - name: make source directory command: mkdir -p /src args: creates: /src - name: send ruby install script copy: src=../files/install-ruby.sh dest=/var/init/install-ruby.sh args: mode: 0700 tags: ruby - name: be sure ruby is installed command: /var/init/install-ruby.sh args: chdir: /src creates: /usr/local/bin/ruby tags: ruby - name: install bundler command: gem install bundler args: creates: /usr/local/bin/bundle ================================================ FILE: roles/common/tasks/users.yml ================================================ --- # file: roles/common/tasks/users.yml - name: be sure webapp group is created group: name=webapp - name: be sure webapp user is created user: name=webapp args: comment: "myapp app user" group: webapp shell: /bin/bash home: /home/webapp - name: make ssh directory command: "/bin/bash -c 'mkdir -p /home/webapp/.ssh; chown webapp:wheel /home/webapp/.ssh; chmod 700 /home/webapp/.ssh'" args: creates: /home/webapp/.ssh - name: send github pem copy: src=../files/myapp.github.pem dest=/home/webapp/.ssh/id_rsa args: mode: 0400 owner: webapp group: wheel - name: add base rsa public key authorized_key: user=webapp args: key: "{{ lookup('file', '../files/public_key') }}" manage_dir: no - name: make sudoers directory command: mkdir -p /etc/sudoers.d args: creates: /etc/sudoers.d - name: allow wheel to sudo copy: src=../files/wheel_extensions.sudoers dest=/etc/sudoers.d/wheel_extensions args: mode: 0440 tags: ruby ================================================ FILE: roles/common/tasks/yum.yml ================================================ --- # file: roles/common/tasks/yum.yml - name: install epel repo command: rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm args: creates: /etc/yum.repos.d/epel.repo - name: be sure git is installed yum: pkg=git state=installed tags: git - name: be sure nodejs is installed yum: pkg=nodejs enablerepo=epel state=installed tags: nodejs - name: be sure lsof is installed yum: pkg=lsof state=installed tags: lsof - name: be sure wget is installed yum: pkg=wget state=installed tags: wget - name: be sure curl-devel is installed yum: pkg=curl-devel state=installed tags: curl-devel - name: be sure ImageMagick is installed yum: 
pkg=ImageMagick state=installed
  tags: ImageMagick

- name: be sure ImageMagick-devel is installed
  yum: pkg=ImageMagick-devel state=installed
  tags: ImageMagick-devel

- name: be sure libxml2 is installed
  yum: pkg=libxml2 state=installed
  tags: libxml2

- name: be sure libxml2-devel is installed
  yum: pkg=libxml2-devel state=installed
  tags: libxml2-devel

- name: be sure libxslt is installed
  yum: pkg=libxslt state=installed
  tags: libxslt

- name: be sure libxslt-devel is installed
  yum: pkg=libxslt-devel state=installed
  tags: libxslt-devel

- name: be sure openssl is installed
  yum: pkg=openssl state=installed
  tags: openssl

- name: be sure openssl-devel is installed
  yum: pkg=openssl-devel state=installed
  tags: openssl-devel

- name: be sure postgresql is installed
  yum: pkg=postgresql state=installed
  tags: postgresql

- name: be sure postgresql-libs is installed
  yum: pkg=postgresql-libs state=installed
  tags: postgresql-libs

- name: be sure postgresql-devel is installed
  yum: pkg=postgresql-devel state=installed
  tags: postgresql-devel

- name: be sure dev tools are installed
  yum: pkg='@development' state=installed
  tags: dev-tools

================================================
FILE: roles/common/templates/app.env.j2
================================================
DATABASE_URL="postgres://myapp:{{ postgres_password }}@{{ groups[myapp_env + '-myapp-services'][0] }}/myapp_{{ myapp_env }}"
REDIS_URL="redis://{{ groups[myapp_env + '-myapp-services'][0] }}/"
SECRET_KEY_BASE={{ SECRET_KEY_BASE }}
RAILS_ENV={{ RAILS_ENV }}

================================================
FILE: roles/common/templates/firewall.xml.j2
================================================
{# Whitelist every host in the stack as a source for the internal zone #}
<?xml version="1.0" encoding="utf-8"?>
<zone>
  <short>internal</short>
  <description>internal network zone</description>
{% for host in groups[myapp_env + '-myapp-apps'] %}
  <source address="{{ host }}"/>
{% endfor %}
{% for host in groups[myapp_env + '-myapp-services'] %}
  <source address="{{ host }}"/>
{% endfor %}
{% for host in groups[myapp_env + '-myapp-loadbalancers'] %}
  <source address="{{ host }}"/>
{% endfor %}
{% for host in groups[myapp_env + '-myapp-workers'] %}
  <source address="{{ host }}"/>
{% endfor %}
</zone>

================================================
FILE: roles/loadbalancers/handlers/main.yml
================================================
---
# file: roles/loadbalancers/handlers/main.yml
# The common role notifies "restart nginx" when app.env changes; there is no
# nginx on the loadbalancers, so satisfy that notification with a no-op.
- name: restart nginx
  command: "echo nop"

- name: restart haproxy
  service: name=haproxy state=restarted

================================================
FILE: roles/loadbalancers/tasks/haproxy.yml
================================================
---
# file: roles/loadbalancers/tasks/haproxy.yml
- name: send haproxy config based on app inventory
  template: src=../templates/haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg
  args:
    owner: root
    group: root
    mode: "u=rw,g=r,o=r"
  notify:
    - restart haproxy
  tags: haproxy

================================================
FILE: roles/loadbalancers/tasks/main.yml
================================================
---
# file: roles/loadbalancers/tasks/main.yml
- include: yum.yml
- include: haproxy.yml

- name: open port 80
  firewalld: service=http
  args:
    permanent: true
    state: enabled
    zone: public
  notify:
    - reload firewall rules

================================================
FILE: roles/loadbalancers/tasks/yum.yml
================================================
---
# file: roles/loadbalancers/tasks/yum.yml
- name: be sure haproxy is installed
  yum: pkg=haproxy state=installed
  tags: haproxy

================================================
FILE: roles/loadbalancers/templates/haproxy.cfg.j2
================================================
global
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on
stats unix socket stats socket /var/lib/haproxy/stats defaults mode http log global option httplog option dontlognull option http-server-close option forwardfor except 127.0.0.0/8 option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 10000 frontend main *:80 default_backend app backend app balance roundrobin {% for host in groups[myapp_env + '-myapp-apps'] %} server app{{ loop.index }} {{ host }}:80 check {% endfor %} ================================================ FILE: roles/services/files/postgres-bk.logrotate ================================================ /var/backups/postgres.pgdump.gz { daily rotate 8 nocompress create 640 webapp webapp postrotate sudo -u postgres pg_dumpall > /var/backups/postgres.pgdump gzip -9f /var/backups/postgres.pgdump endscript } ================================================ FILE: roles/services/files/postgresql.conf ================================================ # ----------------------------- # PostgreSQL configuration file # ----------------------------- # # This file consists of lines of the form: # # name = value # # (The "=" is optional.) Whitespace may be used. Comments are introduced with # "#" anywhere on a line. The complete list of parameter names and allowed # values can be found in the PostgreSQL documentation. # # The commented-out settings shown in this file represent the default values. # Re-commenting a setting is NOT sufficient to revert it to the default value; # you need to reload the server. # # This file is read on server startup and when the server receives a SIGHUP # signal. If you edit the file on a running system, you have to SIGHUP the # server for the changes to take effect, or use "pg_ctl reload". Some # parameters, which are marked below, require a server shutdown and restart to # take effect. # # Any parameter can also be given as a command-line option to the server, e.g., # "postgres -c log_connections=on". Some parameters can be changed at run time # with the "SET" SQL command. # # Memory units: kB = kilobytes Time units: ms = milliseconds # MB = megabytes s = seconds # GB = gigabytes min = minutes # h = hours # d = days #------------------------------------------------------------------------------ # FILE LOCATIONS #------------------------------------------------------------------------------ # The default values of these variables are driven from the -D command-line # option or PGDATA environment variable, represented here as ConfigDir. #data_directory = 'ConfigDir' # use data in another directory # (change requires restart) #hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file # (change requires restart) #ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file # (change requires restart) # If external_pid_file is not explicitly set, no extra PID file is written. 
#external_pid_file = '' # write an extra PID file # (change requires restart) #------------------------------------------------------------------------------ # CONNECTIONS AND AUTHENTICATION #------------------------------------------------------------------------------ # - Connection Settings - listen_addresses='*' #listen_addresses = 'localhost' # what IP address(es) to listen on; # comma-separated list of addresses; # defaults to 'localhost'; use '*' for all # (change requires restart) #port = 5432 # (change requires restart) # Note: In RHEL/Fedora installations, you can't set the port number here; # adjust it in the service file instead. max_connections = 100 # (change requires restart) # Note: Increasing max_connections costs ~400 bytes of shared memory per # connection slot, plus lock space (see max_locks_per_transaction). #superuser_reserved_connections = 3 # (change requires restart) #unix_socket_directories = '/var/run/postgresql, /tmp' # comma-separated list of directories # (change requires restart) #unix_socket_group = '' # (change requires restart) #unix_socket_permissions = 0777 # begin with 0 to use octal notation # (change requires restart) #bonjour = off # advertise server via Bonjour # (change requires restart) #bonjour_name = '' # defaults to the computer name # (change requires restart) # - Security and Authentication - #authentication_timeout = 1min # 1s-600s #ssl = off # (change requires restart) #ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers # (change requires restart) #ssl_renegotiation_limit = 512MB # amount of data between renegotiations #ssl_cert_file = 'server.crt' # (change requires restart) #ssl_key_file = 'server.key' # (change requires restart) #ssl_ca_file = '' # (change requires restart) #ssl_crl_file = '' # (change requires restart) #password_encryption = on #db_user_namespace = off # Kerberos and GSSAPI #krb_server_keyfile = '' #krb_srvname = 'postgres' # (Kerberos only) #krb_caseins_users = off # - TCP Keepalives - # see "man 7 tcp" for details #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; # 0 selects the system default #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; # 0 selects the system default #tcp_keepalives_count = 0 # TCP_KEEPCNT; # 0 selects the system default #------------------------------------------------------------------------------ # RESOURCE USAGE (except WAL) #------------------------------------------------------------------------------ # - Memory - shared_buffers = 32MB # min 128kB # (change requires restart) #temp_buffers = 8MB # min 800kB #max_prepared_transactions = 0 # zero disables the feature # (change requires restart) # Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory # per transaction slot, plus lock space (see max_locks_per_transaction). # It is not advisable to set max_prepared_transactions nonzero unless you # actively intend to use prepared transactions. 
#work_mem = 1MB # min 64kB #maintenance_work_mem = 16MB # min 1MB #max_stack_depth = 2MB # min 100kB # - Disk - #temp_file_limit = -1 # limits per-session temp file space # in kB, or -1 for no limit # - Kernel Resource Usage - #max_files_per_process = 1000 # min 25 # (change requires restart) #shared_preload_libraries = '' # (change requires restart) # - Cost-Based Vacuum Delay - #vacuum_cost_delay = 0ms # 0-100 milliseconds #vacuum_cost_page_hit = 1 # 0-10000 credits #vacuum_cost_page_miss = 10 # 0-10000 credits #vacuum_cost_page_dirty = 20 # 0-10000 credits #vacuum_cost_limit = 200 # 1-10000 credits # - Background Writer - #bgwriter_delay = 200ms # 10-10000ms between rounds #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round #bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round # - Asynchronous Behavior - #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching #------------------------------------------------------------------------------ # WRITE AHEAD LOG #------------------------------------------------------------------------------ # - Settings - #wal_level = minimal # minimal, archive, or hot_standby # (change requires restart) #fsync = on # turns forced synchronization on or off #synchronous_commit = on # synchronization level; # off, local, remote_write, or on #wal_sync_method = fsync # the default is the first option # supported by the operating system: # open_datasync # fdatasync (default on Linux) # fsync # fsync_writethrough # open_sync #full_page_writes = on # recover from partial page writes #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers # (change requires restart) #wal_writer_delay = 200ms # 1-10000 milliseconds #commit_delay = 0 # range 0-100000, in microseconds #commit_siblings = 5 # range 1-1000 # - Checkpoints - #checkpoint_segments = 3 # in logfile segments, min 1, 16MB each #checkpoint_timeout = 5min # range 30s-1h #checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 #checkpoint_warning = 30s # 0 disables # - Archiving - #archive_mode = off # allows archiving to be done # (change requires restart) #archive_command = '' # command to use to archive a logfile segment # placeholders: %p = path of file to archive # %f = file name only # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' #archive_timeout = 0 # force a logfile segment switch after this # number of seconds; 0 disables #------------------------------------------------------------------------------ # REPLICATION #------------------------------------------------------------------------------ # - Sending Server(s) - # Set these on the master and on any standby that will send replication data. #max_wal_senders = 0 # max number of walsender processes # (change requires restart) #wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables #replication_timeout = 60s # in milliseconds; 0 disables # - Master Server - # These settings are ignored on a standby server. #synchronous_standby_names = '' # standby servers that provide sync rep # comma-separated list of application_name # from standby(s); '*' = all #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed # - Standby Servers - # These settings are ignored on a master server. 
#hot_standby = off # "on" allows queries during recovery # (change requires restart) #max_standby_archive_delay = 30s # max delay before canceling queries # when reading WAL from archive; # -1 allows indefinite delay #max_standby_streaming_delay = 30s # max delay before canceling queries # when reading streaming WAL; # -1 allows indefinite delay #wal_receiver_status_interval = 10s # send replies at least this often # 0 disables #hot_standby_feedback = off # send info from standby to prevent # query conflicts #------------------------------------------------------------------------------ # QUERY TUNING #------------------------------------------------------------------------------ # - Planner Method Configuration - #enable_bitmapscan = on #enable_hashagg = on #enable_hashjoin = on #enable_indexscan = on #enable_indexonlyscan = on #enable_material = on #enable_mergejoin = on #enable_nestloop = on #enable_seqscan = on #enable_sort = on #enable_tidscan = on # - Planner Cost Constants - #seq_page_cost = 1.0 # measured on an arbitrary scale #random_page_cost = 4.0 # same scale as above #cpu_tuple_cost = 0.01 # same scale as above #cpu_index_tuple_cost = 0.005 # same scale as above #cpu_operator_cost = 0.0025 # same scale as above #effective_cache_size = 128MB # - Genetic Query Optimizer - #geqo = on #geqo_threshold = 12 #geqo_effort = 5 # range 1-10 #geqo_pool_size = 0 # selects default based on effort #geqo_generations = 0 # selects default based on effort #geqo_selection_bias = 2.0 # range 1.5-2.0 #geqo_seed = 0.0 # range 0.0-1.0 # - Other Planner Options - #default_statistics_target = 100 # range 1-10000 #constraint_exclusion = partition # on, off, or partition #cursor_tuple_fraction = 0.1 # range 0.0-1.0 #from_collapse_limit = 8 #join_collapse_limit = 8 # 1 disables collapsing of explicit # JOIN clauses #------------------------------------------------------------------------------ # ERROR REPORTING AND LOGGING #------------------------------------------------------------------------------ # - Where to Log - #log_destination = 'stderr' # Valid values are combinations of # stderr, csvlog, syslog, and eventlog, # depending on platform. csvlog # requires logging_collector to be on. # This is used when logging to stderr: logging_collector = on # Enable capturing of stderr and csvlog # into log files. Required to be on for # csvlogs. # (change requires restart) # These are only used if logging_collector is on: #log_directory = 'pg_log' # directory where log files are written, # can be absolute or relative to PGDATA log_filename = 'postgresql-%a.log' # log file name pattern, # can include strftime() escapes #log_file_mode = 0600 # creation mode for log files, # begin with 0 to use octal notation log_truncate_on_rotation = on # If on, an existing log file with the # same name as the new log file will be # truncated rather than appended to. # But such truncation only occurs on # time-driven rotation, not on restarts # or size-driven rotation. Default is # off, meaning append to existing files # in all cases. log_rotation_age = 1d # Automatic rotation of logfiles will # happen after that time. 0 disables. log_rotation_size = 0 # Automatic rotation of logfiles will # happen after that much log output. # 0 disables. 
# These are relevant when logging to syslog: #syslog_facility = 'LOCAL0' #syslog_ident = 'postgres' # This is only relevant when logging to eventlog (win32): #event_source = 'PostgreSQL' # - When to Log - #client_min_messages = notice # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # log # notice # warning # error #log_min_messages = warning # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # info # notice # warning # error # log # fatal # panic #log_min_error_statement = error # values in order of decreasing detail: # debug5 # debug4 # debug3 # debug2 # debug1 # info # notice # warning # error # log # fatal # panic (effectively off) #log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements # and their durations, > 0 logs only # statements running at least this number # of milliseconds # - What to Log - #debug_print_parse = off #debug_print_rewritten = off #debug_print_plan = off #debug_pretty_print = on #log_checkpoints = off #log_connections = off #log_disconnections = off #log_duration = off #log_error_verbosity = default # terse, default, or verbose messages #log_hostname = off #log_line_prefix = '' # special values: # %a = application name # %u = user name # %d = database name # %r = remote host and port # %h = remote host # %p = process ID # %t = timestamp without milliseconds # %m = timestamp with milliseconds # %i = command tag # %e = SQL state # %c = session ID # %l = session line number # %s = session start timestamp # %v = virtual transaction ID # %x = transaction ID (0 if none) # %q = stop here in non-session # processes # %% = '%' # e.g. '<%u%%%d> ' #log_lock_waits = off # log lock waits >= deadlock_timeout #log_statement = 'none' # none, ddl, mod, all #log_temp_files = -1 # log temporary files equal or larger # than the specified size in kilobytes; # -1 disables, 0 logs all temp files log_timezone = 'US/Eastern' #------------------------------------------------------------------------------ # RUNTIME STATISTICS #------------------------------------------------------------------------------ # - Query/Index Statistics Collector - #track_activities = on #track_counts = on #track_io_timing = off #track_functions = none # none, pl, all #track_activity_query_size = 1024 # (change requires restart) #update_process_title = on #stats_temp_directory = 'pg_stat_tmp' # - Statistics Monitoring - #log_parser_stats = off #log_planner_stats = off #log_executor_stats = off #log_statement_stats = off #------------------------------------------------------------------------------ # AUTOVACUUM PARAMETERS #------------------------------------------------------------------------------ #autovacuum = on # Enable autovacuum subprocess? 'on' # requires track_counts to also be on. #log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and # their durations, > 0 logs only # actions running at least this number # of milliseconds. 
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses # (change requires restart) #autovacuum_naptime = 1min # time between autovacuum runs #autovacuum_vacuum_threshold = 50 # min number of row updates before # vacuum #autovacuum_analyze_threshold = 50 # min number of row updates before # analyze #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum # (change requires restart) #autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for # autovacuum, in milliseconds; # -1 means use vacuum_cost_delay #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for # autovacuum, -1 means use # vacuum_cost_limit #------------------------------------------------------------------------------ # CLIENT CONNECTION DEFAULTS #------------------------------------------------------------------------------ # - Statement Behavior - #search_path = '"$user",public' # schema names #default_tablespace = '' # a tablespace name, '' uses the default #temp_tablespaces = '' # a list of tablespace names, '' uses # only default tablespace #check_function_bodies = on #default_transaction_isolation = 'read committed' #default_transaction_read_only = off #default_transaction_deferrable = off #session_replication_role = 'origin' #statement_timeout = 0 # in milliseconds, 0 is disabled #vacuum_freeze_min_age = 50000000 #vacuum_freeze_table_age = 150000000 #bytea_output = 'hex' # hex, escape #xmlbinary = 'base64' #xmloption = 'content' # - Locale and Formatting - datestyle = 'iso, mdy' #intervalstyle = 'postgres' timezone = 'US/Eastern' #timezone_abbreviations = 'Default' # Select the set of available time zone # abbreviations. Currently, there are # Default # Australia # India # You can create your own file in # share/timezonesets/. #extra_float_digits = 0 # min -15, max 3 #client_encoding = sql_ascii # actually, defaults to database # encoding # These settings are initialized by initdb, but they can be changed. lc_messages = 'en_US.UTF-8' # locale for system error message # strings lc_monetary = 'en_US.UTF-8' # locale for monetary formatting lc_numeric = 'en_US.UTF-8' # locale for number formatting lc_time = 'en_US.UTF-8' # locale for time formatting # default configuration for text search default_text_search_config = 'pg_catalog.english' # - Other Defaults - #dynamic_library_path = '$libdir' #local_preload_libraries = '' #------------------------------------------------------------------------------ # LOCK MANAGEMENT #------------------------------------------------------------------------------ #deadlock_timeout = 1s #max_locks_per_transaction = 64 # min 10 # (change requires restart) # Note: Each lock table slot uses ~270 bytes of shared memory, and there are # max_locks_per_transaction * (max_connections + max_prepared_transactions) # lock table slots. 
#max_pred_locks_per_transaction = 64 # min 10 # (change requires restart) #------------------------------------------------------------------------------ # VERSION/PLATFORM COMPATIBILITY #------------------------------------------------------------------------------ # - Previous PostgreSQL Versions - #array_nulls = on #backslash_quote = safe_encoding # on, off, or safe_encoding #default_with_oids = off #escape_string_warning = on #lo_compat_privileges = off #quote_all_identifiers = off #sql_inheritance = on #standard_conforming_strings = on #synchronize_seqscans = on # - Other Platforms and Clients - #transform_null_equals = off #------------------------------------------------------------------------------ # ERROR HANDLING #------------------------------------------------------------------------------ #exit_on_error = off # terminate session on any error? #restart_after_crash = on # reinitialize after backend crash? #------------------------------------------------------------------------------ # CUSTOMIZED OPTIONS #------------------------------------------------------------------------------ # Add settings for extensions here ================================================ FILE: roles/services/files/redis.conf ================================================ # Redis configuration file example # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: # # 1k => 1000 bytes # 1kb => 1024 bytes # 1m => 1000000 bytes # 1mb => 1024*1024 bytes # 1g => 1000000000 bytes # 1gb => 1024*1024*1024 bytes # # units are case insensitive so 1GB 1Gb 1gB are all the same. ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you # have a standard template that goes to all Redis server but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # # Notice option "include" won't be rewritten by command "CONFIG REWRITE" # from admin or Redis Sentinel. Since Redis always uses the last processed # line as value of a configuration directive, you'd better put includes # at the beginning of this file to avoid overwriting config change at runtime. # # If instead you are interested in using includes to override configuration # options, it is better to use include as the last line. # # include /path/to/local.conf # include /path/to/other.conf ################################ GENERAL ##################################### # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. daemonize no # When running daemonized, Redis writes a pid file in /var/run/redis.pid by # default. You can specify a custom pid file location here. pidfile /var/run/redis/redis.pid # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket. port 6379 # TCP listen() backlog. # # In high requests-per-second environments you need an high backlog in order # to avoid slow clients connections issues. Note that the Linux kernel # will silently truncate it to the value of /proc/sys/net/core/somaxconn so # make sure to raise both the value of somaxconn and tcp_max_syn_backlog # in order to get the desired effect. tcp-backlog 511 # By default Redis listens for connections from all the network interfaces # available on the server. 
# It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
bind 0.0.0.0

# Specify the path for the Unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
# unixsocketperm 700

# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0

# TCP keepalive.
#
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
# of communication. This is useful for two reasons:
#
# 1) Detect dead peers.
# 2) Take the connection alive from the point of view of network
#    equipment in the middle.
#
# On Linux, the specified value (in seconds) is the period used to send ACKs.
# Note that to close the connection the double of the time is needed.
# On other kernels the period depends on the kernel configuration.
#
# A reasonable value for this option is 60 seconds.
tcp-keepalive 0

# Specify the server verbosity level.
# This can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice

# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis.log

# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no

# Specify the syslog identity.
# syslog-ident redis

# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0

# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16

################################ SNAPSHOTTING ################################
#
# Save the DB on disk:
#
#   save <seconds> <changes>
#
#   Will save the DB if both the given number of seconds and the given
#   number of write operations against the DB occurred.
#
#   In the example below the behaviour will be to save:
#   after 900 sec (15 min) if at least 1 key changed
#   after 300 sec (5 min) if at least 10 keys changed
#   after 60 sec if at least 10000 keys changed
#
#   Note: you can disable saving entirely by commenting out all the "save" lines.
#
#   It is also possible to remove all the previously configured save
#   points by adding a save directive with a single empty string argument
#   like in the following example:
#
#   save ""

save 900 1
save 300 10
save 60 10000

# By default Redis will stop accepting writes if RDB snapshots are enabled
# (at least one save point) and the latest background save failed.
# This will make the user aware (in a hard way) that data is not persisting
# on disk properly, otherwise chances are that no one will notice and some
# disaster will happen.
#
# If the background saving process starts working again Redis will
# automatically allow writes again.
#
# However if you have set up proper monitoring of the Redis server
# and persistence, you may want to disable this feature so that Redis will
# continue to work as usual even if there are problems with disk,
# permissions, and so forth.
stop-writes-on-bgsave-error yes

# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes

# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
# This makes the format more resistant to corruption but there is a performance
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
# for maximum performance.
#
# RDB files created with checksum disabled have a checksum of zero that will
# tell the loading code to skip the check.
rdbchecksum yes

# The filename where to dump the DB
dbfilename dump.rdb

# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis/

################################# REPLICATION #################################

# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. A few things to understand ASAP about Redis replication.
#
# 1) Redis replication is asynchronous, but you can configure a master to
#    stop accepting writes if it appears to be not connected with at least
#    a given number of slaves.
# 2) Redis slaves are able to perform a partial resynchronization with the
#    master if the replication link is lost for a relatively small amount of
#    time. You may want to configure the replication backlog size (see the next
#    sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
#    network partition slaves automatically try to reconnect to masters
#    and resynchronize with them.
#
# slaveof <masterip> <masterport>

# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>

# When a slave loses its connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
#    still reply to client requests, possibly with out of date data, or the
#    data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
#    an error "SYNC with master in progress" to all kinds of commands
#    but to INFO and SLAVEOF.
#
slave-serve-stale-data yes

# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written on a slave will be easily deleted after resync with the master) but
# may also cause problems if clients are writing to it because of a
# misconfiguration.
#
# Since Redis 2.6 by default slaves are read-only.
#
# Note: read only slaves are not designed to be exposed to untrusted clients
# on the internet.
# It's just a protection layer against misuse of the instance.
# Still a read only slave exports by default all the administrative commands
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
# security of read only slaves using 'rename-command' to shadow all the
# administrative / dangerous commands.
slave-read-only yes

# Slaves send PINGs to the server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10

# The following option sets the replication timeout for:
#
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
# 2) Master timeout from the point of view of slaves (data, pings).
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60

# Disable TCP_NODELAY on the slave socket after SYNC?
#
# If you select "yes" Redis will use a smaller number of TCP packets and
# less bandwidth to send data to slaves. But this can add a delay for
# the data to appear on the slave side, up to 40 milliseconds with
# Linux kernels using a default configuration.
#
# If you select "no" the delay for data to appear on the slave side will
# be reduced but more bandwidth will be used for replication.
#
# By default we optimize for low latency, but in very high traffic conditions
# or when the master and slaves are many hops away, turning this to "yes" may
# be a good idea.
repl-disable-tcp-nodelay no

# Set the replication backlog size. The backlog is a buffer that accumulates
# slave data when slaves are disconnected for some time, so that when a slave
# wants to reconnect again, often a full resync is not needed, but a partial
# resync is enough, just passing the portion of data the slave missed while
# disconnected.
#
# The bigger the replication backlog, the longer the slave can be
# disconnected and still be able to perform a partial resynchronization later.
#
# The backlog is only allocated once there is at least a slave connected.
#
# repl-backlog-size 1mb

# After a master has no longer connected slaves for some time, the backlog
# will be freed. The following option configures the amount of seconds that
# need to elapse, starting from the time the last slave disconnected, for
# the backlog buffer to be freed.
#
# A value of 0 means to never release the backlog.
#
# repl-backlog-ttl 3600

# The slave priority is an integer number published by Redis in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100

# It is possible for a master to stop accepting writes if there are less than
# N slaves connected, having a lag less than or equal to M seconds.
#
# The N slaves need to be in "online" state.
#
# The lag in seconds, that must be <= the specified value, is calculated from
# the last ping received from the slave, that is usually sent every second.
#
# This option does not GUARANTEE that N replicas will accept the write, but
# will limit the window of exposure for lost writes in case not enough slaves
# are available, to the specified number of seconds.
#
# For example to require at least 3 slaves with a lag <= 10 seconds use:
#
# min-slaves-to-write 3
# min-slaves-max-lag 10
#
# Setting one or the other to 0 disables the feature.
#
# By default min-slaves-to-write is set to 0 (feature disabled) and
# min-slaves-max-lag is set to 10.

################################## SECURITY ###################################

# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared

# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to slaves may cause problems.

################################### LIMITS ####################################

# Set the max number of connected clients at the same time. By default
# this limit is set to 10000 clients, however if the Redis server is not
# able to configure the process file limit to allow for the specified limit
# the max number of allowed clients is set to the current file limit
# minus 32 (as Redis reserves a few file descriptors for internal uses).
#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 10000

# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves are subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select among five behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with any of the above policies, Redis will return an error on write
#       operations, when there are no suitable keys for eviction.
#
#       At the date of writing these commands are: set setnx setex append
#       incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
#       sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
#       zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
#       getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru

# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can select as well the sample
# size to check. For instance by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3

############################## APPEND ONLY MODE ###############################

# By default Redis asynchronously dumps the dataset on disk. This mode is
# good enough in many applications, but an issue with the Redis process or
# a power outage may result in a few minutes of writes lost (depending on
# the configured save points).
#
# The Append Only File is an alternative persistence mode that provides
# much better durability. For instance using the default data fsync policy
# (see later in the config file) Redis can lose just one second of writes in a
# dramatic event like a server power outage, or a single write if something
# wrong with the Redis process itself happens, but the operating system is
# still running correctly.
#
# AOF and RDB persistence can be enabled at the same time without problems.
# If the AOF is enabled on startup Redis will load the AOF, that is the file
# with the better durability guarantees.
#
# Please check http://redis.io/topics/persistence for more information.

appendonly no

# The name of the append only file (default: "appendonly.aof")

appendfilename "appendonly.aof"

# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec", as that's usually the right compromise between
# speed and data safety.
# It's up to you to understand if you can relax this to
# "no" that will let the operating system flush the output buffer when
# it wants, for better performance (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# For more details please check the following article:
# http://antirez.com/post/redis-persistence-demystified.html
#
# If unsure, use "everysec".

# appendfsync always
appendfsync everysec
# appendfsync no

# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving, the durability of Redis is
# the same as "appendfsync none". In practical terms, this means that it is
# possible to lose up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no

# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (if no rewrite has happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.

auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

################################ LUA SCRIPTING ###############################

# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that did not yet call write commands. The second
# is the only way to shut down the server in the case a write command was
# already issued by the script but the user doesn't want to wait for the natural
# termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
lua-time-limit 5000

################################## SLOW LOG ###################################

# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time.
# The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and cannot serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128

################################ LATENCY MONITOR ##############################

# The Redis latency monitoring subsystem samples different operations
# at runtime in order to collect data related to possible sources of
# latency of a Redis instance.
#
# Via the LATENCY command this information is available to the user, who can
# print graphs and obtain reports.
#
# The system only logs operations that were performed in a time equal to or
# greater than the amount of milliseconds specified via the
# latency-monitor-threshold configuration directive. When its value is set
# to zero, the latency monitor is turned off.
#
# By default latency monitoring is disabled since it is mostly not needed
# if you don't have latency issues, and collecting data has a performance
# impact, that while very small, can be measured under big load. Latency
# monitoring can easily be enabled at runtime using the command
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
latency-monitor-threshold 0

############################# Event notification ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance if keyspace events notification is enabled, and a client
# performs a DEL operation on key "foo" stored in the Database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" takes as argument a string that is composed
# of zero or more characters. The empty string means that notifications
# are disabled entirely.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
# notify-keyspace-events Elg
#
# Example 2: to get the stream of the expired keys subscribing to channel
# name __keyevent@0__:expired use:
#
# notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

================================================
FILE: roles/services/handlers/main.yml
================================================
---
# file: roles/services/handlers/main.yml
- include: postgres.yml
- include: redis.yml

- name: restart nginx
  command: "echo nop"

================================================
FILE: roles/services/handlers/postgres.yml
================================================
---
# file: roles/services/handlers/postgres.yml
- name: restart postgresql
  service: name=postgresql state=restarted

================================================
FILE: roles/services/handlers/redis.yml
================================================
---
# file: roles/services/handlers/redis.yml
- name: restart redis
  service: name=redis state=restarted

================================================
FILE: roles/services/tasks/main.yml
================================================
---
# file: roles/services/tasks/main.yml
- include: yum.yml
- include: postgres.yml
- include: redis.yml

================================================
FILE: roles/services/tasks/memcached.yml
================================================
---
# file: roles/services/tasks/memcached.yml
- name: start memcached service
  service: name=memcached state=started
  args:
    enabled: true
  tags: memcached-server

- name: open port 11211
  firewalld: port=11211/tcp
  args:
    permanent: true
    state: enabled
    zone: internal
  notify:
    - reload firewall rules
  tags: memcached-server

================================================
FILE: roles/services/tasks/postgres.yml
================================================
---
# file: roles/services/tasks/postgres.yml
- name: setup postgres
  command: postgresql-setup initdb
  args:
    creates: /var/lib/pgsql/data/PG_VERSION
  tags: postgresql-server

- name: send pg_hba.conf
  template: src=../templates/pg_hba.conf.j2 dest=/var/lib/pgsql/data/pg_hba.conf
  args:
    mode: 0600
    owner: postgres
    group: postgres
  tags: postgresql-server
  notify:
    - restart postgresql

- name: send postgresql.conf
  copy: src=../files/postgresql.conf dest=/var/lib/pgsql/data/postgresql.conf
  args:
    mode: 0600
    owner: postgres
    group: postgres
  tags: postgresql-server
  notify:
    - restart postgresql

- name: start postgresql service
  service: name=postgresql state=started
  args:
    enabled: true
  tags: postgresql-server

- name: make backup folder
  command: "/bin/bash -c 'mkdir -p /var/backups; chown postgres /var/backups; touch /var/backups/postgres.pgdump.gz'"
  args:
    creates: /var/backups/postgres.pgdump.gz
  tags: postgresql-server

- name: send backup rotate
  copy: src=../files/postgres-bk.logrotate dest=/etc/logrotate.d/postgres-bk
  args:
    mode: 0644
  tags: postgresql-server

- include_vars: "group_vars/{{ myapp_env }}.yml"
  tags: postgresql-server

- name: "create myapp_{{ RAILS_ENV }} db"
  postgresql_db:
  args:
    name: "myapp_{{ RAILS_ENV }}"
    encoding: 'UTF-8'
    lc_collate: 'en_US.UTF-8'
    lc_ctype: 'en_US.UTF-8'
  tags: postgresql-server

- name: create myapp db user
  postgresql_user: name=myapp
  args:
    db: "myapp_{{ RAILS_ENV }}"
    password: "{{ postgres_password }}"
    role_attr_flags: SUPERUSER
  tags: postgresql-server

- name: open port 5432
  firewalld: service=postgresql
  args:
    permanent: true
    state: enabled
    zone: internal
  notify:
    - reload firewall rules
  tags: postgresql-server

================================================
FILE: roles/services/tasks/redis.yml
================================================
---
# file: roles/services/tasks/redis.yml
- name: start redis service
  service: name=redis state=started
  args:
    enabled: true
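  # Note: throughout these roles the module line uses key=value shorthand,
  # while additional module parameters are supplied via the adjacent args: block.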
  tags: redis-server

- name: send redis.conf
  copy: src=../files/redis.conf dest=/etc/redis.conf
  args:
    mode: 0644
    owner: redis
    group: root
  tags: redis-server
  notify:
    - restart redis

- name: open port 6379
  firewalld: port=6379/tcp
  args:
    permanent: true
    state: enabled
    zone: internal
  notify:
    - reload firewall rules
  tags: redis-server

================================================
FILE: roles/services/tasks/yum.yml
================================================
---
# file: roles/services/tasks/yum.yml
- name: be sure postgresql-server is installed
  yum: pkg=postgresql-server state=installed
  tags: postgresql-server

- name: install psycopg2
  yum: pkg=python-psycopg2 state=installed
  tags: python-psycopg2

- name: be sure redis is installed
  yum: pkg=redis state=installed
  tags: redis-server

- name: be sure memcached is installed
  yum: pkg=memcached state=installed
  tags: memcached-server

================================================
FILE: roles/services/templates/pg_hba.conf.j2
================================================
# PostgreSQL Client Authentication Configuration File
# ===================================================

# TYPE  DATABASE        USER            ADDRESS                 METHOD

# "local" is for Unix domain socket connections only
local   all             all                                     trust
{% for host in groups[myapp_env + '-myapp-apps'] %}
host    all             all             {{ host }}/32           md5
{% endfor %}
{% for host in groups[myapp_env + '-myapp-workers'] %}
host    all             all             {{ host }}/32           md5
{% endfor %}
host    all             all             ::1/128                 trust

================================================
FILE: roles/workers/files/sidekiq.logrotate
================================================
/var/app/support/logs/error.log {
    missingok
    notifempty
    size 20M
    daily
    create 0644 webapp webapp
}

================================================
FILE: roles/workers/handlers/main.yml
================================================
---
# file: roles/workers/handlers/main.yml
- include: sidekiq.yml

- name: restart nginx
  command: "echo nop"

================================================
FILE: roles/workers/handlers/sidekiq.yml
================================================
---
# file: roles/workers/handlers/sidekiq.yml

================================================
FILE: roles/workers/tasks/main.yml
================================================
---
# file: roles/workers/tasks/main.yml
- include: yum.yml
- include: sidekiq.yml

================================================
FILE: roles/workers/tasks/sidekiq.yml
================================================
---
# file: roles/workers/tasks/sidekiq.yml
- name: send sidekiq service
  template: src=../templates/sidekiq.service.j2 dest=/etc/systemd/system/sidekiq.service
  args:
    mode: 0644
  tags: sidekiq
  notify:
    - reload services

- name: start sidekiq service on boot
  service: name=sidekiq
  args:
    enabled: yes

- name: logrotate sidekiq
  copy: src=../files/sidekiq.logrotate dest=/etc/logrotate.d/sidekiq
  args:
    mode: 0644
  tags: sidekiq

================================================
FILE: roles/workers/tasks/yum.yml
================================================
---
# file: roles/workers/tasks/yum.yml
- name: install awel-media repo
  command: yum -y install http://awel.domblogger.net/7/base/noarch/awel-release-7-2.noarch.rpm
  args:
    creates: /etc/yum.repos.d/awel.repo
  tags: ffmpeg

- name: be sure ffmpeg is installed
  yum: pkg=ffmpeg enablerepo=awel-media state=installed
  tags: ffmpeg

================================================
FILE: roles/workers/templates/sidekiq.service.j2
================================================
[Unit]
Description=Sidekiq
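# Rendered by Ansible from this template; {{ RAILS_ENV }} below is filled in
# when the "send sidekiq service" task runs.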
After=syslog.target network.target remote-fs.target nss-lookup.target

[Service]
Type=simple
WorkingDirectory=/var/app/current
PIDFile=/var/app/support/logs/sidekiq.pid
EnvironmentFile=/var/app/support/app.env
ExecStart=/usr/local/bin/bundle exec sidekiq --index 0 --pidfile /var/app/support/logs/sidekiq.pid --environment {{ RAILS_ENV }} --logfile /var/app/support/logs/sidekiq.log
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/bin/kill -s QUIT $MAINPID
Restart=always
PrivateTmp=true

[Install]
WantedBy=multi-user.target

================================================
FILE: services.yml
================================================
---
# file: services.yml
- hosts: "{{ myapp_env }}-myapp-services"
  remote_user: root
  roles:
    - common
    - services

================================================
FILE: test.yml
================================================
- hosts: all
  connection: local
  remote_user: root
  tasks:
    - group_by: key=name

================================================
FILE: workers.yml
================================================
---
# file: workers.yml
- hosts: "{{ myapp_env }}-myapp-workers"
  remote_user: root
  roles:
    - common
    - workers
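Usage sketch (editor's note, not part of the repository): with droplet groups
named as in these playbooks, a run against staging would presumably go through
the DigitalOcean dynamic inventory script, along these lines:

    ansible-playbook -i dohosts services.yml -e myapp_env=staging
    ansible-playbook -i dohosts workers.yml -e myapp_env=staging
    ansible-playbook -i dohosts apps.yml -e myapp_env=staging

The exact invocation (inventory path, extra vars) is an assumption; the
repository's readme.md is the authoritative reference.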