Repository: garywiz/chaperone Branch: master Commit: 9ff2c3a5b9c6 Files: 201 Total size: 473.5 KB Directory structure: gitextract_pnlxl5op/ ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README ├── README.md ├── chaperone/ │ ├── __init__.py │ ├── cproc/ │ │ ├── __init__.py │ │ ├── client.py │ │ ├── commands.py │ │ ├── process_manager.py │ │ ├── pt/ │ │ │ ├── __init__.py │ │ │ ├── cron.py │ │ │ ├── forking.py │ │ │ ├── inetd.py │ │ │ ├── notify.py │ │ │ ├── oneshot.py │ │ │ └── simple.py │ │ ├── subproc.py │ │ ├── version.py │ │ └── watcher.py │ ├── cutil/ │ │ ├── __init__.py │ │ ├── config.py │ │ ├── env.py │ │ ├── errors.py │ │ ├── events.py │ │ ├── format.py │ │ ├── logging.py │ │ ├── misc.py │ │ ├── notify.py │ │ ├── patches.py │ │ ├── proc.py │ │ ├── servers.py │ │ ├── syslog.py │ │ ├── syslog_handlers.py │ │ └── syslog_info.py │ └── exec/ │ ├── __init__.py │ ├── chaperone.py │ ├── envcp.py │ ├── sdnotify.py │ ├── sdnotify_exec.py │ └── telchap.py ├── doc/ │ ├── .gitignore │ ├── Makefile │ ├── docserver/ │ │ ├── README │ │ ├── build/ │ │ │ ├── Dockerfile │ │ │ └── install.sh │ │ ├── build.sh │ │ ├── chaperone.d/ │ │ │ ├── 010-start.conf │ │ │ └── 120-apache2.conf │ │ ├── etc/ │ │ │ ├── apache2.conf │ │ │ └── init.sh │ │ └── run.sh │ └── source/ │ ├── _static/ │ │ └── custom.css │ ├── _templates/ │ │ └── layout.html │ ├── conf.py │ ├── guide/ │ │ ├── chap-docker-simple.rst │ │ ├── chap-docker-smaller.rst │ │ ├── chap-docker.rst │ │ ├── chap-intro.rst │ │ ├── chap-other.rst │ │ └── chap-using.rst │ ├── includes/ │ │ ├── defs.rst │ │ └── incomplete.rst │ ├── index.rst │ ├── ref/ │ │ ├── command-line.rst │ │ ├── config-format.rst │ │ ├── config-global.rst │ │ ├── config-logging.rst │ │ ├── config-service.rst │ │ ├── config.rst │ │ ├── env.rst │ │ ├── index.rst │ │ ├── utilities.rst │ │ └── utility-envcp.rst │ └── status.rst ├── samples/ │ ├── README │ ├── chaperone-devbase/ │ │ ├── Dockerfile │ │ ├── apps/ │ │ │ ├── bin/ │ │ │ │ └── README │ │ │ ├── chaperone.d/ │ │ │ │ └── 010-start.conf │ │ │ ├── etc/ │ │ │ │ ├── README │ │ │ │ └── init.sh │ │ │ └── init.d/ │ │ │ └── README │ │ ├── build-image.sh │ │ └── install.sh │ ├── chaperone-lamp/ │ │ ├── Dockerfile │ │ ├── apps/ │ │ │ ├── chaperone.d/ │ │ │ │ ├── 105-mysqld.conf │ │ │ │ └── 120-apache2.conf │ │ │ ├── etc/ │ │ │ │ ├── apache2.conf │ │ │ │ └── mysql/ │ │ │ │ ├── my.cnf │ │ │ │ └── start_mysql.sh │ │ │ ├── init.d/ │ │ │ │ ├── mysql.sh │ │ │ │ └── phpmyadmin.sh │ │ │ └── www/ │ │ │ ├── default/ │ │ │ │ └── index.php │ │ │ └── sites.d/ │ │ │ └── default.conf │ │ ├── build-image.sh │ │ └── install.sh │ ├── docsample/ │ │ ├── Dockerfile │ │ ├── README │ │ └── chaperone.conf │ └── setup-bin/ │ ├── build │ ├── ct_setproxy │ └── dot.bashrc ├── sandbox/ │ ├── .gitignore │ ├── .shinit │ ├── README │ ├── bare_startup.sh │ ├── bareimage/ │ │ ├── Dockerfile │ │ └── install-bareimage.sh │ ├── bash.bashrc │ ├── bin/ │ │ ├── chaperone │ │ ├── cps │ │ ├── fakeentry │ │ └── repeat │ ├── centos.d/ │ │ ├── apache.conf │ │ ├── app.conf │ │ ├── cron.conf │ │ └── sys1.conf │ ├── distserv/ │ │ ├── chaperone.d/ │ │ │ ├── 005-config.conf │ │ │ ├── 010-start.conf │ │ │ └── 120-apache2.conf │ │ ├── etc/ │ │ │ └── apache2.conf │ │ └── run.sh │ ├── etc/ │ │ ├── apache2.conf │ │ └── makezombie.conf │ ├── test.d/ │ │ ├── apache.conf │ │ ├── cron.conf │ │ └── sys1.conf │ ├── testbare │ ├── testcent │ ├── testdock │ ├── testimage │ ├── testvar │ └── user.d/ │ └── sys1.conf ├── setup.py └── tests/ ├── .gitignore ├── README.md ├── bin/ │ ├── chaperone │ ├── daemon │ 
├── daemonutil.py │ ├── envcp │ ├── expect-lite-command-run │ ├── expect-lite-image-run │ ├── expect-test-command │ ├── expect-test-image │ ├── get-serial │ ├── is-running │ ├── kill-from-pidfile │ ├── logecho │ ├── proctool │ ├── read_from_port │ ├── sdnotify │ ├── sdnotify-exec │ ├── talkback │ ├── telchap │ └── test-driver ├── el-tests/ │ ├── basic-1/ │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ └── test-002.elt │ ├── cron-1/ │ │ ├── chaperone.conf │ │ ├── simulate-rotate.sh │ │ ├── test-001.elt │ │ ├── test-004.elt │ │ ├── test-005.elt │ │ ├── test-006.elt │ │ ├── test-007.elt │ │ └── test-008.elt │ ├── exitkills-1/ │ │ ├── chaperone.conf │ │ └── test-001.elt │ ├── fork-1/ │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ ├── test-001b.elt │ │ ├── test-003.elt │ │ └── test-004.elt │ ├── inetd-1/ │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ └── test-002.elt │ ├── notify-1/ │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ ├── test-001b.elt │ │ ├── test-001c.elt │ │ ├── test-001d.elt │ │ └── test-001e.elt │ ├── simple-1/ │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ ├── test-002.elt │ │ ├── test-003.elt │ │ └── test-004.elt │ └── simple-2/ │ ├── chaperone.conf │ ├── test-001.elt │ ├── test-002.elt │ ├── test-003.elt │ └── test-004.elt ├── env_expand.py ├── env_parse.py ├── events.py ├── prefix.py ├── run-all-tests.sh ├── run-el.sh ├── run-shell.sh ├── service_order.py └── syslog_spec.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ build/* chaperone.egg* dist/* ================================================ FILE: CHANGELOG.md ================================================ ## 0.3.00 (2015-10-04) This is a major release that adds a number of important features and refinements. Most importantly, a new automated test harness that simulates various process mixes has been added to the release process to assure that Chaperone manages processes in a consistent and reliable way from release to release. In addition, Chaperone now recognizes `NOTIFY_SOCKET` upon start-up, and will inform the host's `systemd` of the status of the container. This adds to Chaperone's existing support for notify-type processes within the container. This means that container designers can choose any of a number of methods of signalling process readiness inside the container while Chaperone will translate those actions into suitable `systemd` notifications for the host. This version is completely backward-compatible with older Chaperone versions. Enhancements: - Chaperone will recognize the `NOTIFY_SOCKET` environment variable if passed upon start-up and provide full `systemd`-compatible notifications to the host. - The [detect_exit](http://garywiz.github.io/chaperone/ref/config-global.html#settings-detect-exit) global setting, which defaults to `true`, tells Chaperone to attempt to determine when all processes have completed and automatically terminate the container. This was the previous default behavior, but the new setting provides flexibility for containers which remain dormant until processes are started manually. - There is now a `telchap shutdown` command which provides orderly container shutdown from scripts.
- Added the `sdnotify-exec` utility, a multi-purpose wrapper which can be used to proxy `NOTIFY_SOCKET` communication to the host, or to determine whether a container has started properly even outside of `systemd` contexts. Refinements: - Exit detection is now smarter about `cron` and `inetd` jobs and will not cause container exit if either of those types has scheduled operations which have not yet been triggered. - The [--disable-services](http://garywiz.github.io/chaperone/ref/command-line.html#option-disable-services) switch now truly disables services rather than not defining them. Therefore, services in such containers can now be started manually. - Cron-type services now have better-defined behavior for `telchap stop`, which will unschedule the service, and `telchap reset`, which will merely kill the current job and reschedule another. - If Chaperone `notify`-type services signal with `ERRNO=n`, then Chaperone will intelligently pass this error number up to `systemd` if the error was the direct cause of container termination; otherwise it is noted in the logs and `systemd` won't find out about it. ## 0.2.40 (2015-09-08) Enhancements: - Both `uid` and `gid` can be specified using the path-format of the [--create-user](http://garywiz.github.io/chaperone/ref/command-line.html#option-create-user) command-line switch. Refinements: - The `${ENV:-foo}` expansion format now behaves like `bash` where 'foo' is the result if the variable `ENV` is undefined or null (blank). Previously, it required that the variable be undefined. This behavior is now consistent throughout all expansion operators. - Improved the environment expansion code to handle outlying cases, as well as be significantly more readable. Used coverage analysis to improve unit test coverage for complex expansions involving recursion. Bug fixes: - Newer versions of Python's `asyncio` (present in some distros) could hang when starting an **inetd**-style socket process. ## 0.2.37 (2015-08-24) Enhancements: - Add support for **inetd**-compatible dynamic TCP socket connections. See the [port configuration parameter](http://garywiz.github.io/chaperone/ref/config-service.html#service-port) for a complete description of this feature. - Added [_CHAP_SERVICE_SERIAL](http://garywiz.github.io/chaperone/ref/env.html#env-chap-service-serial) and [_CHAP_SERVICE_TIME](http://garywiz.github.io/chaperone/ref/env.html#env-chap-service-time) environment variables to provide useful information to 'cron' and 'inetd' services which may execute multiple times. - Added the ability to add a `gid` number to the path-based format of the [--create-user](http://garywiz.github.io/chaperone/ref/command-line.html#option-create-user) command-line switch. Bug Fixes: - Fixed `telchap stop` so that it no longer causes service restarts. - Improved the service restart logic to handle a wider variety of service failure situations. ## 0.2.31 (2015-08-11) Enhancements: - Add support for --create-user name:/path so that user identity can be based upon the permissions set for a given path. This helps work around the file permission issues under OSX/VirtualBox where you can't really modify the mounted file permissions and instead "get what you get". ## 0.2.30 (2015-08-07) Enhancements: - Add support for --archive/-a to envcp. ## 0.2.29 (2015-08-05) Refinements: - Allow backslash-escaping of VBAR construct contents in the environment-variable if-then-else construct.
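To make the `${ENV:-foo}` refinement described in 0.2.40 above concrete, here is a minimal Python sketch of the documented rule (an illustration only, not chaperone's actual implementation, which lives in `chaperone/cutil/env.py`):

    import os

    def expand_with_default(name, default):
        # ${NAME:-default}: 'default' is used when NAME is undefined OR null (blank)
        value = os.environ.get(name)
        return default if value is None or value == "" else value

    os.environ["EMPTY"] = ""
    assert expand_with_default("UNDEFINED", "foo") == "foo"  # undefined -> default
    assert expand_with_default("EMPTY", "foo") == "foo"      # blank -> default (new in 0.2.40)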
## 0.2.28 (2015-08-03) Refinements: - Create a special-case syntax for shell escapes: ``$(`shell-command`)`` mainly to assure that such syntaxes are properly supported instead of being expanded as a side-effect. Previously, the syntax above would treat the result of the command as the name of an environment variable, and since it was not found, would insert the results. Since it was a useful trick, formalizing the use and eliminating edge cases was important. - Disabled shell escapes by default in ``envcp`` and added the ``--shell-enable`` switch to enable them. - Added further documentation about shell escapes to clarify exactly how they work and how they should be used. ## 0.2.27 (2015-08-01) Enhancements: - Added documentation for ``envcp`` in the new utilities section of the documentation. - Enhanced environment-variable expansions so they are smart about nesting. - Fixed syslog receiver so that trailing newlines are stripped (programs like ``sudo`` and ``openvpn`` terminate their log lines this way, even though it is a questionable practice). ## 0.2.26 (2015-07-28) Enhancements: - Added the ``:/`` regex substitution expansion option, which provides a more extensive and useful feature set than the bash-compatible options. - Updated the documentation to reflect the new expansion option and added a footnote about bash compatibility. ## 0.2.25 (2015-07-27) Enhancements: - Added the ``:?`` and ``:|`` environment variable expansion options. The first works similarly to bash and raises an error if a variable is not defined. The second adds more versatility to expansions by allowing the expansion to depend upon the particular value of a variable. - Added documentation for the above. ## 0.2.24 (2015-07-27) Bug Fixes: - Made `setproctitle` an optional install so that `--no-install-recommends` can be used on `apt-get` installs to streamline image size ([#1, @mc0e](https://github.com/garywiz/chaperone/issues/1)) Other: - PyPI distribution is no longer done in "wheel" format, since that limits the ability to include optional dependencies. Source format is used instead. ================================================ FILE: LICENSE ================================================ Copyright (c) 2015, Gary J. Wisniewski Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README ================================================ Chaperone is a lean, full-featured top-level system manager, similar to init, systemd, and others, but designed for lean container environments like Docker. It is a single, small program which provides process clean-up, rudimentary logging, and service management without the overhead of additional complex configuration.
================ ======================================================
Documentation    http://garywiz.github.io/chaperone
chaperone Source http://github.com/garywiz/chaperone
pypi link        http://pypi.python.org/pypi/chaperone
================ ======================================================
================================================ FILE: README.md ================================================ # ![](https://s.gravatar.com/avatar/62c4c783c4d7233c73f3a114578df650.jpg?s=50) Chaperone [![Gitter](https://badges.gitter.im/Join_Chat.svg)](https://gitter.im/garywiz/chaperone?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![PyPI version](https://badge.fury.io/py/chaperone.svg)](https://badge.fury.io/py/chaperone) Chaperone is a lean init-style startup manager for Docker-like containers. It runs as a single lightweight, full-featured process at the root of a docker container tree and provides all of the following functionality, plus much more: * Monitoring for all processes in the container, automatically shutting down the container when the last process exits. * A complete, configurable syslog facility built in and provided on /dev/log so daemons and other services can have output captured. Configurable to handle log-file rotation, duplication to stdout/stderr, and full Linux logging facility and severity support. No syslog daemon is required in your container. * The ability to start up system services in dependency order, with per-service environment variables, restart options, and stdout/stderr capture either to the log service or stdout. * A built-in cron scheduling service. * Emulation of systemd notifications (sd_notify) so services can post ready and status notifications to chaperone. * Process monitoring and zombie elimination, along with organized system shutdown to assure all daemons shut down gracefully. * The ability to have an optional controlling process, specified on the docker command line, to simplify creating containers which support both development and production modes. * Complete configuration using a ``chaperone.d`` directory which can be located in various places, and even allows different configurations within the container, triggered based upon which user is selected at start-up. * Default behavior designed out-of-the-box to work with simple Docker containers for quick start-up of lean containers. * More... If you want to try it out quickly, the best place to start is the [chaperone-docker](https://github.com/garywiz/chaperone-docker) repository page. There is a quick section called "Try it out" that uses images available now on Docker Hub. For full details of features and usage: [see the documentation](http://garywiz.github.io/chaperone/index.html). There is some debate about whether docker containers should be transformed into complete systems (so-called "fat containers"). However, it is clear that many containers run one or more services to provide a single "composite feature", and that such containers need a special, more streamlined approach to managing a number of common daemons. Chaperone is the best answer I've come up with so far, and was inspired by the [Phusion baseimage-docker](http://phusion.github.io/baseimage-docker/) approach. However, unlike the Phusion image, it does not require adding separate daemons for logging or system services (such as runit). Chaperone is designed to be self-contained. Status ------ Chaperone is now stable and ready for production.
If you are currently starting up your container services with Bash scripts, Chaperone is probably a much better choice. Full status is [now part of the documentation](http://garywiz.github.io/chaperone/status.html). Downloading and Installing -------------------------- The easiest way to install ``chaperone`` is using ``pip`` from the https://pypi.python.org/pypi/chaperone package: # Ubuntu or Debian prerequisites... apt-get install python3-pip # chaperone installation (may be all you need) pip3 install chaperone License ------- Copyright (c) 2015, Gary J. Wisniewski Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: chaperone/__init__.py ================================================ # Placeholder ================================================ FILE: chaperone/cproc/__init__.py ================================================ # Placeholder from chaperone.cproc.process_manager import TopLevelProcess ================================================ FILE: chaperone/cproc/client.py ================================================ import asyncio class CommandClient(asyncio.Protocol): @classmethod def sendCommand(cls, cmd): loop = asyncio.get_event_loop() coro = loop.create_unix_connection(lambda: CommandClient(cmd, loop), path = "/dev/chaperone.sock") (transport, protocol) = loop.run_until_complete(coro) loop.run_forever() loop.close() return protocol.result def __init__(self, message, loop): self.message = message self.loop = loop self.result = None def connection_made(self, transport): transport.write(self.message.encode()) def data_received(self, data): msg = data.decode() lines = msg.split("\n") error = None if lines[0] in {'COMMAND-ERROR', 'RESULT'}: self.result = "\n".join(lines[1:]) else: error = "Unexpected response from chaperone: " + str(msg) if error: raise Exception(error) def connection_lost(self, exc): self.loop.stop() ================================================ FILE: chaperone/cproc/commands.py ================================================ import os import asyncio import stat import shlex from functools import partial from docopt import docopt from chaperone.cutil.servers import Server, ServerProtocol from chaperone.cutil.misc import maybe_remove from chaperone.cutil.logging import debug, warn, info import chaperone.cutil.syslog_info as syslog_info COMMAND_DOC = """ Usage: telchap status telchap loglevel [<level>] telchap stop [--force] [--wait] [--disable] [<name> ...] telchap start [--force] [--wait] [--enable] [<name> ...] telchap reset [--force] [--wait] [<name> ...] telchap enable [<name> ...] telchap disable [<name> ...]
telchap dependencies telchap shutdown [<delay>] """ CHAP_FIFO = "/dev/chaperone" CHAP_SOCK = "/dev/chaperone.sock" class _BaseCommand(object): command_name = "X" interactive_only = False interactive = False def match(self, opts): if isinstance(self.command_name, tuple): return all(opts.get(name, False) for name in self.command_name) return opts.get(self.command_name, False) @asyncio.coroutine def exec(self, opts, protocol): #result = yield from self.do_exec(opts, controller) #return str(result) self.interactive = protocol.interactive try: result = yield from self.do_exec(opts, protocol.owner.controller) return str(result) except Exception as ex: return "Command error: " + str(ex) STMSG = """ Running: {0.version} Uptime: {0.uptime} Managed processes: {1} ({2} enabled) """ class statusCommand(_BaseCommand): command_name = "status" interactive_only = True @asyncio.coroutine def do_exec(self, opts, controller): serv = controller.services msg = STMSG.format(controller, len(serv), len([s for s in serv.values() if s.enabled])) msg += "\nServices:\n\n" + str(serv.get_status_formatter().get_formatted_data()) + "\n" return msg class dependenciesCommand(_BaseCommand): command_name = "dependencies" interactive_only = True @asyncio.coroutine def do_exec(self, opts, controller): graph = controller.services.services_config.get_dependency_graph() return "\n".join(graph) class serviceReset(_BaseCommand): command_name = 'reset' @asyncio.coroutine def do_exec(self, opts, controller): wait = opts['--wait'] and self.interactive yield from controller.services.reset(opts['<name>'], force = opts['--force'], wait = wait) return "services reset." class serviceEnable(_BaseCommand): command_name = 'enable' @asyncio.coroutine def do_exec(self, opts, controller): yield from controller.services.enable(opts['<name>']) return "services enabled." class serviceDisable(_BaseCommand): command_name = 'disable' @asyncio.coroutine def do_exec(self, opts, controller): yield from controller.services.disable(opts['<name>']) return "services disabled." class serviceStart(_BaseCommand): command_name = 'start' @asyncio.coroutine def do_exec(self, opts, controller): wait = opts['--wait'] and self.interactive yield from controller.services.start(opts['<name>'], force = opts['--force'], wait = wait, enable = opts['--enable']) if wait: return "services started." return "service start-up queued." class serviceStop(_BaseCommand): command_name = 'stop' @asyncio.coroutine def do_exec(self, opts, controller): wait = opts['--wait'] and self.interactive yield from controller.services.stop(opts['<name>'], force = opts['--force'], wait = wait, disable = opts['--disable']) if wait: return "services stopped." return "services stopping." class loglevelCommand(_BaseCommand): command_name = "loglevel" @asyncio.coroutine def do_exec(self, opts, controller): lev = opts['<level>'] if lev is None: curlev = controller.force_log_level() if curlev is None: return "Forced Logging Level: NOT SET" try: pri = "*." + syslog_info.PRIORITY[curlev] except IndexError: pri = "Forced Logging Level: UNKNOWN" return pri if lev.startswith('*.'): lev = lev[2:] controller.force_log_level(lev) return "All logging set to include priorities >= *."
+ lev.lower() class shutdownCommand(_BaseCommand): command_name = "shutdown" @asyncio.coroutine def do_exec(self, opts, controller): delay = opts['<delay>'] if delay is None or delay.lower() == "now": delay = 0.1 message = "Shutting down now" else: try: delay = float(delay) except ValueError: return "Specified delay is not a valid decimal number: " + str(delay) message = "Shutting down in {0} seconds".format(delay) info("requested shutdown scheduled to occur in {0} seconds".format(delay)) asyncio.get_event_loop().call_later(delay, controller.kill_system) return message ## ## Register all commands here ## COMMANDS = ( loglevelCommand(), shutdownCommand(), statusCommand(), serviceStop(), serviceStart(), serviceReset(), serviceEnable(), serviceDisable(), dependenciesCommand(), ) class CommandProtocol(ServerProtocol): interactive = False @asyncio.coroutine def _interpret_command(self, msg): if not msg: return try: options = docopt(COMMAND_DOC, shlex.split(msg), help=False) except Exception as ex: result = "EXCEPTION\n" + str(ex) except SystemExit as ex: result = "COMMAND-ERROR\n" + str(ex) else: result = "?" for c in COMMANDS: if c.match(options) and (not c.interactive_only or self.interactive): result = yield from c.exec(options, self) break result = "RESULT\n" + result return result @asyncio.coroutine def _command_task(self, cmd, interactive = False): result = yield from self._interpret_command(cmd) if interactive: self.transport.write(result.encode()) self.transport.close() def data_received(self, data): if self.interactive: asyncio.async(self._command_task(data.decode(), True)) else: commands = data.decode().split("\n") for c in commands: asyncio.async(self._command_task(c)) class _InteractiveServer(Server): def _create_server(self): maybe_remove(CHAP_SOCK) return asyncio.get_event_loop().create_unix_server(CommandProtocol.buildProtocol(self, interactive=True), path=CHAP_SOCK) @asyncio.coroutine def server_running(self): os.chmod(CHAP_SOCK, 0o777) def close(self): super().close() maybe_remove(CHAP_SOCK) class CommandServer(Server): controller = None _fifoname = None _iserve = None def __init__(self, controller, filename = CHAP_FIFO, **kwargs): """ Creates a new command FIFO and socket. The controller is the object to which commands and interactions are directed, usually a chaperone.cproc.process_manager.TopLevelProcess.
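Two endpoints are created: the FIFO (/dev/chaperone) accepts newline-separated commands non-interactively, while the UNIX domain socket (/dev/chaperone.sock) serves interactive clients such as CommandClient in chaperone/cproc/client.py, which receive RESULT or COMMAND-ERROR replies.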
""" super().__init__(**kwargs) self.controller = controller self._fifoname = filename @asyncio.coroutine def server_running(self): self._iserve = _InteractiveServer() self._iserve.controller = self.controller # share this with our domain socket yield from self._iserve.run() def _open(self): name = self._fifoname maybe_remove(name) if not os.path.exists(name): os.mkfifo(name) if not stat.S_ISFIFO(os.stat(name).st_mode): raise TypeError("File is not a fifo: " + str(name)) os.chmod(name, 0o777) return open(os.open(name, os.O_RDWR|os.O_NONBLOCK)) def _create_server(self): return asyncio.get_event_loop().connect_read_pipe(CommandProtocol.buildProtocol(self), self._open()) def close(self): super().close() maybe_remove(CHAP_FIFO) if self._iserve: self._iserve.close() ================================================ FILE: chaperone/cproc/process_manager.py ================================================ import os import pwd import errno import asyncio import shlex import signal import datetime from functools import partial from time import time, sleep import chaperone.cutil.syslog_info as syslog_info from chaperone.cproc.commands import CommandServer from chaperone.cproc.version import DISPLAY_VERSION from chaperone.cproc.watcher import InitChildWatcher from chaperone.cproc.subproc import SubProcess, SubProcessFamily from chaperone.cutil.config import ServiceConfig from chaperone.cutil.env import Environment from chaperone.cutil.notify import NotifySink from chaperone.cutil.logging import warn, info, debug, error, set_log_level from chaperone.cutil.misc import lazydict, objectplus from chaperone.cutil.syslog import SyslogServer from chaperone.cutil.errors import get_errno_from_exception class CustomEventLoop(asyncio.SelectorEventLoop): def _make_socket_transport(self, sock, protocol, waiter=None, *, extra=None, server=None): """ Supports a special protocol method 'acquire_socket' which acceps only a socket. If it returns True, then the passed socket has been detached and no further action will be taken. This is to support inetd-style processes. 
""" if hasattr(protocol, 'acquire_socket') and protocol.acquire_socket(sock): if waiter: waiter.set_result(None) return None return super()._make_socket_transport(sock, protocol, waiter, extra=extra, server=server) asyncio.DefaultEventLoopPolicy._loop_factory = CustomEventLoop class TopLevelProcess(objectplus): send_sighup = False detect_exit = True _shutdown_timeout = None _ignore_signals = False _services_started = False _syslog = None _command = None _minimum_syslog_level = None _start_time = None _status_interval = None _family = None _exitcode = None _all_killed = False _killing_system = False _kill_future = None _config = None _pending = None _notify_enabled = False notify = None def __init__(self, config): self._config = config self._start_time = time() self._pending = set() self.notify = NotifySink() # whether or not we actually have a notify socket # wait at least 0.5 seconds, zero is totally pointless settings = config.get_settings() self._shutdown_timeout = settings.get('shutdown_timeout', 8) or 0.5 self.detect_exit = settings.get('detect_exit', True) self.enable_syslog = settings.get('enable_syslog', True) policy = asyncio.get_event_loop_policy() w = self._watcher = InitChildWatcher(onNoProcesses = self._queue_no_processes) policy.set_child_watcher(w) self.loop.add_signal_handler(signal.SIGTERM, self.kill_system) self.loop.add_signal_handler(signal.SIGINT, self._got_sigint) self._status_interval = settings.get('status_interval', 30) @property def debug(self): return asyncio.get_event_loop().get_debug() @debug.setter def debug(self, val): asyncio.get_event_loop().set_debug(val) @property def loop(self): return asyncio.get_event_loop() @property def system_alive(self): """ Returns true if the system is considered "alive" and new processes, restarts, and other normal operations should proceed. Generally, the system is alive until it is killed, but the process of shutting down the system may be complex and time consuming, and in the future there may be other factors which cause us to suspend normal system operation. """ return not self._killing_system @property def version(self): "Returns version identifier" return "chaperone version {0}".format(DISPLAY_VERSION) @property def uptime(self): return datetime.timedelta(seconds = time() - self._start_time) @property def services(self): return self._family def force_log_level(self, level = None): """ Specifies the *minimum* logging level that will be applied to all syslog entries. This is primarily useful for debugging, where you want to override any limitations imposed on log file entries. As a (convenient) side-effect, if the level is DEBUG, then debug features of both asyncio as well as chaperone will be enabled. If level is not provided, then returns the current setting. """ if level is None: return self._minimum_syslog_level levid = syslog_info.PRIORITY_DICT.get(level.lower(), None) if not levid: raise Exception("Not a valid log level: {0}".format(level)) set_log_level(levid) self._minimum_syslog_level = levid self.debug = (levid == syslog_info.LOG_DEBUG) if self._syslog: self._syslog.reset_minimum_priority(levid) info("Forcing all log output to '{0}' or greater", level) def _queue_no_processes(self): # Any output from dead processes won't get queued into the logs if we # don't return to the event loop. 
self.loop.call_later(0.05, self._no_processes) def _no_processes(self, ignore_service_state = False): if not (ignore_service_state or self._services_started): return # do not react during system initialization self._all_killed = True if not self._killing_system: if not self.detect_exit: return if self._family: ss = self._family.get_scheduled_services() if ss: warn("system will remain active since there are scheduled services: " + ", ".join(s.name for s in ss)) return # Passed all checks, now kill system self.notify.stopping() debug("Final termination phase.") self._services_started = False if self._kill_future and not self._kill_future.cancelled(): self._kill_future.cancel() self.activate(self._final_system_stop()) @asyncio.coroutine def _final_system_stop(self): yield from asyncio.sleep(0.1) if self._syslog: self._syslog.close() if self._command: self._command.close() self._cancel_pending() self.loop.stop() def _got_sigint(self): print("\nCtrl-C ... killing chaperone.") self.kill_system(4, True) def signal_ready(self): """ Tells any notify listener that the system is ready. Does nothing if the system is dying due to errors, or if a kill is in progress. """ if not self._services_started or self._killing_system: return self.notify.ready() # This is the time to set up the status monitor if self._status_interval and self._family and self._notify_enabled: self.activate(self._report_status()) @asyncio.coroutine def _report_status(self): while self._status_interval: if self._family: self.notify.status(self._family.get_status()) yield from asyncio.sleep(self._status_interval) def kill_system(self, errno = None, force = False): """ Systematically shuts down the system. With the 'force' argument set to true, does so even if a kill is already in progress. """ if force: self._services_started = True elif self._killing_system: return if self._exitcode is None and errno is not None: self._exitcode = 1 # default exit for an error self.notify.error(errno) warn("Request made to kill system." + ((force and " (forced)") or "")) self._killing_system = True self._kill_future = asyncio.async(self._kill_system_co()) def _cancel_pending(self): "Cancel any pending activated tasks" for p in list(self._pending): if not p.cancelled(): p.cancel() @asyncio.coroutine def _kill_system_co(self): self.notify.stopping() self._cancel_pending() # Tell the family it's been nice. It's unlikely we won't have a process family, but # it's optional, so we should handle the situation. wait_done = False # indicates if shutdown_timeout has expired if self._family: for f in self._family.values(): yield from f.final_stop() # let normal shutdown happen if self._watcher.number_of_waiters > 0 and self._shutdown_timeout: debug("still have {0} waiting, sleeping for shutdown_timeout={1}".format(self._watcher.number_of_waiters, self._shutdown_timeout)) yield from asyncio.sleep(self._shutdown_timeout) wait_done = True try: os.kill(-1, signal.SIGTERM) # first try a sig term if self.send_sighup: os.kill(-1, signal.SIGHUP) except ProcessLookupError: debug("No processes remain when attempting to kill system, just stop.") self._no_processes(True) return if wait_done: # give a short wait just so the signals fire yield from asyncio.sleep(1) # these processes are unknowns else: yield from asyncio.sleep(self._shutdown_timeout) if self._all_killed: return info("Some processes remain after {0}secs. 
Forcing kill".format(self._shutdown_timeout)) try: os.kill(-1, signal.SIGKILL) except ProcessLookupError: debug("No processes when attempting to force quit") self._no_processes(True) return def activate_result(self, future): self._pending.discard(future) def activate(self, cr): future = asyncio.async(cr) future.add_done_callback(self.activate_result) self._pending.add(future) return future def _system_coro_check(self, f): if f.exception(): error("system startup cancelled due to error: {0}".format(f.exception())) self.kill_system(get_errno_from_exception(f.exception())) def _system_started(self, startup, future=None): if future and not future.cancelled() and future.exception(): self._system_coro_check(future) return info(self.version + ", ready.") if startup: future = self.activate(startup) future.add_done_callback(self._system_coro_check) @asyncio.coroutine def _start_system_services(self): self._notify_enabled = yield from self.notify.connect() if self.enable_syslog: self._syslog = SyslogServer() self._syslog.configure(self._config, self._minimum_syslog_level) try: yield from self._syslog.run() except PermissionError as ex: self._syslog = None warn("syslog service cannot be started: {0}", ex) else: self._syslog.capture_python_logging() info("Switching all chaperone logging to /dev/log") self._command = CommandServer(self) try: yield from self._command.run() except PermissionError as ex: self._command = None warn("command service cannot be started: {0}", ex) def run_event_loop(self, startup_coro = None, exit_when_done = True): """ Sets up the event loop and runs it, setting up basic services such as syslog as well as the command services sockets. Then, calls the startup coroutine (if any) to tailor the environment and start up other services as needed. """ initfuture = asyncio.async(self._start_system_services()) initfuture.add_done_callback(lambda f: self._system_started(startup_coro, f)) self.loop.run_forever() self.loop.close() if exit_when_done: exit(self._exitcode or 0) @asyncio.coroutine def run_services(self, extra_services, disable_others = False): "Run all services." # First, determine our overall configuration for the services environment. services = self._config.get_services() if extra_services: services = services.deepcopy() if disable_others: for s in services.values(): s.enabled = False for s in extra_services: services.add(s) family = self._family = SubProcessFamily(self, services) tried_any = False errno = None try: tried_any = yield from family.run() except asyncio.CancelledError: pass finally: self._services_started = True if self.detect_exit: if not tried_any: warn("No service startups attempted (all disabled?) 
- exiting due to 'detect_exit=true'") self.kill_system() else: self._watcher.check_processes() ================================================ FILE: chaperone/cproc/pt/__init__.py ================================================ # Placeholder from chaperone.cproc.process_manager import TopLevelProcess ================================================ FILE: chaperone/cproc/pt/cron.py ================================================ import asyncio from aiocron import crontab from chaperone.cutil.logging import error, warn, debug, info from chaperone.cutil.syslog_info import LOG_CRON from chaperone.cproc.subproc import SubProcess from chaperone.cutil.errors import ChParameterError _CRON_SPECIALS = { '@yearly': '0 0 1 1 *', '@annually': '0 0 1 1 *', '@monthly': '0 0 1 * *', '@weekly': '0 0 * * 0', '@daily': '0 0 * * *', '@hourly': '0 * * * *', } class CronProcess(SubProcess): syslog_facility = LOG_CRON _cron = None _fut_monitor = None def __init__(self, service, family=None): super().__init__(service, family) if not self.interval: raise ChParameterError("interval= property missing, required for cron service '{0}'".format(self.name)) # Support specials with or without the @ real_interval = _CRON_SPECIALS.get(self.interval) or _CRON_SPECIALS.get('@'+self.interval) or self.interval # make a status note self.note = "{0} ({1})".format(self.interval, real_interval) if self.interval != real_interval else real_interval self._cron = crontab(real_interval, func=self._cron_hit, start=False) def default_status(self): if self._cron.handle: return 'waiting' return None @property def scheduled(self): return self._cron and self._cron.handle @asyncio.coroutine def start(self): """ Takes over startup and sets up our cron loop to handle starts instead. """ if not self.enabled or self._cron.handle: return self.start_attempted = True # Start up cron try: self._cron.start() except Exception: raise ChParameterError("not a valid cron interval specification, '{0}'".format(self.interval)) self.loginfo("cron service {0} scheduled using interval spec '{1}'".format(self.name, self.interval)) @asyncio.coroutine def _cron_hit(self): if self.enabled: if not self.family.system_alive: return if self.running: self.logwarn("cron service {0} is still running when next interval expired, will not run again", self.name) else: self.loginfo("cron service {0} running CMD ( {1} )", self.name, self.command) try: yield from super().start() except Exception as ex: self.logerror(ex, "cron service {0} failed to start: {1}", self.name, ex) yield from self.reset() @property def stoppable(self): return self.scheduled @asyncio.coroutine def stop(self): self._cron.stop() yield from super().stop() @asyncio.coroutine def process_started_co(self): if self._fut_monitor and not self._fut_monitor.cancelled(): self._fut_monitor.cancel() self._fut_monitor = None # We have a successful start. Monitor this service.
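# (asyncio.async() is the pre-Python-3.4.4 spelling of what later became asyncio.ensure_future().)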
self._fut_monitor = asyncio.async(self._monitor_service()) self.add_pending(self._fut_monitor) @asyncio.coroutine def _monitor_service(self): result = yield from self.wait() if isinstance(result, int) and result > 0: yield from self._abnormal_exit(result) else: yield from self.reset() ================================================ FILE: chaperone/cproc/pt/forking.py ================================================ import asyncio from chaperone.cproc.subproc import SubProcess from chaperone.cutil.errors import ChProcessError class ForkingProcess(SubProcess): defer_exit_kills = True @asyncio.coroutine def process_started_co(self): result = yield from self.timed_wait(self.process_timeout, self._exit_timeout) if result is not None and not result.normal_exit: if self.ignore_failures: self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result)) else: raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result) yield from self.wait_for_pidfile() def _exit_timeout(self): service = self.service message = "forking service '{1}' did not exit after {2} second(s), {3}".format( service.type, service.name, self.process_timeout, "proceeding due to 'ignore_failures=True'" if service.ignore_failures else "terminating due to 'ignore_failures=False'") if not service.ignore_failures: self.terminate() raise Exception(message) ================================================ FILE: chaperone/cproc/pt/inetd.py ================================================ import os import asyncio from copy import copy from chaperone.cutil.logging import error, warn, debug, info from chaperone.cproc.subproc import SubProcess from chaperone.cutil.syslog_info import LOG_DAEMON from chaperone.cutil.errors import ChParameterError from chaperone.cutil.servers import Server, ServerProtocol class InetdServiceProtocol(ServerProtocol): _fd = None def acquire_socket(self, sock): # Prepare the socket so it's inheritable sock.setblocking(True) self._fd = sock.detach() sock.close() future = asyncio.async(self.start_socket_process(self._fd)) future.add_done_callback(self._done) self.process.counter += 1 return True def _done(self, f): # Close the socket regardless if self._fd is not None: os.close(self._fd) @asyncio.coroutine def start_socket_process(self, fd): process = self.process service = process.service if not process.family.system_alive: process.logdebug("{0} received connection on port {1}; ignored, system no longer alive".format(service.name, service.port)) return process.logdebug("{0} received connection on port {2}; attempting start '{1}'... 
".format(service.name, " ".join(service.exec_args), service.port)) kwargs = {'stdout': fd, 'stderr': fd, 'stdin': fd} if service.directory: kwargs['cwd'] = service.directory env = process.get_expanded_environment().get_public_environment() if service.debug: if not env: process.logdebug("{0} environment is empty", service.name) else: process.logdebug("{0} environment:", service.name) for k,v in env.items(): process.logdebug(" {0} = '{1}'".format(k,v)) create = asyncio.create_subprocess_exec(*service.exec_args, preexec_fn=process._setup_subprocess, env=env, **kwargs) proc = self._proc = yield from create self.pid = proc.pid process.logdebug("{0} instance connected to port {1}", service.name, service.port) process.add_process(proc) yield from proc.wait() process.remove_process(proc) if not proc.returncode.normal_exit: self.logerror("{2} exit status for pid={0} is '{1}'".format(proc.pid, proc.returncode, service.name)) class InetdService(Server): def __init__(self, process): super().__init__() self.process = process def _create_server(self): return asyncio.get_event_loop().create_server(InetdServiceProtocol.buildProtocol(self, process=self.process), '0.0.0.0', self.process.port) class InetdProcess(SubProcess): syslog_facility = LOG_DAEMON server = None counter = 0 def __init__(self, service, family=None): super().__init__(service, family) self._proclist = set() if not service.port: raise ChParameterError("inetd-type service {0} requires 'port=' parameter".format(self.name)) def add_process(self, proc): self._proclist.add(proc) def remove_process(self, proc): self._proclist.discard(proc) @property def scheduled(self): return self.server is not None @property def note(self): if self.server: msg = "waiting on port " + str(self.port) if self.counter: msg += "; req recvd = " + str(self.counter) if len(self._proclist): msg += "; running = " + str(len(self._proclist)) return msg @asyncio.coroutine def start_subprocess(self): """ Takes over process startup and sets up our own server socket. 
""" self.server = InetdService(self) yield from self.server.run() self.loginfo("inetd service {0} listening on port {1}".format(self.name, self.port)) @asyncio.coroutine def reset(self, dependents = False, enable = False, restarts_ok = False): if self.server: self.server.close() self.server = None plist = copy(self._proclist) if plist: self.logwarn("{0} terminating {1} processes on port {2} that are still running".format(self.name, len(plist), self.port)) for p in plist: p.terminate() yield from super().reset(dependents, enable, restarts_ok) @asyncio.coroutine def final_stop(self): yield from self.reset() ================================================ FILE: chaperone/cproc/pt/notify.py ================================================ import asyncio import socket import re from functools import partial from chaperone.cutil.errors import ChProcessError from chaperone.cutil.proc import ProcStatus from chaperone.cutil.notify import NotifyListener from chaperone.cproc.subproc import SubProcess class NotifyProcess(SubProcess): process_timeout = 300 defer_exit_kills = True _fut_monitor = None _listener = None _ready_event = None def _close_listener(self): if self._listener: self._listener.close() self._listener = None @asyncio.coroutine def process_prepare_co(self, environ): if not self._listener: self._listener = NotifyListener('@/chaperone/' + self.service.name, onNotify = self._notify_received) yield from self._listener.run() environ['NOTIFY_SOCKET'] = self._listener.socket_name # Now, set up an event which is triggered upon ready self._ready_event = asyncio.Event() def _notify_timeout(self): service = self.service message = "notify service '{1}' did not receive ready notification after {2} second(s), {3}".format( service.type, service.name, self.process_timeout, "proceeding due to 'ignore_failures=True'" if service.ignore_failures else "terminating due to 'ignore_failures=False'") if not service.ignore_failures: self.terminate() raise ChProcessError(message) @asyncio.coroutine def reset(self, dependents = False, enable = False, restarts_ok = False): yield from super().reset(dependents, enable, restarts_ok) self._close_listener() @asyncio.coroutine def final_stop(self): yield from super().final_stop() self._close_listener() @asyncio.coroutine def process_started_co(self): if self._fut_monitor and not self._fut_monitor.cancelled(): self._fut_monitor.cancel() self._fut_monitor = None yield from self.do_startup_pause() self._fut_monitor = asyncio.async(self._monitor_service()) self.add_pending(self._fut_monitor) if self._ready_event: try: if not self.process_timeout: raise asyncio.TimeoutError() yield from asyncio.wait_for(self._ready_event.wait(), self.process_timeout) except asyncio.TimeoutError: self._ready_event = None self._notify_timeout() else: if self._ready_event: self._ready_event = None rc = self.returncode if rc is not None and not rc.normal_exit: if self.ignore_failures: warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, rc)) else: raise ChProcessError("{0} failed with reported error {1}".format(self.name, rc), resultcode = rc) @asyncio.coroutine def _monitor_service(self): """ We only care about errors here. The rest is dealt with by having notifications occur. 
""" result = yield from self.wait() if isinstance(result, int) and result > 0: self._setready() # simulate ready self._ready_event = None self._close_listener() yield from self._abnormal_exit(result) def _notify_received(self, which, var, value): callfunc = getattr(self, "notify_" + var.upper(), None) #print("NOTIFY RECEIVED", var, value) if callfunc: callfunc(value) def _setready(self): if self._ready_event: self._ready_event.set() return True return False def notify_MAINPID(self, value): try: pid = int(value) except ValueError: self.logdebug("{0} got MAINPID={1}, but not a valid pid#", self.name, value) return self.pid = pid def notify_BUSERROR(self, value): code = ProcStatus(value) if not self._setready(): self.process_exit(code) else: self.returncode = code def notify_ERRNO(self, value): try: intval = int(value) except ValueError: self.logdebug("{0} got ERROR={1}, not a valid error code", self.name, value) return code = ProcStatus(intval << 8) if not self._setready(): self.process_exit(code) else: self.returncode = code def notify_READY(self, value): if value == "1": self._setready() def notify_STATUS(self, value): self.note = value @property def status(self): if self._ready_event: return "activating" return super().status ================================================ FILE: chaperone/cproc/pt/oneshot.py ================================================ import asyncio from chaperone.cproc.subproc import SubProcess from chaperone.cutil.errors import ChProcessError class OneshotProcess(SubProcess): process_timeout = 60.0 # default for a oneshot is 90 seconds @asyncio.coroutine def process_started_co(self): result = yield from self.timed_wait(self.process_timeout, self._exit_timeout) if result is not None and not result.normal_exit: if self.ignore_failures: warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result)) else: raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result) def _exit_timeout(self): service = self.service message = "oneshot service '{1}' did not exit after {2} second(s), {3}".format( service.type, service.name, self.process_timeout, "proceeding due to 'ignore_failures=True'" if service.ignore_failures else "terminating due to 'ignore_failures=False'") if not service.ignore_failures: self.terminate() raise Exception(message) ================================================ FILE: chaperone/cproc/pt/simple.py ================================================ import asyncio from chaperone.cproc.subproc import SubProcess class SimpleProcess(SubProcess): _fut_monitor = None @asyncio.coroutine def process_started_co(self): if self._fut_monitor and not self._fut_monitor.cancelled(): self._fut_monitor.cancel() self._fut_monitor = None # We wait a short time just to see if the process errors out immediately. This avoids a retry loop # and catches any immediate failures now. yield from self.do_startup_pause() # If there is a pidfile, sit here and wait for a bit yield from self.wait_for_pidfile() # We have a successful start. Monitor this service. 
self._fut_monitor = asyncio.async(self._monitor_service()) self.add_pending(self._fut_monitor) @asyncio.coroutine def _monitor_service(self): result = yield from self.wait() if isinstance(result, int) and result > 0: yield from self._abnormal_exit(result) ================================================ FILE: chaperone/cproc/subproc.py ================================================ import os import asyncio import shlex import importlib import signal import errno from functools import partial from time import time, sleep import chaperone.cutil.syslog_info as syslog_info from chaperone.cutil.env import Environment, ENV_SERIAL, ENV_SERVTIME from chaperone.cutil.logging import warn, info, debug, error from chaperone.cutil.proc import ProcStatus from chaperone.cutil.misc import lazydict, lookup_user, get_signal_name, executable_path from chaperone.cutil.errors import ChNotFoundError, ChProcessError, ChParameterError from chaperone.cutil.format import TableFormatter @asyncio.coroutine def _process_logger(stream, kind, service): name = service.name.replace('.service', '') while True: data = yield from stream.readline() if not data: return line = data.decode('ascii', 'ignore').rstrip() if not line: continue # ignore blank lines in stdout/stderr if kind == 'stderr': # we map to warning because stderr output is "to be considered" and not strictly # erroneous warn(line, program=name, pid=service.pid, facility=syslog_info.LOG_DAEMON) else: info(line, program=name, pid=service.pid, facility=syslog_info.LOG_DAEMON) class SubProcess(object): service = None # service object family = None process_timeout = 30.0 # process_timeout will be set to this unless it is overridden by # the service entry syslog_facility = None # specifies any additional syslog facility to use when using # logerror, logdebug, logwarn, etc... start_attempted = False # used to determine if a service is truly dormant defer_exit_kills = False # if true, then exit_kills will wait until a proper PID is returned # from a subprocess, then will kill when the real process exits error_count = 0 # counts errors for informational purposes _proc = None _pid = None # the pid, often associated with _proc, but not necessarily in the # case of notify processes _returncode = None # an alternate returncode, set with returncode property _exit_event = None # an event to be fired if an exit occurs, in the case of an # attached PID _orig_executable = None # original unexpanded exec_args[0] _pwrec = None # the pwrec looked up for execution user/group _cond_starting = None # a condition which, if present, indicates that this service is starting _cond_exception = None # exception which was raised during startup (for other waiters) _started = False # true if a start has occurred, either successful or not _restarts_allowed = None # number of starts permitted before we give up (if None then restarts allowed according to service def) _prereq_cache = None _procenv = None # process environment ready to be expanded _pending = None # pending futures _note = None # Class variables _cls_ptdict = lazydict() # dictionary of process types _cls_serial = 0 # serial number for process creation def __new__(cls, service, family=None): """ New Subprocesses are managed by subclasses derived from SubProcess so that complex process behavior can be isolated and loaded only when needed. That keeps this basic superclass logic less convoluted. 
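For example, a service of type 'cron' resolves to chaperone.cproc.pt.cron.CronProcess; the resolved class is cached in _cls_ptdict so each pt module is imported only once.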
""" # If we are trying to create a subclass, just inherit __new__ simply if cls is not SubProcess: return super(SubProcess, cls).__new__(cls) # Lookup and cache the class object used to create this type. stype = service.type ptcls = SubProcess._cls_ptdict.get(stype) if not ptcls: mod = importlib.import_module('chaperone.cproc.pt.' + stype) ptcls = SubProcess._cls_ptdict[stype] = getattr(mod, stype.capitalize() + 'Process') assert issubclass(ptcls, cls) return ptcls(service, family) def __init__(self, service, family=None): self.service = service self.family = family self._pending = set() if service.process_timeout is not None: self.process_timeout = service.process_timeout if not service.environment: self._procenv = Environment() else: self._procenv = service.environment if not service.exec_args: raise ChParameterError("No command or arguments provided for service") # If the service is enabled, assure we check for the presence of the executable now. This is # to catch any start-up situations (such as cron jobs without their executables being present). # However, we don't check this if a service is disabled. self._orig_executable = service.exec_args[0] if service.enabled: self._try_to_enable() def __getattr__(self, name): "Proxies value from the service description if we don't override them." return getattr(self.service, name) def __setattr__(self, name, value): """ Any service object attribute supercedes our own except for privates or those we keep separately, in which case there is a distinction. """ if name[0:0] != '_' and hasattr(self.service, name) and not hasattr(self, name): setattr(self.service, name, value) else: object.__setattr__(self, name, value) def _setup_subprocess(self): if self._pwrec: os.setgid(self._pwrec.pw_gid) os.setuid(self._pwrec.pw_uid) if self.setpgrp: os.setpgrp() if not self.directory: try: os.chdir(self._pwrec.pw_dir) except Exception as ex: pass return def _get_states(self): states = list() if self.started: states.append('started') if self.failed: states.append('failed') if self.ready: states.append('ready') if self.running: states.append('running') return ' '.join(states) # pid and returncode management @property def pid(self): return self._pid @pid.setter def pid(self, newpid): if self._pid is not None and newpid is not None and self._pid is not newpid: self.logdebug("{0} changing PID to {1} (from {2})", self.name, newpid, self._pid) try: pgid = os.getpgid(newpid) except ProcessLookupError as ex: raise ChProcessError("{0} attempted to attach the process with PID={1} but there is no such process". 
format(self.name, newpid), errno = ex.errno) self._attach_pid(newpid) self._pid = newpid @property def returncode(self): if self._returncode is not None: return self._returncode return self._proc and self._proc.returncode @returncode.setter def returncode(self, val): self._returncode = ProcStatus(val) self.logdebug("{0} got explicit return code '{1}'", self.name, self._returncode) # Logging methods which may do special things for this service def loginfo(self, *args, **kwargs): info(*args, facility=self.syslog_facility, **kwargs) def logerror(self, *args, **kwargs): self.error_count += 1 error(*args, facility=self.syslog_facility, **kwargs) def logwarn(self, *args, **kwargs): warn(*args, facility=self.syslog_facility, **kwargs) def logdebug(self, *args, **kwargs): debug(*args, facility=self.syslog_facility, **kwargs) @property def note(self): return self._note @note.setter def note(self, value): self._note = value @property def status(self): serv = self.service proc = self._proc rs = "" if serv.restart and self._restarts_allowed is not None and self._restarts_allowed > 0: rs = "+r#" + str(self._restarts_allowed) if self._cond_starting: return "starting" if proc: rc = self._returncode if self._returncode is not None else proc.returncode if rc is None: return "running" elif rc.normal_exit and self._started: return "started" elif rc: return rc.briefly + rs if not serv.enabled: return "disabled" return self.default_status() def default_status(self): if self.ready: return 'ready' return None @property def enabled(self): return self.service.enabled @enabled.setter def enabled(self, val): if val and not self.service.enabled: self._try_to_enable() elif not val: self.service.enabled = False def _try_to_enable(self): service = self.service if self._orig_executable: try: service.exec_args[0] = executable_path(self._orig_executable, service.environment.expanded()) except FileNotFoundError: if service.optional: service.enabled = False self.loginfo("optional service {0} disabled since '{1}' is not present".format(self.name, self._orig_executable)) return elif service.ignore_failures: service.enabled = False self.logwarn("(ignored) service {0} executable '{1}' is not present".format(self.name, self._orig_executable)) return raise ChNotFoundError("executable '{0}' not found".format(service.exec_args[0])) # Now we know this service is truly enabled, we need to assure its credentials # are correct. senv = service.environment if senv and senv.uid is not None and not self._pwrec: self._pwrec = lookup_user(senv.uid, senv.gid) service.enabled = True @property def scheduled(self): """ True if this is a process which WILL fire up a process in the future. A "scheduled" process does not include one which will be started manually, nor does it include processes which will be started due to dependencies. Processes like "cron" and "inetd" return True if they are active and may start processes in the future. """ return False @property def kill_signal(self): ksig = self.service.kill_signal if ksig is not None: return ksig return signal.SIGTERM @property def running(self): "True if this process has started, is running, and has a pid" return self._proc and self._proc.returncode is None @property def started(self): """ True if this process has started normally. It may have forked, or executed, or is scheduled. """ return self._started @property def stoppable(self): """ True if this process can be stopped.
By default, returns True if the service is started, but some job types such as cron and inetd may be stoppable even when processes themselves are not running. """ return self.started @property def failed(self): "True if this process has failed, either during startup or later." return ((self._returncode is not None and not self._returncode.normal_exit) or self._proc and (self._proc.returncode is not None and not self._proc.returncode.normal_exit)) @property def ready(self): """ True if this process is ready to run, or running. If not running, then to be ready to run, all prerequisites must also be ready. """ if not self.enabled or self.failed: return False if self.started: return True if any(p.enabled and not p.ready for p in self.prerequisites): return False return True @property def prerequisites(self): """ Return a list of prerequisite objects. Right now, these must be within our family but this may change, so don't refer to the family or the prerequisite list in the service directly. Use this instead. """ if self._prereq_cache is None: prereq = (self.family and self.service.prerequisites) or () prereq = self._prereq_cache = tuple(self.family[p] for p in prereq if p in self.family) return self._prereq_cache @asyncio.coroutine def start(self): """ Runs this service if it is enabled and has not already been started. Starts prerequisite services first. A service is considered started if a) It is enabled, and started up normally. b) It is disabled, and an attempt was made to start it. c) An error occurred, it did not start, but failures were an acceptable outcome and the service has not been reset since the errors occurred. """ service = self.service if self._started: self.logdebug("service {0} already started. further starts ignored.", service.name) return if not service.enabled: self.logdebug("service {0} not enabled, will be skipped", service.name) return else: self.logdebug("service {0} enabled, queueing start request", service.name) # If this service is already starting, then just wait until it completes. cond_starting = self._cond_starting if cond_starting: yield from cond_starting.acquire() yield from cond_starting.wait() cond_starting.release() # This is an odd situation. Since every waiter expects start() to succeed, or # raise an exception, we need to be sure we raise the exception that happened # in the original start() request. if self._cond_exception: raise self._cond_exception return cond_starting = self._cond_starting = asyncio.Condition() self._cond_exception = None # Now we can proceed self.start_attempted = True try: prereq = self.prerequisites if prereq: for p in prereq: yield from p.start() self.logdebug("service {0} prerequisites satisfied", service.name) if self.family: # idle only makes sense for families if "IDLE" in service.service_groups and service.idle_delay and not hasattr(self.family, '_idle_hit'): self.family._idle_hit = True self.logdebug("IDLE transition hit. delaying for {0} seconds", service.idle_delay) yield from asyncio.sleep(service.idle_delay) # STOP if the system is no longer alive because a prerequisite failed if not self.family.system_alive: return try: yield from self.start_subprocess() except Exception as ex: if service.ignore_failures: self.loginfo("service {0} ignoring failures. Exception: {1}", service.name, ex) else: self._cond_exception = ex self.logdebug("{0} received exception during attempted start.
Exception: {1}", service.name, ex) raise finally: self._started = True yield from cond_starting.acquire() cond_starting.notify_all() cond_starting.release() self._cond_starting = None self.logdebug("{0} notified waiters upon completion", service.name) def get_expanded_environment(self): SubProcess._cls_serial += 1 penv = self._procenv penv[ENV_SERIAL] = str(SubProcess._cls_serial) penv[ENV_SERVTIME] = str(int(time())) return penv.expanded() @asyncio.coroutine def start_subprocess(self): service = self.service self.logdebug("{0} attempting start '{1}'... ".format(service.name, " ".join(service.exec_args))) kwargs = dict() if service.stdout == 'log': kwargs['stdout'] = asyncio.subprocess.PIPE if service.stderr == 'log': kwargs['stderr'] = asyncio.subprocess.PIPE if service.directory: kwargs['cwd'] = service.directory env = self.get_expanded_environment() yield from self.process_prepare_co(env) if env: env = env.get_public_environment() if service.debug: if not env: self.logdebug("{0} environment is empty", service.name) else: self.logdebug("{0} environment:", service.name) for k,v in env.items(): self.logdebug(" {0} = '{1}'".format(k,v)) create = asyncio.create_subprocess_exec(*service.exec_args, preexec_fn=self._setup_subprocess, env=env, **kwargs) if service.exit_kills: self.logwarn("system will be killed when '{0}' exits", service.exec_args[0]) yield from asyncio.sleep(0.2) proc = self._proc = yield from create self.pid = proc.pid if service.stdout == 'log': self.add_pending(asyncio.async(_process_logger(proc.stdout, 'stdout', self))) if service.stderr == 'log': self.add_pending(asyncio.async(_process_logger(proc.stderr, 'stderr', self))) if service.exit_kills and not self.defer_exit_kills: self.add_pending(asyncio.async(self._wait_kill_on_exit())) yield from self.process_started_co() self.logdebug("{0} successfully started", service.name) @asyncio.coroutine def process_prepare_co(self, environment): pass @asyncio.coroutine def process_started_co(self): pass @asyncio.coroutine def wait_for_pidfile(self): """ If the pidfile option was specified, then wait until we find a valid pidfile, and register the new PID. This is not done automatically, but is implemented here as a utility for process types that need it. """ if not self.pidfile: return self.logdebug("{0} waiting for PID file: {1}".format(self.name, self.pidfile)) pidsleep = 0.02 # work incrementally up to no more than process_timeout minsleep = 3 expires = time() + self.process_timeout last_ex = None while time() < expires: if not self.family.system_alive: return yield from asyncio.sleep(pidsleep) # ramp up until we hit the minsleep ceiling pidsleep = min(pidsleep*2, minsleep) try: newpid = int(open(self.pidfile, 'r').read().strip()) except FileNotFoundError: continue except Exception as ex: # Don't raise this immediately. The service may create the file before writing the PID. last_ex = ChProcessError("{0} found pid file '{1}' but contents did not contain an integer".format( self.name, self.pidfile), errno = errno.EINVAL) continue self.pid = newpid return if last_ex is not None: raise last_ex raise ChProcessError("{0} did not find pid file '{1}' before {2}sec process_timeout expired".format( self.name, self.pidfile, self.process_timeout), errno = errno.ENOENT) @asyncio.coroutine def _wait_kill_on_exit(self): yield from self.wait() self._kill_system() def _attach_pid(self, newpid): """ Attach this process to a new PID, creating a condition which will be used by the child watcher to determine when the PID has exited. 
""" with asyncio.get_child_watcher() as watcher: watcher.add_child_handler(newpid, self._child_watcher_callback) self._exit_event = asyncio.Event() def _child_watcher_callback(self, pid, returncode): asyncio.get_event_loop().call_soon_threadsafe(self.process_exit, returncode) def process_exit(self, code): self.returncode = code if self._exit_event: self._exit_event.set() self._exit_event = None if self.exit_kills: self.logwarn("{0} terminated with exit_kills enabled", self.service.name); # Since we're dead, and the system is going away, disable any process management self._proc = None self.pid = None self._kill_system(); if code.normal_exit or self.kill_signal == code.signal: return asyncio.async(self._abnormal_exit(code)) @asyncio.coroutine def _abnormal_exit(self, code): service = self.service if service.exit_kills: self.logwarn("{0} terminated abnormally with {1}", service.name, code) return # A disabled service should not do recovery if not service.enabled: return if self._started and service.restart: if self._restarts_allowed is None: self._restarts_allowed = service.restart_limit if self._restarts_allowed > 0: self._restarts_allowed -= 1 controller = self.family.controller if controller.system_alive: if service.restart_delay: self.loginfo("{0} pausing between restart retries ({1} left)", service.name, self._restarts_allowed) yield from asyncio.sleep(service.restart_delay) if controller.system_alive: yield from self.reset() #yield from self.start() f = asyncio.async(self.start()) # queue it since we will just return here f.add_done_callback(self._restart_callback) return if service.ignore_failures: self.logdebug("{0} abnormal process exit ignored due to ignore_failures=true", service.name) yield from self.reset() return self.logerror("{0} terminated abnormally with {1}", service.name, code) def _restart_callback(self, fut): # Catches a restart result, reporting it as a warning, and either passing back to _abnormal_exit # or accepting glorious success. ex = fut.exception() if not ex: self.logdebug("{0} restart succeeded", self.name) else: self.logwarn("{0} restart failed: {1}", self.name, ex) asyncio.async(self._abnormal_exit(self._proc and self._proc.returncode)) def _kill_system(self): self.family.controller.kill_system() def add_pending(self, future): self._pending.add(future) future.add_done_callback(lambda f: self._pending.discard(future)) @asyncio.coroutine def reset(self, dependents = False, enable = False, restarts_ok = False): self.logdebug("{0} received reset", self.name) if self._exit_event: self.terminate() elif self._proc: if self._proc.returncode is None: self.terminate() yield from self.wait() self.pid = None self._proc = None self._started = False if restarts_ok: self._restarts_allowed = None if enable: self.enabled = True # If there is a pidfile, then remove it if self.pidfile: try: os.remove(self.pidfile) except Exception: pass # Reset any non-ready dependents if dependents: for p in self.prerequisites: if not p.ready and (enable or p.enabled): yield from p.reset(dependents, enable, restarts_ok) @asyncio.coroutine def stop(self): yield from self.reset(restarts_ok = True) @asyncio.coroutine def final_stop(self): "Called when the whole system is killed, but before drastic measures are taken." 
self._exit_event = None self.terminate() for p in list(self._pending): if not p.cancelled(): p.cancel() def terminate(self): proc = self._proc otherpid = self.pid if proc: if otherpid == proc.pid: otherpid = None if proc.returncode is None: if self.service.kill_signal is not None: # explicitly check service self.logdebug("using {0} to terminate {1}", get_signal_name(self.kill_signal), self.name) proc.send_signal(self.kill_signal) else: proc.terminate() if otherpid: self.logdebug("using {0} to terminate {1}", get_signal_name(self.kill_signal), self.name) try: os.kill(otherpid, self.kill_signal) except Exception as ex: warn("{0} could not be killed using PID={1}: {2}".format(self.name, otherpid, ex)) self._pid = None @asyncio.coroutine def do_startup_pause(self): """ Wait a short time just to see if the process errors out immediately. This avoids a retry loop and catches any immediate failures now. Can be used by process implementations if needed. """ if not self.startup_pause: return try: result = yield from self.timed_wait(self.startup_pause) except asyncio.TimeoutError: result = None if result is not None and not result.normal_exit: if self.ignore_failures: warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result)) else: raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result) @asyncio.coroutine def timed_wait(self, timeout, func = None): """ Timed wait waits for process completion. If process completion occurs normally, the returncode for process startup is returned. Upon timeout either: 1. asyncio.TimeoutError is raised if 'func' is not provided, or... 2. func is called and the result is returned from timed_wait(). """ try: if not timeout: raise asyncio.TimeoutError() # funny situation, but settings can cause this if users attempt it result = yield from asyncio.wait_for(asyncio.shield(self.wait()), timeout) except asyncio.TimeoutError: if not func: raise result = func() except asyncio.CancelledError: result = self.returncode return result @asyncio.coroutine def wait(self): proc = self._proc if self._exit_event: yield from self._exit_event.wait() elif proc: yield from proc.wait() else: raise Exception("Process not running (or attached), can't wait") if proc.returncode is not None and proc.returncode.normal_exit: self.logdebug("{2} exit status for pid={0} is '{1}'".format(proc.pid, proc.returncode, self.name)) else: self.loginfo("{2} exit status for pid={0} is '{1}'".format(proc.pid, proc.returncode, self.name)) return proc.returncode class SubProcessFamily(lazydict): controller = None # top level system controller services_config = None _start_time = None def __init__(self, controller, services_config): """ Given a pre-analyzed list of processes, complete with prerequisites, build a process family.
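Minimal usage sketch (controller and services_config are assumed to come from the surrounding system setup):

    family = SubProcessFamily(controller, services_config)
    yield from family.run()   # start everything in dependency order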
""" super().__init__() self.controller = controller self.services_config = services_config for s in services_config.get_startup_list(): self[s.name] = SubProcess(s, family = self) def get_status_formatter(self): df = TableFormatter('pid', 'name', 'enabled', 'status', 'note', sort='name') df.add_rows(self.values()) return df @property def system_alive(self): return self.controller.system_alive def get_scheduled_services(self): return [s for s in self.values() if s.scheduled] def get_status(self): if not self._start_time: return "Not yet started" secs = time() - self._start_time total = len(self.values()) scheduled = started = failed = errors = 0 for s in self.values(): if s.scheduled: scheduled += 1 if s.started: started += 1 if s.failed: failed += 1 errors += s.error_count m,s = divmod(int(secs), 60) h,m = divmod(m, 60) msg = "Uptime {0:02}:{1:02}:{2:02}; {3} service{4} started".format(h, m, s, started or "No", started != 1 and 's' or '') if scheduled: msg += "; {0} scheduled".format(scheduled) if failed: msg += "; {0} failed".format(failed) if errors: msg += "; {0} total errors".format(errors) return msg @asyncio.coroutine def run(self, servicelist = None): """ Runs the family, starting up services in dependency order. If any problems occur, an exception is raised. Returns True if any attempts were made to start services, otherwize False if the configuration contained no services that were enabled and ready to run. """ # Note that all tasks are started simultaneously, but they resolve their # interdependencies themselves. if not servicelist: servicelist = self.values() yield from asyncio.gather(*[s.start() for s in servicelist]) self._start_time = time() # Indicate if any attempts were made return any(s.start_attempted for s in servicelist) def _lookup_services(self, names): result = set() for name in names: serv = self.get(name) if not serv: serv = self.get(name + ".service") if not serv: raise ChParameterError("no such service: " + name) result.add(serv) return result @asyncio.coroutine def start(self, service_names, force = False, wait = False, enable = False): slist = self._lookup_services(service_names) not_enab = [s for s in slist if not s.enabled] if not force: if not_enab and not enable: raise Exception("can only start services which have been enabled: " + ", ".join([s.shortname for s in not_enab])) started = [s for s in slist if s.started] if started: raise Exception("can't restart services without stop/reset: " + ", ".join([s.shortname for s in started])) notready = [s for s in slist if not s.ready and (s.enabled and not enable)] if notready: raise Exception("services or their prerequisites are not ready: " + ", ".join([s.shortname for s in notready])) resets = () if not_enab and enable: resets = not_enab # If forcing, then reset all services, as well as any non-ready dependents. 
if force: resets = [s for s in slist if (not s.ready or s.started)] for s in resets: yield from s.reset(dependents=True, enable=enable, restarts_ok=True) if not wait: asyncio.async(self._queued_start(slist, service_names)) else: yield from self.run(slist) @asyncio.coroutine def _queued_start(self, slist, names): try: yield from self.run(slist) except Exception as ex: error("queued start (for {0}) failed: {1}", names, ex) @asyncio.coroutine def stop(self, service_names, force = False, wait = False, disable = False): slist = self._lookup_services(service_names) started = [s for s in slist if s.stoppable] if not force: if len(started) != len(slist): raise Exception("can't stop services which aren't started: " + ", ".join([s.shortname for s in slist if not s.stoppable])) if not wait: asyncio.async(self._queued_stop(slist, service_names, disable)) else: for s in slist: yield from s.stop() if disable: s.enabled = False @asyncio.coroutine def _queued_stop(self, slist, names, disable): try: for s in slist: yield from s.stop() if disable: s.enabled = False except Exception as ex: error("queued stop (for {0}) failed: {1}", names, ex) @asyncio.coroutine def reset(self, service_names, force = False, wait = False): slist = self._lookup_services(service_names) if not force: running = [s for s in slist if s.running] if running: raise Exception("can't reset services which are running: " + ", ".join([s.shortname for s in running])) if not wait: asyncio.async(self._queued_reset(slist, service_names)) else: for s in slist: yield from s.reset(restarts_ok = True) @asyncio.coroutine def _queued_reset(self, slist, names): try: for s in slist: yield from s.reset(restarts_ok = True) except Exception as ex: error("queued reset (for {0}) failed: {1}", names, ex) @asyncio.coroutine def enable(self, service_names): slist = self._lookup_services(service_names) for s in slist: s.enabled = True @asyncio.coroutine def disable(self, service_names): slist = self._lookup_services(service_names) for s in slist: s.enabled = False ================================================ FILE: chaperone/cproc/version.py ================================================ # This file is designed to be used as a package module, but also as a main program runnable # by Python2 or Python3 which will print the version. Used in setup.py VERSION = (0,3,9) DISPLAY_VERSION = ".".join([str(v) for v in VERSION]) LICENSE = "Apache License, Version 2.0" MAINTAINER = "Gary Wisniewski " LINK_PYPI = "https://pypi.python.org/pypi/chaperone" LINK_DOC = "http://garywiz.github.io/chaperone" LINK_SOURCE = "http://github.com/garywiz/chaperone" LINK_QUICKSTART = "http://github.com/garywiz/chaperone-baseimage" LINK_LICENSE = "http://www.apache.org/licenses/LICENSE-2.0" import sys import os VERSION_MESSAGE = """ This is '{1}' version {0.DISPLAY_VERSION}. Documentation and source is available at {0.LINK_SOURCE}. Licensed under the {0.LICENSE}. 
""".format(sys.modules[__name__], os.path.basename(sys.argv[0])) if __name__ == '__main__': print(DISPLAY_VERSION) ================================================ FILE: chaperone/cproc/watcher.py ================================================ import os import asyncio import threading from functools import partial from asyncio.unix_events import BaseChildWatcher from chaperone.cutil.logging import warn, info, debug from chaperone.cutil.proc import ProcStatus from chaperone.cutil.misc import get_signal_name from chaperone.cutil.events import EventSource class InitChildWatcher(BaseChildWatcher): """An init-responsible child watcher. Plugs into the asyncio child watcher framework to allow harvesting of both known and unknown child processes. """ def __init__(self, **kwargs): super().__init__() self.events = EventSource(**kwargs) self._callbacks = {} self._lock = threading.Lock() self._zombies = {} self._forks = 0 self._no_processes = None self._had_children = False def close(self): self._callbacks.clear() self._zombies.clear() super().close() def __enter__(self): with self._lock: self._forks += 1 return self def __exit__(self, a, b, c): with self._lock: self._forks -= 1 if self._forks or not self._zombies: return collateral_victims = str(self._zombies) self._zombies.clear() info( "Caught subprocesses termination from unknown pids: %s", collateral_victims) @property def number_of_waiters(self): return len(self._callbacks) def add_child_handler(self, pid, callback, *args): assert self._forks, "Must use the context manager" with self._lock: try: returncode = self._zombies.pop(pid) except KeyError: # The child is running. self._callbacks[pid] = callback, args return # The child is dead already. We can fire the callback. callback(pid, returncode, *args) def remove_child_handler(self, pid): try: del self._callbacks[pid] return True except KeyError: return False def check_processes(self): # Checks to see if any processes terminated, and triggers onNoProcesses self._do_waitpid_all() def _do_waitpid_all(self): # Because of signal coalescing, we must keep calling waitpid() as # long as we're able to reap a child. while True: try: pid, status = os.waitpid(-1, os.WNOHANG) debug("REAP pid={0},status={1}".format(pid,status)) except ChildProcessError: # No more child processes exist. if self._had_children: debug("no child processes present") self.events.onNoProcesses() return else: self._had_children = True if pid == 0: # A child process is still alive. return returncode = ProcStatus(status) with self._lock: try: callback, args = self._callbacks.pop(pid) except KeyError: # unknown child if self._forks: # It may not be registered yet. 
self._zombies[pid] = returncode continue callback = None if callback is None: info( "Caught subprocess termination from unknown pid: " "%d -> %d", pid, returncode) else: callback(pid, returncode, *args) ================================================ FILE: chaperone/cutil/__init__.py ================================================ # Placeholder ================================================ FILE: chaperone/cutil/config.py ================================================ import os import re import pwd import shlex from operator import attrgetter from copy import deepcopy from itertools import chain import yaml import voluptuous as V from chaperone.cutil.env import Environment, ENV_CONFIG_DIR, ENV_SERVICE from chaperone.cutil.errors import ChParameterError from chaperone.cutil.logging import info, warn, debug from chaperone.cutil.misc import lazydict, lookup_user, get_signal_number @V.message('not an executable file', cls=V.FileInvalid) @V.truth def IsExecutable(v): return os.path.isfile(v) and os.access(v, os.X_OK) _config_schema = V.Any( { V.Match('^.+\.service$'): { 'after': str, 'before': str, V.Required('command'): str, 'directory': str, 'debug': bool, 'enabled': V.Any(bool, str), 'env_inherit': [ str ], 'env_set': { str: str }, 'env_unset': [ str ], 'exit_kills': bool, 'gid': V.Any(str, int), 'ignore_failures': bool, 'interval': str, 'kill_signal': str, 'optional': bool, 'port': V.Any(str, int), 'pidfile': str, 'process_timeout': V.Any(float, int), 'startup_pause': V.Any(float, int), 'restart': bool, 'restart_limit': int, 'restart_delay': int, 'service_groups': str, 'setpgrp': bool, 'stderr': V.Any('log', 'inherit'), 'stdout': V.Any('log', 'inherit'), 'type': V.Any('oneshot', 'simple', 'forking', 'notify', 'cron', 'inetd'), 'uid': V.Any(str, int), }, V.Match('^settings$'): { 'debug': bool, 'detect_exit': bool, 'env_inherit': [ str ], 'env_set': { str: str }, 'env_unset': [ str ], 'gid': V.Any(str, int), 'idle_delay': V.Any(float, int), 'ignore_failures': bool, 'process_timeout': V.Any(float, int), 'startup_pause': V.Any(float, int), 'shutdown_timeout': V.Any(float, int), 'uid': V.Any(str, int), 'logrec_hostname': str, 'enable_syslog': bool, 'status_interval': V.Any(float, int), }, V.Match('^.+\.logging'): { 'enabled': V.Any(bool, str), 'extended': bool, 'file': str, 'syslog_host': str, 'selector': str, 'stderr': bool, 'stdout': bool, 'overwrite': bool, 'uid': V.Any(str, int), 'gid': V.Any(str, int), 'logrec_hostname': str, }, } ) validator = V.Schema(_config_schema) _RE_LISTSEP = re.compile(r'\s*,\s*') def print_services(label, svlist): # Useful for debugging startup order print(label) for s in svlist: print(s) p = getattr(s, 'prerequisites', None) if p: print(' prereq:', p) # Note that we extend YAML by allowing an empty string to mean "false". This makes some macro # expansions work better, such as ... enabled:"$(MYSQL_ENABLED:+true)" _RE_YAML_BOOL = re.compile(r'^\s*(?:(?P<true>y|true|yes|on)|(n|false|no|off|))\s*$', re.IGNORECASE) class _BaseConfig(object): name = None environment = None env_set = None env_unset = None env_inherit = ['*'] _repr_pat = None _expand_these = {} _typecheck = {} _settings_defaults = {} @classmethod def createConfig(cls, config=None, **kwargs): """ Creates a new configuration given a system configuration object. Initializes the environment and triggers any per-configuration attribute initialization.
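Hypothetical call (keyword values are illustrative only, not from this source):

    svc = ServiceConfig.createConfig(config=system_config,
                                     command='/usr/sbin/apache2')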
""" return cls(kwargs, env=config.get_environment(), settings=config.get_settings()) def _typecheck_assure_bool(self, attr): "Assures that the specified attribute is a legal boolean." val = getattr(self, attr) if val is None or isinstance(val, bool): return # First, try both 'true' and 'false' according to YAML conventions match = _RE_YAML_BOOL.match(str(val)) if not match: raise ChParameterError("invalid boolean parameter for '{0}': '{1}'".format(attr, val)) setattr(self, attr, bool(match.group('true'))) def _typecheck_assure_int(self, attr): "Assures that the specified attribute is a legal integer." val = getattr(self, attr) if val is None or isinstance(val, int): return try: setattr(self, attr, int(val)) except ValueError: raise ChParameterError("invalid integer parameter for '{0}': '{1}'".format(attr, val)) def __init__(self, initdict, name = "MAIN", env = None, settings = None): self.name = name if settings: for sd in self._settings_defaults: if sd not in initdict: val = settings.get(sd) if val is not None: setattr(self, sd, val) for k,v in initdict.items(): setattr(self, k, v) # User names always have .xxx qualifier because of schema restrictions. Otherwise, it's a user # defined name subject to restrictions. splitname = self.name.rsplit('.', 1) if len(splitname) == 2 and splitname[0] == splitname[0].upper(): raise ChParameterError("all-uppercase names such as '{0}' are reserved for the system.".format(self.name)) # UID and GID are expanded according to the incoming environment, # since the new environment depends upon these. if env: env.expand_attributes(self, 'uid', 'gid') uid = self.get('uid') gid = self.get('gid') if gid is not None and uid is None: raise Exception("cannot specify 'gid' without 'uid'") # We can now use 'self' as our config, with all defaults. 
env = self.environment = Environment(env, uid=uid, gid=gid, config=self, resolve_xid = not self.get('optional', False)) self.augment_environment(env) if self._expand_these: env.expand_attributes(self, *self._expand_these) for attr,func in self._typecheck.items(): getattr(self, '_typecheck_'+func)(attr) self.post_init() def shortname(self): return self.name def post_init(self): pass def augment_environment(self, env): pass def get(self, attr, default = None): return getattr(self, attr, default) def __repr__(self): if self._repr_pat: return self._repr_pat.format(self) return super().__repr__() class ServiceConfig(_BaseConfig): after = None before = None command = None debug = None directory = None enabled = True exit_kills = False gid = None interval = None ignore_failures = False kill_signal = None optional = False pidfile = None # the pidfile to monitor port = None # used for inetd processes process_timeout = None # time to elapse before we decide a process has misbehaved startup_pause = 0.5 # time to wait momentarily to see if a service starts (if needed) restart = False restart_limit = 5 # number of times to invoke a restart before giving up restart_delay = 3 # number of seconds to delay between restarts setpgrp = True # if this process should run in its own process group service_groups = "default" # will be transformed into a tuple() upon construction stderr = "log" stdout = "log" type = 'simple' uid = None exec_args = None # derived from bin/command/args, but may be preset using createConfig idle_delay = 1.0 # present, but mirrored from settings, not settable per-service # since it is only triggered once when the first IDLE group item executes prerequisites = None # a list of service names which are prerequisites to this one _repr_pat = "Service:{0.name}(service_groups={0.service_groups}, after={0.after}, before={0.before})" _expand_these = {'command', 'stdout', 'stderr', 'interval', 'directory', 'exec_args', 'pidfile', 'enabled', 'port'} _typecheck = {'enabled': 'assure_bool', 'port': 'assure_int'} _assure_bool = {'enabled'} _settings_defaults = {'debug', 'idle_delay', 'process_timeout', 'startup_pause', 'ignore_failures'} system_group_names = ('IDLE', 'INIT') system_service_names = ('CONSOLE', 'MAIN') @property def shortname(self): return self.name.replace('.service', '') def augment_environment(self, env): if self.name: env[ENV_SERVICE] = self.name def post_init(self): # Assure that exec_args is set to the actual arguments used for execution if self.command: self.exec_args = shlex.split(self.command) # Lookup signal number if self.kill_signal is not None: self.kill_signal = get_signal_number(self.kill_signal) # Expand before, after and service_groups into sets/tuples self.before = set(_RE_LISTSEP.split(self.before)) if self.before is not None else set() self.after = set(_RE_LISTSEP.split(self.after)) if self.after is not None else set() self.service_groups = tuple(_RE_LISTSEP.split(self.service_groups)) if self.service_groups is not None else tuple() for sname in chain(self.before, self.after): if sname.upper() == sname and sname not in chain(self.system_group_names, self.system_service_names): raise ChParameterError("{0} dependency reference not valid; '{1}' is not a recognized system name" .format(self.name, sname)) for sname in self.service_groups: if sname.upper() == sname and sname not in self.system_group_names: raise ChParameterError("{0} contains an unrecognized system group name '{1}'".format(self.name, sname)) if 'IDLE' in self.after: raise Exception("{0} cannot specify 
services which start *after* service_group IDLE".format(self.name)) if 'INIT' in self.before: raise Exception("{0} cannot specify services which start *before* service_group INIT".format(self.name)) class LogConfig(_BaseConfig): selector = '*.*' file = None stderr = False stdout = False enabled = True overwrite = False extended = False # include facility/priority information uid = None # used to control permissions on logfile creation gid = None logrec_hostname = None # hostname used to override hostname in syslog record syslog_host = None # remote IP of syslog handler _expand_these = {'selector', 'file', 'enabled', 'logrec_hostname', 'syslog_host'} _typecheck = {'enabled': 'assure_bool'} _settings_defaults = {'logrec_hostname'} @property def shortname(self): return self.name.replace('.logging', '') class ServiceDict(lazydict): _ordered_startup = None def __init__(self, servdict, env = None, settings = None): """ Accepts a dictionary of values to be turned into services. """ super().__init__( ((k,ServiceConfig(v,k,env,settings)) for (k,v) in servdict) ) def add(self, service): self[service.name] = service def clear(self): super().clear() self._ordered_startup = None def get_dependency_graph(self): """ Returns a set of dependency groups. Each group represents a set of dependencies starting at the root of the dependency tree. This is valuable for debugging dependencies. The output graph is ascii-art which shows the earliest start times and latest stop times for each service, roughly in order of start-up. """ sep = ' | ' sulist = self.get_startup_list() curcol = 0 maxwidth = 0 for s in sulist: ourlen = len(s.shortname) s._column = curcol + ourlen - 1 curcol += ourlen + len(sep) maxwidth = max(maxwidth, ourlen) def histogram(serv): # find the earliest prerequsite, or 0 if there is none pcols = tuple(s._column for s in sulist if s.name in serv.prerequisites) start = (pcols and max(pcols) + 1) or 0 return (' ' * start) + ('=' * (serv._column - start + 1)) lines = list() lines.append(' ' * (maxwidth + len(sep)) + sep.join(s.shortname for s in sulist)) for s in sulist: lines.append(s.shortname.ljust(maxwidth) + sep + histogram(s)) lines.append(('-' * (maxwidth)) + '-> depends on...') for s in sulist: lines.append(s.shortname.ljust(maxwidth) + sep + ', '.join(pr.replace('.service', '') for pr in s.prerequisites)) return lines def get_startup_list(self): """ Returns the list of start-up items in priority order by examining before: and after: attributes. """ if self._ordered_startup is not None: return self._ordered_startup services = self.deepcopy() groups = lazydict() for k,v in services.items(): for g in v.service_groups: groups.setdefault(g, lambda: lazydict())[k] = v #print_services('initial', services.values()) # The "IDLE" and "INIT" groups are special. Revamp things so that any services in the "IDLE" group # have an implicit "after: 'all-others'" and any services in "INIT" have an implicit "before: 'all-others' # where all-others is an explicit list of all services NOT in the respective group if 'IDLE' in groups: nonidle = set(k for k,v in services.items() if "IDLE" not in v.service_groups) for s in groups['IDLE'].values(): s.after.update(nonidle) if 'INIT' in groups: noninit = set(k for k,v in services.items() if "INIT" not in v.service_groups) for s in groups['INIT'].values(): s.before.update(noninit) # We want to only look at the "after:" attribute, so we will eliminate the relevance # of befores... 
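        # For example (service names assumed), a declaration of
        #     b.service: { before: "a.service" }
        # is rewritten by the loop below so that a.service gains after: "b.service",
        # and ordering can then be computed from 'after' edges alone.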
for k,v in services.items(): for bef in v.before: if bef in groups: for g in groups[bef].values(): g.after.add(v.name) elif bef in services: services[bef].after.add(v.name) v.before = None # Before is now gone, make sure that all "after... groups" are translated into "after.... service" for group in groups.values(): afters = set() for item in group.values(): afters.update(item.after) for a in afters: if a in groups: names = groups[a].keys() for item in group.values(): item.after.update(names) # Now remove any undefined services or groups and turn the 'after' attribute into a definitive # graph. # # Note: sorted() occurs a couple times below. The main reason is so that the results # are deterministic in cases where exact order is not defined. afters = set(services.keys()) for v in services.values(): v.refs = sorted(map(lambda n: services[n], v.after.intersection(afters)), key=attrgetter('name')) #print_services('before add nodes', services.values()) svlist = list() # this will be our final list, containing original items svseen = set() def add_nodes(items): for item in items: if hasattr(item, 'active'): raise Exception("circular dependency in service declaration") item.active = True add_nodes(item.refs) del item.active if item.name not in svseen: svseen.add(item.name) svlist.append(self[item.name]) # set startup prerequisite dependencies svlist[-1].prerequisites = set(r.name for r in item.refs) add_nodes(sorted(services.values(), key=attrgetter('name'))) #print_services('final service list', svlist) self._ordered_startup = svlist return svlist class Configuration(object): uid = None # specifies if a system-wide user was provided gid = None _conf = None _env = None # calculated environment @classmethod def configFromCommandSpec(cls, spec, user = None, default = None, extra_settings = None, disable_console_log = False): """ A command specification (typically specified with the --config= command line option) is used to create a configuration object. The target may be either a file or a directory. If it is a file, then the file itself will be the only configuration read. If it is a directory, then a search is made for any top-level files which end in .conf or .yaml, and those will be combined according to lexicographic order. If the configuration path is a relative path, then it is relative to either the root directory, or the home directory of the given user. This allows a user-specific configuration to automatically take effect if desired. """ frombase = '/' if user: frombase = lookup_user(user).pw_dir trypath = os.path.join(frombase, spec) debug("TRY CONFIG PATH: {0}".format(trypath)) if not os.path.exists(trypath): return cls(default = default) else: os.environ[ENV_CONFIG_DIR] = os.path.dirname(trypath) if os.path.isdir(trypath): return cls(*[os.path.join(trypath, f) for f in sorted(os.listdir(trypath)) if f.endswith('.yaml') or f.endswith('.conf')], default = default, uid = user, extra_settings = extra_settings, disable_console_log = disable_console_log) return cls(trypath, default = default, uid = user, extra_settings = extra_settings, disable_console_log = disable_console_log) def __init__(self, *args, default = None, uid = None, extra_settings = None, disable_console_log = False): """ Given one or more files, load our configuration. If no configuration is provided, then use the configuration specified by the default. 
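Typical construction goes through configFromCommandSpec() above; a direct call might look like this (path hypothetical):

    cfg = Configuration('/etc/chaperone.d/010-start.conf')
    services = cfg.get_services()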
""" debug("CONFIG INPUT (uid={1}): '{0}'".format(args, uid)) self.uid = uid self._conf = lazydict() for fn in args: if os.path.exists(fn): self._merge(yaml.load(open(fn, 'r').read().expandtabs())) if not self._conf and default: self._conf = lazydict(yaml.load(default)) validator(self._conf) if extra_settings: self.update_settings(extra_settings) s = self.get_settings() self.uid = s.get('uid', self.uid) self.gid = s.get('gid', self.gid) # Special case used by --no-console-log. It really was just easiest to do it this way # rather than try to build some special notion of "console logging" into the log services # backends. if disable_console_log: for k,v in self._conf.items(): if k.endswith('.logging'): if 'stdout' in v: del v['stdout'] if 'stderr' in v: del v['stderr'] def _merge(self, items): if type(items) == list: items = {k:dict() for k in items} conf = self._conf for k,v in items.items(): if k in conf and not k.endswith('.service'): conf.smart_update(k,v) else: conf[k] = v def get_services(self): env = self.get_environment() return ServiceDict( ((k,v) for k,v in self._conf.items() if k.endswith('.service')), env, self._conf.get('settings') ) def get_logconfigs(self): env = self.get_environment() settings = self._conf.get('settings') return lazydict( ((k,LogConfig(v,k,env,settings)) for k,v in self._conf.items() if k.endswith('.logging')) ) def get_settings(self): return self._conf.get('settings') or {} def update_settings(self, updates): curset = self.get_settings() curset.update(updates) self._conf['settings'] = curset def get_environment(self): if not self._env: self._env = Environment(config=self.get_settings(), uid=self.uid, gid=self.gid) return self._env def dump(self): debug('FULL CONFIGURATION: {0}'.format(self._conf)) ================================================ FILE: chaperone/cutil/env.py ================================================ import re import os import subprocess from fnmatch import fnmatch from chaperone.cutil.logging import error, debug, warn from chaperone.cutil.misc import lookup_user, lazydict from chaperone.cutil.errors import ChVariableError, ChParameterError, ChNotFoundError ## ## ALL chaperone configuration variables defined here for easy reference ENV_CONFIG_DIR = '_CHAP_CONFIG_DIR' # directory which CONTAINS the config file *or* directory ENV_INTERACTIVE = '_CHAP_INTERACTIVE' # if this session is interactive (has a ptty attached) ENV_SERVICE = '_CHAP_SERVICE' # name of the current service ENV_SERIAL = '_CHAP_SERVICE_SERIAL' # Contains a monotonic unique serial number for each started service, starting with 1 ENV_SERVTIME = '_CHAP_SERVICE_TIME' # Timestamp when service started running ENV_TASK_MODE = '_CHAP_TASK_MODE' # if we are running in --task mode ENV_CHAP_OPTIONS = '_CHAP_OPTIONS' # Preset before chaperone runs to set default options # Technically IEEE 1003.1-2001 states env vars can contain anything except '=' and NUL but we need to # obviously exclude the terminator! # # Minimal support is included for nested parenthesis when operators are used, as in: # $(VAR:-$(VAL)) # However, more levels of nesting are not supported and will cause substitutions to be unrecognised. 
_RE_BACKTICK = re.compile(r'`([^`]+)`', re.DOTALL) # Parsing for operators within expansions _RE_OPERS = re.compile(r'^(?:([^:]+):([-|?+_/])(.*)|(`.+`))$', re.DOTALL) _RE_SLASHOP = re.compile(r'^(.+)(?= 0: s0 = (not stack and -1) or stack[-1] if s0 == ci: stack.pop() # We are totally done if the stack is empty if not stack: results.append(func(buf[startpos:pos], buf[match.start():pos+1], *args)) startpos = pos + 1 pos = buflen match = st.search(buf, startpos) break elif ci >= halfnest and s0 < nestlen: # don't match within quotes # at matching end delimiter, which may be nesting, or not stack.append(ci-halfnest if ci < nestlen else ci) pos += 1 if pos >= buflen: startpos = match.start(0) match = None break if pos != startpos: results.append(buf[startpos:pos]) return ''.join(results) class Environment(lazydict): uid = None gid = None # This is a cached version of this environment, expanded _expanded = None # The _shadow Environment contains a pointer to the environment which contained # the LAST active value for each env_set item so that we can deal with self-referential # cases like: # 'PATH': '/usr/local:$(PATH)' _shadow = None # A class variable to keep track of backtick expansions so we don't do them more than once _cls_btcache = dict() _cls_use_btcache = True # if shell expansions should be cached once or re-executed _cls_backtick = True # indicates backticks are enabled # Default scanner _cls_scan = EnvScanner() @classmethod def set_parse_parameters(cls, variable_id = None, open_expansion = None): cls._cls_scan = EnvScanner(variable_id, open_expansion) @classmethod def set_backtick_expansion(cls, enabled = True, cache = True): cls._cls_backtick = enabled cls._cls_use_btcache = cache def __init__(self, from_env = os.environ, config = None, uid = None, gid = None, resolve_xid = True): """ Create a new environment. An environment may have a user associated with it. If so, then it will be pre-populated with the user's HOME, USER and LOGNAME so that expansions can reference these. Note that if resolve_xid is False, then credentials are not resolved if they do not exist, and the uid/gid are left the same. This means that certain features, like HOME variables, will not be properly set, leading to possible interactions between the optional components and their actual specification. However, this is better than having optional components trigger errors because uninstalled software did not create uid's needed for operation. The onus is on the service itself (in cproc) to assure that checking is performed. Note also that environments which use backtick expansions will *still* fail, because the backticks must occur within the context of the specified user, and it would be a security violation to allow a default.
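A small sketch (user name assumed):

    env = Environment(os.environ, uid='www-data')
    env['HOME']    # pre-populated from the www-data passwd entry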
""" super().__init__() #print("\n--ENV INIT", config, uid, from_env, from_env and getattr(from_env, 'uid', None)) userenv = dict() # Inherit user from passed-in environment self._shadow = getattr(from_env, '_shadow', None) shadow = None # we don't bother to recreate this in any complex fashion unless we need to if uid is None: self.uid = getattr(from_env, 'uid', self.uid) self.gid = getattr(from_env, 'gid', self.gid) else: pwrec = None try: pwrec = lookup_user(uid, gid) except ChNotFoundError: if resolve_xid: raise self.uid = uid self.gid = gid if pwrec: self.uid = pwrec.pw_uid self.gid = pwrec.pw_gid userenv['HOME'] = pwrec.pw_dir userenv['USER'] = userenv['LOGNAME'] = pwrec.pw_name if not config: if from_env: self.update(from_env) self.update(userenv) else: inherit = config.get('env_inherit') or ['*'] if inherit and from_env: self.update({k:v for k,v in from_env.items() if any([fnmatch(k,pat) for pat in inherit])}) self.update(userenv) add = config.get('env_set') unset = config.get('env_unset') if add or unset: self._shadow = shadow = (getattr(self, '_shadow') or _DICT_CONST).copy() if add: for k,v in add.items(): if from_env and k in from_env: shadow[k] = from_env # we keep track of the environment where the predecessor originated self[k] = v if unset: patmatch = lambda p: any([fnmatch(p,pat) for pat in unset]) for delkey in [k for k in self.keys() if patmatch(k)]: del self[delkey] for delkey in [k for k in shadow.keys() if patmatch(k)]: del shadow[delkey] #print(' DONE (.uid={0}): {1}\n'.format(self.uid, self)) def _get_shadow_environment(self, var): """ Returns the environment where var existed before the specified variable was set, even that occurred long ago. Delays expansion of the parent environment until this point, since it is only rarely that self-referential environment variables need to consult the shadow. """ try: shadow = self._shadow[var] except (TypeError, KeyError): return None try: return shadow.expanded() except AttributeError: pass # Note shadow may be None at this point, or a dict() self._shadow[var] = shadow = Environment(shadow) return shadow.expanded() def __setitem__(self, key, value): super().__setitem__(key, value) self._expanded = None def __delitem__(self, key): super().__delitem__(key) self._expanded = None def clear(self): super().clear() self._expanded = None def _elookup(self, match): whole = match.group(0) return self.get(whole[2:-1], whole) def expand(self, instr): """ Expands an input string by replacing environment variables of the form ${ENV} or $(ENV). If an expansion is not found, the substituion is ignored and the original reference remains. Two bash features are employed to allow tests: $(VAR:-sub) Expands to sub if VAR not defined $(VAR:+sub) Expands to sub if VAR IS defined If a list is provided instead of a string, a list will be returned with each item separately expanded. """ if isinstance(instr, list): return [self.expand(item) for item in instr] if not isinstance(instr, str): return instr return self._cls_scan.parse(instr, self._expand_into, self) def expand_attributes(self, obj, *args): """ Given an object and a set of attributes, expands each and replaces the originals with expanded versions. Implicitly expands the environment to assure all variable substitutions occur correctly. 
""" explist = (k for k in args if hasattr(obj, k)) if not explist: return env = self.expanded() for attr in explist: setattr(obj, attr, env.expand(getattr(obj, attr))) def expanded(self): """ Does a recursive expansion on all variables until there are no matches. Circular recursion is halted rather than reported as an error. Returns a version of this environment which has been expanded. Asking an expanded() copy for another expanded() copy returns self unless the expanded copy has been modified. """ if self._expanded is not None: return self._expanded result = Environment(None) for k in sorted(self.keys()): # sorted so outcome is deterministic self._expand_into(k, None, result, k) # Copy uid after we expand, since any user information is already present in our # own environment. result.uid = self.uid result.gid = self.gid result._shadow = self._shadow # Cache a copy, but also tell the cached copy that it's expanded cached copy is itself. result._expanded = result self._expanded = result return result def _expand_into(self, k, wholematch, result, parent = None): """ Internal workhorse that expands the variable 'k' INTO the given result dictionary. The result dictionary will conatin the expanded values. The result dictionary is also a cache for nested and recursive environment expansion. 'wholematch' is None unless called from in an re.sub() (or similar context). If set, it indicates the complete expansion expression, including adornments. It is used as the default expansion when a variable is not defined. 'parent' is the name of the variable which was being expanded in the last recursion, to catch the special case of self-referential variables. """ match = _RE_OPERS.match(k) if match: (k, oper, repl, backtick) = match.groups() # Phase 1: Base variable value. Start by determining the value of variable # 'k' within the current context. # 1A: We have a backtick shortcut, such as $(`date`) if match and backtick: return self._recurse(result, backtick, parent) # 1B: We have an embedded self reference such as "PATH": "/bin:$(PATH)". We use # the last defined value in a prior environment as the value. elif parent == k and wholematch is not None: val = (self._get_shadow_environment(k) or _DICT_CONST).get(k) or '' # 1C: We have already calculated a result and will use that instead, but only # in a nested expansion. We re-evaluate top-levels all the time. elif wholematch is not None and k in result: val = result[k] # 1D: We have a variable which is not part of our environment at all, and # either treat it as empty, or as the wholematch value for further # processing elif k not in self: val = "" if match else wholematch # 1E: Finally, we will store this value and expand further. else: result[k] = self[k] # assure that recursion attempts stop with this value val = result[k] = self._recurse(result, self[k], k) # We now have, in 'val', the fully expanded contents of the variable 'k' if not match: return val # Phase 2: Process any operators to return a possibily modified # value as the result of the complete expression. if oper == '?': if not val: raise ChVariableError(self._recurse(result, repl, parent)) elif oper == '/': smatch = _RE_SLASHOP.match(repl) if not smatch: raise ChParameterError("invalid regex replacement syntax in '{0}'".format(match.group(0))) val = self._recurse(result, re.sub((smatch.group(3) and "(?" 
+ smatch.group(3) + ")") + smatch.group(1), smatch.group(2).replace('\/', '/'), val), parent) elif oper == '|': vts = _RE_BAREBAR.split(repl, 3) if len(vts) == 1: # same as + val = '' if not val else self._recurse(result, vts[0], parent) elif len(vts) == 2: val = self._recurse(result, vts[0] if val else vts[1], parent) elif len(vts) >= 3: editval = vts[1] if fnmatch(val.replace(r'\|', '|').lower(), vts[0].lower()) else vts[2] val = self._recurse(result, editval.replace(r'\|', '|'), parent) elif oper == "+": val = '' if not val else self._recurse(result, repl, parent) elif oper == "_": # strict opposite of + val = '' if val else self._recurse(result, repl, parent) elif oper == "-": # bash :- if not val: val = self._recurse(result, repl, parent) return val def _recurse(self, result, buf, parent_var = None): "Worker method to isolate recursive env variable expansion, with backtick support" return _RE_BACKTICK.sub(self._backtick_expand, self._cls_scan.parse(buf, self._expand_into, result, parent_var)) def _backtick_expand(self, cmd): """ Performs rudimentary backtick expansion after all other environment variables have been expanded. Because these are cached, the user should not expect results to differ for different environment contexts, nor should the environment itself be relied upon. """ # Accepts either a string or match object if not isinstance(cmd, str): cmd = cmd.group(1) if not self._cls_backtick: return "`" + cmd + "`" key = '{0}:{1}:{2}'.format(self.uid, self.gid, cmd) result = self._cls_btcache.get(key) if result is None: if self.uid: try: pwrec = lookup_user(self.uid, self.gid) except ChNotFoundError as ex: ex.annotate('(required for backtick expansion `{0}`)'.format(cmd)) raise ex else: pwrec = None def _proc_setup(): if pwrec: os.setgid(pwrec.pw_gid) os.setuid(pwrec.pw_uid) try: result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, preexec_fn=_proc_setup) result = result.decode() except Exception as ex: error(ex, "Backtick expansion returned error: " + str(ex)) result = "" result = result.strip().replace("\n", " ") if self._cls_use_btcache: self._cls_btcache[key] = result return result def get_public_environment(self): """ Public variables are those which are exported to the application and do NOT start with an underscore. All underscore names will be kept private. 
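For example (values illustrative):

    {'PATH': '/bin', '_CHAP_SERVICE': 'x.service', 'EMPTY': ''}

yields {'PATH': '/bin'}; underscore-prefixed and empty values are dropped.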
""" return {k:v for k,v in self.expanded().items() if not (k.startswith('_') or v in (None, ''))} ================================================ FILE: chaperone/cutil/errors.py ================================================ import errno class ChError(Exception): # Named the same as OSError so that exception code can detect the presence # of an errno for reporting purposes errno = None annotation = None def annotate(self, text): if self.annotation: self.annotation += ' ' + text else: self.annotation = text def __str__(self): supmsg = super().__str__() if self.annotation: supmsg += ' ' + self.annotation return supmsg def __init__(self, message = None, errno = None): super().__init__(message) if errno is not None: self.errno = errno class ChParameterError(ChError): errno = errno.EINVAL class ChNotFoundError(ChError): errno = errno.ENOENT class ChSystemError(ChError): pass class ChProcessError(ChError): def __init__(Self, message = None, errno = None, resultcode = None): if resultcode is not None and errno is None: errno = resultcode.errno super().__init__(message, errno) class ChVariableError(ChError): pass def get_errno_from_exception(ex): try: return ex.errno except AttributeError: return None ================================================ FILE: chaperone/cutil/events.py ================================================ IS_EVENT = lambda e: e.startswith('on') and len(e) > 2 and e[2:3].isupper() def SWALLOW_EVENT(*args, **kwargs): pass class EventSource: """ This is a elegant generic class to set up and handle events. Events are always identified by keyword arguments of the format onXxxxx. def __init__(self, **kwargs): events = EventSource() kwargs = events.add(**kwargs) def foo(self): self.events.onMiscEvent() """ __events = None def __init__(self, **kwargs): self.__events = dict() if kwargs: self._exec_kwargs(self._do_add, kwargs) def __getattribute__(self, key): if IS_EVENT(key): return self.__events.get(key, SWALLOW_EVENT) return object.__getattribute__(self, key) def _exec_kwargs(self, oper, kwargs): events = [e for e in kwargs.keys() if IS_EVENT(e)] if not events: return kwargs for e in events: oper(e, kwargs[e]) del kwargs[e] return kwargs def clear(self): "Removes all event handlers." self.__events.clear() def reset(self, **kwargs): "Removes all event handlers and sets new ones." self.__events.clear() return self._exec_kwargs(self._do_add, kwargs) def add(self, **kwargs): """ Adds one or more events: add(onError = handler, onExit = handler) Returns the kwargs not processed. """ return self._exec_kwargs(self._do_add, kwargs) def remove(self, **kwargs): """ Removes one or more events: remove(onError = handler, onExit = handler) Returns the kwargs not processed. 
""" return self._exec_kwargs(self._do_remove, kwargs) def _do_add(self, name, value): assert callable(value) e = self.__events.get(name) # No such event, add a singleton if not e: self.__events[name] = value return # Add to multi-event dispatcher try: e.__eventlist.append(value) return except AttributeError: pass # Create multi-event dispatcher displist = [e, value] def dispatcher(*args, _displist = displist, **kwargs): for edisp in _displist: edisp(*args, **kwargs) dispatcher.__eventlist = displist self.__events[name] = dispatcher def _do_remove(self, name, value): e = self.__events.get(name) if not name: return try: e.__eventlist.remove(value) except ValueError: return # not in list, ignore except AttributeError: try: del self.__events[name] # singleton except KeyError: return # no singleton, ignore ================================================ FILE: chaperone/cutil/format.py ================================================ def fstr(s): if s is None: return '-' if isinstance(s, bool): return str(s).lower() return str(s) class TableFormatter(list): """ A quick formatting class which allows you to build a table, then output it neatly with columns and headings. """ attributes = None headings = None _sortfield = None def __init__(self, *args, sort=None): self.attributes = tuple(isinstance(a, tuple) and a[1] or a for a in args) self.headings = tuple(isinstance(a, tuple) and a[0] or a for a in args) self._hsize = list(len(h) for h in self.headings) if sort in self.attributes: self._sortfield = self.attributes.index(sort) def add_rows(self, rows): for r in rows: row = tuple(getattr(r, attr, None) for attr in self.attributes) for i in range(len(row)): self._hsize[i] = max(self._hsize[i], len(fstr(row[i]))) self.append(row) def get_formatted_data(self): if self._sortfield is not None: rows = sorted(self, key=lambda r: r[self._sortfield]) else: rows = self hz = self._hsize fieldcount = range(len(hz)) sep = " " dividers = tuple("-" * hz[i] for i in fieldcount) return "\n".join(sep.join(fstr(row[i]).ljust(hz[i]) for i in fieldcount) for row in [self.headings] + [dividers] + rows) ================================================ FILE: chaperone/cutil/logging.py ================================================ import logging import os import sys import traceback from time import strftime from logging.handlers import SysLogHandler from functools import partial import chaperone.cutil.syslog_info as syslog_info logger = logging.getLogger(__name__) _root_logger = logging.getLogger(None) _stderr_handler = logging.StreamHandler() _cur_level = logging.NOTSET _format = logging.Formatter() _stderr_handler.setFormatter(_format) _root_logger.addHandler(_stderr_handler) def set_log_level(lev): global _cur_level _cur_level = syslog_info.syslog_to_python_lev(lev) logger.setLevel(_cur_level) def set_custom_handler(handler, enable = True): if enable: _root_logger.addHandler(handler) _root_logger.removeHandler(_stderr_handler) logger.setLevel(logging.DEBUG) else: _root_logger.removeHandler(handler) _root_logger.addHandler(_stderr_handler) logger.setLevel(_cur_level) def _versatile_logprint(delegate, fmt, *args, facility=None, exceptions=False, program=None, pid=None, **kwargs): """ In addition to standard log formatting, the following two special cases are covered: 1. If there are no formatting characters (%), then simply concatenate repr() of *args 2. If there are '{' formatting arguments, then apply new-style .format using arguments provided. Additionally, you can pass an exception as the first argument: 1. 
    If no other arguments are provided, then the exception message will be the log item.
    2.  A traceback will be printed in the case where the logger priority level is set to debug.
    """
    if isinstance(fmt, Exception):
        ex = fmt
        args = list(args)
        if len(args) == 0:
            fmt = str(ex)
        else:
            fmt = args.pop(0)
    else:
        ex = None

    if facility is not None or program or pid:
        extra = kwargs['extra'] = {}
        if facility:
            extra['_facility'] = facility
        if program:
            extra['program_name'] = str(program)
        if pid:
            extra['program_pid'] = str(pid)

    if ex and (exceptions or logger.level == logging.DEBUG): # use python level here
        trace = "\n" + traceback.format_exc()
    else:
        trace = ""

    if not len(args):
        delegate(fmt, **kwargs)
    elif '%' not in fmt:
        if '{' in fmt:
            delegate('%s', fmt.format(*args) + trace, **kwargs)
        else:
            delegate('%s', " ".join([repr(a) for a in args]) + trace, **kwargs)
    else:
        delegate(fmt, *args, **kwargs)

warn = partial(_versatile_logprint, logger.warning)
info = partial(_versatile_logprint, logger.info)
debug = partial(_versatile_logprint, logger.debug, exceptions=True)
error = partial(_versatile_logprint, logger.error)


================================================
FILE: chaperone/cutil/misc.py
================================================
import os
import pwd
import grp
import copy
import signal
import subprocess

from chaperone.cutil.errors import ChNotFoundError, ChParameterError, ChSystemError

class objectplus:
    """
    An object which provides some general-purpose useful patterns.
    """

    _cls_singleton = None

    @classmethod
    def sharedInstance(cls):
        "Return a singleton object for this class."
        if not cls._cls_singleton:
            cls._cls_singleton = cls()
        return cls._cls_singleton

class lazydict(dict):

    __slots__ = ()              # create no __dict__ overhead for a pure dict subclass

    def __init__(self, *args):
        """
        Allow a series of iterables as an initializer.
        """
        super().__init__()
        for a in args:
            self.update(a)

    def get(self, key, default = None):
        """
        A version of get() that accepts lazy defaults.  You can provide a callable which
        will be invoked only if necessary.
        """
        if key in self:
            return self[key]
        return default() if callable(default) else default

    def setdefault(self, key, default = None):
        """
        A version of setdefault that works the way it should, by having a lambda that is
        executed only in the case where the item does not exist.
        """
        if key in self:
            return self[key]
        self[key] = value = default() if callable(default) else default
        return value

    def smart_update(self, key, theirs):
        """
        Smart update replaces values in our dictionary with values from the other.  However, in
        the case where both dictionaries contain sub-dictionaries, the sub-dictionaries are
        updated rather than replaced.  (This makes things like env_set inheritance easier.)
        """
        ours = super().get(key)
        if ours is None:
            self[key] = theirs
            return
        for k,v in theirs.items():
            oursub = ours.get(k)
            if isinstance(oursub, dict) and isinstance(v, dict):
                oursub.update(v)
            else:
                ours[k] = v

    def deepcopy(self):
        return copy.deepcopy(self)

def maybe_remove(fn, strict = False):
    """
    Tries to remove a file but ignores a FileNotFoundError or Permission error.
    If an exception would have been raised, returns the exception, otherwise None.
    If "strict" then the file must either be missing, or successfully removed.  Other errors
    will still raise exceptions.
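    For example (illustrative): err = maybe_remove('/var/run/app.sock') leaves err as None
    when the file was removed, or set to the suppressed exception instance (such as a
    FileNotFoundError) so the caller can decide whether it matters.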
""" try: os.remove(fn) except (FileNotFoundError if strict else (FileNotFoundError, PermissionError)) as ex: return ex return None def is_exe(p): return os.path.isfile(p) and os.access(p, os.X_OK) def executable_path(fn, env = os.environ): """ Returns the fully qualified pathname to an executable. The PATH is searched, and any tilde expansions are performed. Exceptions are raised as usual. """ penv = env.get("PATH") newfn = os.path.expanduser(fn) path,prog = os.path.split(newfn) if not path and penv: for path in penv.split(os.pathsep): if is_exe(os.path.join(path, prog)): newfn = os.path.join(path, prog) break if not os.path.isfile(newfn): raise FileNotFoundError(fn) if not os.access(newfn, os.X_OK): raise PermissionError(fn) return newfn _lookup_user_cache = {} def lookup_user(uid, gid = None): """ Looks up a user using either a name or integer user value. If a group is specified, Then set the group explicitly in the returned pwrec """ key = (uid, gid) retval = _lookup_user_cache.get(key) if retval: return retval # calculate the new entry intuid = None try: intuid = int(uid) except ValueError: pass try: if intuid is not None: pwrec = pwd.getpwuid(intuid) else: pwrec = pwd.getpwnam(uid) except KeyError: raise ChNotFoundError("specified user ('{0}') does not exist".format(uid)) if gid is None: return pwrec retval = _lookup_user_cache[key] = type(pwrec)( (pwrec.pw_name, pwrec.pw_passwd, pwrec.pw_uid, lookup_group(gid, True), pwrec.pw_gecos, pwrec.pw_dir, pwrec.pw_shell) ) return retval def lookup_group(gid, optional = False): """ Looks up a user using either a name or integer user value. If 'optional' is true, then does not require that the group exist, and always returns the numeric value of 'gid', or the mapping from 'gid' if it is a name. Otherwise returns the group record. """ intgid = None try: intgid = int(gid) except ValueError: pass if intgid is not None: if optional: return intgid findit = grp.getgrgid else: findit = grp.getgrnam try: grrec = findit(gid) except KeyError: raise ChNotFoundError("specified group ('{0}') does not exist".format(gid)) return grrec.gr_gid if optional else grrec def groupadd(name, gid): """ Adds a group to the system with the specified name and GID. """ # First, try the gnu tools way try: if subprocess.call(['groupadd', '-g', str(gid), name]) == 0: return raise ChSystemError("Unable to add a group with name={0} and GID={1}".format(name, gid)) except FileNotFoundError: pass # Now, try using 'addgroup' with the busybox syntax if subprocess.call("addgroup -g {0} {1}".format(gid, name), shell=True) == 0: return raise ChSystemError("Unable to add a group with name={0} and GID={1}".format(name, gid)) def useradd(name, uid = None, gid = None, home = None): """ Adds a user to the system given an optional UID and numeric GID. 
""" ucmd = ['useradd', '--no-create-home'] if uid is not None: ucmd += ['-u', str(uid)] if gid is not None: ucmd += ['-g', str(gid)] if home is not None: ucmd += ['--home-dir', home] ucmd += [name] tried = " ".join(ucmd) # try gnu tools first try: if subprocess.call(ucmd) == 0: return raise ChSystemError("Error while trying to add user: {0} ({1})".format(name, tried)) except FileNotFoundError: pass ucmd = "adduser -D -H" if uid is not None: ucmd += " -u " + str(uid) if gid is not None: ucmd += " -G " + str(gid) if home is not None: ucmd += " -h '{0}'".format(home) ucmd += " " + name tried += "\n" + ucmd # try busybox-style adduser if subprocess.call(ucmd, shell=True) == 0: return raise ChSystemError("Error while trying to add user: {0}\ntried:\n{1}".format(name, tried)) def userdel(name): """ Removes a user from the system. """ del_ex = ChSystemError("Error while trying to remove user: {0}".format(name)) # try gnu tools first try: if subprocess.call(['userdel', name]) == 0: return raise del_ex except FileNotFoundError: pass # try busybox-style adduser if subprocess.call("deluser " + name, shell=True) == 0: return raise del_ex # User Directories Directory cache _udd = None def get_user_directories_directory(): """ Determines the directory where user directories are stored. This is actually not that easy, and different systems have different ways of doing it. So, we try adding a user called '_chaptest_' just to see where the directory goes, and use that. """ global _udd if _udd is not None: return _udd try: testuser = "_chaptest_" useradd(testuser) userinfo = lookup_user(testuser) _udd = os.path.dirname(userinfo.pw_dir) userdel(testuser) except Exception: _udd = "/" # default if any error occurs return _udd def maybe_create_user(user, uid = None, gid = None, using_file = None, default_home = None): """ If the user does not exist, then create one with the given name, and optionally the specified uid. If a gid is specified, create a group with the same name as the user, and the given gid. If the user does exist, then confirm that the uid and gid match, if either or both are specified. If 'using_file' is specified, then uid/gid are ignored and replaced with the uid/gid of the specified file. The file must exist and be readable. """ if using_file: stat = os.stat(using_file) if uid is None: uid = stat.st_uid if gid is None: gid = stat.st_gid if uid is not None: try: uid = int(uid) except ValueError: raise ChParameterError("Specified UID is not a number: {0}".format(uid)) try: pwrec = lookup_user(user) except ChNotFoundError: pwrec = None # If the user exists, we do nothing, but we do validate that their UID and GID # exist. if pwrec: if uid is not None and uid != pwrec.pw_uid: raise ChParameterError("User {0} exists, but does not have expected UID={1}".format(user, uid)) if gid is not None and lookup_group(gid).gr_gid != pwrec.pw_gid: raise ChParameterError("User {0} exists, but does not have expected GID={1}".format(user, gid)) return # Now, we need to create the user, and optionally the group. if gid is not None: create_group = False try: newgid = lookup_group(gid).gr_name # always use name except ChNotFoundError: create_group = True try: newgid = int(gid) # must be a number at this point except ValueError: # We don't report the numeric error, because we *know* there is no such group # and we won't create a symbolic group with a randomly-created number. 
raise ChParameterError("Group does not exist: {0}".format(gid)) if create_group: groupadd(user, newgid) newgid = lookup_group(user).gr_name gid = newgid # always will be the group name # Test to see if the user directory itself already exists, which should be the case. # If it doesn't, then use the default, if provided. home = None if default_home: udd = get_user_directories_directory() if not os.path.exists(os.path.join(udd, user)): home = default_home useradd(user, uid, gid, home) def _assure_dir_for(path, pwrec, gid): # gid is present so we know if we need to set group modes, but # we always use the one in pwrec if os.path.exists(path): return _assure_dir_for(os.path.dirname(path), pwrec, gid) os.mkdir(path, 0o755 if not gid else 0o775) if pwrec: os.chown(path, pwrec.pw_uid, pwrec.pw_gid if gid else -1) def open_foruser(filename, mode = 'r', uid = None, gid = None, exists_ok = True): """ Similar to open(), but assures all directories exist (similar to os.makedirs) and assures that all created objects are writable by the given user, and optionally by the given group (causing mode to be set accordingly). """ if uid: pwrec = lookup_user(uid, gid) else: pwrec = None gid = None rp = os.path.realpath(filename) _assure_dir_for(os.path.dirname(rp), pwrec, gid) fobj = open(rp, mode) if pwrec: os.chown(rp, pwrec.pw_uid, pwrec.pw_gid if gid else -1) os.chmod(rp, 0o644 if not gid else 0o664) return fobj SIGDICT = dict((v,k) for k,v in sorted(signal.__dict__.items()) if k.startswith('SIG') and not k.startswith('SIG_')) def remove_for_recreate(filename): """ Indicates the intention to recreate the file at the given path. This is function can be used in advance to assure that a) any existing file is gone, and b) full permissions and directories exist for creation of a new file in it's place """ ex = maybe_remove(filename, strict = True) open_foruser(filename, mode='w').close() os.remove(filename) def get_signal_name(signum): return SIGDICT.get(signum, "SIG%d" % signum) def get_signal_number(signame): sup = signame.upper() if sup.startswith('SIG') and not sup.startswith('SIG_'): num = getattr(signal, sup, None) else: try: num = int(signame) except ValueError: num = None if num is None: raise ChParameterError("Invalid signal specifier: " + str(signame)) return num ================================================ FILE: chaperone/cutil/notify.py ================================================ import asyncio import socket import os import re from chaperone.cutil.servers import Server, ServerProtocol from chaperone.cutil.misc import maybe_remove from chaperone.cutil.logging import debug _RE_NOTIFY = re.compile(r'^([A-Za-z]+)=(.+)$') class NotifyProtocol(ServerProtocol): def datagram_received(self, data, addr): lines = data.decode().split("\n") for line in lines: m = _RE_NOTIFY.match(line) if m: self.events.onNotify(self.owner, m.group(1), m.group(2)) class NotifyListener(Server): def _create_server(self): loop = asyncio.get_event_loop() return loop.create_datagram_endpoint(NotifyProtocol.buildProtocol(self), family=socket.AF_UNIX) @property def is_client(self): return False @property def socket_name(self): return self._socket_name @property def bind_name(self): if self._socket_name.startswith('@'): return self._socket_name.replace('@', "\0") return self._socket_name def __init__(self, socket_name, **kwargs): super().__init__(**kwargs) self._socket_name = socket_name @asyncio.coroutine def send(self, message): if not self.server: yield from self.run() self.server[0].sendto(message.encode(), self.bind_name) 
@asyncio.coroutine def server_running(self): (transport, protocol) = self.server bindname = self.bind_name # Clients connect to an existing socket if self.is_client: loop = asyncio.get_event_loop() yield from loop.sock_connect(transport._sock, bindname) return # Servers set up a binding to a new one transport._sock.bind(bindname) if not bindname.startswith("\0"): # if not abstract socket os.chmod(bindname, 0o777) def close(self): super().close() if not (self.is_client or self._socket_name.startswith('@')): maybe_remove(self._socket_name) # A lot like a socket server, there are only subtle differences. class NotifyClient(NotifyListener): @property def is_client(self): return True # A sink to specific notify messages. Can operate with or without a client, # and has multiple levels of support. class NotifySink: NSLEV = 0 # level 0: nothing NSLEV = 1 # level 1: only READY notifications NSLEV = 2 # level 2: READY and STATUS NSLEV = 3 # level 3: adds ERRNO, STARTING and STOPPING messages _LEVS = [ set(), {'READY'}, {'READY', 'STATUS'}, {'READY', 'STATUS', 'ERRNO', 'STOPPING'}, ] _client = None _lev = None _sent = None def __init__(self): self.level = 99 self._sent = set() @property def level(self): try: return self._LEVS.index(self._lev) except ValueError: return None @level.setter def level(self, val): if val > len(self._LEVS): val = len(self._LEVS) - 1 self._lev = self._LEVS[val].copy() def enable(self, ntype): self._lev.add(ntype.upper()) def disable(self, ntype): self._lev.discard(ntype.upper()) def error(self, val): if not self.sent("ERRNO"): self.send("ERRNO", int(val)) def stopping(self): if not self.sent("STOPPING"): self.send("STOPPING", 1) def ready(self): if not self.sent("READY"): self.send("READY", 1) def status(self, statmsg): self.send("STATUS", statmsg) def mainpid(self): self.send("MAINPID", os.getpid()) def sent(self, name): return name in self._sent def send(self, name, val): if name not in self._lev: return self._sent.add(name) if self._client: debug("queueing '{0}={1}' to notify socket '{2}'".format(name, val, self._client.socket_name)) asyncio.async(self._do_send("{0}={1}".format(name, val))) @asyncio.coroutine def _do_send(self, msg): if self._client: yield from self._client.send(msg) @asyncio.coroutine def connect(self, socket = None): """ Connects to the notify socket. However, if we can't, it's not considered an error. We just return False. """ self.close() if socket is None: if "NOTIFY_SOCKET" not in os.environ: return False socket = os.environ["NOTIFY_SOCKET"] self._client = NotifyClient(socket, onClose = lambda which,exc: self.close(), onError = lambda which,exc: debug("{0} error, notifications disabled".format(socket))) try: yield from self._client.run() except OSError as ex: debug("could not connect to notify socket '{0} ({1})".format(socket, ex)) self.close() return False return True def close(self): if not self._client: return self._client.close() self._client = None ================================================ FILE: chaperone/cutil/patches.py ================================================ import inspect import importlib # This module contains patches to Python. A patch wouldn't appear here if it didn't have major impact, # and they are constructed and researched carefully. Avoid if possible, please. # Patch routine for patching classes. Ignore ALL exceptions, since there could be any number of # reasons why a distribution may not allow such patching (though most do). 
# Exact code is compared, so there is little chance of an error in deciding if the patch is relevant.

def PATCH_CLASS(module, clsname, member, oldstr, newfunc):
    try:
        cls = getattr(importlib.import_module(module), clsname)
        should_be = ''.join(inspect.getsourcelines(getattr(cls, member))[0])
        if should_be == oldstr:
            setattr(cls, member, newfunc)
    except Exception:
        pass

# PATCH for Issue23140: https://bugs.python.org/issue23140
# WHERE    asyncio
# IMPACT   Eliminates exceptions during process termination
# WHY      There is no workaround except upgrading to Python 3.4.3, which dramatically affects
#          distro compatibility.  Mostly, this benefits Ubuntu 14.04LTS.

OLD_process_exited = """    def process_exited(self):
        # wake up futures waiting for wait()
        returncode = self._transport.get_returncode()
        while self._waiters:
            waiter = self._waiters.popleft()
            waiter.set_result(returncode)
"""

def NEW_process_exited(self):
    # wake up futures waiting for wait()
    returncode = self._transport.get_returncode()
    while self._waiters:
        waiter = self._waiters.popleft()
        if not waiter.cancelled():
            waiter.set_result(returncode)

PATCH_CLASS('asyncio.subprocess', 'SubprocessStreamProtocol', 'process_exited',
            OLD_process_exited, NEW_process_exited)


================================================
FILE: chaperone/cutil/proc.py
================================================
import os

from chaperone.cutil.misc import get_signal_name

class ProcStatus(int):

    _other_error = None
    _errno = None

    def __new__(cls, val):
        try:
            intval = int(val)
        except ValueError:
            rval = int.__new__(cls, 0)
            rval._other_error = str(val)
            return rval
        return int.__new__(cls, intval)

    @property
    def exited(self):
        return os.WIFEXITED(self)

    @property
    def signaled(self):
        return os.WIFSIGNALED(self)

    @property
    def stopped(self):
        return os.WIFSTOPPED(self)

    @property
    def continued(self):
        return os.WIFCONTINUED(self)

    @property
    def exit_status(self):
        status = (os.WIFEXITED(self) or None) and os.WEXITSTATUS(self)
        if not status and self._errno:
            return 1            # default to exit_status = 1 in the case of an errno value
        return status

    @property
    def normal_exit(self):
        return self.exit_status == 0 and not self._other_error

    @property
    def errno(self):
        "Map situation to an errno, even if contrived, unless one was provided."
        if self._errno is not None:
            return self._errno
        if self.signal:
            return 4            #EINTR
        return 8                #ENOEXEC

    @errno.setter
    def errno(self, val):
        self._errno = val

    @property
    def exit_message(self):
        es = self.exit_status
        if es is not None:
            return os.strerror(es)
        return None

    @property
    def signal(self):
        if os.WIFSTOPPED(self):
            return os.WSTOPSIG(self)
        if os.WIFSIGNALED(self):
            return os.WTERMSIG(self)
        return None

    @property
    def briefly(self):
        if self.signaled or self.stopped:
            return get_signal_name(self.signal)
        if self.exited:
            return "exit({0})".format(self.exit_status)
        return '?'

    def __format__(self, spec):
        if spec:
            return int.__format__(self, spec)
        # no format spec given; fall back to the brief human-readable status
        return self.briefly


================================================
FILE: chaperone/cutil/servers.py
================================================
import asyncio
from functools import partial

from chaperone.cutil.events import EventSource

class ServerProtocol(asyncio.Protocol):

    @classmethod
    def buildProtocol(cls, owner, **kwargs):
        return partial(cls, owner, **kwargs)

    def __init__(self, owner, **kwargs):
        """
        Copy keywords directly into attributes when each protocol is created.  This creates
        flexibility so that various servers can pass information to protocols.
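        For example (illustrative): MyProtocol.buildProtocol(server, loglevel=7) returns a
        factory whose protocol instances are constructed with .loglevel == 7 already set
        (both names are hypothetical).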
""" super().__init__() self.owner = owner self.events = self.owner.events for k,v in kwargs.items(): setattr(self, k, v) def connection_made(self, transport): self.transport = transport self.events.onConnection(self.owner) def error_received(self, exc): self.events.onError(self.owner, exc) self.events.onClose(self.owner, exc) def connection_lost(self, exc): self.events.onClose(self.owner, exc) class Server: server = None def __init__(self, **kwargs): self.events = EventSource(**kwargs) @asyncio.coroutine def run(self): self.loop = asyncio.get_event_loop() self.server = yield from self._create_server() yield from self.server_running() @asyncio.coroutine def server_running(self): pass def close(self): s = self.server if s: if isinstance(s, tuple): s = s[0] s.close() ================================================ FILE: chaperone/cutil/syslog.py ================================================ import asyncio import socket import os import re import sys import logging from time import strftime from functools import partial from chaperone.cutil.logging import info, warn, debug, set_custom_handler from chaperone.cutil.misc import lazydict, maybe_remove, remove_for_recreate from chaperone.cutil.servers import ServerProtocol, Server from chaperone.cutil.syslog_handlers import LogOutput import chaperone.cutil.syslog_info as syslog_info _RE_SPEC = re.compile(r'^(?P!?)(?:/(?P.+)/|\[(?P.+)\]|(?P[,*0-9a-zA-Z]+))\.(?P!?=?)(?P[*a-zA-Z]+)$') _RE_SPECSEP = re.compile(r' *; *') # The following is based on RFC3164 with some tweaks to deal with anomalies. # One anomaly worth mentioning is that some log sources append newlines (or whitespace) to their messages, # or include embedded newlines. Here is a good JIRA discussion about how Apache dealt with this, including some background: # https://issues.apache.org/jira/browse/LOG4NET-370 # At present we merely DISCARD whitespace from the end of messages, but don't attempt to break multiple # messages into separate lines so that UDP syslog destinations don't have to deal with packet reordering, # which is a real pain for some people, with an example here: # https://redmine.pfsense.org/issues/1938 _RE_RFC3164 = re.compile(r'^<(?P\d+)>(?P\w{3} [ 0-9][0-9] \d\d:\d\d:\d\d) (?:(?P[^ :\[]+) )?(?P[^ :\[]+)(?P[:\[ ].+?)\s*$', re.DOTALL) class _syslog_spec_matcher: """ This class supports matching a classic syslog.conf spec: . where: facility is a list of comma-separated faclities, or '*' priority is a priority (meaning >=priority) or =priority (meaning exactly that priority) either may be preceded by '!' to invert the match. And the extensions: /regex/. where regex will match the entire message [prog]. where prog will match the program specifier, if any One or more of the above can be combined, separated by semicolons. Note that the syslogd semantics are hard to actually figure out, even if you scour the web. So, here are some rules. The semicolon "joins" constraints by combining all negative constraints (those which omit facilities or priorities) and positive constraints separately. The result will be logged ONLY if all the positive constraints are true and all of the negative constraints are false! 
    So,
        *.!emerg                LOGS NOTHING (missing inclusions)
        *.*;*.!emerg            logs everything but emerg
        *.info;![cron].*        logs all info or higher, but omits everything from program "cron"
        *.*;![cron].!=info      Omits the info messages from any program BUT cron
        [cron].*;*.!info        includes all cron messages except those of info and above

    More specifically:
        *.info                  Includes info through emergency (6->0) but not Debug
        *.!info                 Excludes info through emergency but does not exclude debug
        *.=info                 Includes just info itself
        *.!=info                Excludes everything BUT info
        !f.!=info               Excludes everything BUT info from everything BUT f

    Why all this bother?
    1.  Basic cases are pretty easy to read and understand.
    2.  Negations can be understood if documented, and are useful.
    3.  I don't want to introduce a completely new syntax.
    4.  Somewhere out here, there is some nerdy OCD guy who will say "But wait, your selector
        format is so CLOSE to the syslog format that you MUST support it with the same semantics
        or you're going to alienate [me]."  Just nipping that in the bud.
    """

    __slots__ = ('_regexes', '_match', 'debugexpr', 'selector')

    def __init__(self, selector, minimum_priority = None):
        self.selector = selector
        self._compile(minimum_priority)

    def reset_minimum_priority(self, minimum_priority = None):
        """
        Recompile the spec using a new minimum priority.  minimum_priority may be None to
        eliminate any such minimum from having an effect and reverting to the exact selectors.
        """
        self._compile(minimum_priority)

    def _compile(self, minimum_priority):
        self._regexes = []

        pieces = _RE_SPECSEP.split(self.selector)

        # Build the list of negations and positive expressions
        neg = list()
        pos = list()

        for p in pieces:
            self._init_spec(p, neg, pos, minimum_priority)

        if not pos:
            self._buildex("False")
        elif not neg:
            self._buildex(" or ".join(pos))
        else:
            self._buildex("(" + (" and ".join(neg)) + ") and (" + (" or ".join(pos)) + ")")

    def _buildex(self, expr):
        # Perform some quick peephole optimization, then compile
        nexpr = expr.replace("True and ", "").replace(" and True", "")
        nexpr = nexpr.replace("not True", "False").replace(" and ((True))", "")
        nexpr = nexpr.replace("False or ", "").replace(" or False", "")
        self.debugexpr = nexpr
        self._match = eval("lambda s,p,f,g,buf: " + nexpr)

    def _init_spec(self, spec, neg, pos, minpri):
        match = _RE_SPEC.match(spec)
        if not match:
            raise Exception("Invalid log spec syntax: " + spec)

        # Compile an expression to match
        gdict = match.groupdict()

        if gdict['regex'] is not None:
            self._regexes.append(re.compile(gdict['regex'], re.IGNORECASE))
            c1 = 'bool(s._regexes[%d].search(buf))' % (len(self._regexes) - 1)
        elif gdict['prog'] is not None:
            c1 = '(g and "%s" == g.lower())' % gdict['prog'].lower()
        elif gdict['fac'] != '*':
            faclist = [syslog_info.FACILITY_DICT.get(f) for f in gdict.get('fac', '').lower().split(',')]
            if None in faclist:
                raise Exception("Invalid logging facility code, %s: %s" % (gdict['fac'], spec))
            c1 = '(' + ' or '.join(['f==%d' % f for f in faclist]) + ')'
        else:
            c1 = 'True'

        pri = gdict['pri']
        pfx = gdict.get('pfx', '')

        if pri == '*':
            c2 = 'True'
        else:
            prival = syslog_info.PRIORITY_DICT.get(pri.lower())
            if prival is None:
                raise Exception("Invalid logging priority, %s: %s" % (pri, spec))
            if minpri is not None and minpri > prival:
                prival = minpri
            if '=' in pfx:
                c2 = "p==%d" % prival
            else:
                c2 = "p<=%d" % prival

        fpfx = gdict.get('fpfx', '')

        # Assess negatives and positives.
        # neg will contain "EXCLUDE IF" and pos will contain "INCLUDE IF"

        if '!'
in fpfx: # Double exclusion means to exclude everything except the given priority from # everything except the given facility if '!' in pfx: neg.append("(not %s and not %s)" % (c1, c2)) else: neg.append("not (%s and %s)" % (c1, c2)) elif '!' in pfx: neg.append("(not %s or not %s)" % (c1, c2)) else: pos.append("(%s and %s)" % (c1, c2)) def match(self, msg, prog = None, priority = syslog_info.LOG_ERR, facility = syslog_info.LOG_SYSLOG): result = self._match(self, priority, facility, prog, msg) #print('MATCH', prog, result, self.debugexpr) return result class SyslogServerProtocol(ServerProtocol): def datagram_received(self, data, addr): self.data_received(data) def data_received(self, data): try: message = data.decode('ascii', 'ignore') except Exception as ex: self._output("Could not decode SYSLOG record data") sys.stdout.flush() return messages = message.split("\0") for m in messages: if m: self.owner.parse_to_output(m) sys.stdout.flush() class SyslogServer(Server): _loglist = list() _server = None _log_socket = None _capture_handler = None # our capture handler to redirect python logs def __init__(self, logsock = "/dev/log", datagram = True, **kwargs): super().__init__(**kwargs) self._datagram = datagram self._log_socket = logsock try: os.remove(logsock) except Exception: pass def _create_server(self): if not self._datagram: return self.loop.create_unix_server( SyslogServerProtocol.buildProtocol(self), path=self._log_socket) # Assure we will be able to bind later remove_for_recreate(self._log_socket) return self.loop.create_datagram_endpoint( SyslogServerProtocol.buildProtocol(self), family=socket.AF_UNIX) @asyncio.coroutine def server_running(self): # Bind the socket if it's a datagram if self._datagram: transport = self.server[0] transport._sock.bind(self._log_socket) os.chmod(self._log_socket, 0o777) def close(self): self.capture_python_logging(False) for logitem in self._loglist: for m in logitem[1]: m.close() super().close() maybe_remove(self._log_socket) def configure(self, config, minimum_priority = None): loglist = self._loglist = list() lc = config.get_logconfigs() for k,v in lc.items(): matcher = _syslog_spec_matcher(v.selector or '*.*', minimum_priority) loglist.append( (matcher, LogOutput.getOutputHandlers(v)) ) def reset_minimum_priority(self, minimum_priority = None): """ Specifies a new minimum priority for logging. Recompiles all selectors, so it's best to provide this when the configure is done, if possible. 
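        For example (illustrative): with a minimum_priority equal to the numeric value of
        'info', a '*.err' selector is recompiled to behave like '*.info', so that info and
        all more severe messages are logged.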
""" for m in self._loglist: m[0].reset_minimum_priority(minimum_priority) def capture_python_logging(self, enable = True): if enable: if not self._capture_handler: self._capture_handler = CustomSysLog(self) set_custom_handler(self._capture_handler) elif self._capture_handler: set_custom_handler(self._capture_handler, False) self._capture_handler = None def parse_to_output(self, msg): # For a description of what a valid syslog line can look like, see: # http://www.rsyslog.com/doc/syslog_parsing.html match = _RE_RFC3164.match(msg) if not match: pri = syslog_info.LOG_SYSLOG * 8 + syslog_info.LOG_ERR logattrs = { 'tag': '?', 'format_error': True, 'host' : None } else: logattrs = match.groupdict() pri = int(logattrs['pri']) if logattrs['tag'][0] == '/': logattrs['tag'] = os.path.basename(logattrs['tag']) logattrs['raw'] = msg self.writeLog(logattrs, priority = pri & 7, facility = pri // 8) def writeLog(self, logattrs, priority, facility): #print("\nWRITELOG", priority, facility, logattrs) for m in self._loglist: if m[0].match(logattrs['raw'], logattrs['tag'], priority, facility): for logger in m[1]: logger.writeLog(logattrs, priority, facility) class SysLogFormatter(logging.Formatter): """ Handles formatting Python output in the same format as normal syslog daemons. """ def __init__(self, program, pid): self.default_program = program self.default_pid = pid super().__init__('{asctime} {program_name}[{program_pid}]: {message}', style='{') def format(self, record): if not hasattr(record, 'program_name'): setattr(record, 'program_name', self.default_program) if not hasattr(record, 'program_pid'): setattr(record, 'program_pid', self.default_pid) return super().format(record) def formatTime(self, record, datefmt=None): timestr = strftime('%b %d %H:%M:%S', self.converter(record.created)) # this may be picky, but people parse syslogs, let's not annoy them if timestr[3:5] == ' 0': return timestr.replace(' 0', ' ', 1) return timestr class CustomSysLog(logging.Handler): """ A custom Python logging class that makes it easy to redirect Python output to our internal syslog capture handler. 
""" PRIORITY_NAMES = { "ALERT": syslog_info.LOG_ALERT, "CRIT": syslog_info.LOG_CRIT, "CRITICAL": syslog_info.LOG_CRIT, "DEBUG": syslog_info.LOG_DEBUG, "EMERG": syslog_info.LOG_EMERG, "ERR": syslog_info.LOG_ERR, "ERROR": syslog_info.LOG_ERR, # DEPRECATED "INFO": syslog_info.LOG_INFO, "NOTICE": syslog_info.LOG_NOTICE, "PANIC": syslog_info.LOG_EMERG, # DEPRECATED "WARN": syslog_info.LOG_WARNING, # DEPRECATED "WARNING": syslog_info.LOG_WARNING, } def __init__(self, owner): super().__init__(logging.DEBUG) # enable all levels since we manage filtering ourselves self._owner = owner self.setFormatter(SysLogFormatter(sys.argv[0] or '-', os.getpid())) def emit(self, record): facility = getattr(record, '_facility', syslog_info.LOG_LOCAL5) priority = self.PRIORITY_NAMES.get(record.levelname, syslog_info.LOG_ERR) self._owner.parse_to_output("<{0}>".format(facility << 3 | priority) + self.format(record)) ================================================ FILE: chaperone/cutil/syslog_handlers.py ================================================ import sys import os import socket import asyncio from time import time, localtime, strftime from chaperone.cutil.misc import lazydict, open_foruser from chaperone.cutil.syslog_info import get_syslog_info _our_hostname = socket.gethostname() class LogOutput: name = None config_match = lambda c: False _cls_handlers = lazydict() _cls_reghandlers = list() @classmethod def register(cls, handlercls): cls._cls_reghandlers.append(handlercls) @classmethod def getOutputHandlers(cls, config): return list(filter(None, [h.getHandler(config) for h in cls._cls_reghandlers])) @classmethod def getName(cls, config): return cls.name @classmethod def matchesConfig(cls, config): return config.enabled and cls.config_match(config) @classmethod def getHandler(cls, config): if not cls.matchesConfig(config): return None name = cls.getName(config) if name is None: return None return cls._cls_handlers.setdefault(name, lambda: cls(config)) def __init__(self, config): self.name = config.name self.config = config def close(self): pass def writeLog(self, logattrs, priority, facility): if logattrs.get('format_error'): msg = "??" + logattrs['raw'] else: # Note that 'rest' always starts with a ':', '[' or ' '. 
msg = (logattrs['date'] + ' ' + (self.config.logrec_hostname or logattrs['host'] or _our_hostname) + ' ' + logattrs['tag'] + logattrs['rest']) if self.config.extended: msg = get_syslog_info(facility, priority) + " " + msg self.write(msg) def write(self, data): h = self.handle h.write(data) h.write("\n") h.flush() class StdoutHandler(LogOutput): name = "sys:stdout" handle = sys.stdout config_match = lambda c: c.stdout LogOutput.register(StdoutHandler) class StderrHandler(LogOutput): name = "sys:stderr" handle = sys.stderr config_match = lambda c: c.stderr LogOutput.register(StderrHandler) class RemoteClientProtocol: def __init__(self, loop): self.loop = loop self.transport = None def connection_made(self, transport): self.transport = transport def send(self, message): self.transport.sendto(message.encode()) def datagram_received(self, data, addr): pass def error_received(self, exc): pass def connection_lost(self, exc): self.transport = None def close(self): if self.transport: self.transport.close() class RemoteHandler(LogOutput): config_match = lambda c: c.syslog_host is not None _pending = None # a pending future to setup this handler _protocol = None # protocol for this logger @classmethod def getName(cls, config): return "syslog_host:" + config.syslog_host @asyncio.coroutine def setup_handler(self): loop = asyncio.get_event_loop() connect = loop.create_datagram_endpoint(lambda: RemoteClientProtocol(loop), remote_addr=(self.config.syslog_host, 514)) (transport, protocol) = yield from connect self._pending = None self._protocol = protocol def __init__(self, config): super().__init__(config) self._pending = asyncio.async(self.setup_handler()) def write(self, data): if self._protocol: self._protocol.send(data) def close(self): if self._pending: if not self._pending.cancelled(): self._pending.cancel() self._pending = None if self._protocol: self._protocol.close() self._protocol = None LogOutput.register(RemoteHandler) class FileHandler(LogOutput): config_match = lambda c: c.file is not None CHECK_INTERVAL = 60 _orig_filename = None _cur_filename = None _next_check = 0 _stat = None @classmethod def getName(cls, config): return 'file:' + config.file def __init__(self, config): super().__init__(config) self._orig_filename = os.path.abspath(config.file) self._maybe_reopen() def _maybe_reopen(self): new_filename = strftime(self.config.file, localtime()) if new_filename != self._cur_filename or not self._stat: reopen = True else: try: newstat = os.stat(new_filename) except FileNotFoundError: newstat = None reopen = not newstat or (newstat.st_dev != self._stat.st_dev or newstat.st_ino != self._stat.st_ino) if not reopen: return if self._stat: self.handle.flush() self.handle.close() self.handle = self._stat = None env = self.config.environment self._cur_filename = new_filename self.handle = open_foruser(new_filename, 'w' if self.config.overwrite else 'a', env.uid, env.gid) self._stat = os.fstat(self.handle.fileno()) def close(self): if self._stat: self.handle.close() self._stat = None self._next_check = 0 self._cur_filename = None def write(self, data): if self._next_check <= time(): self._maybe_reopen() self._next_check = time() + self.CHECK_INTERVAL super().write(data) LogOutput.register(FileHandler) ================================================ FILE: chaperone/cutil/syslog_info.py ================================================ import logging from logging.handlers import SysLogHandler # Copy all syslog levels for k,v in SysLogHandler.__dict__.items(): if k.startswith('LOG_'): globals()[k] = v 
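# A syslog PRI value encodes both fields as (facility * 8 + priority).  For example
# (illustrative), <86> decodes to facility 10 ('authpriv' below) and priority 6 ('info'),
# since 86 // 8 == 10 and 86 & 7 == 6, the same arithmetic used by parse_to_output().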
FACILITY = ('kern', 'user', 'mail', 'daemon', 'auth', 'syslog', 'lpr', 'news',
            'uucp', 'cron', 'authpriv', 'ftp', 'ntp', 'audit', 'alert', 'altcron',
            'local0', 'local1', 'local2', 'local3', 'local4', 'local5', 'local6', 'local7')

FACILITY_DICT = {FACILITY[i]:i for i in range(len(FACILITY))}

PRIORITY = ('emerg', 'alert', 'crit', 'err', 'warn', 'notice', 'info', 'debug')

PRIORITY_DICT = {PRIORITY[i]:i for i in range(len(PRIORITY))}
PRIORITY_DICT['warning'] = PRIORITY_DICT['warn']
PRIORITY_DICT['error'] = PRIORITY_DICT['err']

# Python equivalent for PRIORITY settings
PRIORITY_PYTHON = (logging.CRITICAL, logging.CRITICAL, logging.CRITICAL, logging.ERROR,
                   logging.WARNING, logging.INFO, logging.INFO, logging.DEBUG)

def get_syslog_info(facility, priority):
    try:
        f = FACILITY[facility]
    except IndexError:
        f = '?'
    try:
        return f + '.' + PRIORITY[priority]
    except IndexError:
        return f + '.?'

def syslog_to_python_lev(lev):
    if lev < 0 or lev >= len(PRIORITY):
        return logging.DEBUG
    return PRIORITY_PYTHON[lev]


================================================
FILE: chaperone/exec/__init__.py
================================================
# Placeholder


================================================
FILE: chaperone/exec/chaperone.py
================================================
"""
Lightweight process and service manager

Usage:
    chaperone [--config=<file_or_dir>]
              [--user=<name> | --create-user=<newuser>] [--default-home=<dir>]
              [--exitkills | --no-exitkills] [--ignore-failures] [--log-level=<level>]
              [--no-console-log] [--debug] [--force] [--disable-services]
              [--no-defaults] [--no-syslog] [--version] [--show-dependencies]
              [--task]
              [<command> [<args> ...]]

Options:
    --config=<file_or_dir>   Specifies file or directory for configuration (default is /etc/chaperone.d)
    --create-user=<newuser>  Create a new user with an optional UID (name or name/uid), then run as
                             if --user was specified.
    --default-home=<dir>     If the --create-user home directory does not exist, then use this directory
                             as the default home directory for the new user instead.
    --debug                  Turn on debugging features (same as --log-level=DEBUG)
    --disable-services       Does not run any services, only the given command (troubleshooting)
    --exitkills              When given command exits, kill the system (default if container running interactive)
    --force                  If chaperone normally refuses, do it anyway and take the risk.
    --ignore-failures        Assumes that "ignore_failures:true" was specified on all services (troubleshooting)
    --log-level=<level>      Specify log level filtering, such as INFO, DEBUG, etc.
    --no-console-log         Disable all logging to stdout and stderr (useful when the container
                             produces non-log output)
    --no-exitkills           When given command exits, don't kill the system (default if container running daemon)
    --no-defaults            Ignores any default options in the CHAPERONE_OPTIONS environment variable
    --no-syslog              The internal syslog server will not be started (useful when a separate
                             syslog daemon is started later).
    --user=<name>            Start first process as user (else root)
    --show-dependencies      Shows a list of service dependencies then exits
    --task                   Run in task mode (see below).
    --version                Display version and exit

Notes:
  * If a user is specified, then the --config is relative to the user's home directory.
  * Chaperone makes the assumption that an interactive command should shut down the system upon exit,
    but a non-interactive command should not.  You can reverse this assumption with options.
  * --task is used in cases where you wish to execute a script in the container environment for
    utility purposes, such as a script to extract data from the container, etc.
    This switch is equivalent to "--log err --exitkills --disable-services" and also requires
    a command to be specified as usual.
"""

# perform any patches first
import chaperone.cutil.patches

# regular code begins
import sys
import shlex
import os
import re
import asyncio
import subprocess

from functools import partial
from docopt import docopt

from chaperone.cproc import TopLevelProcess
from chaperone.cproc.version import VERSION_MESSAGE
from chaperone.cutil.config import Configuration, ServiceConfig
from chaperone.cutil.env import ENV_INTERACTIVE, ENV_TASK_MODE, ENV_CHAP_OPTIONS
from chaperone.cutil.misc import maybe_create_user
from chaperone.cutil.logging import warn, info, debug, error

MSG_PID1 = """Normally, chaperone expects to run as PID 1 in the 'init' role.
If you want to go ahead anyway, use --force."""

MSG_NOTHING_TO_DO = """There are no services configured to run, nor is there a command specified
on the command line to run as an application.  You need to do one or the other."""

# We require usernames to start with a letter or underscore.  This is consistent with default Linux
# rules.  Yeah I know, regexes can get complicated, but they can also do a lot of work to make the
# rest of the code simpler.  Note that this matches strings like /foo:bar as a path of "/foo" with a
# groupname of bar, but the colon can be escaped if you actually have a filename that contains
# a colon like "/foo\:bar".

RE_CREATEUSER = re.compile(
    r'''(?P<user>[a-z_][a-z0-9_-]*)              # ALWAYS start with the username
        (?::(?P<file>/(?:\\:|[^:])+))?           # File is next if it's :/path
        (?::(?P<uid>\d*))?                       # Either /uid or :uid introduces a uid (number may be missing)
        (?::(?P<gid>[a-z_][a-z0-9_-]*|\d+)?)?    # followed by an optional GID
        $''',
    re.IGNORECASE | re.X)

def main_entry():

    # parse these first since we may disable the environment check
    options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE)

    if options['--task']:
        options['--disable-services'] = True
        options['--no-console-log'] = not options['--debug']
        options['--exitkills'] = True
        os.environ[ENV_TASK_MODE] = '1'

    if not options['--no-defaults']:
        envopts = os.environ.get(ENV_CHAP_OPTIONS)
        if envopts:
            try:
                defaults = docopt(__doc__, argv=(shlex.split(envopts)), options_first=True)
            except SystemExit as ex:
                print("Error occurred in {0} environment variable: {1}".format(ENV_CHAP_OPTIONS, envopts))
                raise
            # Replace any "false" command option with the default version.
            options.update({k:defaults[k] for k in options.keys() if not options[k]})

    if options['--config'] is None:
        options['--config'] = '/etc/chaperone.d'

    if options['--debug']:
        options['--log-level'] = "DEBUG"
        print('COMMAND OPTIONS', options)

    force = options['--force']

    if not force and os.getpid() != 1:
        print(MSG_PID1)
        exit(1)

    tty = sys.stdin.isatty()
    os.environ[ENV_INTERACTIVE] = "1" if tty else "0"

    kill_switch = options['--exitkills'] or (False if options['--no-exitkills'] else tty)

    cmd = options['<command>']

    if options['--task'] and not cmd:
        error("--task can only be used if a shell command is specified as an argument")
        exit(1)

    # It's possible that BOTH --create-user and --user exist due to the way _CHAP_OPTIONS is overlaid
    # with command line options.  So, in such a case, note that we ignore --user.
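    # For example (illustrative), RE_CREATEUSER accepts values such as:
    #     webuser                     just a username
    #     webuser:1000:www-data       username with a UID and a group
    #     webuser:/data/www\:site     username with an escaped file path whose owner
    #                                 determines the UID/GID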
    create = options['--create-user']

    if create is None:
        user = options['--user']
    else:
        match = RE_CREATEUSER.match(create)
        if not match:
            print("Invalid format for --create-user argument: {0}".format(create))
            exit(1)
        udata = match.groupdict()
        try:
            maybe_create_user(udata['user'], udata['uid'] or None, udata['gid'] or None,
                              udata['file'] and udata['file'].replace(r'\:', ':'),
                              options['--default-home'])
        except Exception as ex:
            print("--create-user failure: {0}".format(ex))
            exit(1)
        user = udata['user']

    extras = dict()
    if options['--ignore-failures']:
        extras['ignore_failures'] = True
    if options['--no-syslog']:
        extras['enable_syslog'] = False

    try:
        config = Configuration.configFromCommandSpec(options['--config'], user=user,
                                                     extra_settings=extras,
                                                     disable_console_log=options['--no-console-log'])
        services = config.get_services()
    except Exception as ex:
        error(ex, "Configuration Error: {0}", ex)
        exit(1)

    if not (services or cmd):
        print(MSG_NOTHING_TO_DO)
        exit(1)

    if options['--show-dependencies']:
        dg = services.get_dependency_graph()
        print("\n".join(dg))
        exit(0)

    if not cmd and options['--disable-services']:
        error("--disable-services not valid without specifying a command to run")
        exit(1)

    # Now, create the tlp and proceed
    tlp = TopLevelProcess(config)
    if options['--log-level']:
        tlp.force_log_level(options['--log-level'])

    if tlp.debug:
        config.dump()

    # Set proctitle and go

    proctitle = "[" + os.path.basename(sys.argv[0]) + "]"
    if cmd:
        proctitle += " " + cmd
    try:
        from setproctitle import setproctitle
        setproctitle(proctitle)
    except ImportError:
        pass

    # Define here so we can share scope

    @asyncio.coroutine
    def startup_done():

        if options['--ignore-failures']:
            warn("ignoring failures on all service startups due to --ignore-failures")
        if options['--disable-services'] and services:
            warn("services will not be configured due to --disable-services")

        extra_services = None
        if cmd:
            cmdsvc = ServiceConfig.createConfig(config=config,
                                                name="CONSOLE",
                                                exec_args=[cmd] + options['<args>'],
                                                uid=user,
                                                kill_signal=("SIGHUP" if tty else None),
                                                setpgrp=not tty,
                                                exit_kills=kill_switch,
                                                service_groups="IDLE",
                                                ignore_failures=not options['--task'],
                                                stderr='inherit',
                                                stdout='inherit')
            extra_services = [cmdsvc]

        yield from tlp.run_services(extra_services, disable_others = options['--disable-services'])

        tlp.signal_ready()

    tlp.run_event_loop(startup_done())


================================================
FILE: chaperone/exec/envcp.py
================================================
"""
Copy text files and expand environment variables as you copy.

Usage:
    envcp [options] FILE ...

Options:
    --strip suffix     If the destination is a directory, strip "suffix" off source files.
    --overwrite        Overwrite destination files rather than exiting with an error.
    -v --verbose       Display progress.
    -a --archive       Preserve permissions when copying.
    --shell-enable     Enable shell escapes using backticks, as in $(`ls`)
    --xprefix char     The leading string to identify a variable.  Defaults to '$'
    --xgrouping chars  Grouping types which are recognized, defaults to '({'

Copies a file to a destination file (two arguments), or any number of files to a destination
directory.   As files are copied, environment variables will be expanded.  If the destination
is a directory, then --strip can be used to specify a file suffix to be stripped off.

Formats allowed are $(ENV) or ${ENV}.  The bareword $ENV is not recognized.
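For example (illustrative):

    envcp --strip .tpl templates/app.conf.tpl /etc/app/

copies templates/app.conf.tpl to /etc/app/app.conf, expanding $(VAR) and ${VAR}
references from the environment as it copies.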
""" # perform any patches first import chaperone.cutil.patches # regular code begins import sys import os import asyncio import shlex from docopt import docopt from chaperone.cproc.version import VERSION_MESSAGE from chaperone.cutil.env import Environment def check_canwrite(flist, overok): for f in flist: if os.path.exists(f) and not overok: print("error: file {0} exists, won't overwrite".format(f)) exit(1) def main_entry(): options = docopt(__doc__, version=VERSION_MESSAGE) files = options['FILE'] start = options['--xprefix'] braces = options['--xgrouping'] if braces: if any([b not in '{([' for b in braces]): print("error: --xgrouping can accept one or more of '{{', '[', or '(' only. Not this: '{0}'.".format(braces)) exit(1) # Enable or disable, but don't cache them if enabled Environment.set_backtick_expansion(bool(options['--shell-enable']), False) Environment.set_parse_parameters(start, braces) env = Environment() # Support stdin/stdout behavior if '-' is the only file specified on the command line if '-' in files: if len(files) > 1: print("error: '-' for stdin/stdout cannot be combined with other filename arguments") exit(1) sys.stdout.write(env.expand(sys.stdin.read())) sys.stdout.flush() exit(0) if len(files) < 2: print("error: must include two or more filename arguments") exit(1) destdir = os.path.abspath(files[-1]); destfile = None if os.path.isdir(destdir): if not os.access(destdir, os.W_OK|os.X_OK): print("error: directory {0} exists but is not writable".format(destdir)) st = options['--strip'] if st: files = [(f, os.path.basename(f).rstrip(st)) for f in files[:-1]] else: files = [(f, os.path.basename(f)) for f in files[:-1]] check_canwrite([os.path.join(destdir, p[1]) for p in files], options['--overwrite']) elif len(files) != 2: print("error: destination is not a directory and more than 2 files specified") exit(1) else: destfile = files[1] files = [(files[0], files[0])] check_canwrite([destfile], options['--overwrite']) # files is now a list of pairs [(source, dest-basename), ...] for curpair in files: if not os.path.exists(curpair[0]): print("error: file does not exist, {0}".format(curpair[0])) exit(1) if not os.access(curpair[0], os.R_OK): print("error: file is not readable, {0}".format(curpair[0])) exit(1) for curpair in files: if not destfile: destfile = os.path.join(destdir, curpair[1]) try: oldstat = os.stat(curpair[0]) oldf = open(curpair[0], 'r') except Exception as ex: print("error: cannot open input file {0}: {1}".format(curpair[0], ex)) exit(1) try: newf = open(destfile, 'w') except Exception as ex: print("error: cannot open output file {0}: {1}".format(destfile, ex)) exit(1) newf.write(env.expand(oldf.read())) oldf.close() newf.close() if options['--archive']: # ATTEMPT to retain permissions try: os.chown(destfile, oldstat.st_uid, oldstat.st_gid); except PermissionError: # Try them separately. User first, then group. try: os.chown(destfile, oldstat.st_uid, -1); except PermissionError: pass try: os.chown(destfile, -1, oldstat.st_gid); except PermissionError: pass try: os.chmod(destfile, oldstat.st_mode); except PermissionError: pass try: os.utime(destfile, times=(oldstat.st_atime, oldstat.st_mtime)) except PermissionError: pass if options['--verbose']: print("envcp {0} {1}".format(curpair[0], destfile)) destfile = None ================================================ FILE: chaperone/exec/sdnotify.py ================================================ """ Systemd notify tool (compatible with systemd-notify) Usage: sdnotify [options] [VARIABLE=VALUE ...] 
Options: --pid PID Inform chaperone/systemd of MAINPID (must say --pid=self if you want the programs PID) --status=STATUS Inform chaperone/systemd of status information --ready Send the ready signal (READY=1) --booted Indicate whether we were booted with systemd. (Note: Always indicates 'no', exit status 1.) --ignore Silently ignore inability to send notifications. (Always ignored if NOTIFY_SOCKET is not set.) All of the above specified will be sent in the order given above, then any VARIABLE=VALUE pairs will be sent. This is provided by Chaperone as an alternative to systemd-notify for distros which may not have one. """ # perform any patches first import chaperone.cutil.patches # regular code begins import sys import os import socket from docopt import docopt from chaperone.cproc.version import VERSION_MESSAGE def _mkabstract(socket_name): if socket_name.startswith('@'): socket_name = '\0%s' % socket_name[1:] return socket_name def do_notify(msg): notify_socket = os.getenv('NOTIFY_SOCKET') if notify_socket: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: sock.connect(_mkabstract(notify_socket)) sock.sendall(msg.encode()) except EnvironmentError as ex: raise Exception("Systemd notification failed: " + str(ex)) finally: sock.close() def main_entry(): options = docopt(__doc__, version=VERSION_MESSAGE) mlist = list() if options['--pid']: pid = options['--pid'] if pid == 'self': mlist.append("MAINPID="+str(os.getpid())) else: try: pidval = int(pid) except ValueError: print("error: not a valid PID '{0}'".format(pid)) exit(1) mlist.append("MAINPID="+str(pid)) if options['--status']: mlist.append("STATUS=" + options['--status']) if options['--ready']: mlist.append("READY=1") for vv in options['VARIABLE=VALUE']: vvs = vv.split('=') if len(vvs) != 2: print("error: not a valid format for VARIABLE=VALUE, '{0}'".format(vv)) exit(1) mlist.append("{0}={1}".format(vvs[0].upper(), vvs[1])) for msg in mlist: try: do_notify(msg) except Exception as ex: if not options['--ignore']: print("error: could not send sd_notify message, " + str(ex)) exit(1) if options['--booted']: exit(1) ================================================ FILE: chaperone/exec/sdnotify_exec.py ================================================ """ Systemd notify exec shell (compatible with systemd-notify) Runs a program and either proxies or simulates sd-notify functionality. Usage: sdnotify-exec [options] COMMAND [ARGS ...] Options: --noproxy Ignores NOTIFY_SOCKET if inherited in the environment and does not proxy messages. Useful with --wait-xxx options.. --wait-ready If COMMAND exits normally, wait until either READY=1 or ERRNO=n, are sent to the notify socket, then return the exit value from the command. --wait-stop Will continue running even if COMMAND exits, continuing proxy services until ERRNO=n or STOPPING=1 are detected. MAINPID notifications will be blocked, since the proxy will continue to be the main program. Overrides --wait-ready. --timeout secs Specifies the timeout before the lack of response triggers an error exit. COMMAND may continue to run. (no effect without --wait-ready or --wait-stop) --socket name Name of socket file created. By default, a unique socket name will be chosen automatically. --template value Sets %{SOCKET_ARGS} template to 'value'. --verbose Provide information about activity Environment variables (one of which is SOCKET_ARGS) can be used anywhere in the command by using the syntax %{VAR}. 
The default SOCKET_ARGS template is designed for Docker and is set to: '--env NOTIFY_SOCKET=/tmp/notify-%{PID}.sock -v %{NOTIFY_SOCKET}:/tmp/notify-%{PID}.sock' Thus, you can easily use "docker run" like this: sdnotify-exec docker run %{SOCKET_ARGS} some-image Environment variables that can be useful: NOTIFY_SOCKET Newly created notification socket ORIG_NOTIFY_SOCKET Original notify socket (if any) passed to this program PID PID of the running sdnotify-exec program SOCKET_ARGS Argument template Only "NOTIFY_SOCKET" itself is passed to the created process, though all are available for command and argument expansion. """ # perform any patches first import chaperone.cutil.patches # regular code begins import sys import os import re import signal import asyncio import shlex from functools import partial from docopt import docopt from chaperone.cproc.version import VERSION_MESSAGE from chaperone.cutil.notify import NotifyListener, NotifyClient from chaperone.cutil.env import Environment DEFAULT_TEMPLATE='--env NOTIFY_SOCKET=/tmp/notify-%{PID}.sock -v %{NOTIFY_SOCKET}:/tmp/notify-%{PID}.sock' loop = asyncio.get_event_loop() parent_socket = os.environ.get("NOTIFY_SOCKET") RE_FIND_UNSAFE = re.compile(r'[^{}\w@%+=:,./-]', re.ASCII).search def maybe_quote(s): if RE_FIND_UNSAFE(s) is None: return s return shlex.quote(s) class SDNotifyExec: exitcode = 0 sockname = None listener = None parent = None timeout = None wait_mode = None verbose = False parent_client = None proxy_enabled = True INFO_MESSAGE = { 'READY': "READY={1}{2}", 'MAINPID': "Process PID (={1}) notification{2}", 'ERRNO': "Process ERROR (={1}) notification{2}", 'STATUS': "Status message = '{1}'{2}", 'default': "{0}={1}{2}", } def __init__(self, options): self.sockname = options['--socket'] if not self.sockname: self.sockname = "/tmp/sdnotify-proxy-{0}.sock".format(os.getpid()) self.proxy_enabled = parent_socket and not options['--noproxy'] if options['--wait-stop']: self.wait_mode = 'stop' elif options['--wait-ready']: self.wait_mode = 'ready' if options['--timeout'] and self.wait_mode: self.timeout = float(options['--timeout']) self.verbose = options['--verbose'] # Modify original environment os.environ['NOTIFY_SOCKET'] = self.sockname # Set up the environment, reparse the options, build the final command Environment.set_parse_parameters('%', '{') env = Environment() env['PID'] = str(os.getpid()) env['SOCKET_ARGS'] = options['--template'] or DEFAULT_TEMPLATE if parent_socket: env['ORIG_NOTIFY_SOCKET'] = parent_socket env = env.expanded() self.proc_args = shlex.split(env.expand(' '.join(maybe_quote(arg) for arg in [options['COMMAND']] + options['ARGS']))) self.listener = NotifyListener(self.sockname, onNotify = self.notify_received, onClose = self._parent_closed) loop.add_signal_handler(signal.SIGTERM, self._got_sig) loop.add_signal_handler(signal.SIGINT, self._got_sig) proctitle = '[sdnotify-exec]' try: from setproctitle import setproctitle setproctitle(proctitle) except ImportError: pass def info(self, msg): if self.verbose: print("info: " + msg) def _got_sig(self): self.kill_program() def kill_program(self, exitcode = None): if exitcode is not None: self.exitcode = exitcode loop.call_soon(self._really_kill) def _really_kill(self): self.listener.close() loop.stop() def _parent_closed(self, which, ex): if which == self.parent_client: self.proxy_enabled = False self.parent_client = None @asyncio.coroutine def _do_proxy_send(self, name, value): if not (parent_socket and self.proxy_enabled): return if not self.parent_client: 
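            # Lazily connect to the inherited NOTIFY_SOCKET the first time a
            # message needs proxying; _parent_closed() disables proxying if
            # that socket goes away.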
            self.parent_client = NotifyClient(parent_socket, onClose = self._parent_closed)
            yield from self.parent_client.run()
        yield from self.parent_client.send("{0}={1}".format(name, value))

    def send_to_proxy(self, name, value):
        asyncio.async(self._do_proxy_send(name, value))

    def notify_received(self, which, name, value):
        self.send_to_proxy(name, value)

        sent_info = False

        if self.wait_mode:
            if name == "READY" and value == "1":
                if self.wait_mode == 'ready':
                    sent_info = True
                    self.info("ready notification received (will exit)")
                    self.kill_program(0)
            elif name == "ERRNO":
                sent_info = True
                self.info("error notification ({0}) received from {1}".format(value, self.proc_args[0]))
                self.kill_program(int(value))
            elif name == "STOPPING" and value == "1":
                sent_info = True
                self.info("STOP notification received from {0} (will exit)".format(self.proc_args[0]))
                self.kill_program()

        if not sent_info:
            self.info(self.INFO_MESSAGE.get(name, self.INFO_MESSAGE['default']).
                      format(name, value, ' (ignored but passed on)' if self.proxy_enabled else ' (ignored)'))

    @asyncio.coroutine
    def _notify_timeout(self):
        self.info("waiting {0} seconds for notification".format(self.timeout))
        yield from asyncio.sleep(self.timeout)
        print("ERROR: Timeout exceeded while waiting for notification from '{0}'".format(self.proc_args[0]))
        self.kill_program(1)

    @asyncio.coroutine
    def _run_process(self):
        self.info('running: {0}'.format(self.proc_args[0]))
        create = asyncio.create_subprocess_exec(*self.proc_args, start_new_session=bool(self.wait_mode))
        proc = yield from create
        if self.timeout:
            asyncio.async(self._notify_timeout())
        exitcode = yield from proc.wait()
        if not self.exitcode:   # may have arrived from ERRNO
            self.exitcode = exitcode

    @asyncio.coroutine
    def run(self):
        try:
            yield from self.listener.run()
        except ValueError as ex:
            print("Error while trying to create socket: " + str(ex))
            self.kill_program()
        else:
            try:
                yield from self._run_process()
            except Exception as ex:
                print("Error running command: " + str(ex))
                self.kill_program()

        # Command has executed, now determine our exit and proxy disposition

        if not self.wait_mode:
            self.info("program {0} exit({1}), terminating since --wait not specified".format(self.proc_args[0], self.exitcode))
            self.kill_program()

def main_entry():
    options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE)
    mainclass = SDNotifyExec(options)
    asyncio.async(mainclass.run())
    loop.run_forever()
    loop.close()
    exit(mainclass.exitcode)
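# Usage sketch (hypothetical image name; the switches and the %{SOCKET_ARGS}
# template are the ones documented above):
#
#   sdnotify-exec --wait-ready --timeout 30 docker run %{SOCKET_ARGS} some-image
#
# runs the container with a freshly allocated notify socket, proxies any
# sd_notify traffic upstream, and exits once READY=1 or ERRNO=n arrives.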
""" # perform any patches first import chaperone.cutil.patches # regular code begins import sys import os import asyncio import shlex from docopt import docopt from chaperone.cproc.client import CommandClient from chaperone.cproc.version import VERSION_MESSAGE def main_entry(): options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE) try: result = CommandClient.sendCommand(options[''] + " " + " ".join([shlex.quote(a) for a in options['']])) except (ConnectionRefusedError, FileNotFoundError) as ex: result = "chaperone does not seem to be listening, is it running?\n(Error is: {0})".format(ex) print(result) ================================================ FILE: doc/.gitignore ================================================ build/* docserver/var ================================================ FILE: doc/Makefile ================================================ # Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/chaperone.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/chaperone.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/chaperone" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/chaperone" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ================================================ FILE: doc/docserver/README ================================================ This is a basic documentation webserver that runs on port 8088 and points to the Sphinx documentation located in ../build/html. Built with chaperone-lamp, of course, in just a few minutes. ================================================ FILE: doc/docserver/build/Dockerfile ================================================ FROM chapdev/chaperone-lamp:latest ADD . 
/setup/
RUN /setup/build/install.sh


================================================
FILE: doc/docserver/build/install.sh
================================================
cd /setup

# remove existing chaperone.d and init.d from /apps so none linger
rm -rf /apps/chaperone.d /apps/init.d

# copy everything from setup to the root /apps
tar cvf - --exclude 'build*' --exclude 'run.sh' . | (cd /apps; tar xf -)

# Add additional setup commands for your production image here, if any.

rm -rf /setup


================================================
FILE: doc/docserver/build.sh
================================================
#!/bin/bash
#Created by chaplocal on `date`

# the cd trick assures this works even if the current directory is not current.
cd ${0%/*}

if [ $# != 1 ]; then
  echo "Usage: ./build.sh <production-image-name>"
  exit 1
fi

prodimage="$1"

if [ ! -f build/Dockerfile ]; then
  echo "Expecting to find Dockerfile in ./build ... not found!"
  exit 1
fi

tar czh --exclude '*~' --exclude 'var/*' . | docker build -t $prodimage -f build/Dockerfile -


================================================
FILE: doc/docserver/chaperone.d/010-start.conf
================================================
# 010-start.conf
#
# This is the first start-up file for the chaperone base images.  Note that start-up files
# are processed in order alphabetically, so settings in later files can override those in
# earlier files.

# General environmental settings.  These settings apply to all services and logging entries.
# There should be only one "settings" directive in each configuration file.  But, any
# settings encountered in subsequent configuration files can override or augment these.

# Note that variables are expanded as late as possible.  So, there can be variables
# defined here which depend upon variables which will be defined later (such as _CHAP_SERVICE),
# which is defined implicitly for each service.

settings: {
  env_set: {
    'LANG': 'en_US.UTF-8',
    'LC_CTYPE': '$(LANG)',
    'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin',

    # Uncomment the below to tell init.sh to lock-down the root account after the first
    # successful start.
    #'SECURE_ROOT': '1',

    # Variables starting with _CHAP are internal and won't be exported to services,
    # so we derive public environment variables if needed...
    'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)',
    'CHAP_SERVICE_NAME': '$(_CHAP_SERVICE:-)',
    'CHAP_TASK_MODE': '$(_CHAP_TASK_MODE:-)',
  },
}

# This is the startup script which manages the contents of $(APPS_DIR)/init.d.  It will
# run each of the init.d scripts in sequence.  Because this is part of the special "INIT"
# group, it will be run before any other service which is not in the group.  This makes
# it unnecessary to worry about 'before:' and 'after:' settings for init scripts.

init.service: {
  type: oneshot,
  command: '/bin/bash $(APPS_DIR)/etc/init.sh',
  before: 'default,database,application',
  service_groups: 'INIT',
}

# We select all messages from the "chaperone" program itself, which will include
# all messages which originate from the chaperone daemon.  We put these in a single
# log file which will be appended to on each run, so that if these log files
# are on a mounted user volume, they will accumulate for historical purposes.
chaperone.logging: {
  enabled: true,
  selector: '[chaperone].*',
  file: '$(APPS_DIR)/var/log/chaperone.log',
}

# The rest, except for chaperone, goes to the syslog

syslog.logging: {
  enabled: true,
  selector: '*.info;![chaperone].*',
  file: '$(APPS_DIR)/var/log/syslog.log',
}

# For the console, we include everything which is a warning except authentication
# messages and daemon messages which are not errors.

console.logging: {
  enabled: true,
  stdout: true,
  selector: '*.warn;authpriv,auth.!*;daemon.!warn',
}


================================================
FILE: doc/docserver/chaperone.d/120-apache2.conf
================================================
# 120-apache2.conf
#
# Start up apache.  This is a "simple" service, so chaperone will monitor Apache and restart
# it if necessary.  Note that apache2.conf refers to MYSQL_UNIX_PORT (set by 105-mysql.conf)
# to tell PHP where MySQL is running.
#
# In the case where no USER variable is specified, we run as the www-data user.

apache2.service: {
  command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND",
  restart: true,
  uid: "$(USER:-www-data)",
  env_set: {
    APACHE_LOCK_DIR: /tmp,
    APACHE_PID_FILE: /tmp/apache2.pid,
    APACHE_RUN_USER: www-data,
    APACHE_RUN_GROUP: www-data,
    APACHE_LOG_DIR: "$(APPS_DIR)/var/log/apache2",
    APACHE_SITES_DIR: "$(APPS_DIR)/www",
    MYSQL_SOCKET: "$(APPS_DIR)/var/run/mysqld.sock",
  },
  # If Apache2 does not require a database, you can leave this out.
  after: database,
}

# Use daily logging (the %d) so that log rotation isn't so important.  Logs
# will be created automatically for each day where they are required.
# See 300-logrotate.conf if you want to enable log rotation as a periodic
# job.  Note that chaperone watches for logs which are rotated and will
# automatically open a new file if the old one is rotated.
#
# Write logs either as the USER= user, or as www-data.

apache2.logging: {
  enabled: true,
  selector: 'local1.*;*.!err',
  file: '$(APPS_DIR)/var/log/apache2/apache-%d.log',
  uid: "$(USER:-www-data)",
}

apache2err.logging: {
  enabled: true,
  selector: 'local1.err',
  stderr: true,
  file: '$(APPS_DIR)/var/log/apache2/error-%d.log',
  uid: "$(USER:-www-data)",
}
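# For example (illustrative only): since both blocks above default through
# $(USER:-www-data), starting the container with USER set in its environment,
# e.g.
#
#   docker run -e USER=appuser ...
#
# runs Apache, and writes its logs, as 'appuser' instead of www-data.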
================================================
FILE: doc/docserver/etc/apache2.conf
================================================
# This is the main Apache server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See http://httpd.apache.org/docs/2.4/ for detailed information about
# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
# hints.

# This is a CHAPERONE-specific configuration designed to keep things lean.  It is based loosely
# on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to assure that
# system-installed modules and configurations will work.

# The chaperone configuration is designed to work within a self-contained application directory
# defined by APPS_DIR.  Note that it may be a user directory, and thus chaperone allows
# Apache to run entirely under any user account, along with a MySQL server that is also
# sequestered in the same way.  This means that you can have containers "point" to apps
# directories on your host server and manage per-container resources consistently in
# those directories during development, until you move the entire apps directory into
# a production container environment or image.

# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
# Mutex file:${APACHE_LOCK_DIR} default

PidFile ${APACHE_PID_FILE}

# Timeout: The number of seconds before receives and sends time out.
Timeout 300

KeepAlive On
MaxKeepAliveRequests 100
KeepAliveTimeout 5

# Note that the user and group are defined in chaperone.d/120-apache.conf
#User ${APACHE_RUN_USER}
#Group ${APACHE_RUN_GROUP}

# The default is off because it'd be overall better for the net if people
# had to knowingly turn this feature on, since enabling it means that
# each client request will result in AT LEAST one lookup request to the
# nameserver.
HostnameLookups Off

# ErrorLog: The location of the error log file.
# We dump errors to syslog so that we can easily duplicate it to the container stderr if we want.
ErrorLog syslog:local1

# Available values: trace8, ..., trace1, debug, info, notice, warn,
# error, crit, alert, emerg.
LogLevel warn

# Include standard Debian/Ubuntu module configuration:
Include /etc/apache2/mods-enabled/*.load
Include /etc/apache2/mods-enabled/*.conf

# CHAPERONE: Override to listen on 8080 and 8443
Listen 8080
Listen 8443

# Sets the default security model of the Apache2 HTTPD server.  It does
# not allow access to the root filesystem outside of /usr/share and /var/www.
# The former is used by web applications packaged in Debian,
# the latter may be used for local directories served by the web server.  If
# your system is serving content from a sub-directory in /srv you must allow
# access here, or in any related virtual host.
<Directory />
	Options FollowSymLinks
	AllowOverride None
	Require all denied
</Directory>

<Directory /usr/share>
	AllowOverride None
	Require all granted
</Directory>

<Directory /var/www/>
	Options Indexes FollowSymLinks
	AllowOverride None
	Require all granted
</Directory>

AccessFileName .htaccess

# The following lines prevent .htaccess and .htpasswd files from being
# viewed by Web clients.
<FilesMatch "^\.ht">
	Require all denied
</FilesMatch>

# The following directives define some format nicknames for use with
# a CustomLog directive.
LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent

# Include of directories ignores editors' and dpkg's backup files,
# see README.Debian for details.

# Include generic snippets of statements
IncludeOptional /etc/apache2/conf-enabled/*.conf

##
## CHAPERONE SPECIFICS
##

# Point MySQL socket to the right spot
#php_admin_value mysql.default_socket ${MYSQL_UNIX_PORT}
#php_admin_value mysqli.default_socket ${MYSQL_UNIX_PORT}

# Site definition added here

<VirtualHost *:8080>
	# The ServerName directive sets the request scheme, hostname and port that
	# the server uses to identify itself.
	#ServerName www.example.com

	ServerAdmin webmaster@localhost
	DocumentRoot ${APPS_DIR}/../build/html

	# Errors go to the syslog so they can be duplicated to the console easily
	ErrorLog syslog:local1
	CustomLog ${APACHE_LOG_DIR}/default-access.log combined
</VirtualHost>


================================================
FILE: doc/docserver/etc/init.sh
================================================
#!/bin/bash

# A quick script to initialize the system

# We publish two variables for use in startup scripts:
#
#   CONTAINER_INIT=1   if we are initializing the container for the first time
#   APPS_INIT=1        if we are initializing the $APPS_DIR for the first time
#
# Both may be relevant, since it's possible that the $APPS_DIR may be on a mount point
# so it can be reused when starting up containers which refer to it.
function dolog() { logger -t init.sh -p info $*; }

apps_init_file="$APPS_DIR/var/run/apps_init.done"
cont_init_file="/container_init.done"

export CONTAINER_INIT=0
export APPS_INIT=0

if [ ! -f $cont_init_file ]; then
  dolog "initializing container for the first time"
  CONTAINER_INIT=1
  su -c "date >$cont_init_file"
fi

if [ ! -f $apps_init_file ]; then
  dolog "initializing $APPS_DIR for the first time"
  APPS_INIT=1
  mkdir -p $APPS_DIR/var/run $APPS_DIR/var/log
  chmod 777 $APPS_DIR/var/run $APPS_DIR/var/log
  date >$apps_init_file
fi

if [ -d $APPS_DIR/init.d ]; then
  for initf in $( find $APPS_DIR/init.d -type f -executable \! -name '*~' ); do
    dolog "running $initf..."
    $initf
  done
fi

if [ "$SECURE_ROOT" == "1" -a $CONTAINER_INIT == 1 ]; then
  dolog locking down root account
  su -c 'passwd -l root'
fi


================================================
FILE: doc/docserver/run.sh
================================================
#!/bin/bash
#Created by chaplocal on Wed Jun 10 16:08:42 EST 2015

cd ${0%/*}   # go to directory of this file
APPS=$PWD
cd ..

options="-t -i -e TERM=$TERM --rm=true"
shellopt="/bin/bash"

if [ "$1" == '-d' ]; then
  shift
  options="-d"
  shellopt=""
fi

if [ "$1" == "-h" ]; then
  echo "Usage: run.sh [-d] [-h] [extra-chaperone-options]"
  echo "  Run chapdev/chaperone-lamp:latest as a daemon or interactively (the default)."
  exit
fi

# Extract our local UID/GID
myuid=`id -u`
mygid=`id -g`

# Run the image with this directory as our local apps dir.
# Create a user with uid=$myuid inside the container so the mountpoint permissions
# are correct.

docker run $options -v /home:/home -p 8088:8080 chapdev/chaperone-lamp:latest \
       --create $USER:$myuid --config $APPS/chaperone.d $* $shellopt


================================================
FILE: doc/source/_static/custom.css
================================================
.wy-table-responsive table td, .wy-table-responsive table th {
    white-space: normal !important;
}

.wy-table-responsive {
    overflow: visible !important;
}

table .caption-number:after {
    content: ": ";
}

.rst-content p.caption {
    font-size: 80%;
    padding-top: 5px;
}

.rst-content code.kbd {
    color: #E74C3C;
}


================================================
FILE: doc/source/_templates/layout.html
================================================
{% extends "!layout.html" %}

{% block footer %}
  {{ super() }}
{% endblock %}


================================================
FILE: doc/source/conf.py
================================================
# -*- coding: utf-8 -*-
#
# chaperone documentation build configuration file, created by
# sphinx-quickstart on Mon May  6 17:19:12 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath('.'))))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings.
They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx'] # intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)} # Autodoc settings autodoc_member_order = 'groupwise' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'chaperone' copyright = u'2015, Gary J. Wisniewski' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.3.0' # The full version, including alpha/beta/rc tags. release = '0.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['includes/*'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'chaperonedoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'chaperone.tex', u'Chaperone Documentation', u'Gary J. Wisniewski', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'chaperone', u'Chaperone Documentation', [u'Gary J. Wisniewski'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'chaperone', u'Chaperone Documentation', u'Gary J. Wisniewski', 'chaperone', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote'


# --------------------------------------------------------------------------------
# Add custom CSS (garyw did this)
# --------------------------------------------------------------------------------

trim_footnote_reference_space = True

numfig = True
numfig_secnum = 1

def setup(app):
    #app.add_javascript("custom.js")
    app.add_stylesheet("custom.css")


================================================
FILE: doc/source/guide/chap-docker-simple.rst
================================================
.. _chap.example-docker:

A Simple Docker Example
=======================

The following example creates a simple Docker container running an Apache daemon
and an SSH server, both managed by Chaperone.  In this example, we'll use
Chaperone to run both processes as ``root``, configured to work exactly as they
were configured in the Ubuntu distribution.

This example is based upon a `similar example from docker.com
<https://docs.docker.com/articles/using_supervisord/>`_ which uses `Supervisor
<http://supervisord.org/>`_ as its process manager.  Chaperone provides a far
more powerful feature set than Supervisor with a much smaller container
footprint.

Creating a Dockerfile
---------------------

We'll start by creating a basic ``Dockerfile`` for our new image::

   FROM ubuntu:14.04
   MAINTAINER garyw@blueseastech.com

Now, we can install ``openssh-server``, ``apache2``, and ``python3-pip``, then
use ``pip3`` to install Chaperone itself.  We also need to create a few
directories that will be needed by the installed software::

   RUN apt-get update && \
       apt-get install -y openssh-server apache2 python3-pip && \
       pip3 install chaperone
   RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /etc/chaperone.d

Adding Chaperone's Configuration File
-------------------------------------

Now, let's add a configuration file for Chaperone.  Chaperone looks in
``/etc/chaperone.d`` by default and will read any configuration files it finds
there.  So, we'll copy our single configuration there so Chaperone reads it
upon startup::

   COPY chaperone.conf /etc/chaperone.d/chaperone.conf

Let's take a look at what's inside ``chaperone.conf``::

   sshd.service: {
     command: "/usr/sbin/sshd -D"
   }

   apache2.service: {
     command: "bash -c 'source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND'",
   }

   console.logging: {
     selector: '*.warn',
     stdout: true,
   }

The above is a complete configuration file with three sections.  The first two
start up both ``sshd`` and ``apache2``.  The third section tells Chaperone to
intercept all ``syslog`` messages and redirect them to ``stdout``.  That way,
we'll be able to use the ``docker logs`` command to inspect the status of the
running container.

The above is really a simple configuration, but you can use the complete set of
service directives described in the :ref:`reference` section to control how
each service behaves.
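For instance (an illustrative variation on the file above, using only
directives that also appear elsewhere in this documentation), you could have
Chaperone restart Apache automatically if it dies, and run it under a
dedicated account::

   apache2.service: {
     command: "bash -c 'source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND'",
     restart: true,
     uid: "www-data",
   }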
Exposing Ports and Running Chaperone
------------------------------------

Let's finish our ``Dockerfile`` by exposing some required ports and specifying
Chaperone as the ``ENTRYPOINT`` so that Chaperone will start first and manage
our container::

   EXPOSE 22 80
   ENTRYPOINT ["/usr/local/bin/chaperone"]

Here, we've exposed ports 22 and 80 on the container and we're running the
``/usr/local/bin/chaperone`` binary when the container launches.

Building the Image
------------------

We can now build our new image::

   $ docker build -t <yourname>/chap-sample .

Running the Container
---------------------

Once you've built an image, you can launch a container from it::

   $ docker run -p 22 -p 80 -t -i <yourname>/chap-sample
   Jul 21 04:08:19 6d3e4eee4265 apache2[6]: AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 172.17.0.90. Set the 'ServerName' directive globally to suppress this message

And when you want to stop it, just use ``Ctrl-C``::

   C-c C-c^C Ctrl-C ... killing chaperone.
   Jul 21 04:08:23 6d3e4eee4265 chaperone[1]: Request made to kill system. (forced)
   Jul 21 04:08:23 6d3e4eee4265 chaperone[1]: sshd.service terminated abnormally with

What's Next?
------------

You can build upon the above simple sample if you want.  That gives you maximum
flexibility to design your container service environment exactly as you want.
If so, we recommend you scan the :ref:`reference` section so you know what
features are available.

If you want, you can also use the complete set of pre-built Chaperone images
`available here on Docker Hub <https://hub.docker.com/u/chapdev/>`_.  These
images are excellent examples of complete Chaperone-managed development and
production environments.  You can learn more by reading the introduction to
these images `on their GitHub page
<https://github.com/garywiz/chaperone-docker>`_.


================================================
FILE: doc/source/guide/chap-docker-smaller.rst
================================================
.. _chap.small-docker:

Creating Small Docker Images
============================

The default official Docker images are not always very compact.  For example,
the official Ubuntu image is about 180MB, and the official Java image is a
whopping 810MB!  This is made worse by some distributions (like Ubuntu and
Debian) whose defaults don't cater to small image sizes and prefer to assure
that things you *might* need are installed.  So, for example, installing
Python's package manager ``pip`` will cause about 200MB of extra packages to be
installed just "in case" some package requires the full compiler toolchain
(which most Python packages, including Chaperone, do not).

Chaperone, including all its dependencies, needs no more than 35-40MB in
total, including Python3.  So, here is a quick guide to creating small
Chaperone packages with a minimum of effort.

Eliminating Ubuntu/Debian Recommended Packages
----------------------------------------------

The simplest thing you can do when installing packages under Ubuntu or Debian
is use the ``--no-install-recommends`` switch when you run ``apt-get``.  For
example, the :ref:`Simple Docker Example <chap.example-docker>` section
recommends you install Chaperone, Apache and SSH like this::

   RUN apt-get update && \
       apt-get install -y openssh-server apache2 python3-pip && \
       pip3 install chaperone
   RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /etc/chaperone.d

If you do, you end up with a docker image which is 451MB::

   $ docker images
   REPOSITORY       TAG      IMAGE ID       CREATED          VIRTUAL SIZE
   sample-simple    latest   328d42703323   34 minutes ago   451.8 MB
   $

However, if you change the install commands to::

   RUN apt-get update && \
       apt-get install -y --no-install-recommends openssh-server apache2 python3-pip && \
       pip3 install chaperone

The functionally equivalent image is only 242MB::

   sample-simple    latest   8839acc1e4ef   24 minutes ago   242 MB
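A further trick is commonly combined with the above (it is not used in the
examples in this guide, so treat it as an optional extra): remove the apt
package lists in the same ``RUN`` layer, so they are never committed to an
image layer at all::

   RUN apt-get update && \
       apt-get install -y --no-install-recommends openssh-server apache2 python3-pip && \
       pip3 install chaperone && \
       rm -rf /var/lib/apt/lists/*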
A Small Ubuntu Base Image with Chaperone
----------------------------------------

The sample image above contains both SSH as well as Apache.  However, let's
assume that you want to create the simplest Chaperone base image possible.
Here is the ``Dockerfile`` to start with::

   FROM ubuntu:14.04
   RUN apt-get update && \
       apt-get install -y --no-install-recommends python3-pip && \
       pip3 install chaperone
   RUN mkdir -p /etc/chaperone.d
   COPY chaperone.conf /etc/chaperone.d/chaperone.conf
   ENTRYPOINT ["/usr/local/bin/chaperone"]

The following ``chaperone.conf`` can serve as your starting point::

   your.service: {
     command: "logger -p warn 'Replace this with your service'",
   }

   console.logging: {
     selector: '*.warn',
     stdout: true,
   }

If you build the above image, it will be just 226MB, only 38MB larger than the
Ubuntu image::

   $ docker images
   REPOSITORY       TAG      IMAGE ID       CREATED             VIRTUAL SIZE
   base-ubuntu      latest   182521cfa43e   About an hour ago   226 MB

A 53MB Alpine Image with Chaperone
----------------------------------

If you really care about keeping your images as minimal as possible, consider
using `Alpine Linux <http://www.alpinelinux.org/>`_ as your base image.
Alpine is a simple, stripped-down distribution that is ideal for creating
lean, mean containers.

Here's a ``Dockerfile`` that will create a small Alpine Linux image, complete
with both Chaperone as well as Python3::

   FROM alpine:3.2
   RUN apk add --update python3 && pip3 install chaperone
   RUN mkdir -p /etc/chaperone.d
   COPY chaperone.conf /etc/chaperone.d/chaperone.conf
   ENTRYPOINT ["/usr/bin/chaperone"]

The resulting image is less than 53MB::

   $ docker images
   REPOSITORY       TAG      IMAGE ID       CREATED             VIRTUAL SIZE
   base-alpine      latest   1c9d85d9bb67   About an hour ago   52.59 MB

Pre-Built Images
----------------

When building our official Chaperone base images (`located here on Docker Hub
<https://hub.docker.com/u/chapdev/>`_), we used the techniques above to create
versatile images with reasonably sophisticated start-ups.  They may be overkill
for most applications, but they may also serve as good configuration examples.

Notably, the `chaperone-alpinejava
<https://hub.docker.com/r/chapdev/chaperone-alpinejava/>`_ image is a good
example of what's possible.  It contains a complete Oracle Java 8 production
environment, Python 3, and Chaperone, and it's a remarkably small 216MB!

Hopefully the above gives you a useful starting point for streamlining your
own images.


================================================
FILE: doc/source/guide/chap-docker.rst
================================================
.. _chap.docker:

Using Chaperone with Docker
===========================

While Chaperone is a general-purpose program that can be used to manage any
small hierarchy of processes, it was designed specifically to solve problems
encountered when creating containers.

While the goal is to keep containers streamlined and small, ideally containing
only one process, the reality is that in many real-world applications, existing
daemons may need to be exploited for use within a container to save time or
provide commonly-available functionality.  Some applications also benefit from
greater modularity by breaking up functionality into multiple processes to
better exploit CPU resources.

The moment a container contains even two cooperating processes, the problem of
management arises, and ``chaperone`` was designed to make multi-process
management simple and well-contained.

.. toctree::
   :maxdepth: 2

   chap-docker-simple.rst
   chap-docker-smaller.rst


================================================
FILE: doc/source/guide/chap-intro.rst
================================================
.. _intro:

Introduction to Chaperone
=========================

Overview
--------

Container technologies like Docker and Rocket have dramatically changed the
way we bundle and distribute applications.
While many containers are built with a single contained process in mind, other
applications require a small suite of processes bundled into the "black box"
that containers provide.  When this happens, the need arises for a container
control system, but the available technologies such as ``systemd`` or
``upstart`` are both too modular and too heavy, resulting in "fat containers"
which introduce the very kinds of overhead container technologies are designed
to eliminate.

Chaperone is designed to solve this problem by providing a single,
self-contained "caretaker" process which provides the following capabilities
within the container:

* Dependency-based parallel start-up of services.
* A robust process manager with forking, oneshot, simple, and notify service
  types modelled after systemd.
* Port-triggered services inside the container using the inetd service type.
* A "cron" service type to schedule periodic tasks.
* A built-in highly configurable syslog service which can direct syslog
  messages to multiple output files and duplicate selected streams or
  severities to the container stdout as well.
* Control capabilities so that services can be stopped, started, or restarted
  easily at the command line or within application programs.
* Emulation of systemd's ``sd_notify`` capability, allocating notify sockets
  for each service so that cgroups and other privileges are not needed within
  the container.  Chaperone also recognizes a passed-in ``NOTIFY_SOCKET`` and
  will inform the host systemd of final container readiness and status.
* Features to support the creation of "mini-systems" within a single directory
  so that system services can run in userspace, or be mounted on host shares
  to keep development processes and production processes as close to identical
  as possible (see ``chaperone-lamp`` for an example of how this can be
  realized).

In addition, many incidental features are present, such as process monitoring
and zombie clean-up, clean shutdown and container restarts, and interactive
console process detection so that applications know when they are being run
interactively.


================================================
FILE: doc/source/guide/chap-other.rst
================================================
.. include:: /includes/incomplete.rst

.. _chap.other:

Other Uses for Chaperone
========================

Chaperone was designed for container use in scenarios such as Docker
containers.  However, it has also been designed to operate as a non-root
process manager, though this has not been tested very well.

If running as a non-root user, observe the following:

* The :ref:`--force <option.force>` switch will need to be used at startup.
* Chaperone will not create its ``syslog`` service at ``/dev/log``.
* Chaperone will not create the ``telchap`` command socket at
  ``/dev/chaperone.sock``.
* Process cleanup will not occur if processes are reparented, since they will
  be reparented to PID 1.
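For example (a sketch using only the switches documented in this manual), a
non-root user could start a private service group from a home directory::

   chaperone --force --config $HOME/myapp/chaperone.d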
Other than these notes, Chaperone *should* work as a process manager within
userspace for managing small groups of related processes.  If you find use
cases outside of container management, let me know.


================================================
FILE: doc/source/guide/chap-using.rst
================================================
.. _chap.using:

Using Chaperone
===============

Chaperone is a simple but full-featured process manager, designed to be as
flexible as possible.

.. toctree::
   :maxdepth: 2

   chap-docker.rst
   chap-other.rst


================================================
FILE: doc/source/includes/defs.rst
================================================
.. |ENV| replace:: :kbd:`$ENV`


================================================
FILE: doc/source/includes/incomplete.rst
================================================
.. note::

   This section is being worked on and is not yet complete.  The
   :ref:`reference` is currently complete and ready to use.  For status
   information about Chaperone and documentation, see :ref:`status`.


================================================
FILE: doc/source/index.rst
================================================
.. chaperone documentation master file, created by
   sphinx-quickstart on Mon May  6 17:19:12 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Chaperone: A lightweight, all-in-one process manager for lean containers
========================================================================

Chaperone is a lightweight alternative to process environment managers like
``systemd`` or ``upstart``.  While chaperone provides an extensive feature set,
including dependency-based startup, syslog logging, zombie harvesting, and job
scheduling, it does all of this in a single self-contained process that can run
as a "system init" daemon or can run in userspace.

This makes Chaperone an ideal tool for managing "small" process spaces like
Docker containers while still providing the system services many daemons
expect.

If you are using Chaperone with Docker, we suggest reading the :ref:`intro`,
then trying out the ``chaperone-lamp`` Docker image described on the
`chaperone-docker github page <https://github.com/garywiz/chaperone-docker>`_.

Any bugs should be reported as issues at
https://github.com/garywiz/chaperone/issues.  Current status of Chaperone and
related repositories is located on the :ref:`Project Status <status>` page.

Contents
--------

.. toctree::
   :maxdepth: 2

   guide/chap-intro.rst
   guide/chap-using.rst
   ref/index.rst

Downloading and Installing
--------------------------

The easiest way to install ``chaperone`` is using ``pip`` from the
https://pypi.python.org/pypi/chaperone package::

   # Ubuntu or debian prerequisites...
   apt-get install python3-pip

   # chaperone (may be all you need)
   pip3 install chaperone

If you're interested in the source code, or contributing, you can find the
``chaperone`` source code at https://github.com/garywiz/chaperone.

License
-------

Copyright (c) 2015, Gary J. Wisniewski

Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.  You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.  See the License for the
specific language governing permissions and limitations under the License.


================================================
FILE: doc/source/ref/command-line.rst
================================================
.. chaperone command-line documentation

.. _ref.chaperone:

Chaperone Command Reference
===========================

Command Quick Reference
-----------------------

Chaperone is usually executed as a container entrypoint and has the following
syntax::

   chaperone [options] [initial-command [args...]]

The initial command is optional.  If provided, it will be run as an "IDLE"
oneshot service, running after all other services have been started.
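For example (an illustrative invocation), the following starts all configured
services, runs an interactive shell as the initial command, and shuts the
container down when the shell exits::

   chaperone --exitkills /bin/bash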
Options are described in the table below, followed by more extensive reference
information.

============================================================= =================================================================================
command-line switch                                           function
============================================================= =================================================================================
:ref:`--config=config-location <option.config>`               Specifies a file or directory where configuration information is found.  Default is ``/etc/chaperone.d``.
:ref:`--debug <option.debug>`                                 Turns on debugging features.  (Implies ``--log-level=debug`` as well.)
:ref:`--disable-services <option.disable-services>`           No services will be started.  Only the command-line command will execute.
:ref:`--exitkills <option.exitkills>`                         When the command specified on the command line terminates, chaperone will execute a normal shutdown operation.
:ref:`--no-exitkills <option.no-exitkills>`                   Reverses the effect of ``--exitkills``.  Useful when ``--exitkills`` is implied or specified as a default.
:ref:`--force <option.force>`                                 If chaperone refuses to do something, tell it to try anyway.
--help                                                        Displays command and option help.
:ref:`--ignore-failures <option.ignore-failures>`             Run as if ``ignore_failures`` were true for all services.
:ref:`--log-level=level <option.log-level>`                   Force the syslog log output level to this value (one of 'emerg', 'alert', 'crit', 'err', 'warn', 'notice', 'info', or 'debug').
:ref:`--no-console-log <option.no-console-log>`               Forces 'stderr' and 'stdout' to *false* for all logging services.
:ref:`--no-defaults <option.no-defaults>`                     Ignore the ``_CHAP_OPTIONS`` environment variable, if present.
:ref:`--no-syslog <option.no-syslog>`                         Disable the syslog service at start-up and do not create ``/dev/log``.
:ref:`--user=username <option.user>`                          Run all processes as ``user`` (uid number or name).  The user must exist.  By default, all processes run as ``root``.
:ref:`--create-user=newuser[:uid[:gid]] <option.create-user>` Create a new user upon start-up with optional ``uid`` and ``gid``.  Then run as if ``--user=`` was specified.
:ref:`--default-home=directory <option.default-home>`         If :ref:`--create-user <option.create-user>` specifies a user whose home directory does not exist, then create the new user account with this directory as the user's home directory.
:ref:`--show-dependencies <option.show-dependencies>`         Display the service dependency graph, then exit.
:ref:`--task <option.task>`                                   Run in "task mode".  This implies ``--log-level=err``, ``--disable-services``, and ``--exitkills``.  This switch is useful when the container publishes commands which must run in isolation, such as displaying internal container information like version numbers.
--version                                                     Displays the chaperone version number.
============================================================= =================================================================================

Chaperone Command Execution
---------------------------

Chaperone goes through a set of startup phases in order to establish a working
environment.

1. Chaperone first examines the environment looking for the ``_CHAP_OPTIONS``
   variable (see :ref:`ch.env`).  If found, Chaperone uses it to establish
   default values.  The remaining environment variables will be passed to
   running services depending upon both global and per-service settings.
2. Command line options are read and combined with any default options to form
   the final command option set.  Configuration information is optional, and
   if no configuration is found, it is not considered an error.
3. Once configuration information is present, chaperone proceeds to start its
   internal ``syslog`` service, creating sockets such as ``/dev/log``, and
   starts its internal command processor which accepts commands at
   ``/dev/chaperone`` or interactive commands (via ``telchap``) at
   ``/dev/chaperone.sock``.  Chaperone also sets up utility environment
   variables such as ``_CHAP_INTERACTIVE`` so that they can be used in service
   configurations.
4. If a command and arguments are provided on the command line, an "IDLE"
   oneshot service is configured so that it runs after all other services are
   started.  If chaperone is running interactively, :option:`--exitkills` is
   implied; otherwise, termination of this service will leave the system
   running just as if any other oneshot service exited normally.
5. Services in the "INIT" service group (if any) are executed and must start
   successfully before other services are started.
6. All other services are started in dependency order.  Failures during
   startup comprise a system failure unless :option:`--ignore-failures` is
   used on the command line, or the service is declared with
   ``ignore_failures`` set to "true".
7. Services in the "IDLE" service group (if any) are executed (which includes
   any command specified on the command line).

Once started, Chaperone monitors all services, performs logging, and cleans up
zombie processes when they exit.  When it receives a ``SIGTERM``, it will shut
down all processes in an orderly fashion.

Note that when a command is specified on the chaperone command line, chaperone
starts a ``CONSOLE`` service internally.  This service can be managed just like
any other service, and shows up in service listings when using the ``telchap``
command.

If chaperone is started in an interactive environment (has a pseudo-tty as
``stdin``), it uses ``SIGHUP`` to terminate the process.  Otherwise, it uses
``SIGTERM`` as usual.  This is to accommodate login shells such as ``bash``
and ``sh``, which expect this behavior.

Option Reference Information
----------------------------

.. program:: chaperone

.. _option.config:

.. option:: --config

   Specifies the full or relative path to Chaperone's configuration directory
   or configuration file.  For example, assume that ``chaperone.conf`` is a
   file and ``chaperone.d`` is the name of a directory::

      chaperone --config /home/wwwuser/chaperone.conf

   will tell Chaperone to read all configuration directives from the single
   self-contained configuration file specified.  No other directives will be
   read.  Or::

      chaperone --config /home/wwwuser/chaperone.d

   specifies that the contents of the directory ``chaperone.d`` should be
   scanned and any file ending with ``.conf`` or ``.yaml`` will be read (in
   alphabetic order) to create the final configuration.  To understand how
   Chaperone handles directives which occur in multiple files, see
   :ref:`config.file-format`.

   If not specified, defaults to ``/etc/chaperone.d``, or uses the default
   option set in the ``_CHAP_OPTIONS`` environment variable (see
   :ref:`ch.env`).

.. _option.debug:

.. option:: --debug

   Enables debugging features.  When debugging is enabled:

   * chaperone will print out a raw dump of all command line options (including
     those derived from defaults), as well as configuration information.
   * Internal debugging messages will be turned on, describing service start-up
     in more detail.
   * Traceback for internal errors will be enabled, making it easier to report
     bugs.
.. _option.disable-services:

.. option:: --disable-services

When specified, no services will be started or configured, though dependencies and configuration syntax will be checked normally. This switch can be useful in cases where services do not start correctly, or you want to enter a fresh container for inspection or other purposes. For example::

    chaperone --disable-services /bin/bash

will run ``bash`` alone as a child of chaperone, or in the case of using chaperone-enabled Docker images::

    docker run -t -i chapdev/chaperone-lamp --disable-services /bin/bash

creates a fresh LAMP container running only ``bash`` so you can inspect the contents of the container without enabling any of the services.

.. _option.exitkills:

.. option:: --exitkills

This option works in conjunction with an ``initial-command`` specified on the command line, and will cause the entire container to shut down when the command completes. Chaperone attempts to anticipate what is needed automatically: when run in an interactive container, it defaults to ``--exitkills``; when run as a daemon, it defaults to ``--no-exitkills``. For example, the following docker command will cause an exit after ``bash`` completes::

    docker run -t -i --rm=true chapdev/chaperone-baseimage /bin/bash

whereas the following command will not exit upon bash's completion::

    docker run -d chapdev/chaperone-baseimage /bin/bash

Both this option as well as :ref:`--no-exitkills ` are provided for cases when Chaperone's default behavior is not desired.

.. _option.no-exitkills:

.. option:: --no-exitkills

Will not shut down the system when the ``initial-command`` exits. See :ref:`--exitkills `.

.. _option.force:

.. option:: --force

This option can be used to force Chaperone to attempt an operation even though it typically would refuse. At present, there are not many situations where this command is useful, but that may change. In cases where it can be used, Chaperone will display an alert, for example::

    wheezy:~$ chaperone
    Normally, chaperone expects to run as PID 1 in the 'init' role.
    If you want to go ahead anyway, use --force.
    wheezy:~$

.. _option.ignore-failures:

.. option:: --ignore-failures

Running with this option causes Chaperone to run as if the global setting :ref:`ignore_failures ` were set to "true". This can be useful when a service is failing on startup and causes a system failure (as described in the :ref:`table.service-types` table). In such situations, troubleshooting can be difficult since the container may be transient and failure information may be lost. For example, to run a shell in a container even if it is failing on startup::

    docker run -t -i --rm=true chapdev/chaperone-lamp --ignore-failures /bin/bash

.. _option.log-level:

.. option:: --log-level level-name

Normally, Chaperone should be configured to do logging with :ref:`logging directives `. However, at times, more detail is needed in the logs for troubleshooting purposes. This option should be followed by one of the log levels: **emerg**, **alert**, **crit**, **err**, **warn**, **notice**, **info**, or **debug**. When specified, it forces the logging system to behave as if *all* log definitions have a minimum severity of ``level-name``. For example, ``--log-level info`` assures that all types of messages except debugging messages will be displayed in all logs; ``--log-level debug`` assures that all types of messages are displayed.

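For example, the following one-off invocation raises verbosity for a single troubleshooting session (the image name is illustrative)::

    docker run -t -i --rm=true chapdev/chaperone-lamp --log-level info /bin/bash
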
Note that logging still must be configured so that syslog messages have some destination. By default, log messages are captured but not directed to 'stdout' or a file. Most configurations include at least a simple logging directive like this::

    console.logging: {
      selector: '*.warn',
      stdout: true,
    }

which tells Chaperone to direct any messages of warning level or greater severity to 'stdout'. Including ``--log-level info``, for example, would cause Chaperone to behave as if the declaration looked like this::

    console.logging: {
      selector: '*.info',
      stdout: true,
    }

Note also that using the :ref:`--debug ` switch automatically sets the log level to 'debug', so use of this switch in such cases is redundant.

.. _option.no-console-log:

.. option:: --no-console-log

This switch unsets any :ref:`stdout ` and :ref:`stderr ` logging directives, thus disabling any logging to the console. Disabling console output can be useful in special-case situations, such as when a command-line command wishes to dump container internals to ``stdout`` in some format (such as ``gzip``) which may be corrupted if inadvertent console messages are produced.

.. _option.no-syslog:

.. option:: --no-syslog

This switch tells Chaperone to disable the normal creation of ``/dev/log`` and to perform all of its own logging to the console. Chaperone defaults to automatically starting its own internal logging service. Disabling syslog can be useful in cases where a container has some other method of logging, or wants to start a standard syslog daemon itself. This switch is equivalent to setting the global setting :ref:`enable_syslog ` to `false` and will override any settings in Chaperone's configuration files.

.. _option.no-defaults:

.. option:: --no-defaults

Using this switch causes Chaperone to ignore any configuration defaults set in the :ref:`_CHAP_OPTIONS ` environment variable. Only the options provided on the command line itself will be recognized when this switch is used.

.. _option.user:

.. option:: --user name-or-number

Normally, when Chaperone is started, it runs as the same user which executed the ``chaperone`` command (usually ``root``). However, in many cases, it is desirable to have Chaperone spawn all services using the permissions of a different user. This switch specifies the user account under which Chaperone will start all processes and logging services. For example, assume you have an account within a container called ``appuser`` and all services should run under that user account. You would simply do this::

    docker run -d my_chaperone_image --user appuser

Chaperone will automatically assure that ``HOME``, ``LOGIN`` and ``LOGNAME`` are set correctly so that the application can locate all of its files relative to the application home directory. Typically, a production container would be built with this switch incorporated into the built image itself (such as by using Docker's ``CMD`` or ``ENTRYPOINT`` directives in a `Dockerfile `_).

Note that the user *must exist* already inside the container's configuration. If not, you can use :ref:`--create-user ` to dynamically create a new user inside the container upon startup.

.. _option.create-user:

.. option:: --create-user name[:uid[:gid]] or --create-user name:/path/to/file[:uid[:gid]]

Often, a generic container can be designed to allow userspace mount points, isolating persistent data outside the container so that the container becomes entirely transient.

Because containers have a set of isolated user credentials, sharing files and permissions with host volumes can often lead to difficulties. The ``--create-user`` switch allows you to "match" the host user (and optionally group) to the running process tree within the container so that file permissions are consistent. This switch accepts the following:

* A ``name`` parameter which should be the name of a user that will be created the first time the container runs.
* An optional ``uid`` which must be the numeric user ID of the user to be created. If omitted, a new user ID will be assigned.
* An optional ``gid`` which can be the name or number of an existing group, or the number of a new group to be created specifically for the new user.
* An optional format where the name is followed by the path to an *existing* file on the system whose ``uid`` and ``gid`` will be used to create the new user.

The final alternative form is specified by including the path as follows::

    --create-user name:/path/to/file

When ``uid`` and ``gid`` or the file option are omitted, Chaperone will use the container's installed OS policy to determine how to assign user credentials.

This feature can be used to create generic start-up scripts for containers so that they share the credentials of whatever user created them. Here is an example::

    #!/bin/bash
    # Extract host user UID/GID
    myuid=`id -u`
    mygid=`id -g`
    # Run the daemon
    docker run -d -v /home:/home my-app-image --create-user $USER:$myuid:$mygid

Once started, the image can now be stopped and restarted while retaining the credential relationship with the host.

.. note::

   Because containers are often *not* transient, and can be restarted, Chaperone is a bit smart about interpreting this switch, which will usually be present both when the container is first started and when it is started again. So, if the user name specified by ``--create-user`` already exists, Chaperone will check to assure that any ``uid`` or ``gid`` are correct, and proceed silently. If the user credentials are defined differently, then an error will occur.

.. _option.default-home:

.. option:: --default-home directory

This option is meaningful only when used in combination with :ref:`--create-user ` and specifies the home directory to use if the user's home directory does not exist. This switch can be useful if a user's home directory may optionally be mounted as part of a volume mount; if no such mount is provided, the user directory can default to an alternate location within the container itself.

For example, assume that a container normally accepts a mount-point for ``/home``, where the specified user (in this case ``joebloggs``) has a pre-existing home directory, as follows::

    docker run -v /home:/home myimage --create-user joebloggs --config apps/chaperone.conf

In this case, chaperone would find its configuration in ``/home/joebloggs/apps/chaperone.conf``. But, if you wanted the container to be more versatile, you may want to create an application directory *inside* the container as well so that the container could run with either an internal configuration, or an external configuration to simplify development. So, the following could be used to provide a default home::

    docker run myimage --create-user joebloggs --default-home /defhome \
           --config apps/chaperone.conf

The above command would instead find chaperone's configuration in ``/defhome/apps/chaperone.conf``, provided that no directory ``/home/joebloggs`` exists inside the container.

Typically, when a container is first built, this switch is included in the :ref:`_CHAP_OPTIONS ` environment variable. Doing so allows the container to be executed with a home directory mountpoint, or without.

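For example, a Dockerfile could bake such defaults into the image. This is only a sketch; the user name, default home, and the entrypoint location of the ``chaperone`` executable are illustrative::

    FROM chapdev/chaperone-baseimage

    # Default options read by chaperone at start-up (ignored with --no-defaults)
    ENV _CHAP_OPTIONS --create-user joebloggs --default-home /defhome

    ENTRYPOINT ["/usr/local/bin/chaperone"]
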
.. _option.show-dependencies:

.. option:: --show-dependencies

More complex service scenarios which use the service directives :ref:`before `, :ref:`after ` and :ref:`service_groups ` can sometimes require debugging to assure the startup sequence is correct. This switch provides some assistance by creating an ASCII dependency graph which shows the relationship between services after Chaperone analyzes service dependencies. Here is how you can see a sample::

    $ docker run -i --rm=true chapdev/chaperone-lamp --show-dependencies

    init | mysql | apache2 | logrotate | sample

    init       | ====
    mysql      | ========
    apache2    | ==========
    logrotate  | ======================
    sample     | =========

    ----------> depends on...

    init       |
    mysql      | init
    apache2    | mysql, init
    logrotate  | mysql, init
    sample     | logrotate, apache2, mysql, init

The output consists of two sections. The top section shows the earliest start time for each service, relative to other defined services, roughly in the order Chaperone will start them. The lower section contains the explicit dependencies after they have been resolved.

You can also obtain this information from inside the container using the ":ref:`telchap dependencies `" command::

    rbunion@69c0e692d78c:~$ telchap dependencies

    init | mysql | apache2 | logrotate | sample | CONSOLE

    init       | ====
    mysql      | ========
    apache2    | ==========
    logrotate  | ======================
    sample     | =========
    CONSOLE    | ==========

    ----------> depends on...

    init       |
    mysql      | init
    apache2    | init, mysql
    logrotate  | init, mysql
    sample     | apache2, logrotate, init, mysql
    CONSOLE    | apache2, logrotate, init, mysql, sample

If the container is running with a command-line command (such as ``bash``) you will also see the ``CONSOLE`` service listed, which is the service which was created internally to manage the interactive console. Because the console is part of the :ref:`IDLE group `, you can see that it depends upon all other services before it will start.

.. _option.task:

.. option:: --task

This is a convenience switch which is presently equivalent to combining:

* :ref:`--no-console-log `,
* :ref:`--disable-services `, and
* :ref:`--exitkills `.

It is useful when the command provided on the command line does some utility task which circumvents the normal operation of the container. For example, imagine that you create a complex container with several internal components, and want to provide an easy way to report on the versions of software inside the container. You could write a simple script, perhaps called ``/app/bin/report-versions``, then run it like this::

    $ docker run -i --rm=true my-app-image --task /app/bin/report-versions
    nginx: 1.9.1
    cluster-supervisor: git tag = 'production-1.22'
    replicator: 0.1
    $

The ``--task`` switch attempts to silence any other output, and assure the container does nothing except start the command-line command (using the configured Chaperone environment), then exit. See the :ref:`get-chaplocal ` task for an example of how this switch has been used in practice.

================================================
FILE: doc/source/ref/config-format.rst
================================================

.. chaperone documentation configuration directives

.. _config.file-format:

Configuration File Format
=========================

Chaperone's configuration is contained either in a single file, or a directory of configuration files. You specify the configuration with the :ref:`--config ` switch on the command line. If none is specified, the default ``/etc/chaperone.d`` is used.

If a directory is chosen, then only the top level of the directory will be searched, and only files ending in ``.conf`` or ``.yaml`` will be recognized and read in alphabetic order.

Configuration files are written using `YAML Version 2 `_. For example, you can define two chaperone services like this::

    mysql.service:
      command: "/etc/init.d/mysql start"

    apache2.service:
      command: "/etc/init.d/apache2 start"
      after: mysql.service

While the above works perfectly fine, we prefer to use the `YAML "flow style" `_ which looks very similar to JSON. In flow format, the above looks like this::

    mysql.service: {
      command: "/etc/init.d/mysql start"
    }

    apache2.service: {
      command: "/etc/init.d/apache2 start",
      after: mysql.service,
    }

The flow style is both easy to read, and works better when configurations become more complex. So, throughout the chaperone documentation, we'll stick to the flow format.

Comments can be included both between lines and at the end of lines using the hash symbol (``#``). Here is a complete, well-commented configuration section for a sample service that's included with the ``chaperone-baseimage`` docker image::

    # This is a sample oneshot service that runs at IDLE time, just before
    # the console app, if present. It will output something so at least
    # something appears on the screen.
    sample.service: {

      # This is a oneshot service, but most likely a real application will be another type
      # such as 'simple', or 'forking'.
      type: oneshot,
      enabled: true,     # CHANGE TO 'false' so this app doesn't run any more

      # Command output goes directly to stdout instead of to the syslog.
      # Note that you normally want to have services output to the syslog, because
      # chaperone's logging directives allow you to echo syslog data to stdout. That's
      # a better place to control things (see 010-start.conf).
      command: "$(APPS_DIR)/bin/sample_app",
      stdout: inherit,

      # Because we're in the IDLE group, we will run only after all system services have
      # started. However, if there is a command line program, like /bin/bash, we want to
      # run before that one. All upper-case group names have special meanings. However,
      # you can define your own service groups, then use them to declare startup
      # dependencies.
      service_groups: "IDLE",
      before: "CONSOLE",

      # These environment variables will be added only for your service
      env_set: {
        'INTERACTIVE': '$(_CHAP_INTERACTIVE)',
      }
    }

================================================
FILE: doc/source/ref/config-global.rst
================================================

.. chaperone documentation configuration directives

.. include:: /includes/defs.rst

.. _config.settings:

Configuration: Global Settings
==============================

Settings Quick Reference
------------------------

Global settings are identified by a configuration file section titled ``settings``, for example::

    settings: {
      ignore_failures: true,
      env_set: {
        'LANG': 'en_US.UTF-8',
        'LC_CTYPE': '$(LANG)',
        'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin',
      },
    }

Directives applied in the ``settings`` section apply globally, and some define defaults to be inherited by logging or service declarations. Entries below marked with |ENV| support :ref:`environment variable expansion `.

.. _table.settings-quick:

.. table:: Global Settings Quick Reference

    =================================================== =============================================================================
    settings variable                                   meaning
    =================================================== =============================================================================
    :ref:`env_inherit `                                 An array of patterns which can match one or more environment variables. Environment variables which do not match any pattern will be excluded. Default is ``['*']``.
    :ref:`env_set `                                     Additional environment variables to be set.
    :ref:`env_unset `                                   Environment variables to be removed.
    :ref:`idle_delay `                                  The "grace period" after all services have started before services in the "IDLE" group will begin running. Default is 1.0 seconds.
    :ref:`ignore_failures `                             Specifies the ``ignore_failures`` default for services.
    :ref:`process_timeout `                             Specifies the amount of time Chaperone will wait for a service to start. The default varies for each type of service. See :ref:`service process_timeout ` for more information.
    :ref:`shutdown_timeout `                            The amount of time Chaperone will wait for services to complete shutdown before forcing a kill with SIGKILL. Default is 8 seconds.
    :ref:`startup_pause `                               Specifies the ``startup_pause`` default for services.
    :ref:`enable_syslog `                               Specifies whether Chaperone will start its own internal syslog service at start-up. Defaults to ``true``.
    :ref:`detect_exit `                                 If true (the default), then Chaperone tries to intelligently detect when all processes have exited and none are scheduled, then terminates.
    :ref:`uid `                                         The default uid (name or number) for all services and logging tasks. Overrides the value specified by :ref:`--user ` or :ref:`--create-user `. |ENV|
    :ref:`gid `                                         The default gid (name or number) for all services and logging tasks. |ENV|
    =================================================== =============================================================================

Settings Reference
------------------

.. _settings.env_inherit:

.. describe:: env_inherit [ 'pattern', 'pattern', ... ]

Specifies a list of patterns which define what will be inherited from the environment passed to Chaperone when it was executed. Patterns are standard filename "glob" patterns. By default, all environment variables will be inherited. For example::

    settings: {
      env_inherit: [ 'PATH', 'TERM', 'HOST', 'SSH_*' ],
    }

.. _settings.env_set:

.. describe:: env_set { 'NAME': 'value', ... }

Provides a list of name/value pairs for setting or overriding environment variables. The values may contain :ref:`variable expansions `. Note that variables are not expanded immediately, so you can refer to variables which may be defined later in services. For example::

    settings: {
      env_set: {
        'SHELL': '/bin/ksh',
        'PATH': '/services/$(_CHAP_SERVICE)/bin:$(PATH)'
      }
    }

In the above, while the value of ``SHELL`` is known, the value of ``_CHAP_SERVICE`` will not be valid until a service executes. However, because variables use "late expansion", you can define variables such as the above as templates so that they will be available to all services.

.. _settings.env_unset:

.. describe:: env_unset [ 'pattern', 'pattern', ... ]

Removes the environment variables which match any of the given patterns from the environment. These variables will not be passed down to services or logging directives. Patterns are standard filename 'glob' patterns.

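For example, to keep host-specific variables from leaking into any service or logging environment (the patterns are illustrative)::

    settings: {
      env_unset: [ 'LS_COLORS', 'XDG_*' ],
    }
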
.. _settings.idle_delay:

.. describe:: idle_delay seconds

Specifies the number of seconds Chaperone will pause before tasks in the :ref:`IDLE service group ` will be started. May contain fractional values such as "0.1". Defaults to 1 second.

This delay is useful in at least two common situations:

1. When service startup may cause log messages to appear at the console, the console program (usually a shell) may have its prompt interleaved with console messages. This delay decreases the likelihood of this happening.

2. When services of type :ref:`simple ` are used, there is no real way to determine if services have fully started. However, the idle delay does nothing except add a "fudge factor", which, while useful, would be better implemented using proper 'notify' or 'forking' services.

.. _settings.ignore_failures:

.. describe:: ignore_failures ( false | true )

If set to 'true', then the default for each service's :ref:`ignore_failures ` will be 'true' rather than the normal 'false' default. Any setting by a service overrides this value. Primarily, this is useful for debugging and has similar utility as the command-line switch :ref:`--ignore-failures ` since it allows you to bypass normal system failure checks and allow services to start even though dependencies may have failed.

.. _settings.process_timeout:

.. describe:: process_timeout: seconds

This allows you to set the global default for the service :ref:`process_timeout `. Normally the process timeout value is determined by the :ref:`service type `. Setting this value globally will cause *all* processes to use the same process timeout as their default. If a service specifies its own value, it will always take precedence over this default.

.. _settings.shutdown_timeout:

.. describe:: shutdown_timeout

When Chaperone receives a shutdown request (usually ``SIGTERM``), it goes through an orderly shutdown, telling each service to stop. If there are still services running after the shutdown timeout, Chaperone will force all processes to quit using ``SIGKILL``. The default for this value is 10 seconds.

.. _settings.startup_pause:

.. describe:: startup_pause

This allows you to set the global default for the service :ref:`startup_pause ` value. If not specified, the service default will be used. If a service specifies its own value, it will always take precedence over this default.

.. _settings.enable_syslog:

.. describe:: enable_syslog

This setting allows you to enable or disable Chaperone's internal syslog service. If set to ``false`` then the ``/dev/log`` file will not be created, and Chaperone will not intercept and redirect logging from running applications. Note that applications which write to ``stdout`` and ``stderr`` will still be intercepted and processed by Chaperone's logging directives. If omitted, this setting defaults to `true`.

Syslog can also be disabled by using the Chaperone command line option :ref:`--no-syslog `.

.. _settings.detect_exit:

.. describe:: detect_exit

When 'true' (the default), Chaperone intelligently watches the process environment to determine whether it should automatically exit. Chaperone will exit when:

* All processes have exited, and ...
* There are no pending ``inetd`` or ``cron`` services which are configured and active.

Generally, this behavior is desirable, but there are situations where disabling it can be useful. For example, if a container contains a set of dormant (disabled) services, and they are manually enabled or disabled during runtime, setting this to 'false' will cause Chaperone to remain running even if there are no active services and all work has completed.

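A minimal sketch of that scenario (the service name and command are illustrative; the service could later be enabled with ``telchap``)::

    settings: {
      detect_exit: false,
    }

    maintenance.service: {
      command: "/opt/app/bin/run_maintenance",
      enabled: false,
    }
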
If set to 'false', then Chaperone will only exit when it is explicitly killed with ``SIGTERM``, or when a service exits whose :ref:`exit_kills ` configuration value is set to 'true'.

.. _settings.uid:

.. describe:: uid user-name-or-number

This sets the default user account which will be used by services and logging directives. If the ``uid`` setting is not specified, the default will be the user specified on the command line with :option:`--user ` or :option:`--create-user `. If none of the above are specified, then Chaperone runs the service normally under its own account without specifying a new user.

Services and logging are affected differently by user credentials:

* See :ref:`service uid `, or ...
* :ref:`logging uid ` for more details.

.. _settings.gid:

.. describe:: gid group-name-or-number

When :ref:`uid ` is specified (either explicitly or implicitly inherited), the ``gid`` directive can be used to specify an alternate group to be used for logging or services.

================================================
FILE: doc/source/ref/config-logging.rst
================================================

.. chaperone documentation configuration directives

.. include:: /includes/defs.rst

.. _logging:

Configuration: Logging Declarations
===================================

Logging Quick Reference
-----------------------

Chaperone has its own internal ``syslog`` service which listens on the ``/dev/log`` socket. However, by default, none of the messages sent to the syslog will be stored or output unless logging declarations are made.

The simplest logging directive tells chaperone what to do with log entries using a superset of the familiar `syslogd configuration format `_. For example, the following will direct all messages at the warning level (or greater) to ``stdout``::

    console.logging: {
      selector: '*.warn',
      stdout: true,
    }

You can define as many different logging entries as you wish, and all will be respected as individual output targets. If you have services which do significant syslog output, you can decide on a per-service basis which logs go where, which aspects are sent to ``stdout``, and which go to log files.

An overview of logging directives follows, then detailed reference information. Entries below marked with |ENV| support :ref:`environment variable expansion `.

.. _table.logging-quick:

.. table:: Logging Directives Quick Reference

    ================================================= =============================================================================
    logging keyword                                   meaning
    ================================================= =============================================================================
    :ref:`selector `                                  Specifies the syslog-compatible selection filter for this logging entry. |ENV|
    :ref:`file `                                      Specifies an optional file for output. |ENV|
    :ref:`stderr `                                    Directs output to ``stderr`` (can be used with ``file``).
    :ref:`stdout `                                    Directs output to ``stdout`` (can be used with ``file``).
    :ref:`syslog_host `                               Directs output to the host or IP address specified (can be used in combination with ``file``, ``stderr``, and ``stdout``).
    :ref:`enabled `                                   Can be set to ``false`` to disable this logging entry. |ENV|
    :ref:`logrec_hostname `                           Overrides the normal hostname inserted in syslog output records.
    :ref:`overwrite `                                 If ``file`` is provided, then setting this to ``true`` will overwrite the file upon opening. By default, log files operate in append mode.
    :ref:`extended `                                  Prefixes log entries with their facility and priority (useful primarily for debugging).
    :ref:`uid `                                       The uid (name or number) for permissions on created files and directories. |ENV|
    :ref:`gid `                                       The gid (name or number) for permissions on created files and directories. |ENV|
    ================================================= =============================================================================

.. _logging.sect.selectors:

Syslog Selectors
----------------

The method used for selecting which log entries are sent to which logging services is specified using a selector format similar to the one used by the standard ``syslogd`` daemon. [#f1]_ Chaperone includes some extensions to the standard format to introduce greater flexibility without deviating too far from the well-known syntax.

In the absence of a selector, Chaperone will direct all syslog output to the given location, so this entry echoes literally every ``syslog`` message to the container's ``stdout``::

    everything.logging: { stdout: true }

While this may be alright for simple applications, or for debugging, most applications require more nuanced control of what goes where. This is done by using *selectors*. For example, the following includes a selector which echoes only messages which have 'err' severity or greater to ``stdout``::

    badstuff.logging: { stdout: true, selector: '*.err' }

Selector Format
***************

The general format for selectors is::

    [!]facility.[!][=]priority; ...

where

*facility*
    Describes the subsystem where the syslog message originated. It is a comma-separated list of one of the following, with the last two options being Chaperone extensions:

    1. An asterisk (``*``) indicating all facilities.
    2. One of the keywords **kern**, **user**, **mail**, **daemon**, **auth**, **syslog**, **lpr**, **news**, **uucp**, **clock**, **authpriv**, **ftp**, **ntp**, **audit**, **alert**, **cron**, or **local0** through **local7**.
    3. A program identifier enclosed in brackets, such as ``[httpd]`` or ``[chaperone]``.
    4. A regular expression which will match any text within the message, such as ``/error/`` or ``/seg.*fault/``.

*priority*
    Describes the priority of the message, and is either an asterisk (``*``) or one of the following keywords in ascending order of severity: **debug**, **info**, **notice**, **warn** (or **warning**), **err** (or **error**), **crit**, **alert**, **emerg**.

Selectors including an exclamation mark are *negative* selectors, omitting otherwise included log entries. A selector *must* include positive selectors or no log entries will be selected. For example::

    # Select all errors (or more severe) except those sent to the auth subsystem
    selector: '*.err;auth,authpriv.!*'

However, the following selector will select nothing because there is no positive component::

    # Does nothing
    selector: 'auth,authpriv.!*'

Facility Selection
******************

Chaperone includes a more versatile set of options for selecting the facility where the message originated. You can include the classic ``syslog`` facility indication, or a program name (in brackets), or even a regular expression to match.

For example, assume a syslog message from ``sshd``::

    Jun  3 19:40:16 weevil sshd[1642]: Accepted publickey for root from ::1 port 48488 ssh2: RSA 24:2d:95:ec:09:fb:49:fa:e9:ff:e0:9e:c2:4d:13:42

Since ``sshd`` defaults to logging to the ``auth`` subsystem, the following would select the above message::

    selector: 'auth,authpriv.*'

You could also specify the program name::

    selector: '[sshd].*'

You could even use a regular expression to match arbitrary strings to select the message (assuming the above message is written at priority 'info' or greater)::

    selector: '/publickey/.info'

You could also select all info messages which did not contain the word "publickey" like this::

    selector: '*.info;!/publickey/.*'

Priority Selection
******************

Priority selection is simpler, but it's important to notice that choosing a priority means that messages of that level *or greater severity* are selected::

    selector: '*.err'

will select messages of **err**, **crit**, **alert**, or **emerg**, whereas::

    selector: '*.*;*.!err'

will select messages of **debug**, **info**, **notice** or **warn**.

If you want to specify a priority which is exact (either for exclusion or inclusion), use the ``=`` prefix. The following selector includes log entries *only* if they are at level 'debug'::

    selector: '*.=debug'

Logging Config Reference
------------------------

.. _logging.selector:

.. describe:: selector: "selector; [selector; ...]"

Specifies the logging entries which will be selected for reporting by this service. Multiple selectors can be provided, separated by semicolons. If no selector option is provided, Chaperone assumes a selector of ``*.*``. See the separate section above :ref:`on syslog selectors ` for more details.

.. _logging.file:

.. describe:: file: "filepath"

Indicates that output should be written to ``filepath``, which must be a full pathname or a pathname relative to the home directory of the logging user (implicitly defined, or defined by the :ref:`uid ` directive). *Note*: this should be an actual file, not a system file such as ``/dev/stdout``. You can use the :ref:`stdout ` directive to cause syslog output to be directed to ``stdout``.

Chaperone supports two special features for logging filenames:

1. You can include substitutions within a log filename using the '%' substitution set compatible with `strftime `_. If so, Chaperone will close and reopen the log file whenever the name changes. For example::

       file: "$(APPS_DIR)/var/log/app-messages-%a.log"

   would create log files for each day of the week with names such as ``app-messages-sun.log``, ``app-messages-mon.log``. Sometimes, this allows you to eliminate the need for log rotation.

2. If Chaperone notices that the file's 'inode' or mountpoint has changed, it will close and reopen the file automatically. This means you can create jobs to do log rotation, or manually rename or move the existing logfile, and Chaperone will take notice and assure a new log file is opened.

Note that you can combine this directive with :ref:`stdout `, :ref:`stderr `, and :ref:`syslog_host `. Output will be simultaneously written to all chosen locations.

.. _logging.stdout:

.. describe:: stdout ( false | true )

If this is 'true', then all selected syslog records will be copied to the 'stdout' of the container. Defaults to 'false'.

Note that you can combine this directive with :ref:`stderr `, :ref:`file `, and :ref:`syslog_host `. Output will be simultaneously written to all chosen locations.

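For example, one entry can maintain a logfile while echoing the same records to the console (the file path is illustrative)::

    warnings.logging: {
      selector: '*.warn',
      file: '/var/log/app/warnings.log',
      stdout: true,
    }
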
.. _logging.stderr:

.. describe:: stderr ( false | true )

If this is 'true', then all selected syslog records will be copied to the 'stderr' of the container. Defaults to 'false'.

Note that you can combine this directive with :ref:`stdout `, :ref:`file `, and :ref:`syslog_host `. Output will be simultaneously written to all chosen locations.

.. _logging.syslog_host:

.. describe:: syslog_host hostname-or-ip

When set, chaperone will send all matching log records to the remote host specified by ``hostname-or-ip``. The remote host should be running a ``syslog`` daemon on UDP port 514. Since UDP is a connectionless protocol, no error will be given if the remote host is unreachable, or is not running the ``syslog`` daemon. Packets will silently be sent and ignored.

Note that you can combine this directive with :ref:`stdout `, :ref:`stderr `, and :ref:`file `. Output will be simultaneously written to all chosen locations.

.. _logging.logrec_hostname:

.. describe:: logrec_hostname hostname-string

Normally, syslog records include the hostname of the current host. For example::

    Jul 16 02:53:54 813703fb4021 sudo : pam_unix(sudo:session): session closed for user root

Note in the above line that the string ``813703fb4021`` is the hostname of the current machine, which in the case of Docker, is a randomly generated string. You can use this directive to force the hostname to a particular string. For example, you could set ``logrec_hostname`` to ``dirserv-1``, which would cause the above sample line to instead be written like this::

    Jul 16 02:53:54 dirserv-1 sudo : pam_unix(sudo:session): session closed for user root

This can be useful when logs are being consolidated using remote logging, and some consistent means of identifying the log source is desirable.

.. _logging.enabled:

.. describe:: enabled ( true | false )

Set this to 'false' to disable all logging to this logging service.

.. _logging.overwrite:

.. describe:: overwrite ( false | true )

By default, Chaperone will append logs to any existing log file which matches the :ref:`file ` directive. Setting this to 'true' will overwrite any log file. Note that log files are opened when Chaperone starts running, so any overwrite will be immediate.

.. _logging.extended:

.. describe:: extended ( false | true )

This option prefixes every output syslog line with the facility and priority which was used to write to the syslog. Normally, this is not desirable, since often people rely upon the format of a log file line, which typically looks like this::

    Jun 15 02:09:33 su [27]: pam_unix(su:session): session opened for user root by (uid=1000)

If you set ``extended=true``, then log output lines will look like this::

    authpriv.info Jun 15 02:09:33 su [27]: pam_unix(su:session): session opened for user root by (uid=1000)

Note that ``authpriv.info`` is at the beginning of the line, and indicates the facility and priority. This is primarily useful for debugging and fine-tuning logging output, as there is no good way to determine the exact facility and priority used by some daemons if they do not clearly document it.

.. _logging.uid:

.. describe:: uid user-name-or-number

Chaperone will create and manage log files as the user specified by ``uid``. If ``uid`` is not specified, the :ref:`settings uid ` will be used, and finally the user specified on the command line with :option:`--user ` or :option:`--create-user `. If none of the above are specified, then Chaperone runs the service normally under its own account without specifying a new user.

Specifying a user requires root privileges.
Within containers like Docker, chaperone usually runs as root, so service configurations can specify alternate users even if they are run under a different user account. For example, if Chaperone were run from docker using the `chaperone-baseimage `_ image like this::

    docker run -d chapdev/chaperone-baseimage \
           --user wwwuser --config /home/wwwuser/chaperone.conf

there is no reason that ``chaperone.conf`` could not contain the following logging definition::

    mysql.logging: {
      uid: root,
      selector: "[mysql].*",
      file: "/var/log/mysql-%d.log",
    }

In this case, "mysql.logging" would be written as 'root', regardless of what the user configuration is for other services. Typically, when using a :ref:`userspace development model `, you want daemon log files to be written under the development user's ID for easy management.

.. _logging.gid:

.. describe:: gid group-name-or-number

When :ref:`uid ` is specified (either explicitly or implicitly inherited), the ``gid`` directive can be used to specify an alternate group to be used for logging. If not specified, then the user's primary group will be used.

As with :ref:`uid `, specifying a group requires root privileges.

.. rubric:: Notes

.. [#f1] The "standard" ``syslogd``, for our purposes, is the one authored by `Wettstein and Schulze `_. While it has been in use for decades, there are also many variations and some inconsistencies in the way selectors are interpreted.

================================================
FILE: doc/source/ref/config-service.rst
================================================

.. chaperone documentation configuration directives

.. include:: /includes/defs.rst

.. _service:

Configuration: Service Declarations
===================================

Service Quick Reference
-----------------------

Service configurations are identified by user-defined names and end with the suffix ``.service``. So, for example, the following defines a registration script called ``register_my_app`` which runs when all other services have been launched::

    myreg.service: {
      type: oneshot,
      command: "/usr/local/bin/register_my_app --host central-registry.example.com",
      service_groups: IDLE,
    }

Multiple services can be declared in a single file. Order within a configuration file is not important. However, if several configuration files are involved, services in subsequent files (alphabetically) will replace earlier services defined with the same name.

Each service inherits the environment defined by the :ref:`settings directive ` and can be tailored separately for the needs of each service. Entries below marked with |ENV| support :ref:`environment variable expansion `.

.. _table.service-quick:

.. table:: Service Directives Quick Reference

    ================================================ =============================================================================
    service variable                                 meaning
    ================================================ =============================================================================
    :ref:`type `                                     Defines the service type: 'oneshot', 'simple', 'forking', 'notify', 'inetd', or 'cron'. Default is 'simple'.
    :ref:`command `                                  Specifies the command to execute. The command is not processed by a shell, but environment variable expansion is supported. |ENV|
    :ref:`enabled `                                  If 'false', the service will not be started, nor will it be required by any dependents. Default is 'true'. |ENV|
    :ref:`stderr `                                   Either 'log' to write stderr to the syslog, or 'inherit' to write stderr to the container's stderr file handle. Default is 'log'. |ENV|
    :ref:`stdout `                                   Either 'log' to write stdout to the syslog, or 'inherit' to write stdout to the container's stdout file handle. Default is 'log'. |ENV|
    :ref:`port `                                     For service type 'inetd', specifies the dynamic port number for connections. There is no default. |ENV|
    :ref:`after `                                    A comma-separated list of services or service groups which must start before this service is allowed to start (dependencies).
    :ref:`before `                                   A comma-separated list of services or service groups which cannot be started until this service starts successfully (dependents).
    :ref:`directory `                                The directory where the command will be executed. Otherwise, the account home directory will be used. |ENV|
    :ref:`env_inherit `                              An array of patterns which can match one or more environment variables. Environment variables which do not match any pattern will be excluded. Default is ``['*']``.
    :ref:`env_set `                                  Additional environment variables to be set.
    :ref:`env_unset `                                Environment variables to be removed.
    :ref:`exit_kills `                               If 'true', the entire system should be shut down when this service stops. Default is 'false'.
    :ref:`ignore_failures `                          If 'true', failures of this service will be ignored but logged. Dependent services are still allowed to start.
    :ref:`interval `                                 For `type=cron` services, specifies the crontab-compatible interval in standard ``M H DOM MON DOW`` format. |ENV|
    :ref:`kill_signal `                              The signal used to kill this process. Default is ``SIGTERM``.
    :ref:`optional `                                 If 'true', then if the command file is not present on the system, the service will act as if it were not enabled.
    :ref:`pidfile `                                  The full path to the file which will contain the process 'pid' upon startup. ('forking' and 'simple' types only) |ENV|
    :ref:`process_timeout `                          Specifies the amount of time Chaperone will wait for a service to start. The default varies for each type of service. See :ref:`service types ` for more information.
    :ref:`restart `                                  If 'true', then chaperone will restart this service if it fails (but not if it terminates normally). Default is 'false'.
    :ref:`restart_delay `                            The number of seconds to pause between restarts. Default is 3 seconds.
    :ref:`restart_limit `                            The maximum number of restart attempts. Default is 5.
    :ref:`service_groups `                           A comma-separated list of service groups this service belongs to. All-uppercase group names are reserved by the system.
    :ref:`setpgrp `                                  If 'true', then the service will be isolated in its own process group upon startup. This is the default.
    :ref:`startup_pause `                            The amount of time Chaperone will wait to see if a service fails immediately upon startup. Default is 0.5 seconds.
    :ref:`uid `                                      The uid (name or number) of the user for this service. |ENV|
    :ref:`gid `                                      The gid (name or number) of the group for this service. |ENV|
    ================================================ =============================================================================

.. _service.sect.type:

Service Types
-------------

The ``type`` option defines how the service will be treated, when it is considered active, and what happens when the service terminates either normally or abnormally. Valid service types are: *simple* (the default), *oneshot*, *forking*, *notify*, *inetd*, and *cron*.

These service types are patterned loosely after service types defined by `systemd `_, but there are important differences [#f1]_, so this section should be read carefully before making any assumptions. As shown in :numref:`table.service-types`, each service type has a different behavior.
In the event the service's process reports an error, it is either a *system failure* or a *service failure*. A system failure results in an immediate, orderly shutdown of any services which have been started, along with logging an error report and termination of the system. A service failure is an isolated situation affecting only the service itself.

.. _table.service-types:

.. table:: Service Types

    ================ ========================================================== ========================= =========================
    type             behavior                                                   system failure            service failure
    ================ ========================================================== ========================= =========================
    simple           This is the default type. Chaperone considers a service   Service terminates        Service terminates
                     "started" as soon as the startup grace period             abnormally during the     abnormally later despite
                     (defined by :ref:`startup_pause `)                        grace period, or the      retries.
                     elapses. If the service terminates normally at any        pidfile is not found
                     time, the service is considered "started" until reset.    (if specified) before
                                                                               the process timeout.

    forking          A forking service is expected to set up all               Service terminates        Service terminates
                     communications channels and assure that the service       abnormally during the     abnormally later despite
                     is ready for application use, then exit normally          process timeout, or       retries (only if pidfile
                     before the :ref:`process_timeout `                        the pidfile cannot be     specified). Otherwise,
                     expires. *Note*: The default process timeout for          found (if specified)      never. [#f2]_
                     forking services is 300 seconds.                          during the timeout
                                                                               period.

    oneshot          A oneshot service is designed to execute scripts which    Service terminates        Service terminates
                     complete an operation and are considered started once     abnormally during         abnormally during a
                     they run successfully. *Note*: The default process        the process timeout.      manual "start"
                     timeout for oneshot services is 60 seconds.                                         operation.

    notify           A notify service is expected to establish communication   Service terminates        Service sends a
                     with chaperone using the *sd_notify* protocol. The        abnormally during the     failure notification.
                     :ref:`NOTIFY_SOCKET ` environment variable will be set,   process timeout.
                     and chaperone will consider the service started only
                     when notified appropriately. *Note*: The default
                     process timeout for a notify service is 30 seconds.

    inetd            The "inetd" type listens for TCP connections on the       Service executable        Never. Services
                     port specified by the :ref:`port ` parameter. When a      is missing or invalid,    which fail are logged,
                     connection is received, chaperone will start a service,   or TCP port is invalid    but new connections
                     connecting `stdin`, `stdout` and `stderr` of the          or already in use.        will still be
                     inbound socket to the specified command.                                            accepted.

    cron             The cron type schedules a script or program for           Service executable        Never. Failures of
                     periodic execution. The service is considered started     is missing or invalid     isolated executions
                     once successfully scheduled. Both scheduling parameters   but not optional.         do not constitute
                     (specified using :ref:`interval `) as well as the                                   a permanent service
                     presence of the executable specified in                                             failure.
                     :ref:`command ` will be checked before scheduling is
                     considered successful. Cron services which are declared
                     as :ref:`optional ` will not be scheduled and will be
                     treated as if they were disabled.
    ================ ========================================================== ========================= =========================

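As a concrete sketch of the 'forking' behavior above (the command and pidfile path are illustrative)::

    apache2.service: {
      type: forking,
      command: "/usr/sbin/apache2ctl start",
      pidfile: "/var/run/apache2.pid",
    }

Chaperone would consider this service started once the command exits normally and the pidfile denotes a running process.
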
.. note::

   Unlike ``systemd``, Chaperone does not have an "idle" service type. This is accomplished instead using a special system-defined service group called "IDLE", thereby permitting any service type to be activated when startup is complete. See :ref:`service_groups ` for more information.

Service Config Reference
------------------------

.. _service.type:

.. describe:: type: ( simple | forking | oneshot | notify | inetd | cron )

The ``type`` option defines how the service will be treated, when it is considered active, and what happens when the service terminates either normally or abnormally. See the :ref:`separate section on service types ` for a full description of what chaperone service types are and how they behave.

This setting is optional. If omitted, the default is "simple".

.. _service.command:

.. describe:: command: "executable args ..."

The ``command`` option defines the command and arguments which will be executed when the service is started. Both :ref:`environment variable expansion ` and "tilde" expansion for user names are supported, though "tilde" expansion is supported only on the command name itself, not on arguments. Note that the command line is *not* passed to a shell, so other shell meta-characters or shell environment variable syntax is not supported.

The first token on the command line must be an executable program available in the ``PATH``. If it is not found, it will be considered an error. However, if :ref:`optional ` is set to 'true', then the service will be disabled in such cases. This makes it easy to define configurations for programs which may or may not be installed. *Note*: If the executable is present, but permissions deny access, it is considered an error regardless of whether the service is declared optional.

In all cases, the environment that is used for ``PATH`` and expansions is the same environment that would be passed to the service. If the executable is not available in the service's ``PATH`` then a fully qualified pathname should be used.

.. _service.enabled:

.. describe:: enabled: ( true | false )

If enabled is 'true' (the default), then the service will start normally as per its type. If it is set to 'false', then the service will be ignored upon start-up, and any dependencies will be considered satisfied. Services can be enabled and disabled dynamically while Chaperone is running using the :ref:`telchap command `.

Since you can use environment variable expansions, it can be useful to make service startup conditional based upon some environment variable setting, such as::

    mysql.service: {
      type: simple,
      enabled: "$(ENABLE_MYSQL:+true)",
      ...
    }

.. _service.env_inherit:

.. describe:: env_inherit [ 'pattern', 'pattern', ... ]

Specifies a list of patterns which define what will be inherited from the environment defined by the :ref:`global settings `. Patterns are standard filename "glob" patterns. By default, all environment variables will be inherited from the settings environment. For example::

    sample.service: {
      command: '/opt/app/bin/do_the_stuff',
      env_inherit: [ 'PATH', 'TERM', 'HOST', 'SSH_*' ],
    }

.. _service.env_set:

.. describe:: env_set { 'NAME': 'value', ... }

Provides a list of name/value pairs for setting or overriding environment variables. The values may contain :ref:`variable expansions `. The inherited environment will be the one configured using similar settings directives such as :ref:`settings env_set `.

.. _service.env_unset:

.. describe:: env_unset [ 'pattern', 'pattern', ... ]

Removes the environment variables which match any of the given patterns from the environment. Patterns are standard filename 'glob' patterns.

.. _service.stdout:

.. describe:: stdout: ( 'log' | 'inherit' )

Can be set to 'log' to output service `stdout` to the syslog (the default) or 'inherit' to output service messages directly to the container's stdout. While it may be tempting to use 'inherit', we suggest you use the syslog service instead, then tailor :ref:`logging ` entries accordingly if console output is desired. This will provide much more flexibility. Messages from the process `stdout` will be logged with a syslog facility and severity of `daemon.info`. [#f3]_

.. _service.stderr:

.. describe:: stderr: ( 'log' | 'inherit' )

Can be set to 'log' to output service `stderr` to the syslog (the default) or 'inherit' to output service messages directly to the container's stderr. While it may be tempting to use 'inherit', we suggest you use the syslog service instead, then tailor :ref:`logging ` entries accordingly if console output is desired. This will provide much more flexibility. Messages from the process `stderr` will be logged with a syslog facility and severity of `daemon.warn`. [#f3]_

.. _service.port:

.. describe:: port: tcp-port-number

Specifies the TCP port number associated with an 'inetd' service, and must be specified when the type is 'inetd'. When this service is started, Chaperone will bind to the specified TCP port and listen for incoming connections. When a connection is received, Chaperone will start the service specified by the given :ref:`command ` parameter. The service will be started with `stdin`, `stdout`, and `stderr` connected to the inbound socket.

For example, the following script would initiate a simple "echo" service which would terminate when a blank line is sent::

    #!/usr/bin/python3
    import sys

    while True:
        result = input("echo:")
        if not result or result.strip() == "":
            exit(0)
        print("echoed ->", result)
        sys.stdout.flush()

Note the ``sys.stdout.flush()`` call. Generally, such a call (or equivalent) will be necessary to assure that the program flushes its output buffer.

Commands can be simple informational services, or long-running servers. If Chaperone receives multiple socket connections, it will start up as many processes as are needed to satisfy each request. In other words, a single command invocation is responsible for a single client connection. If the script needs to do logging, it will need to do so via ``/dev/log``, or an equivalent syslog facility within the language, since `stderr` also is connected to the remote socket.

There are many use-cases for creating simple port-triggerable services, especially in environments like Docker where containers contain only one or two processes, but auxiliary features may be desired without committing a long-running daemon to the task. For example, here is a blog post which describes `Service Monitoring with xinetd `_. The same type of scripts work identically with Chaperone.

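Declaring the corresponding service is then just a matter of pairing a port with the script (the service name, port, and path are illustrative)::

    echo.service: {
      type: inetd,
      port: 9900,
      command: "/opt/app/bin/echo_service",
    }
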
.. _service.after:

.. describe:: after: "service-or-group, ..."

Specifies one or more services or service groups which must be started successfully before this service will start. The value specified is a comma-separated list of services or service groups. Services are always identified with a ``.service`` suffix. Otherwise, the reference is to a service group. Thus::

    some.service: { after: "one.service, setup", command: "echo some" }

defines a service which will start only after the service "one.service" and all services which are members of the "setup" group. For more information see :ref:`service_groups `.

.. _service.before:

.. describe:: before: "service-or-group, ..."

Specifies one or more services or service groups which will not be started until this service starts successfully. The value specified is a comma-separated list of services or service groups. Services are always identified with a ``.service`` suffix. Otherwise, the reference is to a service group. Thus::

    some.service: { before: "one.service, application", command: "echo some" }

defines a service which will start before "one.service" and any services which are members of the "application" group. For more information see :ref:`service_groups `.

.. _service.directory:

.. describe:: directory: "directory-path"

Specifies the start-up directory for this service. If not provided, then the start-up directory is the home directory of the user under which the service will run.

.. _service.exit_kills:

.. describe:: exit_kills ( false | true )

If set to 'true', then when this service terminates, Chaperone will initiate an orderly system shutdown. This is useful in cases where the lifetime of a controlling service, such as a shell or main application, should dictate the lifetime of the container.

.. _service.ignore_failures:

.. describe:: ignore_failures ( false | true )

If set to 'true', then any failure by the service will be logged but ignored. Service failures are logged using syslog facility `local5.info` (`local5` is the facility used for all messages that originate from Chaperone itself).

.. _service.interval:

.. describe:: interval: "cron-interval-spec"

This is required for service ``type=cron`` and contains the cron specification which indicates the interval for periodic execution. Nearly all features documented in `this crontab man page `_ are supported, including extensions for ranges and special keywords such as ``@hourly`` which can be specified with or without the leading ``@``. So, a simple hourly cron service can be defined like this::

    cleanup_cookies.service: {
      type: cron,
      interval: hourly,
      command: "/opt/superapp/bin/clean_temp_cookies --silent",
    }

which is equivalent to::

    cleanup_cookies.service: {
      type: cron,
      interval: "0 * * * *",
      command: "/opt/superapp/bin/clean_temp_cookies --silent",
    }

Chaperone also supports an optional sixth field [#f4]_ so that seconds can be provided. The following runs every 15 seconds::

    pingit.service: {
      type: cron,
      interval: "* * * * * */15",
      command: "/opt/superapp/bin/ping_central_hub",
    }

Note that the ``@reboot`` special nickname is not supported, since Chaperone provides similar features using the ``INIT`` service group.

.. _service.kill_signal:

.. describe:: kill_signal: ( name | number )

Specifies the signal which is sent to the process for normal termination. By default, Chaperone sends ``SIGTERM``.

.. _service.optional:

.. describe:: optional: ( false | true )

If 'true', then this service is considered optional and will be disabled upon start-up if the executable is not found. Only a "file not found" error triggers optional service behavior. If the executable file exists, but permissions are incorrect, it is still considered a failure. Optional services may be started manually later if, for example, the executable should become available after system start-up.

.. _service.pidfile:

.. describe:: pidfile: file-path

   This setting specifies the "PID file" which the service will create upon startup to
   indicate its controlling process ID.  This is valid only for 'simple' and 'forking'
   services.  The appearance of the pidfile is an indication that the service has been
   activated.

   When the ``pidfile`` directive exists:

   1. Chaperone starts the service command normally.

   2. If the executable runs without error, Chaperone will watch for the appearance of the
      file specified in the ``pidfile`` directive.

   3. If the PID file does not appear within the timeframe given by the
      :ref:`process_timeout <service.process_timeout>`, then it is considered a failure.

   If the ``pidfile`` is seen, and contains a valid integer process ID *which denotes a
   running process*, then Chaperone will monitor the status of that process for failures to
   determine the disposition of the service.

   For 'simple' service types, it is possible (and likely) that the PID value will be the
   same as the PID of the originally running process, since 'simple' types are not expected
   to exit for the duration of their activity.

.. _service.process_timeout:

.. describe:: process_timeout: seconds

   When Chaperone is waiting for a service to start, it will wait this number of seconds
   before it considers that the service has failed.  This value is meaningful only for
   process types `oneshot`, `forking`, and `notify`, and is ignored for other types:

   For `oneshot` services:
     Chaperone considers a oneshot service started only once it completes its task
     successfully, and therefore waits up to ``process_timeout`` seconds before allowing
     dependent services to start.  For oneshot services the default process timeout is
     *60 seconds*.

   For `forking` services:
     Chaperone assumes a forking service does set-up, then proceeds to launch subprocesses
     to provide services.  The default process timeout for a forking service is
     *30 seconds*.

   For `notify` services:
     Since a notify service has an explicit means to tell Chaperone about its status, the
     process timeout defaults to *300 seconds* to provide the service with a greater amount
     of startup time.

.. _service.restart:

.. describe:: restart: ( false | true )

   By default, Chaperone will not restart a service once it has failed.  Setting this to
   'true' will tell Chaperone to wait :ref:`restart_delay <service.restart_delay>` seconds
   after a failure, then restart the service until the
   :ref:`restart_limit <service.restart_limit>` is reached.  If all restarts fail,
   Chaperone considers the service to be failed.

   Note that restarts do *not* happen during system startup.  If a service fails during
   system startup, the failure is considered a system failure (unless
   :ref:`ignore_failures <service.ignore_failures>` is 'true').

.. _service.restart_delay:

.. describe:: restart_delay: seconds

   When a service fails and is about to be restarted, Chaperone delays for this interval
   before attempting the restart.  By default, this value is *0.5 seconds*.  Consider
   increasing the restart delay for services which may fail because of network issues,
   since network issues may be transient (such as routers rebooting).

.. _service.restart_limit:

.. describe:: restart_limit: number-of-retries

   This value indicates the number of restarts which will be performed when a service
   fails.  Once the service starts successfully, the restart counter is reset.
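   For example, a hypothetical service which depends upon a flaky network peer might be
   given a longer delay and a few retries::

     hub_sync.service: {
       command: "/opt/superapp/bin/hub_sync",
       restart: true,
       restart_delay: 5,
       restart_limit: 3,
     }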
.. _service.service_groups:

.. describe:: service_groups: "group[,group,...]"

   This directive declares that the service has membership in one or more service groups.
   If not specified, all services have membership in the group "default".

   There are also two system-defined groups which have special meaning:

   ``INIT``
     This group will be started first, before any other service that is *not a member of
     the INIT group* itself.  The order in which services will start within the INIT group
     is unspecified unless services make explicit :ref:`before <service.before>` or
     :ref:`after <service.after>` declarations.

   ``IDLE``
     This group will be started after all other services that are *not a member of the
     IDLE group* itself.  The order in which services will start within the IDLE group is
     unspecified unless services make explicit :ref:`before <service.before>` or
     :ref:`after <service.after>` declarations.

   User-defined groups can be defined and used for any purpose, but must not have names
   which are all uppercase, as these are reserved for system groups.

   Group membership does *not* imply that the group will be started as a unit, or that the
   entire group will complete startup before other groups start.  For example, consider
   these service declarations::

     one.service:   { service_group: "setup", command: "echo one" }
     two.service:   { service_group: "setup", command: "echo two" }
     three.service: { service_group: "sanity_checks", command: "echo three" }
     four.service:  { service_group: "sanity_checks", command: "echo four" }

   Chaperone does not consider members of the same group to be related in any way, and will
   start them randomly in parallel at start-up.  Assuring a sequence of start-up operations
   *must* be done using :ref:`before <service.before>` or :ref:`after <service.after>`, as
   follows::

     one.service:   { service_group: "setup", command: "echo one" }
     two.service:   { service_group: "setup", command: "echo two" }
     three.service: { service_group: "sanity_checks", after: "setup", command: "echo three" }
     four.service:  { service_group: "sanity_checks", command: "echo four" }

   The "after" declaration assures that "three.service" will start only once all services
   in the "setup" group have successfully started.  *But*, "four.service" is still
   independent and can start at any time.  So, for "four.service" there are two options.
   By declaring "four.service" like this::

     four.service: { service_group: "sanity_checks", after: "setup", command: "echo four" }

   it will also wait for all "setup" services, *but* it will start in parallel with
   "three.service", whereas the declaration::

     four.service: { service_group: "sanity_checks", after: "three.service", command: "echo four" }

   achieves two goals: it assures that "four.service" starts after "three.service", but
   also assures all "setup" services will be completed, since "three.service" already
   expresses such a dependency.

   .. note::

      In all cases, references to a service group operate identically to explicit
      references to all group members.  Group references are merely a shortcut.
      Therefore::

        four.service: { service_group: "sanity_checks", after: "setup", command: "echo four" }

      is functionally identical to::

        four.service: { service_group: "sanity_checks", after: "one.service,two.service,three.service", command: "echo four" }

.. _service.setpgrp:

.. describe:: setpgrp ( true | false )

   By default, Chaperone makes each newly created service the leader of its own process
   group.  This has the advantage of providing partial isolation for the service, and
   assures that if signals are sent to the group, no other processes are affected.  It
   also provides a poor man's method of tracking service groupings. [#f5]_

   While this is a reasonable default, some interactive processes (such as shells like
   ``/bin/bash``) should be executed with ``setpgrp: false``, since they use process groups
   extensively themselves and will want to set up process groups according to their job
   control strategy.
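   For example, a container whose main purpose is to run an interactive shell might be
   declared like this (a sketch; pairing ``setpgrp: false`` with
   :ref:`exit_kills <service.exit_kills>` ties the container lifetime to the shell)::

     console.service: {
       command: "/bin/bash --login",
       setpgrp: false,
       stdout: inherit,
       stderr: inherit,
       exit_kills: true,
     }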
.. _service.startup_pause:

.. describe:: startup_pause seconds

   When Chaperone starts a service, it waits a short time to determine whether the service
   fails immediately.  This is the "startup_pause" and defaults to 0.5 seconds.  Currently,
   Chaperone uses this technique only for ``type=simple`` and ``type=notify`` services, so
   it will have no impact on other service types.

   Because "simple" services are considered started as soon as process execution begins,
   this short pause catches errors which occur within the first few moments of process
   initialization (such as unexpected permission problems) rather than allowing dependent
   services to start immediately.

.. _service.uid:

.. describe:: uid user-name-or-number

   Chaperone will run the service as the user specified by ``uid``.  If ``uid`` is not
   specified for the service, the :ref:`settings uid ` will be used, and finally the user
   specified on the command line with :option:`--user ` or :option:`--create-user `.  When
   Chaperone is told to use a particular user account, it also sets the ``HOME``, ``USER``,
   and ``LOGNAME`` environment variables to reflect those associated with the user.  If
   none of the above are specified, then Chaperone runs the service normally under its own
   account without specifying a new user.

   Specifying a user requires root privileges.  Within containers like Docker, Chaperone
   usually runs as root, so service configurations can specify alternate users even if
   they are run under a different user account.  For example, if Chaperone were run from
   Docker using the `chaperone-baseimage `_ image like this::

     docker run -d chapdev/chaperone-baseimage \
        --user wwwuser --config /home/wwwuser/chaperone.conf

   there is no reason that ``chaperone.conf`` could not contain the following service
   definitions::

     mysql.service: { uid: root, command: "/etc/init.d/mysql start" }
     myapp.service: { command: "~/bin/my_application" }

   In this case, "myapp.service" would run as user "wwwuser" because no ``uid`` was
   specified.  However, because Docker runs Chaperone as root, it is perfectly valid for
   the configuration file to tell Chaperone to run the "mysql" startup command as root.

.. _service.gid:

.. describe:: gid group-name-or-number

   When :ref:`uid <service.uid>` is specified (either explicitly or implicitly inherited),
   the ``gid`` directive can be used to specify an alternate group to be used for
   execution.  If not specified, then the user's primary group will be used.

   As with :ref:`uid <service.uid>`, specifying a group requires root privileges.
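   For example, this hypothetical declaration runs a backup job under a dedicated account
   while using an alternate group (both names are illustrative)::

     backup.service: {
       uid: backup,
       gid: tape,
       command: "/usr/local/bin/run_backup",
     }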
.. rubric:: Notes

.. [#f1] Making Chaperone's service types similar to ``systemd`` service types is a
   blessing and a curse.  The blessing is that ``systemd`` is rapidly becoming the new
   standard for init daemons, so over time, there will be a good general knowledge of what
   various service types mean.  The downside is that Chaperone is significantly simpler
   than ``systemd`` and there will be subtle (and probably to some, annoying) differences.
   However, we took the risk of choosing a similar model, which we believe will benefit
   from the standardization of important process management techniques like `sd_notify `_
   as well as making it easier for those familiar with ``systemd`` to use their knowledge
   in defining Chaperone configurations.

.. [#f2] Chaperone does not attempt "PID guessing" as ``systemd`` and some other process
   managers attempt to do.  The assumption is that "notify" will be the preferred means to
   determine if a service has started successfully, and to know what its PID is in case of
   a crash or internal notification.

.. [#f3] Syslog facilities and severity levels are documented `on Wikipedia `_.

.. [#f4] Yes, the seconds field appears at the *end*.  This is inherited from the
   `croniter package `_ which we use to parse and manage the internal cron intervals.  We
   considered not documenting it because it seems a bit non-standard, then figured... hey,
   could be useful.

.. [#f5] There is really only one bulletproof way to manage isolated groups of processes:
   `control groups (or cgroups) `_.  Chaperone intentionally avoids using control groups
   for a number of reasons, but mostly because they require privileges which make
   containers less secure.  In addition, despite their power and utility, control groups
   have become a contentious feature, being used extensively, and often in incompatible
   ways, by both `Docker `_ and `systemd `_.  Chaperone is intended to be lean, simple and
   compatible with containers.  For now, we believe that avoiding cgroups will keep
   Chaperone a more useful and simple accessory.

================================================
FILE: doc/source/ref/config.rst
================================================

.. chaperone documentation configuration directives

.. _config:

Chaperone Configuration
=======================

Chaperone has a versatile configuration language that can be quick and easy to use, or can
comprise many services and dependencies.  For example, the following user application plus
MySQL database server, along with syslog redirection, can be defined in just a few lines::

  mysql.service: { command: "/etc/init.d/mysql start", type: forking }
  myapp.service: { command: "/opt/apps/bin/my_application", restart: true, after: mysql.service }
  syslog.logging: { filter: "*.info", stdout: true }

Configurations can be as sophisticated as desired, including cron-type scheduling, multiple
types of jobs, and complex job trees.

These sections provide a complete reference to the Chaperone configuration directives.

.. toctree::

   config-format.rst
   config-global.rst
   config-service.rst
   config-logging.rst

================================================
FILE: doc/source/ref/env.rst
================================================

.. include:: /includes/defs.rst

.. _ch.env:

Environment Variables
=====================

Overview and Quick Reference
----------------------------

Chaperone-specific environment variables are described here.  Because environment
variables are an important configuration component for many applications, Chaperone tries
to make sure any Chaperone-specific variables do not automatically pollute the
environment, and yet are available when needed.

So, with few exceptions, Chaperone environment variables start with the prefix ``_CHAP_``,
but are not automatically passed down to services.  If you want to make these available to
services, simply define an environment variable in your configuration which expands to one
of the internal variables::

  settings: {
    env_set: {
      # Make the relevant service name available to all processes
      'SERVICE_NAME': '$(_CHAP_SERVICE)',
    }
  }
.. _table.env-quick:

.. table:: Environment Variable Quick Reference

   ====================================================== =====================================================================
   environment variable                                   meaning
   ====================================================== =====================================================================
   :ref:`_CHAP_CONFIG_DIR <env._CHAP_CONFIG_DIR>`         Will be set to the full path to the directory containing the
                                                          configuration file or directory.
   :ref:`_CHAP_INTERACTIVE <env._CHAP_INTERACTIVE>`       Will be set to "1" if chaperone is running with a controlling tty.
   :ref:`_CHAP_OPTIONS <env._CHAP_OPTIONS>`               Recognized during start-up and contains any default command-line
                                                          options.
   :ref:`_CHAP_SERVICE <env._CHAP_SERVICE>`               Contains the name of the current service.
   :ref:`_CHAP_SERVICE_SERIAL <env._CHAP_SERVICE_SERIAL>` Contains a monotonically-increasing serial number which starts at 1
                                                          and increases each time a service command is invoked.
   :ref:`_CHAP_SERVICE_TIME <env._CHAP_SERVICE_TIME>`     Contains the Unix timestamp of the start-time of the service.
   :ref:`_CHAP_TASK_MODE <env._CHAP_TASK_MODE>`           Will be set to "1" if chaperone was invoked with the :ref:`--task `
                                                          option.
   :ref:`NOTIFY_SOCKET <env.NOTIFY_SOCKET>`               Set to the per-service systemd-compatible notify socket for services
                                                          started with :ref:`type=notify `.
   ====================================================== =====================================================================

Managing Environment Variables
------------------------------

Environment Inheritance
***********************

Chaperone provides extensive control over environment variables as they are passed from
the parent (often the container technology, like Docker), and eventually down to
individual services.

.. _figure.env:

.. figure:: /images/env_inherit.svg
   :align: center

   Chaperone Environment Management

As shown in :numref:`figure.env`, Chaperone controls the environment at two levels, and
with three separate directives:

1. Chaperone creates a "global" settings environment which consists of environment
   variables inherited from the parent environment, modified by the three environment
   directives :ref:`env_inherit `, :ref:`env_set `, and :ref:`env_unset `.

2. Each service can further modify the resulting environment using the same directives,
   and the changes apply only to the environment of the selected service.

In each case, Chaperone processes each set of directives in the same way:

1. The new environment is initialized based upon the setting of :ref:`env_inherit `, a
   list of patterns.  If not specified, Chaperone assumes all environment variables will
   be inherited.

2. Then, Chaperone sets any new environment variables specified by :ref:`env_set `.

3. Finally, any environment variables specified by :ref:`env_unset ` are removed if they
   exist.
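For example, a global ``settings`` section might inherit only a few variables from the
parent, add one of its own, and scrub anything matching a pattern (a sketch; the variable
names are illustrative)::

  settings: {
    env_inherit: [ 'PATH', 'HOME', 'TERM', 'LANG' ],
    env_set: { 'APP_MODE': 'production' },
    env_unset: [ '*_PROXY' ],
  }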
.. _env.expansion:

Environment Variable Expansion
******************************

Environment variable directives (as well as some others) can contain bash-inspired [#f1]_
environment variable expansions, as indicated below:

``$(ENVVAR)`` or ``${ENVVAR}``
  Expands to the specified environment variable.  If the environment variable is not
  defined, the expansion text is not replaced and will appear as-is.

``$(ENVVAR:-default)``
  Inserts the environment variable if it is present, otherwise expands to the string
  specified by ``default`` (which can be blank).

``$(ENVVAR:+ifpresent)``
  Inserts ``ifpresent`` if the environment variable *is defined*, otherwise inserts the
  empty string.

``$(ENVVAR:_default)``
  Inserts the empty string if the environment variable *is defined*, otherwise inserts
  ``default``.  (This is the opposite of the previous ``:+`` operation.)

``$(ENVVAR:?error-message)``
  Inserts the environment variable, or stops Chaperone with the specified
  ``error-message`` if the variable is not defined.

``$(ENVVAR:|present-val|absent-val)``
  If the environment variable is defined, then inserts the expansion of ``present-val``,
  otherwise inserts the expansion of ``absent-val``.

``$(ENVVAR:|check-val|equal|notequal)``
  Compares the expanded value of ``ENVVAR`` to ``check-val`` using case-insensitive
  filename glob matching rules.  If they match, then inserts ``equal``, otherwise inserts
  ``notequal``.  For example, you can use a match expression of ``[ty]*`` to match any
  value which starts with 't' or 'y'.

``$(ENVVAR:/regex/repl/[i])``
  Expands the named environment variable, then performs a regular expression substitution
  using ``regex`` with the replacement string ``repl``.  If either contains slashes, they
  must be escaped using a backslash.  The optional flag can be set to ``i`` if
  case-insensitive matching is required.  Parenthesized groups in ``regex`` can be
  referred to in the replacement as ``\n``, where 'n' is zero to refer to the entire
  matched string, or 1-n to specify the group number.

``$(`shell-command`)``
  Executes the specified shell command and inserts the resulting output.  Note that the
  shell command may contain references to other environment variables.

The forms above are patterned after ``bash`` and can be useful in cases where defaults are
required.  For example, if you wanted to specify the user for a service in the event no
user was otherwise specified::

  sample.service: {
    uid: "$(USER:-www-data)"
    ...
  }

The above would expand to the value of ``USER`` if it exists, otherwise would expand to
``www-data``.
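The regex form is handy for deriving one value from another.  For example, this
hypothetical declaration creates a sanitized variant of ``APP_NAME`` by replacing runs of
non-alphanumeric characters with underscores (both variable names are illustrative)::

  settings: {
    env_set: {
      'APP_NAME_SAFE': '$(APP_NAME:/[^A-Za-z0-9]+/_/)',
    }
  }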
Not all directives support environment expansion.  When it is supported, it will be
explicitly indicated in the reference documentation for the directive (for example, the
:ref:`service directory <service.directory>` directive).

.. note::

   Environment variables are expanded *as late as possible* so that declarations defined
   at the global level can, if desired, be filled in automatically at lower levels.  For
   example, consider this globally set environment variable declaration::

     settings: {
       env_set: {
         'MY_NAME': '$(_CHAP_SERVICE)',
         'HAS_NOTIFY_SOCKET': '$(NOTIFY_SOCKET:+1)',
         'PATH': '/service-bins/$(MY_NAME):$(PATH)',
       }
     }

   In the above case, note that all environment variables are dependent upon values which
   will *not exist* until later, when a service is executed.  Specifically,
   ``_CHAP_SERVICE`` is set to the service name, and ``NOTIFY_SOCKET`` will be set only if
   a socket is allocated when the process is run.  However, Chaperone assures that such
   environment variables use late expansion so that templates such as the above can be
   created and inherited by both logging and service declarations.

.. _env.backtick:

Backtick Expansion
******************

Chaperone supports backtick expansion similar to most command shells.  Backtick expansion
can be used wherever environment variables can be used (denoted by the |ENV| symbol in the
directive documentation).  Any valid system command can be included, and the output will
be substituted for the backtick expression.  For example, to set an environment variable
to the default gateway (normally the Docker bridged network)::

  settings: {
    env_set: {
      "GATEWAY_IP": "`ip route | awk '/default/ { print $3 }'`"
    }
  }

Backtick expansions are not intended to be a general-purpose shell escape, but are
intended for situations (like the example) where some system information needs to be
collected for configuration purposes.

Specifically, backtick expansions have the following characteristics:

* Backticks will be processed *after* all dependent environment variables are expanded.

* Expansions are done only once, even if they are present in multiple locations.  Thus,
  the backtick expression `\`date\`` will expand to the same value no matter how many
  times it is used.

* The environment passed to the backtick command will be *the initial chaperone
  environment*, before any directives are processed.

* Backtick expansions will be performed as the user specified by the `uid` and `gid`
  relevant to the section where the backtick expansion is used.

However, note that backtick expansions may include references to other environment
variables, such as::

  settings: {
    env_set: {
      "LOCALDATE": "`TZ=${TZ} date`",
      "TZ": "America/Los_Angeles",
    }
  }

Note in the above that the `TZ` variable will be expanded first (if necessary) before the
backtick expression.

Chaperone also supports a special syntax when backtick expansion is the only desired
outcome of a variable insertion.  The following two methods are equivalent::

  env_set: {
    "HOSTNAME1": "$(`hostname -s`)",
    "HOSTNAME2": "`hostname -s`",
  }

This alternate syntax is primarily useful in :ref:`the envcp utility ` since backticks are
not expanded in bare text as they are within Chaperone directives.

Variable Reference
------------------

.. _env._CHAP_CONFIG_DIR:

.. envvar:: _CHAP_CONFIG_DIR

   This is the path to the directory which *contains* the target specified by the
   :option:`--config ` option.  For example, if you start Chaperone with the following
   command::

     chaperone --config /home/appsuser/firstapp/chaperone.conf

   then this environment variable will be set to ``/home/appsuser/firstapp``.  Note that
   the method is the same *even if a configuration directory is specified*.  Thus, this
   command::

     chaperone --config /home/appsuser/firstapp/chaperone.d

   would set ``_CHAP_CONFIG_DIR`` to exactly the same value even though the target is a
   directory rather than a file.

   One very useful application of this variable is to define "self-relative" execution
   environments where all application files are stored relative to the location of the
   configuration directory.  The ``chaperone-baseimage`` does this with the following
   declaration::

     settings: {
       env_set: {
         'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)',
       }
     }

   Then, all other files, commands and configurations operate relative to the ``APPS_DIR``
   environment variable.  If this principle is observed carefully, you can easily run::

     docker run chapdev/chaperone-baseimage --config /myapps/prerelease/chaperone.d

   to run an isolated set of applications stored in ``/myapps/prerelease``, and another
   set of isolated applications in the same image like this::

     docker run chapdev/chaperone-baseimage --config /myapps/stable/chaperone.d

.. _env._CHAP_OPTIONS:

.. envvar:: _CHAP_OPTIONS

   When Chaperone starts, it reads options both from the command line and from this
   environment variable.  The environment variable provides defaults which should be used
   if they are not present on the command line.

   For example, in the ``chaperone-baseimage`` image configuration, the default value for
   ``--config`` is set::

     ENV _CHAP_OPTIONS --config apps/chaperone.d
     ENTRYPOINT ["/usr/local/bin/chaperone"]
.. _env._CHAP_INTERACTIVE:

.. envvar:: _CHAP_INTERACTIVE

   This variable will always be set by Chaperone to either "0" or "1".  A "1" value
   indicates that Chaperone detected a controlling terminal (pseudo-tty).  For example::

     $ docker run -t -i chapdev/chaperone-baseimage --task /bin/echo '$(_CHAP_INTERACTIVE)'
     1
     $ docker run -i chapdev/chaperone-baseimage --task /bin/echo '$(_CHAP_INTERACTIVE)'
     0
     $

   Exporting this value to services can allow services to detect interactive vs. daemon
   containers in order to tailor their operation.

.. _env._CHAP_SERVICE:

.. envvar:: _CHAP_SERVICE

   For each :ref:`service definition `, this variable will be set to the name of the
   service itself, including the ``.service`` suffix.  So, the service::

     mydata.service: { command: "/bin/bash -c '/bin/echo $(_CHAP_SERVICE) >/tmp/service.txt'" }

   will write ``mydata.service`` to the file ``/tmp/service.txt`` (not particularly
   useful).  Note that even the main command runs as a conventional service named
   "CONSOLE"::

     $ docker run -i chapdev/chaperone-baseimage --task /bin/echo '$(_CHAP_SERVICE)'
     CONSOLE
     $

.. _env._CHAP_SERVICE_TIME:

.. envvar:: _CHAP_SERVICE_TIME

   Every time a service command is executed, this variable will contain the Unix time
   (integral number of seconds since January 1, 1970 UTC) of command invocation.

.. _env._CHAP_SERVICE_SERIAL:

.. envvar:: _CHAP_SERVICE_SERIAL

   Every time a service command is executed, this variable will contain an integral value
   which starts at 1 and will be incremented for each invocation of the command.  This
   value can be especially useful for 'inetd' or 'cron' services which may run multiple
   times and need a unique identifier for each invocation.

.. _env._CHAP_TASK_MODE:

.. envvar:: _CHAP_TASK_MODE

   This variable will be defined and set to "1" whenever Chaperone was run with the
   :ref:`--task ` command-line option.  It can be used within scripts or applications to
   tailor behavior, if desired.

Notify Socket
-------------

.. _env.NOTIFY_SOCKET:

.. envvar:: NOTIFY_SOCKET

   Chaperone attempts to emulate ``systemd`` behavior by providing a :ref:`"notify"
   service type `.  Processes created by this type will have the additional variable
   ``NOTIFY_SOCKET`` set in their environment, which is the path to a UNIX domain socket
   created privately within the container.  The service should use this environment
   variable to trigger notifications compatible with ``systemd``.
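   For example, a minimal notify-style declaration might look like this (the service name
   and path are hypothetical; the daemon itself is expected to report readiness over
   ``NOTIFY_SOCKET``, for example via ``sd_notify``)::

     mydaemon.service: {
       type: notify,
       command: "/opt/superapp/bin/mydaemon",
     }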
.. rubric:: Notes

.. [#f1] Originally, the intent was to duplicate ``bash`` environment variable expansion
   syntax as compatibly as possible.  Over time, however, it became clear that pattern
   matching replacements such as ``${NAME/*.jpg/something}`` relied upon many arcane
   ``bash`` details such as arrays and filename globbing.  Therefore, while the basic
   environment tests (such as those for defaults, as in ``$(HOME:-/home)``) are
   compatible, a more useful set of regex-based features were added to eliminate the need
   for many ``bash`` substitution options.

================================================
FILE: doc/source/ref/index.rst
================================================

.. chaperone documentation master file, created by sphinx-quickstart on Mon May 6
   17:19:12 2013.  You can adapt this file completely to your liking, but it should at
   least contain the root `toctree` directive.

.. _reference:

Chaperone Reference
===================

This is the full Chaperone Reference, and describes in detail how to run and configure
Chaperone.  However, if you are using Chaperone with Docker, you can save time by seeing
how a pre-built container works, then come back here later when you need more detail.

To get started, we suggest reading the :ref:`intro`, then try out the ``chaperone-lamp``
Docker image by following the instructions on the `chaperone-docker github page `_.

Any bugs should be reported as issues at https://github.com/garywiz/chaperone/issues.

.. toctree::
   :maxdepth: 2

   command-line
   config
   env
   utilities

================================================
FILE: doc/source/ref/utilities.rst
================================================

.. chaperone documentation configuration directives

.. _utilities:

Additional Utilities
====================

In addition to the main Chaperone program (see :ref:`ref.chaperone`), several utilities
are provided.  Only the :ref:`ref.telchap` utility requires Chaperone itself.  The others
can be used independently.

.. toctree::

   utility-envcp.rst

================================================
FILE: doc/source/ref/utility-envcp.rst
================================================

.. chaperone command-line documentation

.. _ref.envcp:

Utility: ``envcp``
==================

Overview
--------

The ``envcp`` utility is a simple method to create templates and expand them using the
contents of the environment.  Basic usage is as follows::

  envcp [options] FILE1 ... FILEn DESTINATION

Where:

``FILE1`` ... ``FILEn``
  A list of one or more files to be copied to the destination.  If the destination is a
  directory, then one or more files will be copied.  If the destination does not exist,
  then only a single file can be specified and the destination will be the name of the
  result file.

``DESTINATION``
  A directory to contain the result files, or a single file which should be the result of
  the copy.

The following options can be specified:

============================= ================================================================================
Option                        Meaning
============================= ================================================================================
-v / --verbose                Echo file operations to ``stdout`` as files are copied.
-a / --archive                Attempt to preserve file permissions, ownership, access and modification times.
--overwrite                   Overwrite destination files.  If not specified, ``envcp`` will terminate with
                              an error when any destination file already exists.
--strip *suffix*              When files are copied, strip off the specified filename suffix to derive the
                              filename that should be used in the destination directory.
--shell-enable                Enables backtick expansion features.
--xprefix *char*              Specify the introductory prefix used for variable expansions.  Defaults to the
                              dollar-sign character (`$`).
--xgrouping *charlist*        Specify a list of opening brace types.  Defaults to the left curly brace and
                              the left parenthesis (``{(``).
============================= ================================================================================

The special option character ``-`` can be used to tell ``envcp`` that input should be
taken from ``stdin`` and output should be written to ``stdout``::

  $ envcp - output.txt

Applications
------------

The ``envcp`` utility is usually used as a "poor-man's macro processor", similar to the
way `GNU M4 `_ is often employed, but much simpler.  Using a simple bash-like syntax, you
can create template files and then customize them based upon the current set of
environment variables.

For example, the `nginx `_ web server unfortunately does not support environment variables
inside configuration files.
So, configuration lines like the following give an error::

  ##
  # Logging Settings
  ##
  access_log ${NGINX_LOG_DIR}/access.log;

However, if the ``NGINX_LOG_DIR`` environment variable is found, then the following
command can be used to reprocess a template file to create the true ``nginx``
configuration, like this::

  $ envcp /apps/templates/nginx.conf.tpl /apps/config/nginx.conf

You can even process a complete set of templates by telling ``envcp`` to strip off the
template suffix when it makes the copy::

  $ envcp --strip .tpl /apps/templates/*.tpl /apps/config

Advanced Templates
------------------

Any files copied by ``envcp`` can support the full Chaperone :ref:`environment variable
expansion syntax `.  However, it is important to note that Chaperone environment variable
expansions can span multiple lines, making it possible to create reasonably complicated
conditional macro expansions.

For example, this excerpt from a `bind9 `_ configuration file demonstrates how the
``forwarders`` section can be included only if the ``CONFIG_FWD_HOST`` variable is set::

  $(CONFIG_FWD_HOST:+
    forwarders {
      $(CONFIG_FWD_HOST);
    };
    forward only;
  )

You can also match the contents of variables using the *if-then-else* construct::

  $(ENABLE_ADMIN_PANEL:|T*|
    Alias /admin/ /apps/www/admin_panel_live
  |
    Alias /admin/ /apps/www/errors/admin_dead/
  )

In some situations, you may be creating shell scripts which themselves are templates.  In
this case, you may want to customize the ``envcp`` variable prefix so that you can be sure
any shell syntax is not interpreted by ``envcp``.  So, for example, if you have a shell
script template like this::

  #!/bin/bash
  BASENAME=%%(IMAGE_BASENAME)
  FULLNAME=${PWD:-/home}/${BASENAME}

you can tell ``envcp`` to use ``%%`` as the expansion prefix when you do the copy::

  $ envcp --xprefix '%%' script.sh.tpl script.sh

Backtick Syntax
---------------

Chaperone has built-in support for shell-escapes using :ref:`backtick expansion syntax `.
While this is normally enabled in Chaperone configuration files, it is *disabled* by
default in ``envcp`` to minimize the chance of accidental (or malicious) shell injection
within template scripts.

So, for example, if you have a file ``test.txt`` which contains::

  The date is ...
  $(`date;echo yes`)
  `ls -l`

Then, you will see the following by default using ``envcp``::

  $ envcp -

================================================
FILE: doc/source/status.rst
================================================

Documentation status:

* `Reference Section `_: Complete.  Will be updated always to reflect feature changes and
  clarifications.

* Usage Section: In progress.  Will contain samples and best practices.

* Tools Section: Not started.  Command-line tools like ``telchap``, ``envcp``, and
  ``sdnotify`` which are bundled with Chaperone need documentation pages of their own.

* Appendices: Documentation for chaperone-based images (such as those at
  `chaperone-docker `_) will be located as appendices of the Chaperone reference.  There
  are several production-quality images which we are building both for our own use, and as
  samples of various Chaperone use-cases.  These are separately maintained and have their
  own read-me pages at `chaperone-docker `_.

Help is always appreciated.

================================================
FILE: samples/README
================================================

These are some early samples that may still be useful.  However, it is much better to
take a look at:

  https://github.com/garywiz/chaperone-docker

There you will find a set of working examples of various configurations.
================================================
FILE: samples/chaperone-devbase/Dockerfile
================================================

FROM ubuntu:14.04

ADD setup-bin/* *.sh /setup-bin/
ADD apps/ /apps/
ADD chaperone/ /setup-bin/chaperone/

RUN /setup-bin/install.sh

# We use the environment variable instead of entrypoint args so that any default can be overridden by CMD
ENV CHAPERONE_OPTIONS --config apps/chaperone.d

ENTRYPOINT ["/usr/local/bin/chaperone"]

================================================
FILE: samples/chaperone-devbase/apps/bin/README
================================================

Put commands which need to be executed at the command line, or by application programs,
here.  This directory will automatically be included in the path for all running services.

================================================
FILE: samples/chaperone-devbase/apps/chaperone.d/010-start.conf
================================================

# General environmental settings
settings: {
  env_set: {
    'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin',
    'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)',
    #'SECURE_ROOT': '1',
  },
}

init.service: {
  type: oneshot,
  command: '/bin/bash $(APPS_DIR)/etc/init.sh',
  before: 'default,database,application',
  process_timeout: 20,   # init may take longer
  service_group: 'init',
}

chaperone.logging: {
  enabled: true,
  filter: '[chaperone].*',
  file: '$(APPS_DIR)/var/log/chaperone-%d.log',
}

syslog.logging: {
  enabled: true,
  filter: '*.info;![chaperone].*',
  file: '$(APPS_DIR)/var/log/syslog-%d.log',
}

console.logging: {
  enabled: true,
  stdout: true,
  filter: '*.warn;authpriv,auth.!*;daemon.!warn',
}

================================================
FILE: samples/chaperone-devbase/apps/etc/README
================================================

This is a "mini etc" directory which, as much as possible, is where all normal application
and service configuration files are stored.  For example, in the chaperone-lamp
configuration, all MySQL and Apache configurations are stored here, but may make reference
to other files on the system (such as modules and plugins).  However, the normal startup
files in /etc/apache2 and /etc/mysql are not used, as they expect a normal fully-booted
system.

System start-up is controlled by the init.sh script, which reads additional startup files
from ../init.d.  This is not built into chaperone, but rather is a custom configuration
defined within chaperone.d.

If you want, you can completely change the way things work and invent new startup schemes.
But, this is a good place to start.

================================================
FILE: samples/chaperone-devbase/apps/etc/init.sh
================================================

#!/bin/bash
# A quick script to initialize the system

# We publish two variables for use in startup scripts:
#
#   CONTAINER_INIT=1   if we are initializing the container for the first time
#   APPS_INIT=1        if we are initializing the $APPS_DIR for the first time
#
# Both may be relevant, since it's possible that the $APPS_DIR may be on a mount point
# so it can be reused when starting up containers which refer to it.

function dolog() { logger -t init.sh -p info $*; }

apps_init_file="$APPS_DIR/var/run/apps_init.done"
cont_init_file="/container_init.done"

export CONTAINER_INIT=0
export APPS_INIT=0

if [ ! -f $cont_init_file ]; then
  dolog "initializing container for the first time"
  CONTAINER_INIT=1
  su -c "date >$cont_init_file"
fi
if [ ! -f $apps_init_file ]; then
  dolog "initializing $APPS_DIR for the first time"
  APPS_INIT=1
  mkdir -p $APPS_DIR/var/run $APPS_DIR/var/log
  chmod 777 $APPS_DIR/var/run $APPS_DIR/var/log
  date >$apps_init_file
fi

if [ -d $APPS_DIR/init.d ]; then
  for initf in $( find $APPS_DIR/init.d -type f -executable \! -name '*~' ); do
    dolog "running $initf..."
    $initf
  done
fi

if [ "$SECURE_ROOT" == "1" -a $CONTAINER_INIT == 1 ]; then
  dolog locking down root account
  su -c 'passwd -l root'
fi

================================================
FILE: samples/chaperone-devbase/apps/init.d/README
================================================

Files in this directory are executed upon container startup by the ../etc/init.sh script.
There are two modes:

1. When the container is first set up, CONTAINER_INIT=="1" and the script can use 'su'
   without a password.  This is so that any setup activities can be performed which
   require full access to the system.

2. On subsequent boots (if the container is stopped and started), the same scripts will
   be run with CONTAINER_INIT=="0".  However, root access is locked down if env var
   SECURE_ROOT=1.  Note that SECURE_ROOT is not defined by default.

In all cases, scripts are run as either root, or the user specified by --user on the
chaperone command line.

================================================
FILE: samples/chaperone-devbase/build-image.sh
================================================

#!/bin/bash
# the cd trick assures this works even if the script is invoked from another directory.
cd ${0%/*}
./setup-bin/build -x
docker tag chaperone-devbase chaperone-base

================================================
FILE: samples/chaperone-devbase/install.sh
================================================

#!/bin/bash

# Assumes there is an "optional" apt-get proxy running on our HOST on port 3142.
# You can run one by looking here: https://github.com/sameersbn/docker-apt-cacher-ng
# Does no harm if nothing is running on that port.
/setup-bin/ct_setproxy

# see https://github.com/docker/docker/issues/1724
apt-get update

# Normal install steps
apt-get -y install python3-pip

# We install from the local directory rather than pip so we can test and develop.
cd /setup-bin/chaperone
python3 setup.py install

# Now, just so there is no confusion, create a new, empty /var/log directory so that any
# logs written will obviously be written by the current container software.  Keep the old
# one so it's there for reference so we can see what the distribution did.
cd /var
mv log log-dist
mkdir log
chmod 775 log
chown root.syslog log

# Customize some system files
cp /setup-bin/dot.bashrc /root/.bashrc

# Allow unfettered root access by users.  This is done so that apps/init.d scripts can
# have unfettered access to root on their first startup to configure userspace files
# if needed (see mysql in chaperone-lamp for an example).  At the end of the first startup
# this is then locked down by apps/etc/init.sh.
passwd -d root
sed -i 's/nullok_secure/nullok/' /etc/pam.d/common-auth

================================================
FILE: samples/chaperone-lamp/Dockerfile
================================================

FROM chapdev/chaperone-base:latest

ADD *.sh /setup-bin/
ADD apps/ /apps/

RUN /setup-bin/install.sh

EXPOSE 8080

================================================
FILE: samples/chaperone-lamp/apps/chaperone.d/105-mysqld.conf
================================================

settings: {
  env_set: {
    'MYSQL_HOME': '$(APPS_DIR)/etc/mysql',
    'MYSQL_UNIX_PORT': '$(APPS_DIR)/var/run/mysqld.sock',
  },
}

mysql1.service: {
  type: forking,
  command: "/etc/init.d/mysql start",
  enabled: false,
  uid: root,
  service_group: database,
}

mysql.service: {
  type: simple,
  command: "$(APPS_DIR)/etc/mysql/start_mysql.sh",
  enabled: true,
  service_group: database,
}

================================================
FILE: samples/chaperone-lamp/apps/chaperone.d/120-apache2.conf
================================================

apache2.service: {
  command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND",
  enabled: true,
  restart: true,
  optional: true,
  uid: "$(USER:-www-data)",
  env_set: {
    APACHE_LOCK_DIR: /tmp,
    APACHE_PID_FILE: /tmp/apache2.pid,
    APACHE_RUN_USER: www-data,
    APACHE_RUN_GROUP: www-data,
    APACHE_LOG_DIR: "$(APPS_DIR)/var/log/apache2",
    APACHE_SITES_DIR: "$(APPS_DIR)/www",
    MYSQL_SOCKET: "$(APPS_DIR)/var/run/mysqld.sock",
  },
  after: database,
}

apache2.logging: {
  enabled: true,
  filter: 'local1.*;*.!err',
  file: '$(APPS_DIR)/var/log/apache2/access-%d.log',
  uid: "$(USER:-www-data)",
}

apache2.logging: {
  enabled: true,
  filter: 'local1.err',
  stderr: true,
  file: '$(APPS_DIR)/var/log/apache2/error-%d.log',
  uid: "$(USER:-www-data)",
}

================================================
FILE: samples/chaperone-lamp/apps/etc/apache2.conf
================================================

# This is the main Apache server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See http://httpd.apache.org/docs/2.4/ for detailed information about
# the directives and /usr/share/doc/apache2/README.Debian about Debian specific
# hints.

# This is a CHAPERONE-specific configuration designed to keep things lean.  It is based
# loosely on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to
# assure that system-installed modules and configurations will work.

# The chaperone configuration is designed to work within a self-contained application
# directory defined by APPS_DIR.  Note that it may be a user directory, and thus chaperone
# allows Apache to run entirely under any user account, along with a MySQL server that is
# also sequestered in the same way.  This means that you can have containers "point" to
# apps directories on your host server and manage per-container resources consistently in
# those directories during development, until you move the entire apps directory into
# a production container environment or image.

#
# The accept serialization lock file MUST BE STORED ON A LOCAL DISK.
#
Mutex file:${APACHE_LOCK_DIR} default

PidFile ${APACHE_PID_FILE}

# Timeout: The number of seconds before receives and sends time out.
Timeout 300

KeepAlive On
MaxKeepAliveRequests 100
KeepAliveTimeout 5

# Note that the user and group are defined in chaperone.d/120-apache.conf
#User ${APACHE_RUN_USER}
#Group ${APACHE_RUN_GROUP}

# The default is off because it'd be overall better for the net if people
# had to knowingly turn this feature on, since enabling it means that
# each client request will result in AT LEAST one lookup request to the
# nameserver.
HostnameLookups Off

# ErrorLog: The location of the error log file.
# We dump errors to syslog so that we can easily duplicate it to the container stderr if we want.
ErrorLog syslog:local1

# Available values: trace8, ..., trace1, debug, info, notice, warn,
# error, crit, alert, emerg.
LogLevel warn

# Include standard Debian/Ubuntu module configuration:
Include /etc/apache2/mods-enabled/*.load
Include /etc/apache2/mods-enabled/*.conf

# CHAPERONE: Override to listen on 8080 and 8443
Listen 8080
Listen 8443

# Sets the default security model of the Apache2 HTTPD server.  It does
# not allow access to the root filesystem outside of /usr/share and /var/www.
# The former is used by web applications packaged in Debian,
# the latter may be used for local directories served by the web server.  If
# your system is serving content from a sub-directory in /srv you must allow
# access here, or in any related virtual host.
<Directory />
	Options FollowSymLinks
	AllowOverride None
	Require all denied
</Directory>

<Directory /usr/share>
	AllowOverride None
	Require all granted
</Directory>

<Directory /var/www/>
	Options Indexes FollowSymLinks
	AllowOverride None
	Require all granted
</Directory>

AccessFileName .htaccess

# The following lines prevent .htaccess and .htpasswd files from being
# viewed by Web clients.
<FilesMatch "^\.ht">
	Require all denied
</FilesMatch>

# The following directives define some format nicknames for use with
# a CustomLog directive.
LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
LogFormat "%h %l %u %t \"%r\" %>s %O" common
LogFormat "%{Referer}i -> %U" referer
LogFormat "%{User-agent}i" agent

# Include of directories ignores editors' and dpkg's backup files,
# see README.Debian for details.

# Include generic snippets of statements
IncludeOptional /etc/apache2/conf-enabled/*.conf

##
## CHAPERONE SPECIFICS
##

# Apache configuration files for chaperone sites (Note that we do NOT look in /etc/apache2/sites-enabled)
IncludeOptional ${APACHE_SITES_DIR}/sites.d/*.conf

# Point MySQL socket to the right spot
php_admin_value mysql.default_socket ${APPS_DIR}/var/run/mysqld.sock
php_admin_value mysqli.default_socket ${APPS_DIR}/var/run/mysqld.sock

================================================
FILE: samples/chaperone-lamp/apps/etc/mysql/my.cnf
================================================

#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html

# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port   = 3306
#socket = /var/run/mysqld/mysqld.sock

# Here are entries for some specific programs
# The following values assume you have at least 32M ram

# This was formerly known as [safe_mysqld].  Both versions are currently parsed.
[mysqld_safe]
#socket = /var/run/mysqld/mysqld.sock
nice    = 0

[mysqld]
#
# * Basic Settings
#
#pid-file  = /var/run/mysqld/mysqld.pid
#socket    = /var/run/mysqld/mysqld.sock
port       = 3306
basedir    = /usr
#datadir   = /var/lib/mysql
tmpdir     = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
bind-address = 127.0.0.1
#
# * Fine Tuning
#
key_buffer         = 16M
max_allowed_packet = 16M
thread_stack       = 192K
thread_cache_size  = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover     = BACKUP
#max_connections    = 100
#table_cache        = 64
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 1M
query_cache_size  = 16M
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log      = 1
#
# Error log - should be very few entries.
#
#log_error = /var/log/mysql/error.log
#
# Here you can see queries with especially long duration
#log_slow_queries = /var/log/mysql/mysql-slow.log
#long_query_time  = 2
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#log_bin   = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size  = 100M
#binlog_do_db     = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options.  There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca   = /etc/mysql/cacert.pem
# ssl-cert = /etc/mysql/server-cert.pem
# ssl-key  = /etc/mysql/server-key.pem

[mysqldump]
quick
quote-names
max_allowed_packet = 16M

[mysql]
#no-auto-rehash  # faster start of mysql but no tab completion

[isamchk]
key_buffer = 16M

#
# * IMPORTANT: Additional settings that can override those from this file!
#   The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

================================================
FILE: samples/chaperone-lamp/apps/etc/mysql/start_mysql.sh
================================================

#!/bin/bash

# For a general query log, include the following:
#   --general-log-file=$APPS_DIR/log/mysqld-query.log
#   --general-log=1

exec /usr/sbin/mysqld \
     --defaults-file=$APPS_DIR/etc/mysql/my.cnf \
     --user ${USER:-mysql} \
     --datadir=$APPS_DIR/var/mysql \
     --socket=$APPS_DIR/var/run/mysqld.sock \
     --pid-file=$APPS_DIR/var/run/mysqld.pid \
     --log-error=$APPS_DIR/var/log/mysqld-error.log \
     --plugin-dir=/usr/lib/var/mysql/plugin

================================================
FILE: samples/chaperone-lamp/apps/init.d/mysql.sh
================================================

#!/bin/bash

distdir=/var/lib/mysql
appdbdir=$APPS_DIR/var/mysql

function dolog() { logger -t mysql.sh -p info $*; }

if [ $CONTAINER_INIT == 1 ]; then
  dolog "hiding distribution mysql files in /etc so no clients see them"
  su -c "cd /etc; mv my.cnf my.cnf-dist; mv mysql mysql-dist; mv $distdir $distdir-dist"
fi

if [ $APPS_INIT == 1 ]; then
  if [ ! -d $appdbdir ]; then
    dolog "copying distribution $distdir to $appdbdir"
    su -c "cp -a $distdir-dist $appdbdir; chown -R ${USER:-mysql} $appdbdir"
  else
    dolog "existing $appdbdir found when initializing $APPS_DIR for the first time, not changed."
  fi
fi

================================================
FILE: samples/chaperone-lamp/apps/init.d/phpmyadmin.sh
================================================

#!/bin/bash

puser=${USER:-www-data}

function dolog() { logger -t phpmyadmin.sh -p info $*; }

if [ $CONTAINER_INIT == 1 ]; then
  dolog setting phpmyadmin user permissions for "$puser"
  su -c "chown -R $puser: /var/lib/phpmyadmin/tmp; chgrp --reference /var/lib/phpmyadmin/tmp /var/lib/phpmyadmin/*.php"
  su -c "chgrp --reference /var/lib/phpmyadmin/tmp \`find /etc/phpmyadmin -group www-data\`"
fi

================================================
FILE: samples/chaperone-lamp/apps/www/default/index.php
================================================

================================================
FILE: samples/chaperone-lamp/apps/www/sites.d/default.conf
================================================

# The ServerName directive sets the request scheme, hostname and port that
# the server uses to identify itself.
#ServerName www.example.com

<VirtualHost *:8080>
	ServerAdmin webmaster@localhost
	DocumentRoot ${APACHE_SITES_DIR}/default

	# Errors go to the syslog so they can be duplicated to the console easily
	ErrorLog syslog:local1

	CustomLog ${APACHE_LOG_DIR}/default-access.log combined
</VirtualHost>

================================================
FILE: samples/chaperone-lamp/build-image.sh
================================================

#!/bin/bash
# the cd trick assures this works even if the script is invoked from another directory.
cd ${0%/*}
./setup-bin/build -x

================================================
FILE: samples/chaperone-lamp/install.sh
================================================

#!/bin/bash

MYSQL_ROOT_PW='ChangeMe'

# Assumes there is an "optional" apt-get proxy running on our HOST on port 3142.
# You can run one by looking here: https://github.com/sameersbn/docker-apt-cacher-ng
# Does no harm if nothing is running on that port.
/setup-bin/ct_setproxy

# Normal install steps
apt-get install -y apache2

debconf-set-selections <<< "debconf debconf/frontend select Noninteractive"
debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_ROOT_PW"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_ROOT_PW"
debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true"
debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password password $MYSQL_ROOT_PW"
debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password-confirm password $MYSQL_ROOT_PW"
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password $MYSQL_ROOT_PW"
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password $MYSQL_ROOT_PW"
debconf-set-selections <<< "phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2"

apt-get install -y mysql-server

/usr/bin/mysqld_safe &

# Install phpmyadmin.  Actual setup occurs at first boot, since it depends on what user we
# run the container as.
apt-get install -y phpmyadmin
php5enmod mcrypt
apt-get install -y php-pear

================================================
FILE: samples/docsample/Dockerfile
================================================

FROM ubuntu:14.04
MAINTAINER garyw@blueseastech.com

RUN apt-get update && \
    apt-get install -y openssh-server apache2 python3-pip && \
    pip3 install chaperone

RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /etc/chaperone.d

COPY chaperone.conf /etc/chaperone.d/chaperone.conf

EXPOSE 22 80

ENTRYPOINT ["/usr/local/bin/chaperone"]

================================================
FILE: samples/docsample/README
================================================

This is a sample designed as a substitute for the Docker "supervisor" sample at:

  https://docs.docker.com/articles/using_supervisord/

It is updated for Ubuntu 14.04 and uses Chaperone as its supervisor daemon.

================================================
FILE: samples/docsample/chaperone.conf
================================================

sshd.service: {
  command: "/usr/sbin/sshd -D"
}

apache2.service: {
  command: "bash -c 'source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND'",
}

console.logging: {
  selector: '*.warn',
  stdout: true,
}

================================================
FILE: samples/setup-bin/build
================================================

#!/bin/bash

# This is a great little program to make it easy to share basic build components across
# a set of docker files.  Basically, you do this:
#    cd sandbox/someimage
#    ln -s ../setup-bin   #if needed
#    ./setup-bin/build

helpmsg="
usage: setup/build\n
\n
   -n     name the image (else directory name is used)\n
   -x     disable the cache\n
   -y     ask no questions and do the default\n
   -p ?   specify prefix to use for build tag (default chapdev/)\n
\n
If you have additional arguments to docker, then include them after a --\n
"

if [ "$0" != './setup-bin/build' ] ; then
    echo 'Sorry, I only work if executed as "./setup-bin/build"'
    exit 1
fi

if [ ! -f Dockerfile ]; then
    echo 'Hey, where is your ./Dockerfile?'
    exit 1
fi

ipfx='chapdev/'
buildargs=(-t ${PWD##*/})
noquestions=''

while getopts "n:p:hxy" opt; do
    case $opt in
        n)
            buildargs[1]=$OPTARG
            ;;
        h)
            echo -e $helpmsg
            exit 0
            ;;
        y)
            noquestions='true'
            yn='y'
            ;;
        p)
            ipfx=$OPTARG
            ;;
        x)
            buildargs+=(--no-cache)
            ;;
        \?)
            exit 1
            ;;
    esac
done
shift $((OPTIND-1))

buildargs[1]=$ipfx${buildargs[1]}
imagename=${buildargs[1]}

echo Building image: $imagename

oldimage=`docker images -q $imagename`

echo docker build ${buildargs[*]} $* -
tar czh . | docker build ${buildargs[*]} $* -

newimage=`docker images -q $imagename`

if [ "$oldimage" -a "$oldimage" != "$newimage" ]; then
    if [ ! "$noquestions" ]; then
        read -p "Delete old image $oldimage? (y/n) " yn
    fi
    if [ "$yn" = "y" ]; then
        docker rmi $oldimage
        echo $oldimage removed
    fi
fi

================================================
FILE: samples/setup-bin/ct_setproxy
================================================

#!/bin/bash

# If our host has an apt proxy container running at 3142, then use it for apt

defhost=`ip route | awk '/default/ { print $3; }'`

if nc -z $defhost 3142; then
    echo "Acquire::http { Proxy \"http://$defhost:3142\"; };" >/etc/apt/apt.conf.d/01proxy
    echo ADDED PROXY FOR apt-get on $defhost
else
    rm -f /etc/apt/apt.conf.d/01proxy
    echo NO PROXY FOR apt-get
fi

================================================
FILE: samples/setup-bin/dot.bashrc
================================================

# ~/.bashrc: executed by bash(1) for non-login shells.
# This is a simpler, stripped-down version for containers

# If not running interactively, don't do anything
[ -z "$PS1" ] && return

# don't put duplicate lines in the history.  See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

case "$TERM" in
xterm*|rxvt*)
    PS1="\[\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Alias definitions.
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

================================================
FILE: sandbox/.gitignore
================================================

apps-*
var-*

================================================
FILE: sandbox/.shinit
================================================

echo THIS IS THE SHELL INIT

================================================
FILE: sandbox/README
================================================

Files in this directory were created ad-hoc by me as a sandbox testing area.  Typically, I
create a docker image and point /home to my host's /home, then keep chaperone in a
sub-directory where I work on it without needing to install it each time.  I run my docker
image with ./testdock and it isolates operation in this sandbox directory.

The "testimage" script is especially useful since it lets you work with a standard docker
chaperone image while substituting the current chaperone source instead of using the
installed version.

================================================
FILE: sandbox/bare_startup.sh
================================================

#!/bin/bash
# Used to start up a bare chaperone test image using ubuntu:latest.  Helps streamline
# installation and startup issues for new users.
echo Bare Ubuntu startup # Start up an apt-get proxy which runs on our host in another container, if it's present /setup-bin/ct_setproxy cd $SANDBOX/../dist pip3 install chaperone-*.tar.gz exec bash -i ================================================ FILE: sandbox/bareimage/Dockerfile ================================================ FROM ubuntu:14.04 ADD setup-bin/* *.sh /setup-bin/ RUN /setup-bin/install-bareimage.sh ================================================ FILE: sandbox/bareimage/install-bareimage.sh ================================================ #!/bin/bash /setup-bin/ct_setproxy apt-get update apt-get -y install --no-install-recommends python3-pip pip3 install setuptools ================================================ FILE: sandbox/bash.bashrc ================================================ PS1="image:\W$ " if [ "$EMACS" == "t" ]; then stty -echo fi cd $APPS_DIR/.. PATH=$PWD/bin:$PATH cd $APPS_DIR echo "" echo "Now running inside container. Directory is: $APPS_DIR" echo "" ================================================ FILE: sandbox/bin/chaperone ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.chaperone import main_entry main_entry() ================================================ FILE: sandbox/bin/cps ================================================ #!/bin/bash # Shortcut for more relevant PS for containers ps --forest -weo 'user,pid,ppid,pgid,sid,%cpu,%mem,stat,command' ================================================ FILE: sandbox/bin/fakeentry ================================================ #!/bin/bash # Useful for testing if you want to inject a shell BEFORE chaperone starts by changing the entry point. export ENTARGS="$*" exec /bin/bash ================================================ FILE: sandbox/bin/repeat ================================================ #!/usr/bin/python3 """ Repeat utility for testing Usage: repeat [--nosignals] [-n=<count>] [-i=<seconds>] [-e] <message> Options: -n=<count> Specify number of repetitions, or infinite if absent -i=<seconds> Specify interval, or 1 second if absent --nosignals Ignore all signals if present -e Output to stderr instead of stdout.
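Example (as the sandbox configs invoke it): repeat -i4 'Repeat to stdout'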
""" import signal from time import sleep, strftime, localtime from docopt import docopt import sys opt = docopt(__doc__) if opt['--nosignals']: signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) reps = iter(int,1) if not opt['-n'] else range(int(opt['-n'])) delay = 1 if not opt['-i'] else int(opt['-i']) handle = sys.stderr if opt['-e'] else sys.stdout msg = " " + opt[''] + "\n" for n in reps: handle.write(strftime("%M:%S", localtime()) + msg) handle.flush() sleep(delay) ================================================ FILE: sandbox/centos.d/apache.conf ================================================ apache1.service: { type: notify, command: "/usr/sbin/httpd -DFOREGROUND", enabled: true, restart: true, env_set: { LANG: C, } } mysql.service: { command: "/etc/init.d/mysql start", enabled: true, ignore_failures: true, } apache2.service: { command: "/etc/init.d/apache2 start", after: "mysql.service", enabled: true, ignore_failures: true, } ================================================ FILE: sandbox/centos.d/app.conf ================================================ main.logging: { stderr: false, } ================================================ FILE: sandbox/centos.d/cron.conf ================================================ cron.service: { bin: /usr/sbin/cron, args: -f, optional: true, restart: true, } ================================================ FILE: sandbox/centos.d/sys1.conf ================================================ settings: { env_inherit: ['SANDBOX', '_*'], env_set: {'TERM': 'xpath-revisited', 'QUESTIONER': 'the-law', 'WITHIN-HOME': '$(HOME)/inside-home', 'INTERACTIVE': '$(_CHAPERONE_INTERACTIVE)', 'CONFIG_DIR': '$(_CHAPERONE_CONFIG_DIR)', 'PROCTOOL': '$(SANDBOX)/proctool', 'ENV': '$(SANDBOX)/.shinit', 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin', }, uid: 0, idle_delay: 1, debug: true, } notify.service: { type: notify, command: "$(PROCTOOL) --wait 20 --dump --notify '--ready' 'notify process'", stdout: inherit, enabled: true, } fake1.service: { command: "$(PROCTOOL) --hang 'fake1 process'", enabled: false, } fake2.service: { command: "$(PROCTOOL) --hang 'fake2 service'", enabled: false, stdout: inherit, uid: 1000, env_inherit: ['Q*'], } fake3.service: { command: "$(PROCTOOL) 'oneshot service'", enabled: false, type: oneshot, stdout: inherit, ignore_failures: true, uid: garyw, service_group: 'earlystuff', before: "default", } exittest.service: { enabled: true, restart: true, restart_limit: 5, ignore_failures: true, command: "$(PROCTOOL) --exit 20 'Exiting with 20'", } repeat.service: { command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", enabled: false, } repeat_err.service: { command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", enabled: false, } beforemain.service: { type: "oneshot", enabled: false, command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", stdout: inherit, before: "MAIN", service_group: "IDLE", } main.logging: { filter: "[chaperone].*", file: /var/log/chaperone-%d.log, enabled: true, } console.logging: { stdout: true, filter: '*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', extended: true, enabled: true, } debian.logging: { filter: '[debian-start].*', file: /var/log/debian-start.log, enabled: true, } syslog.logging: { filter: '*.info;![debian-start].*;![chaperone].*', file: '/var/log/syslog-%d-%H%M', enabled: true, 
} ================================================ FILE: sandbox/distserv/chaperone.d/005-config.conf ================================================ # 005-config.conf # # Put container configuration variables here. This should strictly be for configuration # variables that are passed into the container. 100% of container configuration should # be possible by setting these variables here or on the 'docker run' command line. settings: { env_set: { # This is the hostname of the host machine. Generally, this is only needed # by certain applications (such as those supporting SSL certificates), but is common # enough to include as a standard option. CONFIG_EXT_HOSTNAME: "$(CONFIG_EXT_HOSTNAME:-localhost)", # HTTP ports for exported ports. It is good policy to define these in your "docker run" # command so that internal applications know what ports the public interfaces are # visible on. Sometimes this is necessary, such as when applications push their # endpoints via API's or when webservers do redirects. The default launchers # for Chaperone containers handle this for you automatically. CONFIG_EXT_HTTP_PORT: "$(CONFIG_EXT_HTTP_PORT:-8080)", CONFIG_EXT_HTTPS_PORT: "$(CONFIG_EXT_HTTPS_PORT:-8443)", # Configure this to enable SSL and generate snakeoil keys for the given domain CONFIG_EXT_SSL_HOSTNAME: "$(CONFIG_EXT_SSL_HOSTNAME:-)", # Create additional configuration variables here. Start them with "CONFIG_" # so they can be easily identified... } } ================================================ FILE: sandbox/distserv/chaperone.d/010-start.conf ================================================ # 010-start.conf # # This is the first start-up file for the chaperone base images. Note that start-up files # are processed in order alphabetically, so settings in later files can override those in # earlier files. # General environmental settings. These settings apply to all services and logging entries. # There should be only one "settings" directive in each configuration file. But, any # settings encountered in subsequent configuration files can override or augment these. # Note that variables are expanded as late as possible. So, there can be variables # defined here which depend upon variables which will be defined later (such as _CHAP_SERVICE), # which is defined implicitly for each service. settings: { env_set: { 'LANG': 'en_US.UTF-8', 'LC_CTYPE': '$(LANG)', 'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin', 'RANDFILE': '/tmp/openssl.rnd', # Uncomment the below to tell startup.sh to lock-down the root account after the first # successful start. #'SECURE_ROOT': '1', # Variables starting with _CHAP are internal and won't be exported to services, # so we derive public environment variables if needed... 'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)', 'CHAP_SERVICE_NAME': '$(_CHAP_SERVICE:-)', 'CHAP_TASK_MODE': '$(_CHAP_TASK_MODE:-)', # The best use-cases will want to move $(VAR_DIR) out of the container to keep # the container ephemeral, so all references to var should always use this # environment variable. 'VAR_DIR': '$(APPS_DIR)/var', CHAPERONE_ROOT: "`bash -c 'cd $(APPS_DIR)/../..; echo $PWD'`" }, } # For the console, we include everything which is a warning except authentication # messages and daemon messages which are not errors.
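# ('!' negates a match within a selector, as in the '![debian-start].*' patterns used in the other sandbox configs.)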
console.logging: { enabled: true, stdout: true, selector: '*.warn;authpriv,auth.!*;daemon.!warn', } ================================================ FILE: sandbox/distserv/chaperone.d/120-apache2.conf ================================================ # 120-apache2.conf # # Start up apache. This is a "simple" service, so chaperone will monitor Apache and restart # it if necessary. Note that apache2.conf refers to MYSQL_UNIX_PORT (set by 105-mysql.conf) # to tell PHP where MySQL is running. # # In the case where no USER variable is specified, we run as the www-data user. settings: { env_set: { HTTPD_SERVER_NAME: apache, } } apache2.service: { command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND", restart: true, stdout: inherit, stderr: inherit, uid: "$(USER:-www-data)", env_set: { APACHE_LOCK_DIR: /tmp, APACHE_PID_FILE: /tmp/apache2.pid, APACHE_RUN_USER: www-data, APACHE_RUN_GROUP: www-data, APACHE_RUN_DIR: "/tmp", APACHE_LOG_DIR: "/tmp", }, # If Apache2 does not require a database, you can leave this out. after: database, } apache2.logging: { enabled: true, selector: 'local1.*;*.!err', stderr: true, } ================================================ FILE: sandbox/distserv/etc/apache2.conf ================================================ # This is the main Apache server configuration file. It contains the # configuration directives that give the server its instructions. # See http://httpd.apache.org/docs/2.4/ for detailed information about # the directives and /usr/share/doc/apache2/README.Debian about Debian specific # hints. # This is a CHAPERONE-specific configuration designed to keep things lean. It is based loosely # on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to assure that # system-installed modules and configurations will work. # The chaperone configuration is designed to work within a self-contained application directory # defined by APPS_DIR. Note that it may be a user directory, and thus chaperone allows # Apache to run entirely under any user account, along with a MySQL server that is also # sequestered in the same way. This means that you can have containers "point" to apps # directories on your host server and manage per-container resources consistently in # those directories during development, until you move the entire apps directory into # a production container environment or image. # # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. # Mutex file:${APACHE_LOCK_DIR} default PidFile ${APACHE_PID_FILE} # Timeout: The number of seconds before receives and sends time out. Timeout 300 KeepAlive On MaxKeepAliveRequests 100 KeepAliveTimeout 5 # Note that the user and group are defined in chaperone.d/120-apache.conf #User ${APACHE_RUN_USER} #Group ${APACHE_RUN_GROUP} # The default is off because it'd be overall better for the net if people # had to knowingly turn this feature on, since enabling it means that # each client request will result in AT LEAST one lookup request to the # nameserver. HostnameLookups Off # ErrorLog: The location of the error log file. # We dump errors to syslog so that we can easily duplicate it to the container stderr if we want. ErrorLog syslog:local1 # Available values: trace8, ..., trace1, debug, info, notice, warn, # error, crit, alert, emerg. 
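# It is also possible to configure the log level for particular modules, e.g. "LogLevel info ssl:warn"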
LogLevel warn # Include standard Debian/Ubuntu module configuration: Include /etc/apache2/mods-enabled/*.load Include /etc/apache2/mods-enabled/*.conf # CHAPERONE: Override to listen on 8080 and 8443 Listen 8080 Listen 8443 # Sets the default security model of the Apache2 HTTPD server. It does # not allow access to the root filesystem outside of /usr/share and /var/www. # The former is used by web applications packaged in Debian, # the latter may be used for local directories served by the web server. If # your system is serving content from a sub-directory in /srv you must allow # access here, or in any related virtual host. <Directory /> Options FollowSymLinks AllowOverride None Require all denied </Directory> <Directory /usr/share> AllowOverride None Require all granted </Directory> DocumentRoot ${CHAPERONE_ROOT} <Directory ${CHAPERONE_ROOT}> Options Indexes FollowSymLinks AllowOverride None Require all granted </Directory> AccessFileName .htaccess # The following lines prevent .htaccess and .htpasswd files from being # viewed by Web clients. <FilesMatch "^\.ht"> Require all denied </FilesMatch> # The following directives define some format nicknames for use with # a CustomLog directive. LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined LogFormat "%h %l %u %t \"%r\" %>s %O" common LogFormat "%{Referer}i -> %U" referer LogFormat "%{User-agent}i" agent # Include of directories ignores editors' and dpkg's backup files, # see README.Debian for details. # Include generic snippets of statements IncludeOptional /etc/apache2/conf-enabled/*.conf ================================================ FILE: sandbox/distserv/run.sh ================================================ #!/bin/bash #Developer's startup script #Created by chaplocal on Thu Oct 15 03:47:31 UTC 2015 IMAGE="chapdev/chaperone-apache" INTERACTIVE_SHELL="/bin/bash" # You can specify the external host and ports for your webserver here. These variables # are also passed into the container so that any application code which does redirects # can use these if need be. EXT_HOSTNAME=localhost EXT_HTTP_PORT=9980 EXT_HTTPS_PORT=9943 # Uncomment to enable SSL and specify the certificate hostname #EXT_SSL_HOSTNAME=secure.example.com PORTOPT="-p $EXT_HTTP_PORT:8080 -e CONFIG_EXT_HTTP_PORT=$EXT_HTTP_PORT \ -p $EXT_HTTPS_PORT:8443 -e CONFIG_EXT_HTTPS_PORT=$EXT_HTTPS_PORT" usage() { echo "Usage: run.sh [-d] [-p port#] [-h] [extra-chaperone-options]" echo " Run $IMAGE as a daemon or interactively (the default)." echo " First available port will be remapped to $EXT_HOSTNAME if possible." exit } if [ "$CHAP_SERVICE_NAME" != "" ]; then echo run.sh should be executed on your docker host, not inside a container. exit fi cd ${0%/*} # go to directory of this file APPS=$PWD cd .. options="-t -i -e TERM=$TERM --rm=true" shellopt="/bin/bash" while getopts ":-dp:n:" o; do case "$o" in d) options="-d" shellopt="" ;; n) options="$options --name $OPTARG" ;; p) PORTOPT="-p $OPTARG" ;; -) # first long option terminates break ;; *) usage ;; esac done shift $((OPTIND-1)) # Run the image with this directory as our local apps dir. # Create a user with a uid/gid based upon the file permissions of the chaperone.d # directory.
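# (e.g. if this file lives in /home/user/distserv, MOUNT below becomes /home, the top-level directory that is bind-mounted into the container)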
MOUNT=${PWD#/}; MOUNT=/${MOUNT%%/*} # extract user mountpoint SELINUX_FLAG=$(sestatus 2>/dev/null | fgrep -q enabled && echo :z) docker run --name distserv $options -v $MOUNT:$MOUNT$SELINUX_FLAG $PORTOPT \ -e CONFIG_EXT_HOSTNAME="$EXT_HOSTNAME" \ -e CONFIG_EXT_SSL_HOSTNAME="$EXT_SSL_HOSTNAME" \ $IMAGE \ --create $USER:$APPS/chaperone.d --config $APPS/chaperone.d $* $shellopt ================================================ FILE: sandbox/etc/apache2.conf ================================================ # This is the main Apache server configuration file. It contains the # configuration directives that give the server its instructions. # See http://httpd.apache.org/docs/2.4/ for detailed information about # the directives and /usr/share/doc/apache2/README.Debian about Debian specific # hints. # # # Summary of how the Apache 2 configuration works in Debian: # The Apache 2 web server configuration in Debian is quite different to # upstream's suggested way to configure the web server. This is because Debian's # default Apache2 installation attempts to make adding and removing modules, # virtual hosts, and extra configuration directives as flexible as possible, in # order to make automating the changes and administering the server as easy as # possible. # It is split into several files forming the configuration hierarchy outlined # below, all located in the /etc/apache2/ directory: # # /etc/apache2/ # |-- apache2.conf # | `-- ports.conf # |-- mods-enabled # | |-- *.load # | `-- *.conf # |-- conf-enabled # | `-- *.conf # `-- sites-enabled # `-- *.conf # # # * apache2.conf is the main configuration file (this file). It puts the pieces # together by including all remaining configuration files when starting up the # web server. # # * ports.conf is always included from the main configuration file. It is # supposed to determine listening ports for incoming connections which can be # customized anytime. # # * Configuration files in the mods-enabled/, conf-enabled/ and sites-enabled/ # directories contain particular configuration snippets which manage modules, # global configuration fragments, or virtual host configurations, # respectively. # # They are activated by symlinking available configuration files from their # respective *-available/ counterparts. These should be managed by using our # helpers a2enmod/a2dismod, a2ensite/a2dissite and a2enconf/a2disconf. See # their respective man pages for detailed information. # # * The binary is called apache2. Due to the use of environment variables, in # the default configuration, apache2 needs to be started/stopped with # /etc/init.d/apache2 or apache2ctl. Calling /usr/bin/apache2 directly will not # work with the default configuration. # Global configuration # # # ServerRoot: The top of the directory tree under which the server's # configuration, error, and log files are kept. # # NOTE! If you intend to place this on an NFS (or otherwise network) # mounted filesystem then please read the Mutex documentation (available # at <URL:http://httpd.apache.org/docs/2.4/mod/core.html#mutex>); # you will save yourself a lot of trouble. # # Do NOT add a slash at the end of the directory path. # #ServerRoot "/etc/apache2" # # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. # Mutex file:${APACHE_LOCK_DIR} default # # PidFile: The file in which the server should record its process # identification number when it starts. # This needs to be set in /etc/apache2/envvars # PidFile ${APACHE_PID_FILE} # # Timeout: The number of seconds before receives and sends time out.
# Timeout 300 # # KeepAlive: Whether or not to allow persistent connections (more than # one request per connection). Set to "Off" to deactivate. # KeepAlive On # # MaxKeepAliveRequests: The maximum number of requests to allow # during a persistent connection. Set to 0 to allow an unlimited amount. # We recommend you leave this number high, for maximum performance. # MaxKeepAliveRequests 100 # # KeepAliveTimeout: Number of seconds to wait for the next request from the # same client on the same connection. # KeepAliveTimeout 5 # These need to be set in /etc/apache2/envvars User ${APACHE_RUN_USER} Group ${APACHE_RUN_GROUP} # # HostnameLookups: Log the names of clients or just their IP addresses # e.g., www.apache.org (on) or 204.62.129.132 (off). # The default is off because it'd be overall better for the net if people # had to knowingly turn this feature on, since enabling it means that # each client request will result in AT LEAST one lookup request to the # nameserver. # HostnameLookups Off # ErrorLog: The location of the error log file. # If you do not specify an ErrorLog directive within a <VirtualHost> # container, error messages relating to that virtual host will be # logged here. If you *do* define an error logfile for a <VirtualHost> # container, that host's errors will be logged there and not here. # ErrorLog syslog:local1 # # LogLevel: Control the severity of messages logged to the error_log. # Available values: trace8, ..., trace1, debug, info, notice, warn, # error, crit, alert, emerg. # It is also possible to configure the log level for particular modules, e.g. # "LogLevel info ssl:warn" # LogLevel warn # Include module configuration: IncludeOptional mods-enabled/*.load IncludeOptional mods-enabled/*.conf # Include list of ports to listen on Include ports.conf # Sets the default security model of the Apache2 HTTPD server. It does # not allow access to the root filesystem outside of /usr/share and /var/www. # The former is used by web applications packaged in Debian, # the latter may be used for local directories served by the web server. If # your system is serving content from a sub-directory in /srv you must allow # access here, or in any related virtual host. <Directory /> Options FollowSymLinks AllowOverride None Require all denied </Directory> <Directory /usr/share> AllowOverride None Require all granted </Directory> <Directory /var/www/> Options Indexes FollowSymLinks AllowOverride None Require all granted </Directory> #<Directory /srv/> # Options Indexes FollowSymLinks # AllowOverride None # Require all granted #</Directory> # # AccessFileName: The name of the file to look for in each directory # for additional configuration directives. See also the AllowOverride # directive. # AccessFileName .htaccess # # The following lines prevent .htaccess and .htpasswd files from being # viewed by Web clients. # <FilesMatch "^\.ht"> Require all denied </FilesMatch> # # The following directives define some format nicknames for use with # a CustomLog directive. # # These deviate from the Common Log Format definitions in that they use %O # (the actual bytes sent including headers) instead of %b (the size of the # requested file), because the latter makes it impossible to detect partial # requests. # # Note that the use of %{X-Forwarded-For}i instead of %h is not recommended. # Use mod_remoteip instead.
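# A hypothetical example of using one of the nicknames defined below: CustomLog /var/log/apache2/access.log combined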
# LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined LogFormat "%h %l %u %t \"%r\" %>s %O" common LogFormat "%{Referer}i -> %U" referer LogFormat "%{User-agent}i" agent # Include of directories ignores editors' and dpkg's backup files, # see README.Debian for details. # Include generic snippets of statements IncludeOptional conf-enabled/*.conf # Include the virtual host configurations: IncludeOptional sites-enabled/*.conf # vim: syntax=apache ts=4 sw=4 sts=4 sr noet ================================================ FILE: sandbox/etc/makezombie.conf ================================================ # A chaperone.d configuration which will create a zombie process zombie.service: { command: "$(APPS_DIR)/../bin/daemon $(APPS_DIR)/../bin/proctool --hang" } ================================================ FILE: sandbox/test.d/apache.conf ================================================ apache1.service: { command: "/usr/sbin/apache2 -f $(SANDBOX)/etc/apache2.conf", enabled: true, restart: false, optional: true, env_set: { APACHE_LOCK_DIR: /tmp, APACHE_PID_FILE: /tmp/apache2.pid, APACHE_RUN_USER: www-data, APACHE_RUN_GROUP: www-data, APACHE_LOG_DIR: /var/log/apache2, } } mysql.service: { command: "/etc/init.d/mysql start", enabled: false, } apache2.service: { command: "/etc/init.d/apache2 start", after: "mysql.service", enabled: false, } ================================================ FILE: sandbox/test.d/cron.conf ================================================ cron.service: { command: '/usr/sbin/cron -f', restart: true, enabled: false, } ================================================ FILE: sandbox/test.d/sys1.conf ================================================ settings: { env_inherit: ['SANDBOX', '_*'], env_set: {'TERM': 'xpath-revisited', 'QUESTIONER': 'the-law', 'WITHIN-HOME': '$(HOME)/inside-home', 'INTERACTIVE': '$(_CHAP_INTERACTIVE)', 'CONFIG_DIR': '$(_CHAP_CONFIG_DIR)', 'PROCTOOL': '$(SANDBOX)/proctool', 'ENV': '$(SANDBOX)/.shinit', 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/services/$(_CHAP_SERVICE)/bin:/usr/sbin:/usr/bin:/bin', 'APPS_PATH': '$(HOME:-)/apps', }, uid: 0, idle_delay: 1, debug: true, } cron1.service: { type: cron, stdout: inherit, stderr: inherit, interval: "*/2 * * * *", command: "proctool --wait 2 'running cron1.service'" } hometest.service: { type: oneshot, command: "$(PROCTOOL) my.$(APPS_PATH).apps-path", } fake1.service: { command: "$(PROCTOOL) --dump --hang 'fake1 process'", env_set: { 'PATH': '/binno/proctool:$(PATH)' }, env_unset: [ '*HOME*', 'APPS_PATH' ], stdout: inherit, enabled: true, debug: true, } fake2.service: { command: "$(PROCTOOL) --hang 'fake2 service'", enabled: false, stdout: inherit, uid: 1000, env_inherit: ['Q*', 'SANDBOX', 'PROCTOOL'], } fake3.service: { command: "$(PROCTOOL) 'oneshot service'", enabled: false, type: oneshot, stdout: inherit, ignore_failures: true, uid: garyw, service_groups: 'earlystuff', before: "default", } exittest.service: { enabled: true, restart: true, restart_limit: 3, ignore_failures: true, command: "$(PROCTOOL) --exit 20 'Exiting with 20'", } repeat.service: { command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", enabled: true, } repeat_err.service: { command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", enabled: false, } beforemain.service: { type: "oneshot", enabled: false, command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", stdout: inherit, before: "MAIN", 
service_groups: "IDLE", } main.logging: { selector: "[chaperone].*", file: /var/log/chaperone-%d.log, enabled: true, } console.logging: { stdout: true, selector: '*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', extended: true, enabled: true, } debian.logging: { selector: '[debian-start].*', file: /var/log/debian-start.log, enabled: true, } syslog.logging: { selector: '*.info;![debian-start].*;![chaperone].*', file: '/var/log/syslog-%d-%H%M', enabled: true, } ================================================ FILE: sandbox/testbare ================================================ #!/bin/bash # Used to test a bareimage, an image which was created "as if" chaperone was JUST installed from pip, # mostly to be sure that if people do a "pip3 install chaperone" and then run chaperone, that errors # and other feedback are reasonable. SANDBOX=$PWD docker run -t -i --rm=true -e "TERM=$TERM" -v /home:/home -e "SANDBOX=$SANDBOX" chapdev/bareimage \ /bin/bash /home/garyw/dev/chaperone/sandbox/bare_startup.sh ================================================ FILE: sandbox/testcent ================================================ #!/bin/bash SANDBOX=$PWD docker run -t -i -e "TERM=$TERM" --rm=true -v /home:/home --entrypoint=$SANDBOX/chaperone -e "SANDBOX=$SANDBOX" bst/chapdev-centos \ --config dev/chaperone/sandbox/centos.d $* \ --user garyw /bin/bash ================================================ FILE: sandbox/testdock ================================================ #!/bin/bash SANDBOX=$PWD docker run -t -i -e "TERM=$TERM" --rm=true -v /home:/home --entrypoint=$SANDBOX/chaperone -e "SANDBOX=$SANDBOX" bst/chapdev \ --config dev/chaperone/sandbox/test.d $* \ --user garyw /bin/bash ================================================ FILE: sandbox/testimage ================================================ #!/bin/bash # Used to create an apps directory here in the sandbox which runs a # standard docker image, however uses the local chaperone sources # and creates an app directory here in the sandbox. This is for # development of chaperone itself, and allows you to duplicate the # environment of an image. Especially useful for reproducing problems # and troubleshooting images. if [ $# == 0 ]; then echo "usage: testimage image-suffix" exit 1 fi # the cd trick assures this works even if the current directory is not current. cd ${0%/*} SUFFIX=$1 shift # remaining arguments are for chaperone # Try with chaperone- prefix first IMAGE=chapdev/chaperone-$SUFFIX if ! docker inspect $IMAGE >/dev/null 2>&1; then IMAGE=chapdev/$SUFFIX fi SANDBOX=$PWD APPSDIR=$SANDBOX/apps-$SUFFIX bashcmd="/bin/bash --rcfile $SANDBOX/bash.bashrc" if [ "$1" == "-" ]; then bashcmd="" shift fi myuid=`id -u` mygid=`id -g` # Copy the apps into this sandbox directory so we can work on it. if [ ! 
-d $APPSDIR ]; then docker run -i --rm=true -v /home:/home $IMAGE --disable --exitkills --log err --user root \ /bin/bash -c "cp -a /apps $APPSDIR; chown -R $myuid:$mygid $APPSDIR" fi # Run the lamp image using our local copy of chaperone as well as the local apps directory docker run -t -i -e "TERM=$TERM" -e "EMACS=$EMACS" --rm=true -v /home:/home \ --name run-$SUFFIX \ --entrypoint $SANDBOX/bin/chaperone $IMAGE \ --create $USER:$myuid \ --default-home / \ --config $APPSDIR/chaperone.d $* $bashcmd ================================================ FILE: sandbox/testvar ================================================ #!/bin/bash # Used to run a standard docker image with the local chaperone sources # substituted. Creates a data-only "var" directory instead of a full apps # directory to test things like --default-home if [ $# == 0 ]; then echo "usage: testvar image-suffix" exit 1 fi # the cd trick assures this works even if the script is invoked from another directory. cd ${0%/*} SUFFIX=$1 shift # remaining arguments are for chaperone IMAGE=chapdev/chaperone-$SUFFIX SANDBOX=$PWD VARDIR=$SANDBOX/var-$SUFFIX bashcmd="/bin/bash --rcfile $SANDBOX/bash.bashrc" if [ "$1" == "-" ]; then bashcmd="" shift fi myuid=`id -u` mygid=`id -g` # Run the lamp image using our local copy of chaperone as well as the local var-only directory mkdir -p $VARDIR docker run -t -i -e "TERM=$TERM" -e "EMACS=$EMACS" --rm=true -v /home:/sandbox \ -v $VARDIR:/apps/var \ --name run-$SUFFIX \ --entrypoint /sandbox${SANDBOX#/home}/bin/chaperone $IMAGE \ --create $USER:$myuid \ --default-home / \ $* $bashcmd ================================================ FILE: sandbox/user.d/sys1.conf ================================================ settings: { env_inherit: ['SANDBOX', '_*'], env_set: {'TERM': 'xpath-revisited', 'QUESTIONER': 'the-law', 'WITHIN-HOME': '$(HOME)/inside-home', 'INTERACTIVE': '$(_CHAP_INTERACTIVE)', 'CONFIG_DIR': '$(_CHAP_CONFIG_DIR)', 'PROCTOOL': '$(SANDBOX)/proctool', 'ENV': '$(SANDBOX)/.shinit', 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin', 'APPS_PATH': '$(HOME:-)/apps', }, #uid: 0, idle_delay: 1, debug: true, } cron1.service: { type: cron, stdout: inherit, stderr: inherit, interval: '* * * * *', command: "$(PROCTOOL) --wait 20 'running cron1.service'" } hometest.service: { type: oneshot, command: "$(PROCTOOL) my.$(APPS_PATH).apps-path", } fake1.service: { command: "$(PROCTOOL) --hang 'fake1 process'", enabled: false, } fake2.service: { command: "$(PROCTOOL) --hang 'fake2 service'", enabled: true, stdout: inherit, uid: 1000, env_inherit: ['Q*', 'SANDBOX', 'PROCTOOL'], } fake3.service: { command: "$(PROCTOOL) 'oneshot service'", enabled: false, type: oneshot, stdout: inherit, ignore_failures: true, uid: garyw, service_groups: 'earlystuff', before: "default", } exittest.service: { enabled: false, restart: true, restart_limit: 5, ignore_failures: true, command: "$(PROCTOOL) --exit 20 'Exiting with 20'", } repeat.service: { command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", enabled: false, } repeat_err.service: { command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", enabled: false, } beforemain.service: { type: "oneshot", enabled: false, command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", stdout: inherit, before: "MAIN", service_groups: "IDLE", } main.logging: { filter: "[chaperone].*", file: "$(HOME)/tmp/chaperone-%d.log", enabled: true, } console.logging: { stdout: true, filter:
'*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', extended: true, enabled: true, } debian.logging: { filter: '[debian-start].*', file: "$(HOME)/tmp/debian-start.log", enabled: true, } syslog.logging: { filter: '*.info;![debian-start].*;![chaperone].*', file: '$(HOME)/tmp/syslog-%d-%H%M', enabled: true, } ================================================ FILE: setup.py ================================================ import os import sys import subprocess from setuptools import setup, find_packages if sys.version_info < (3,): print("You must run setup.py with Python 3 only. Python 2 distributions are not supported.") exit(1) ourdir = os.path.dirname(__file__) def read(fname): return open(os.path.join(ourdir, fname)).read() def get_version(): return subprocess.check_output([sys.executable, os.path.join("chaperone/cproc/version.py")]).decode().strip() def which(program): def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None requires_list = ['docopt>=0.6.2', 'PyYAML>=3.1.1', 'voluptuous>=0.8.7', 'aiocron>=0.3'] if which('gcc'): requires_list += ["setproctitle>=1.1.8"] setup( name = "chaperone", version = get_version(), description = 'Simple system init daemon for Docker-like environments', long_description = read('README'), packages = find_packages(), #test_suite = "pyt_tests.tests.test_all", entry_points={ 'console_scripts': [ 'chaperone = chaperone.exec.chaperone:main_entry', 'telchap = chaperone.exec.telchap:main_entry', 'envcp = chaperone.exec.envcp:main_entry', 'sdnotify = chaperone.exec.sdnotify:main_entry', 'sdnotify-exec = chaperone.exec.sdnotify_exec:main_entry', ], }, license = "Apache Software License", author = "Gary Wisniewski", author_email = "garyw@blueseastech.com", url = "http://github.com/garywiz/chaperone", keywords = "docker init systemd syslog", install_requires = requires_list, classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3", "Topic :: System :: Logging", "Topic :: System :: Boot :: Init", ] ) ================================================ FILE: tests/.gitignore ================================================ test_logs ================================================ FILE: tests/README.md ================================================ This directory contains both Chaperone unit tests as well as more complex integration tests. The `run-all-tests.sh` script runs them all. However, integration tests in this directory have several requirements. They will run on both Ubuntu and RHEL. Docker 1.8.1 is required, since socket mount permissions have problems with SELinux for earlier versions. For both, you'll need everything Chaperone itself requires, and you may need to install these packages manually since Chaperone may not be installed on the development system: pip3 install docopt pip3 install PyYAML pip3 install voluptuous pip3 install croniter You will also need a working `chapdev/chaperone-lamp` image. This is the image that is used for all of the tests in this directory and you can simply pull it if it isn't already available.
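For example: `docker pull chapdev/chaperone-lamp` Wait, there's more.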
For Ubuntu, you'll then need: apt-get install expect-lite apt-get install nc # should already be there For CentOS/RHEL, it is a bit more complicated. You'll need: yum install expect yum install nc and then you'll need to manually install `expect-lite` using the instructions [on the developer website](http://expect-lite.sourceforge.net/expect-lite_install.html). (It's pretty easy actually, and foolproof.) ================================================ FILE: tests/bin/chaperone ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.chaperone import main_entry main_entry() ================================================ FILE: tests/bin/daemon ================================================ #!/usr/bin/python3 """ Forks a process in a daemon-like fashion for testing. Usage: daemon [--wait=seconds] [--ignore-signals] [--exit=code] COMMAND [ARGS ...] """ import signal import sys import subprocess from time import sleep from docopt import docopt import os from daemonutil import Daemon options = docopt(__doc__, options_first=True) if options['--ignore-signals']: signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) else: signal.signal(signal.SIGTERM, lambda signum, frame: not print("received SIGTERM")) signal.signal(signal.SIGHUP, lambda signum, frame: not print("received SIGHUP")) signal.signal(signal.SIGINT, lambda signum, frame: not print("received SIGINT")) if options['--wait']: print("Waiting {0} ...".format(options['--wait'])) sleep(float(options['--wait'])) args = [options['COMMAND']] + options['ARGS'] print("{1}:Launching {0} ...".format(args, os.getpid())) class mydaemon(Daemon): def run(self): subprocess.Popen(args, start_new_session=True) d = mydaemon() if options['--exit']: d.start(int(options['--exit'])) else: d.start() ================================================ FILE: tests/bin/daemonutil.py ================================================ """ Generic linux daemon base class for python 3.x. From: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/ Thank you! """ import sys, os, time, atexit, signal class Daemon: """A generic daemon class. Usage: subclass the daemon class and override the run() method.""" def __init__(self, pidfile = None): self.pidfile = pidfile def daemonize(self, exitwith = 0): """Daemonize class.
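The first fork lets the parent exit, setsid() detaches into a new session, and the second fork ensures the daemon can never reacquire a controlling terminal: the classic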
UNIX double fork mechanism.""" sys.stdout.flush() sys.stderr.flush() try: pid = os.fork() if pid > 0: # exit first parent sys.exit(exitwith) except OSError as err: sys.stderr.write('fork #1 failed: {0}\n'.format(err)) sys.exit(1) # decouple from parent environment os.chdir('/') os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # exit from second parent sys.exit(0) except OSError as err: sys.stderr.write('fork #2 failed: {0}\n'.format(err)) sys.exit(1) # redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() si = open(os.devnull, 'r') so = open(os.devnull, 'a+') se = open(os.devnull, 'a+') os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) # write pidfile if self.pidfile: atexit.register(self.delpid) pid = str(os.getpid()) with open(self.pidfile,'w+') as f: f.write(pid + '\n') def delpid(self): os.remove(self.pidfile) def start(self, exitwith = 0): """Start the daemon.""" # Check for a pidfile to see if the daemon already runs if self.pidfile: try: with open(self.pidfile,'r') as pf: pid = int(pf.read().strip()) except IOError: pid = None if pid: message = "pidfile {0} already exists. " + \ "Daemon already running?\n" sys.stderr.write(message.format(self.pidfile)) sys.exit(1) # Start the daemon self.daemonize(exitwith) self.run() def stop(self): """Stop the daemon.""" assert self.pidfile, "Requires pidfile to use stop()" # Get the pid from the pidfile try: with open(self.pidfile,'r') as pf: pid = int(pf.read().strip()) except IOError: pid = None if not pid: message = "pidfile {0} does not exist. " + \ "Daemon not running?\n" sys.stderr.write(message.format(self.pidfile)) return # not an error in a restart # Try killing the daemon process try: while 1: os.kill(pid, signal.SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args) if e.find("No such process") > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) else: print(str(err.args)) sys.exit(1) def restart(self): """Restart the daemon.""" self.stop() self.start() def run(self): """You should override this method when you subclass Daemon.
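(tests/bin/daemon does this with a small mydaemon subclass whose run() launches the target command via subprocess.Popen in a new session.)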
It will be called after the process has been daemonized by start() or restart().""" ================================================ FILE: tests/bin/envcp ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.envcp import main_entry main_entry() ================================================ FILE: tests/bin/expect-lite-command-run ================================================ #!/bin/bash function RUNTASK() { expect-lite-image-run --task $* } function RUNIMAGE() { export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-stop docker run %{SOCKET_ARGS}" export CHTEST_DOCKER_OPTS=$* expect-lite-image-run } function RUNIMAGE_READY() { export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-ready docker run %{SOCKET_ARGS}" export CHTEST_DOCKER_OPTS=$* expect-lite-image-run } export -f RUNTASK RUNIMAGE RUNIMAGE_READY bash -i ================================================ FILE: tests/bin/expect-lite-image-run ================================================ #!/bin/bash options="" if [ "$CHTEST_CONTAINER_NAME" != "" ]; then options="--name $CHTEST_CONTAINER_NAME" fi if [[ " $CHTEST_DOCKER_OPTS " != *\ -d* ]]; then options="$options -i -t --rm" fi if [ "$CHTEST_DOCKER_CMD" == "" ]; then CHTEST_DOCKER_CMD="docker run" fi SELINUX_FLAG=$(sestatus 2>/dev/null | fgrep -q enabled && echo :z) exec $CHTEST_DOCKER_CMD $options \ -v /home:/home$SELINUX_FLAG \ -e TESTHOME=$TESTHOME \ -e TESTDIR=$TESTDIR \ -e CHTEST_HOME=$CHTEST_HOME \ $CHTEST_DOCKER_OPTS \ --entrypoint $TESTHOME/bin/chaperone \ $CHTEST_IMAGE \ --create $USER:$TESTHOME \ --default-home $CHTEST_HOME \ --config $CHTEST_HOME/../chaperone.conf \ $* ================================================ FILE: tests/bin/expect-test-command ================================================ #!/bin/bash export EL_SHELL="expect-lite-command-run" exec expect-lite $1 ================================================ FILE: tests/bin/expect-test-image ================================================ #!/bin/bash export EL_SHELL="expect-lite-image-run" exec expect-lite $1 ================================================ FILE: tests/bin/get-serial ================================================ #!/bin/bash serfile=$CHTEST_HOME/serial.dat if [ ! -f $serfile ]; then current=0 else current=$(cat $serfile) fi let current=current+1 echo $current >$serfile echo $current ================================================ FILE: tests/bin/is-running ================================================ #!/bin/bash ps -C $1 >/dev/null && exit 0 exit 1 ================================================ FILE: tests/bin/kill-from-pidfile ================================================ #!/bin/bash pidfile=$1 if [ -f $pidfile ]; then sudo kill `cat $1` fi ================================================ FILE: tests/bin/logecho ================================================ #!/bin/bash if [ "$SERVICE_NAME" == "" ]; then SERVICE_NAME="pid$$" fi logger -p info -t $SERVICE_NAME "$*" ================================================ FILE: tests/bin/proctool ================================================ #!/usr/bin/python3 """ Tool to create processes for various purposes. 
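Example (sandbox/test.d/sys1.conf invokes it this way): proctool --dump --hang 'fake1 process'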
Usage: proctool [--dump] [--hang] [--wait=seconds] [--ignore-signals] [--exit=code] [--notify=CMD] [MESSAGE] """ import signal import sys from time import sleep from docopt import docopt import os options = docopt(__doc__) if options['MESSAGE']: sys.stdout.write('proctool says: ' + options['MESSAGE'] + "\n") sys.stdout.flush() if options['--notify']: cmd = options['--notify'] os.system('sdnotify ' + cmd) if options['--dump']: print("UID:{0} GID:{1} PID:{2} Environment:".format(os.getuid(), os.getgid(), os.getpid())) for k,v in os.environ.items(): print(" {0}={1}".format(k,v)) if options['--ignore-signals']: signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) else: signal.signal(signal.SIGTERM, lambda signum, frame: print("received SIGTERM") or exit() ) signal.signal(signal.SIGHUP, lambda signum, frame: not print("received SIGHUP")) signal.signal(signal.SIGINT, lambda signum, frame: not print("received SIGINT")) if options['--wait']: sleep(float(options['--wait'])) if options['--hang']: while True: sleep(100) if options['--exit']: exit(int(options['--exit'])) ================================================ FILE: tests/bin/read_from_port ================================================ #!/bin/bash if nc --version >/dev/null 2>&1; then # nmap.org accepts --version and has different syntax (lovely eh) nc --recv-only $* else # bsd version nc $* fi ================================================ FILE: tests/bin/sdnotify ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.sdnotify import main_entry main_entry() ================================================ FILE: tests/bin/sdnotify-exec ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.sdnotify_exec import main_entry main_entry() ================================================ FILE: tests/bin/talkback ================================================ #!/usr/bin/python3 # Simple echo script to test inetd import sys for line in sys.stdin: if "EXIT" in line: exit(0) print("Echoing: ", line) sys.stdout.flush() ================================================ FILE: tests/bin/telchap ================================================ #!/usr/bin/python3 import sys import os # Assure we use the local package for testing and development sys.path[0] = os.path.dirname(os.path.dirname(sys.path[0])) from chaperone.exec.telchap import main_entry main_entry() ================================================ FILE: tests/bin/test-driver ================================================ #!/bin/bash # Runs all executable test scripts found in the test directory given as the first argument. function relpath() { python -c "import os,sys;print(os.path.relpath(*(sys.argv[1:])))" "$@"; } function extract_title() { script=$1 title=`sed -n 's/^#TITLE: *//p' $script` [ "$title" == "" ] && title=$script echo $title } export CHTEST_CONTAINER_NAME=CHAP-TEST-CONTAINER-$$ function kill_test_container() { sleep 1 # Sometimes it takes docker a while to actually kill the container.
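# if the container is still present after the grace period, force-kill it and remove it along with its volumes (-v)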
if docker inspect $CHTEST_CONTAINER_NAME >/dev/null 2>&1; then echo Container still running: Forcing removal docker kill $CHTEST_CONTAINER_NAME >/dev/null docker rm -v $CHTEST_CONTAINER_NAME >/dev/null fi } shellmode=0 if [ "$1" == '--shell' ]; then shellmode=1 shift fi export TESTDIR=$(readlink -f $1) export TESTHOME=$PWD export CHTEST_HOME=$TESTDIR/_temp-$$_ if [ "$CHTEST_LOGDIR" == "" ]; then export CHTEST_LOGDIR=$TESTHOME/test_logs fi if [ "$2" == "" ]; then IMAGE_NAME=chapdev/chaperone-lamp else IMAGE_NAME=$2 fi export CHTEST_IMAGE=$IMAGE_NAME if [ ! -d $TESTDIR ]; then exit fi if [ -e $CHTEST_HOME ]; then echo "Can't continue... $CHTEST_HOME already exists." exit 1 fi if [ "`which expect-lite`" == "" ]; then echo "expect-lite must be installed for tests to run" exit 1 fi mkdir -p $CHTEST_LOGDIR if [ $shellmode == 1 ]; then mkdir $CHTEST_HOME expect-lite-image-run --disable-services /bin/bash rm -rf $CHTEST_HOME exit fi ( exitcode=0 for sf in $( find $TESTDIR -type f -executable \! -name '*~' ); do if [ "$CHTEST_ONLY_ENDSWITH" != "" -a "${sf%*/$CHTEST_ONLY_ENDSWITH}" == "$sf" ]; then continue fi mkdir $CHTEST_HOME; cd $CHTEST_HOME logfile=$CHTEST_LOGDIR/$(basename $TESTDIR)_${sf/*\//}.log rm -f $logfile.err title=$(extract_title $sf) echo "RUNNING TEST: $title" echo "" >>$logfile.err echo "##" >>$logfile.err echo "## RUNNING TEST: $title" >>$logfile.err echo "## $sf" >>$logfile.err echo "##" >>$logfile.err if ! $sf >>$logfile.err 2>&1; then echo "TEST FAILED: $sf (see $(relpath $logfile.err $TESTHOME))" exitcode=2 else mv $logfile.err $logfile fi kill_test_container cd $TESTDIR; [ ! -f keep.tempdir ] && rm -rf $CHTEST_HOME done if [ $exitcode != 0 ]; then echo "Some tests failed in: $TESTDIR" fi exit $exitcode ) ================================================ FILE: tests/el-tests/basic-1/chaperone.conf ================================================ settings: { env_set: { PATH: "$(TESTHOME)/bin:$(PATH)", } } echo.service: { command: "echo first output", stdout: inherit, } default.logging: { selector: "*.debug", stdout: true, } ================================================ FILE: tests/el-tests/basic-1/test-001.elt ================================================ #!/usr/bin/env expect-test-command #TITLE: Test simplest possible command service >RUNIMAGE RUNTASK proctool testing-123 (sleep 15; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_CRON1=true @30 ^C (sleep 25; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE4=true ^C <(sleep 25; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE5=true ^C <RUNIMAGE -e ENABLE_APACHE6=true (sleep 20; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE7=true ^C RUNIMAGE -e ENABLE_APACHE8=true /tmp/kid.pid; logecho daemon running; sleep 10; logecho wait completed: daemon exiting'", pidfile: "/tmp/kid.pid", exit_kills: true, service_groups: IDLE, } # Debugging output for all default.logging: { selector: "*.debug", stdout: true, } ================================================ FILE: tests/el-tests/exitkills-1/test-001.elt ================================================ #!/usr/bin/env expect-test-command #TITLE: Forking service - combined with exit_kills @20 >RUNIMAGE <: daemon running RUNIMAGE -e ENABLE_EXIT1=true RUNIMAGE -e ENABLE_EXIT1B=true (sleep 8; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE3=true ^C RUNIMAGE -e ENABLE_APACHE4=true echo running...; echo CID:`RUNIMAGE_READY -d -P -e ENABLE_INETD1=true` +$cid=CID:([0-9a-f]{32,}) >docker port $cid +$ourport=8080/tcp -> 0.0.0.0:([0-9]+) # Fire up an inetd connection inside the container and
verify it works (ready assured) >read_from_port localhost $ourport sleep 3 >docker stop $cid >docker logs $cid docker rm -v $cid ================================================ FILE: tests/el-tests/inetd-1/test-002.elt ================================================ #!/usr/bin/env expect-test-command #TITLE: inetd - second service disables both # Start the image and capture the container ID and port # (Note: the initial "echo" below is needed to take care of a timing issue with 'docker run') >echo running...; echo CID:`RUNIMAGE_READY -d -P -e ENABLE_INETD1=true -e ENABLE_INETD2=true` +$cid=CID:([0-9a-f]{32,}) >docker port $cid +$ourport=8080/tcp -> 0.0.0.0:([0-9]+) >docker port $cid +$otherport=8443/tcp -> 0.0.0.0:([0-9]+) # Fire up an inetd connection inside the container and verify it works (ready assured) >read_from_port localhost $ourport sleep 2 >read_from_port localhost $otherport # Kill and inspect logs >sleep 3 >docker logs $cid docker rm -v $cid ================================================ FILE: tests/el-tests/notify-1/chaperone.conf ================================================ settings: { env_set: { PATH: "$(TESTHOME)/bin:$(PATH)", SERVICE_NAME: "$(_CHAP_SERVICE)" }, process_timeout: 5, } test1-exit1.service: { type: notify, enabled: "$(ENABLE_EXIT1:-false)", command: "daemon bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", } test1-exit1b.service: { type: notify, enabled: "$(ENABLE_EXIT1B:-false)", command: "daemon --exit 3 bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", } test1-exit1c.service: { type: notify, enabled: "$(ENABLE_EXIT1C:-false)", command: "daemon --exit 3 --wait 8 bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", } test1-exit1d.service: { type: notify, enabled: "$(ENABLE_EXIT1D:-false)", command: "daemon bash -c 'logecho daemon running; sleep 3; sdnotify ERRNO=55'", } test1-exit1e.service: { type: notify, enabled: "$(ENABLE_EXIT1E:-false)", process_timeout: 15, command: "daemon bash -c 'logecho daemon running; sleep 3; sdnotify --ready --pid $$; sleep 2'", } # Debugging output for all default.logging: { selector: "*.debug", stdout: true, } ================================================ FILE: tests/el-tests/notify-1/test-001.elt ================================================ #!/usr/bin/env expect-test-command #TITLE: Notify service - spawn daemon normally - never gets notified >RUNIMAGE -e ENABLE_EXIT1=true RUNIMAGE -e ENABLE_EXIT1B=true ' RUNIMAGE -e ENABLE_EXIT1C=true RUNIMAGE -e ENABLE_EXIT1D=true <: daemon running RUNIMAGE -e ENABLE_EXIT1E=true <: daemon running RUNIMAGE -e ENABLE_EXIT1=true RUNIMAGE (sleep 8; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE3=true ^C RUNIMAGE -e ENABLE_APACHE4=true (sleep 5; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_EXIT1=true ^C (sleep 5; echo "K""ILL ME NOW")& >RUNIMAGE ^C (sleep 8; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE3=true ^C (sleep 8; echo "K""ILL ME NOW")& >RUNIMAGE -e ENABLE_APACHE4=true ^C > {0}".format(compare[k])) def canonical(d, nl = False): if not nl: return str([(k,d[k]) for k in sorted(d.keys())]) result = list() for k in sorted(d.keys()): result.append("('{0}', '{1}')".format(k, d[k].replace("\n", "\\\\n"))) return "[" + (', '.join(result)) + "]"; class TestEnvOrder(unittest.TestCase): maxDiff = 5000 def test_expand1(self): env = Environment(from_env = ENV1).expanded() #printdict(env) envstr = canonical(env) #print('RESULT1 = "' + envstr + '"') self.assertEqual(envstr, RESULT1) def test_expand2(self): env = Environment(from_env 
= ENV2).expanded() #printdict(env) envstr = canonical(env) #print('RESULT2 = "' + envstr + '"') self.assertEqual(envstr, RESULT2) def test_expand3(self): env = Environment(from_env = ENV3).expanded() #printdict(env, compare = RESULT3) envstr = canonical(env) #print('RESULT3 = "' + canonical(env, True) + '"') self.assertEqual(envstr, RESULT3) def test_expand4(self): env = Environment(from_env = ENV4).expanded() #printdict(env) envstr = canonical(env) #print('RESULT4 = "' + envstr + '"') self.assertEqual(envstr, RESULT4) def test_expand5(self): "Try simple expansion" env = Environment(from_env = ENV4).expanded() self.assertEqual(env.expand("hello $(UBERNEST)"), "hello nest:inside/usr/garyw and /usr/garyw/apps and:/usr/garyw/apps/theap") self.assertEqual(env.expand("hello $(MAYBE5) and $(MAYBE4)"), "hello blach/foo and /usr/garyw/foo") self.assertEqual(env.expand("hello $(MAYBE5:+$(MAYBE5)b) and $(MAYBE41)"), "hello blach/foob and $(MAYBE41)") self.assertEqual(env.expand("hello $(MAYBE5:+$(MAYBE5)b) and $(MAYBE41:-gone$(MAYBE4))"), "hello blach/foob and gone/usr/garyw/foo") def test_expand6(self): "Try self-referential expansions" enva = Environment(ENV4a, CONFIG4a) self.assertEqual(canonical(enva.expanded()), "[('ADMINVAR1', 'user'), ('ADMINVAR2', ''), ('ADMINVAR3', 'admin'), ('PATH', '/usr/local/bin:/bin'), ('THEREPATH', '/there')]") envb = Environment(enva) self.assertEqual(canonical(envb.expanded()), "[('ADMINVAR1', 'user'), ('ADMINVAR2', ''), ('ADMINVAR3', 'admin'), ('PATH', '/usr/local/bin:/bin'), ('THEREPATH', '/there')]") envc = Environment(envb, CONFIG4c) self.assertEqual(canonical(envc.expanded()), "[('ADMINVAR1', 'user'), ('ADMINVAR2', ''), ('ADMINVAR3', 'admin'), ('MISCPATH', '/mislibs'), ('PATH', '/usr/python/bin:/usr/local/bin:/bin'), ('PYAGAIN', '/mislibs:/pythonlibs:'), ('PYPATH', '/pythonlibs:'), ('THEREPATH', '/mislibs:/there')]") def test_expand7(self): "Test some self-referential anomalies" env = Environment(ENV7, CONFIG7a).expanded() envstr = canonical(env) #print('RESULT7 = "' + envstr + '"') self.assertEqual(envstr, RESULT7) def test_expand8(self): "Test conditional expansion" env = Environment(from_env = ENV8).expanded() #printdict(env, compare = RESULT8) envstr = canonical(env) #print('RESULT8 = "' + envstr + '"') self.assertEqual(envstr, RESULT8) if __name__ == '__main__': unittest.main() ================================================ FILE: tests/env_parse.py ================================================ from prefix import * from chaperone.cutil.env import EnvScanner TEST1 = ( ('Nothing',), ('A normal $(expansion) is here',), ('An unterminated $(expansion is here',), ('Two $(expansions) are $(also) here',), ('Nested $(expansions are $(also) here) too.',), ('Nested $(expansions are "$(also" here) too.',), ('Nested $(expansions are ["$(also" here),$(next)] finally) too.',), ('Ignore $(stuff))) like this.',), ('escape \\$(stuff) like this.',), ('exp $(stuff) but \$(do not $(except [{$(foo)}] this) but \${not} like this.',), ('Nested ${expansions are ["$(also" here),$(next)] finally} too.',), ) TEST1 = ( ('Nothing', 'Nothing'), ('A normal $(expansion) is here', 'A normal is here'), ('An unterminated $(expansion is here', 'An unterminated $(expansion is here'), ('Two $(expansions) are $(also) here', 'Two are here'), ('Nested $(expansions are $(also) here) too.', 'Nested too.'), ('Nested $(expansions are "$(also" here) too.', 'Nested too.'), ('Nested $(expansions are ["$(also" here),$(next)] finally) too.', 'Nested too.'), ('Ignore $(stuff))) like this.', 'Ignore )) 

================================================
FILE: tests/env_parse.py
================================================
from prefix import *

from chaperone.cutil.env import EnvScanner

# Inputs only; superseded by the generated input/expected-output table below.
TEST1 = (
    ('Nothing',),
    ('A normal $(expansion) is here',),
    ('An unterminated $(expansion is here',),
    ('Two $(expansions) are $(also) here',),
    ('Nested $(expansions are $(also) here) too.',),
    ('Nested $(expansions are "$(also" here) too.',),
    ('Nested $(expansions are ["$(also" here),$(next)] finally) too.',),
    ('Ignore $(stuff))) like this.',),
    ('escape \\$(stuff) like this.',),
    ('exp $(stuff) but \$(do not $(except [{$(foo)}] this) but \${not} like this.',),
    ('Nested ${expansions are ["$(also" here),$(next)] finally} too.',),
)

TEST1 = (
    ('Nothing', 'Nothing'),
    ('A normal $(expansion) is here', 'A normal is here'),
    ('An unterminated $(expansion is here', 'An unterminated $(expansion is here'),
    ('Two $(expansions) are $(also) here', 'Two are here'),
    ('Nested $(expansions are $(also) here) too.', 'Nested too.'),
    ('Nested $(expansions are "$(also" here) too.', 'Nested too.'),
    ('Nested $(expansions are ["$(also" here),$(next)] finally) too.', 'Nested too.'),
    ('Ignore $(stuff))) like this.', 'Ignore )) like this.'),
    ('escape \$(stuff) like this.', 'escape $(stuff) like this.'),
    ('exp $(stuff) but \$(do not $(except [{$(foo)}] this) but \${not} like this.', 'exp but $(do not but ${not} like this.'),
    ('Nested ${expansions are ["$(also" here),$(next)] finally} too.', 'Nested too.'),
)

class ScanTester:

    def __init__(self, test):
        self._test = test
        self._scanner = EnvScanner()

    def run(self, tc):
        for t in self._test:
            r = self._scanner.parse(t[0], self.callback)
            #print(" ('{0}', '{1}'),".format(t[0], r))
            tc.assertEqual(t[1], r)

    def callback(self, buf, whole):
        return "<"+buf+">"

class TestScanner(unittest.TestCase):

    def test_parse1(self):
        t = ScanTester(TEST1)
        t.run(self)

if __name__ == '__main__':
    unittest.main()


================================================
FILE: tests/events.py
================================================
from prefix import *

from chaperone.cutil.events import EventSource

class handlers:

    def __init__(self):
        self.results = list()

    def handler1(self, val):
        self.results.append("handler1:" + val)

    def handler2(self, val):
        self.results.append("handler2:" + val)

    def handler3(self, val):
        self.results.append("handler3:" + val)

class TestEvents(unittest.TestCase):

    def setUp(self):
        self.h = handlers()
        self.e = EventSource()

    def test_event1(self):
        self.e.add(onH1 = self.h.handler1)
        self.e.add(onH1 = self.h.handler1)
        self.e.onH1("First trigger")
        self.e.onH1("Second trigger")
        self.assertEqual(self.h.results,
                         ['handler1:First trigger', 'handler1:First trigger',
                          'handler1:Second trigger', 'handler1:Second trigger'])
        self.e.remove(onH1 = self.h.handler1)
        self.e.onH1("Third trigger")
        self.e.remove(onH1 = self.h.handler1)
        self.e.onH1("Fourth trigger")
        self.assertEqual(self.h.results,
                         ['handler1:First trigger', 'handler1:First trigger',
                          'handler1:Second trigger', 'handler1:Second trigger',
                          'handler1:Third trigger'])

    def test_event2(self):
        self.e.add(onH1 = self.h.handler1)
        self.assertRaisesRegex(TypeError, 'but 3 were given',
                               lambda: self.e.onH1("arg1", "arg2"))

    def test_event3(self):
        self.e.add(onMulti = self.h.handler1)
        self.e.add(onMulti = self.h.handler2)
        self.e.onMulti("TWO")
        self.e.add(onMulti = self.h.handler3)
        self.e.onMulti("THREE")
        self.assertEqual(self.h.results,
                         ['handler1:TWO', 'handler2:TWO',
                          'handler1:THREE', 'handler2:THREE', 'handler3:THREE'])
        self.e.remove(onMulti = self.h.handler2)
        self.e.onMulti("AFTER-REMOVE")
        self.assertEqual(self.h.results,
                         ['handler1:TWO', 'handler2:TWO',
                          'handler1:THREE', 'handler2:THREE', 'handler3:THREE',
                          'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])
        self.e.remove(onMulti = self.h.handler1)
        self.e.remove(onMulti = self.h.handler2)
        self.e.remove(onMulti = self.h.handler3)
        self.e.onMulti("EMPTY")
        self.assertEqual(self.h.results,
                         ['handler1:TWO', 'handler2:TWO',
                          'handler1:THREE', 'handler2:THREE', 'handler3:THREE',
                          'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])

if __name__ == '__main__':
    unittest.main()
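
The EventSource tests above exercise a surface where handlers are registered and unregistered by keyword name (add/remove), the same handler may be registered more than once, and an event is fired by calling the event name as an attribute. As a rough illustration of that pattern (a hypothetical minimal dispatcher, not the implementation in chaperone/cutil/events.py):

# Illustrative sketch only: a minimal event dispatcher with the same
# add/remove/attribute-trigger surface the tests above exercise. The real
# implementation lives in chaperone/cutil/events.py and may differ.
class MiniEventSource:

    def __init__(self):
        self._handlers = {}

    def add(self, **kwargs):
        # Each registration is kept separately, so a handler added twice fires twice.
        for name, func in kwargs.items():
            self._handlers.setdefault(name, []).append(func)

    def remove(self, **kwargs):
        # Removes one registration per call; removing an absent handler is a no-op.
        for name, func in kwargs.items():
            handlers = self._handlers.get(name, [])
            if func in handlers:
                handlers.remove(func)

    def __getattr__(self, name):
        # Any unknown attribute acts as a trigger for the event of that name.
        def trigger(*args):
            for func in list(self._handlers.get(name, [])):
                func(*args)
        return trigger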
Python 3 only.  Python 2 distributions are not supported.")
    exit(1)

# Assure that packages in the same directory as ours (tests) can be used without concern for where
# we are installed
sys.path[0] = os.path.dirname(os.path.dirname(__file__))


================================================
FILE: tests/run-all-tests.sh
================================================
#!/bin/bash
# Runs both the unit tests and the process integration tests

python3 env_expand.py
python3 env_parse.py
python3 events.py
python3 service_order.py
python3 syslog_spec.py

./run-el.sh


================================================
FILE: tests/run-el.sh
================================================
#!/bin/bash

function relpath() { python -c "import os,sys;print(os.path.relpath(*(sys.argv[1:])))" "$@"; }

export PATH=$PWD/bin:$PATH

if [ "$1" == '-n' ]; then
    counter=$2
    shift 2
    for (( i=1; $i<=$counter; i++ )); do
        export CHTEST_LOGDIR=$PWD/test_logs/n$i
        $0 $* &
    done
    wait
    exit
fi

if [ "$1" != "" ]; then
    export CHTEST_ONLY_ENDSWITH=$1
fi

test-driver el-tests/basic-1
test-driver el-tests/simple-1
test-driver el-tests/simple-2
test-driver el-tests/cron-1
test-driver el-tests/fork-1
test-driver el-tests/inetd-1
test-driver el-tests/notify-1
test-driver el-tests/exitkills-1


================================================
FILE: tests/run-shell.sh
================================================
#!/bin/bash

if [ "$1" == "" ]; then
    echo 'usage: run-shell.sh <test-name>'
    exit
fi

export PATH=$PWD/bin:$PATH
export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-stop docker run %{SOCKET_ARGS}"

test-driver --shell el-tests/$1


================================================
FILE: tests/service_order.py
================================================
from prefix import *

from chaperone.cutil.config import ServiceDict

OT1 = {
    'one.service': { },
    'two.service': { 'service_groups': 'foobar', 'after': 'default' },
    'three.service': { 'service_groups': 'system', 'before': 'four.service' },
    'four.service': { 'service_groups': 'system', 'before': 'default' },
    'five.service': { },
    'six.service': { 'after': 'seven.service' },
    'seven.service': { },
    'eight.service': { 'service_groups': 'system', 'before': 'default' },
}

OT2 = {
    'one.service': { },
    'two.service': { 'service_groups': 'foobar', 'after': 'default' },
    'three.service': { 'service_groups': 'system', 'before': 'two.service' },
    'four.service': { 'service_groups': 'system', 'before': 'three.service' },
    'five.service': { },
    'six.service': { },
    'seven.service': { },
}

OT3 = {
    'one.service': { },
    'two.service': { 'before': 'default' },
    'three.service': { 'service_groups': 'system', 'before': 'four.service' },
    'four.service': { 'service_groups': 'system', 'before': 'default' },
    'five.service': { 'before': 'two.service' },
    'six.service': { 'after': 'seven.service' },
    'seven.service': { },
}

def printlist(title, d):
    # Debugging aid; the early return disables output during normal runs.
    return
    print(title)
    for item in d:
        print("  ", item)

def checkorder(result, *series):
    """
    Checks to be sure that the items listed in 'series' are in order in the result set.
    """
    results = [r.name for r in result]
    indexes = list(map(lambda item: results.index(item+".service"), series))
    for n in range(len(indexes)-1):
        if indexes[n] > indexes[n+1]:
            return False
    return True

class TestServiceOrder(unittest.TestCase):

    def test_order1(self):
        sc = ServiceDict(OT1.items())
        slist = sc.get_startup_list()
        printlist("startup list: ", slist)
        self.assertTrue(checkorder(slist, 'three', 'four', 'seven', 'six', 'two'))
        self.assertTrue(checkorder(slist, 'three', 'one', 'two'))
        self.assertTrue(checkorder(slist, 'eight', 'one', 'two'))

    def test_order2(self):
        sc = ServiceDict(OT2.items())
        slist = sc.get_startup_list()
        printlist("startup list: ", slist)
        self.assertTrue(checkorder(slist, 'four', 'three', 'two'))

    def test_order3(self):
        sc = ServiceDict(OT3.items())
        self.assertRaisesRegex(Exception, '^circular', lambda: sc.get_startup_list())

if __name__ == '__main__':
    unittest.main()
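
The ordering tests above require get_startup_list() to emit services so that every 'before'/'after' constraint is satisfied, and to raise a 'circular...' exception when the constraint graph has a cycle (test_order3). A sketch of that general technique, Kahn's topological sort over explicit pairwise constraints (startup_order is hypothetical; it is not ServiceDict's implementation and ignores service groups and the 'default' group entirely):

# Illustrative sketch only: topological ordering of services from pairwise
# (earlier, later) constraints, raising on cycles the way test_order3
# expects a "circular..." error. Not chaperone's ServiceDict implementation.
from collections import deque

def startup_order(constraints, names):
    succ = {n: set() for n in names}      # edges earlier -> later
    indeg = {n: 0 for n in names}         # number of unmet predecessors
    for earlier, later in constraints:
        if later not in succ[earlier]:
            succ[earlier].add(later)
            indeg[later] += 1
    ready = deque(sorted(n for n in names if indeg[n] == 0))
    order = []
    while ready:
        n = ready.popleft()
        order.append(n)
        for m in sorted(succ[n]):
            indeg[m] -= 1
            if indeg[m] == 0:
                ready.append(m)
    if len(order) != len(names):          # leftover nodes all sit on a cycle
        raise Exception("circular dependency among services")
    return order

print(startup_order([('four', 'three'), ('three', 'two')],
                    ['one', 'two', 'three', 'four']))
# -> ['four', 'one', 'three', 'two'] (one valid ordering, cf. test_order2)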
""" results = [r.name for r in result] indexes = list(map(lambda item: results.index(item+".service"), series)) for n in range(len(indexes)-1): if indexes[n] > indexes[n+1]: return False return True class TestServiceOrder(unittest.TestCase): def test_order1(self): sc = ServiceDict(OT1.items()) slist = sc.get_startup_list() printlist("startup list: ", slist) self.assertTrue(checkorder(slist, 'three', 'four', 'seven', 'six', 'two')) self.assertTrue(checkorder(slist, 'three', 'one', 'two')) self.assertTrue(checkorder(slist, 'eight', 'one', 'two')) def test_order2(self): sc = ServiceDict(OT2.items()) slist = sc.get_startup_list() printlist("startup list: ", slist) self.assertTrue(checkorder(slist, 'four', 'three', 'two')) def test_order3(self): sc = ServiceDict(OT3.items()) self.assertRaisesRegex(Exception, '^circular', lambda: sc.get_startup_list()) if __name__ == '__main__': unittest.main() ================================================ FILE: tests/syslog_spec.py ================================================ from prefix import * from chaperone.cutil.syslog import _syslog_spec_matcher SPECS = ( ('*.*', '(True)'), ('[crond].*', '((g and "crond" == g.lower()))'), ('.*', 'Invalid log spec syntax: .*'), ('kern.*;kern.!=crit', '((not (f==0) or not p==2)) and (((f==0)))'), ('KERN.*;kern.!crit', '((not (f==0) or not p<=2)) and (((f==0)))'), ('kern.crit', '((f==0) and p<=2)'), ('*.=emerg;*.=crit', '(p==0) or (p==2)'), ('/not and\/or able/.*', '(bool(s._regexes[0].search(buf)))'), ('*.*;![debian-start].*;authpriv,auth.!*', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4)))'), ('*.*;![debian-start].*;!authpriv,auth.*', '(not ((g and "debian-start" == g.lower())) and not ((f==10 or f==4)))'), ('*.*;![debian-start].*;!authpriv,auth.!crit', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4) and not p<=2))'), ('kern.*', '((f==0))'), ('*.*;*.!*', '((False))'), ('*.*;![chaperone].*', '(not ((g and "chaperone" == g.lower())))'), ('kern.*;!auth,authpriv.*', '(not ((f==4 or f==10))) and (((f==0)))'), ('[cron].*;[daemon-tools].crit;/password/.!err', '((not bool(s._regexes[0].search(buf)) or not p<=3)) and (((g and "cron" == g.lower())) or ((g and "daemon-tools" == g.lower()) and p<=2))'), ('kern.*;![cron].!err', '((not (g and "cron" == g.lower()) and not p<=3)) and (((f==0)))'), ('[chaperone].err;[logrotate].err;!kern.*', '(not ((f==0))) and (((g and "chaperone" == g.lower()) and p<=3) or ((g and "logrotate" == g.lower()) and p<=3))'), ('/panic/.*;/segfault/.*;*.!=debug', '((not p==7)) and ((bool(s._regexes[0].search(buf))) or (bool(s._regexes[1].search(buf))))'), ) class TestSyslogSpec(unittest.TestCase): def test_specs(self): for s in SPECS: try: sm = _syslog_spec_matcher(s[0]).debugexpr except Exception as ex: sm = ex if 'unexpected' in str(sm): raise #Uncomment to generate the test table, but CHECK IT carefully! #print("('{0:40} '{1}'),".format(s[0]+"',", sm)) self.assertEqual(str(sm), s[1]) if __name__ == '__main__': unittest.main()