Showing preview only (521K chars total). Download the full file or copy to clipboard to get everything.
Repository: garywiz/chaperone
Branch: master
Commit: 9ff2c3a5b9c6
Files: 201
Total size: 473.5 KB
Directory structure:
gitextract_pnlxl5op/
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── README
├── README.md
├── chaperone/
│ ├── __init__.py
│ ├── cproc/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── commands.py
│ │ ├── process_manager.py
│ │ ├── pt/
│ │ │ ├── __init__.py
│ │ │ ├── cron.py
│ │ │ ├── forking.py
│ │ │ ├── inetd.py
│ │ │ ├── notify.py
│ │ │ ├── oneshot.py
│ │ │ └── simple.py
│ │ ├── subproc.py
│ │ ├── version.py
│ │ └── watcher.py
│ ├── cutil/
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── env.py
│ │ ├── errors.py
│ │ ├── events.py
│ │ ├── format.py
│ │ ├── logging.py
│ │ ├── misc.py
│ │ ├── notify.py
│ │ ├── patches.py
│ │ ├── proc.py
│ │ ├── servers.py
│ │ ├── syslog.py
│ │ ├── syslog_handlers.py
│ │ └── syslog_info.py
│ └── exec/
│ ├── __init__.py
│ ├── chaperone.py
│ ├── envcp.py
│ ├── sdnotify.py
│ ├── sdnotify_exec.py
│ └── telchap.py
├── doc/
│ ├── .gitignore
│ ├── Makefile
│ ├── docserver/
│ │ ├── README
│ │ ├── build/
│ │ │ ├── Dockerfile
│ │ │ └── install.sh
│ │ ├── build.sh
│ │ ├── chaperone.d/
│ │ │ ├── 010-start.conf
│ │ │ └── 120-apache2.conf
│ │ ├── etc/
│ │ │ ├── apache2.conf
│ │ │ └── init.sh
│ │ └── run.sh
│ └── source/
│ ├── _static/
│ │ └── custom.css
│ ├── _templates/
│ │ └── layout.html
│ ├── conf.py
│ ├── guide/
│ │ ├── chap-docker-simple.rst
│ │ ├── chap-docker-smaller.rst
│ │ ├── chap-docker.rst
│ │ ├── chap-intro.rst
│ │ ├── chap-other.rst
│ │ └── chap-using.rst
│ ├── includes/
│ │ ├── defs.rst
│ │ └── incomplete.rst
│ ├── index.rst
│ ├── ref/
│ │ ├── command-line.rst
│ │ ├── config-format.rst
│ │ ├── config-global.rst
│ │ ├── config-logging.rst
│ │ ├── config-service.rst
│ │ ├── config.rst
│ │ ├── env.rst
│ │ ├── index.rst
│ │ ├── utilities.rst
│ │ └── utility-envcp.rst
│ └── status.rst
├── samples/
│ ├── README
│ ├── chaperone-devbase/
│ │ ├── Dockerfile
│ │ ├── apps/
│ │ │ ├── bin/
│ │ │ │ └── README
│ │ │ ├── chaperone.d/
│ │ │ │ └── 010-start.conf
│ │ │ ├── etc/
│ │ │ │ ├── README
│ │ │ │ └── init.sh
│ │ │ └── init.d/
│ │ │ └── README
│ │ ├── build-image.sh
│ │ └── install.sh
│ ├── chaperone-lamp/
│ │ ├── Dockerfile
│ │ ├── apps/
│ │ │ ├── chaperone.d/
│ │ │ │ ├── 105-mysqld.conf
│ │ │ │ └── 120-apache2.conf
│ │ │ ├── etc/
│ │ │ │ ├── apache2.conf
│ │ │ │ └── mysql/
│ │ │ │ ├── my.cnf
│ │ │ │ └── start_mysql.sh
│ │ │ ├── init.d/
│ │ │ │ ├── mysql.sh
│ │ │ │ └── phpmyadmin.sh
│ │ │ └── www/
│ │ │ ├── default/
│ │ │ │ └── index.php
│ │ │ └── sites.d/
│ │ │ └── default.conf
│ │ ├── build-image.sh
│ │ └── install.sh
│ ├── docsample/
│ │ ├── Dockerfile
│ │ ├── README
│ │ └── chaperone.conf
│ └── setup-bin/
│ ├── build
│ ├── ct_setproxy
│ └── dot.bashrc
├── sandbox/
│ ├── .gitignore
│ ├── .shinit
│ ├── README
│ ├── bare_startup.sh
│ ├── bareimage/
│ │ ├── Dockerfile
│ │ └── install-bareimage.sh
│ ├── bash.bashrc
│ ├── bin/
│ │ ├── chaperone
│ │ ├── cps
│ │ ├── fakeentry
│ │ └── repeat
│ ├── centos.d/
│ │ ├── apache.conf
│ │ ├── app.conf
│ │ ├── cron.conf
│ │ └── sys1.conf
│ ├── distserv/
│ │ ├── chaperone.d/
│ │ │ ├── 005-config.conf
│ │ │ ├── 010-start.conf
│ │ │ └── 120-apache2.conf
│ │ ├── etc/
│ │ │ └── apache2.conf
│ │ └── run.sh
│ ├── etc/
│ │ ├── apache2.conf
│ │ └── makezombie.conf
│ ├── test.d/
│ │ ├── apache.conf
│ │ ├── cron.conf
│ │ └── sys1.conf
│ ├── testbare
│ ├── testcent
│ ├── testdock
│ ├── testimage
│ ├── testvar
│ └── user.d/
│ └── sys1.conf
├── setup.py
└── tests/
├── .gitignore
├── README.md
├── bin/
│ ├── chaperone
│ ├── daemon
│ ├── daemonutil.py
│ ├── envcp
│ ├── expect-lite-command-run
│ ├── expect-lite-image-run
│ ├── expect-test-command
│ ├── expect-test-image
│ ├── get-serial
│ ├── is-running
│ ├── kill-from-pidfile
│ ├── logecho
│ ├── proctool
│ ├── read_from_port
│ ├── sdnotify
│ ├── sdnotify-exec
│ ├── talkback
│ ├── telchap
│ └── test-driver
├── el-tests/
│ ├── basic-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ └── test-002.elt
│ ├── cron-1/
│ │ ├── chaperone.conf
│ │ ├── simulate-rotate.sh
│ │ ├── test-001.elt
│ │ ├── test-004.elt
│ │ ├── test-005.elt
│ │ ├── test-006.elt
│ │ ├── test-007.elt
│ │ └── test-008.elt
│ ├── exitkills-1/
│ │ ├── chaperone.conf
│ │ └── test-001.elt
│ ├── fork-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-001b.elt
│ │ ├── test-003.elt
│ │ └── test-004.elt
│ ├── inetd-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ └── test-002.elt
│ ├── notify-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-001b.elt
│ │ ├── test-001c.elt
│ │ ├── test-001d.elt
│ │ └── test-001e.elt
│ ├── simple-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-002.elt
│ │ ├── test-003.elt
│ │ └── test-004.elt
│ └── simple-2/
│ ├── chaperone.conf
│ ├── test-001.elt
│ ├── test-002.elt
│ ├── test-003.elt
│ └── test-004.elt
├── env_expand.py
├── env_parse.py
├── events.py
├── prefix.py
├── run-all-tests.sh
├── run-el.sh
├── run-shell.sh
├── service_order.py
└── syslog_spec.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
build/*
chaperone.egg*
dist/*
================================================
FILE: CHANGELOG.md
================================================
## 0.3.00 (2015-10-04)
This is a major release that adds a number of important features and refinements. Most importantly, a new automated test harness that simulates various process mixes has been added to the release process to assure that Chaperone manages processes in a consistent and reliable way from release to release.
In addition, Chaperone now recognizes `NOTIFY_SOCKET` upon start-up, and will inform the host's `systemd` of the status of the container. This adds to Chaperone's existing support for notify-type processes within the container. This means that container designers can choose any of a number of methods of signalling process readiness inside the container while Chaperone will translate those actions into suitable `systemd` notifications for the host.
This version is completely backward-compatible with older Chaperone versions.
Enhancements:
- Chaperone will recognize the `NOTIFY_SOCKET` environment variable if passed upon start-up and provide full `systemd` compatible notifications to the host.
- The [detect_exit](http://garywiz.github.io/chaperone/ref/config-global.html#settings-detect-exit) global setting, which defaults to `true` tells Chaperone to attempt to determine when all processes have completed and automatically terminate the container. This was the previous default behavior, but the new setting provides flexibility for containers which remain dormant until processes are started manually.
- There is now a `telchap shutdown` command which provides orderly container shutdown from scripts.
- Added the `sdnotify-exec` utility which is a multi-purpose wrapper which can be used to proxy `NOTIFY_SOCKET` communication to the host, or can be used to determine if a container is properly started even outside of `systemd` contexts.
Refinements:
- Exit detection is now smarter about `cron` and `inetd` jobs and will not cause container exit if either of those types have scheduled operations which have not yet been triggered.
- The [--disable-services](http://garywiz.github.io/chaperone/ref/command-line.html#option-disable-services) switch now truly disables services rather than not defining them. Therefore, services in such containers can now be started manually.
- Cron-type services now have more well-defined behavior for `telchap stop` which will unschedule the service, and `telchap reset` which will merely kill the current job and reschedule another.
- If Chaperone `notify`-type services signal with `ERRNO=n`, then Chaperone will intelligently pass this error number up to `systemd` if the error was the direct cause of container termination, otherwise it is noted in the logs and `systemd` won't find out about it.
## 0.2.40 (2015-09-08)
Enhancements:
- Both `uid` and `gid` can be specified using the path-format of the [--create-user](http://garywiz.github.io/chaperone/ref/command-line.html#option-create-user) command-line switch.
Refinements:
- The `${ENV:-foo}` expansion format now behaves like `bash` where 'foo' is the result if the variable `ENV` is undefined or null (blank). Previously, it required that the variable be undefined. This behavior is now consistent throughout all expansion operators.
- Improved the environment expansion code to handle outlying cases, as well as be significantly more readable. Used coverage analysis to improve unit test coverage for complex expansions involving recursion.
Bug fixes:
- Newer versions of Python's `asyncio` (present in some distros) could hang when starting an **inetd**-style socket process.
## 0.2.37 (2015-08-24)
Enhancements:
- Add support for **inetd**-compatible dynamic TCP socket connections. See the description of the [port configuration parameter](http://garywiz.github.io/chaperone/ref/config-service.html#service-port) for a complete description of this feature.
- Added [_CHAP_SERVICE_SERIAL](http://garywiz.github.io/chaperone/ref/env.html#env-chap-service-serial) and [_CHAP_SERVICE_TIME](http://garywiz.github.io/chaperone/ref/env.html#env-chap-service-time) environment variables to provide useful information to 'cron' and 'inetd' services which may execute multiple times.
- Added the ability to add a `gid` number to the path-based format of the [--create-user](http://garywiz.github.io/chaperone/ref/command-line.html#option-create-user) command-line switch.
Bug Fixes:
- Fixed `telchap stop` so that it no longer would cause service restarts to occur.
- Improved the service restart logic to handle a wider variety of service failure situations.
## 0.2.31 (2015-08-11)
Enhancements:
- Add support for --create-user name:/path so that user identity can be based upon
the permissions set for a given path. This helps workaround the file permissions
issues under OSX/VirtualBox where you can't really modify the mounted file
permissions and instead "get what you get".
## 0.2.30 (2015-08-07)
Enhancements:
- Add support for --archive/-a to envcp.
## 0.2.29 (2015-08-05)
Refinements:
- Allow backslash-escaping of VBAR construct contents in environment variable
if-then-else construct.
## 0.2.28 (2015-08-03)
Refinements:
- Create a special-case syntax for shell escapes: ``$(`shell-command`)`` mainly to
assure that such syntaxes are properly supported instead of being expanded as a
side-effect. Previously, the syntax above would treat the result of the command
as the name of an environment variable, and since it was not found, would insert
the results. Since it was a useful trick, formalizing the use and eliminating
edge cases was important.
- Disabled shell escapes by default in ``envcp`` and added the ``--shell-enable``
switch to enable them.
- Added further documentation about shell escapes to clarify exactly how they
work and how they should be used.
## 0.2.27 (2015-08-01)
Enhancements:
- Added documentation for ``envcp`` in the new utilities section of the documentation.
- Enhanced environment-variable expansions so they are smart about nesting.
- Fixed syslog receiver so that trailing newlines are stripped (programs like ``sudo``
and ``openvpn`` terminate their log lines this way, even though it is a questionable
practice).
## 0.2.26 (2015-07-28)
Enhancements:
- Added the ``:/`` regex substitution expansion option, which provides a more extensive and useful
feature set than the bash-compatible options.
- Updated the documentation to reflect the new expansion option and added a footnote about
bash compatibility.
## 0.2.25 (2015-07-27)
Enhancements:
- Added the ``:?`` and ``:|`` environment variable expansion options. The first works similarly
to bash and raises an error if a variable is not defined. The second adds more versatility to
expansions by allowing the expansion to depend upon the particular value of a variable.
- Added documentation for the above.
## 0.2.24 (2015-07-27)
Bug Fixes:
- Made `setproctitle` an optional install so that `--no-install-recommends` can be used
on `apt-get` installs to streamline image size ([#1, @mc0e](https://github.com/garywiz/chaperone/issues/1))
Other:
- PyPi distribution is no longer done in "wheel" format, since that limits the ability
to include optional dependencies. Source format is used instead.
================================================
FILE: LICENSE
================================================
Copyright (c) 2015, Gary J. Wisniewski <garyw@blueseastech.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README
================================================
Chaperone is a lean, full-featured top-level system manager, similar to init, systemd, and others,
but designed for lean container environments like Docker. It is a single, small program which provides
process clean-up, rudimentary logging, and service management without the overhead of additional
complex configuration.
================ ======================================================
Documentation http://garywiz.github.io/chaperone
chaperone Source http://github.com/garywiz/chaperone
pypi link http://pypi.python.org/pypi/chaperone
================ ======================================================
================================================
FILE: README.md
================================================
#  Chaperone
[](https://gitter.im/garywiz/chaperone?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [](https://badge.fury.io/py/chaperone)
Chaperone is a lean init-style startup manager for Docker-like containers. It runs as a single lightweight full-featured process which runs at the root of a docker container tree and provides all of the following functionality, plus much more:
* Monitoring for all processes in the container, automatically shutting down the
container when the last process exits.
* A complete, configurable syslog facility built in and provided on /dev/log
so daemons and other services can have output captured. Configurable
to handle log-file rotation, duplication to stdout/stderr, and full Linux
logging facility, severity support. No syslog daemon is required in your
container.
* The ability to start up system services in dependency order, with options
for per-service environment variables, restart options, and stdout/stderr capture either
to the log service or stdout.
* A built-in cron scheduling service.
* Emulation of systemd notifications (sd_notify) so services can post
ready and status notifications to chaperone.
* Process monitoring and zombie elimination, along with organized system
shutdown to assure all daemons shut-down gracefully.
* The ability to have an optional controlling process, specified on the
docker command line, to simplify creating containers which have development
mode vs. production mode.
* Complete configuration using a ``chaperone.d`` directory which can be located
in various places, and even allows different configurations
within the container, triggered based upon which user is selected at start-up.
* Default behavior designed out-of-the-box to work with simple Docker containers
for quick start-up for lean containers.
* More...
If you want to try it out quickly, the best place to start is on the
[chaperone-docker](https://github.com/garywiz/chaperone-docker) repository
page. There is a quick section called "Try it out" that uses images
available now on Docker Hub.
For full details of features
and usage: [see the documentation](http://garywiz.github.io/chaperone/index.html).
There is some debate about whether docker containers should be transformed into
complete systems (so-called "fat containers"). However, it is clear that many
containers contain one or more services to provide a single "composite feature",
but that such containers need a special, more streamlined approach to managing
a number of common daemons.
Chaperone is the best answer I've come up with so far, and was inspired by
The [Phusion baseimage-docker](http://phusion.github.io/baseimage-docker/) approach.
However, unlike the Phusion image, it does not require adding daemons for logging,
system services (such as runit). Chaperone is designed to be self-contained.
Status
------
Chaperone is now stable and ready for production. If you are currently starting up your
container services with Bash scripts, Chaperone is probably a much better choice.
Full status is [now part of the documentation](http://garywiz.github.io/chaperone/status.html).
Downloading and Installing
--------------------------
The easiest way to install ``chaperone`` is using ``pip`` from the https://pypi.python.org/pypi/chaperone package:
# Ubuntu or debian prerequisites...
apt-get install python3-pip
# chaperone installation (may be all you need)
pip3 install chaperone
License
-------
Copyright (c) 2015, Gary J. Wisniewski <garyw@blueseastech.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: chaperone/__init__.py
================================================
# Placeholder
================================================
FILE: chaperone/cproc/__init__.py
================================================
# Placeholder
from chaperone.cproc.process_manager import TopLevelProcess
================================================
FILE: chaperone/cproc/client.py
================================================
import asyncio
class CommandClient(asyncio.Protocol):
    """
    One-shot client protocol: connects to chaperone's control socket
    (/dev/chaperone.sock), sends a single command string, and collects
    the textual reply.
    """

    @classmethod
    def sendCommand(cls, cmd):
        """
        Synchronously send 'cmd' to the chaperone control socket and return
        the reply payload.

        Runs the event loop until connection_lost() stops it, then closes
        the loop — so this is intended to be called once per process from
        a simple CLI client.
        """
        loop = asyncio.get_event_loop()
        coro = loop.create_unix_connection(lambda: CommandClient(cmd, loop), path = "/dev/chaperone.sock")
        (transport, protocol) = loop.run_until_complete(coro)
        loop.run_forever()
        loop.close()
        return protocol.result

    def __init__(self, message, loop):
        self.message = message   # command text transmitted upon connect
        self.loop = loop         # loop to stop once the server closes the connection
        self.result = None       # reply payload, set in data_received()

    def connection_made(self, transport):
        # Push the command as soon as the connection is established.
        transport.write(self.message.encode())

    def data_received(self, data):
        # Reply format: first line is a status tag ('RESULT' or
        # 'COMMAND-ERROR'), the remainder is the payload.
        msg = data.decode()
        lines = msg.split("\n")
        error = None
        if lines[0] in {'COMMAND-ERROR', 'RESULT'}:
            self.result = "\n".join(lines[1:])
        else:
            error = "Unexpected response from chaperone: " + str(msg)
        if error:
            raise Exception(error)

    def connection_lost(self, exc):
        # Server closes the socket when the command completes; stop our loop
        # so sendCommand() can return.
        self.loop.stop()
================================================
FILE: chaperone/cproc/commands.py
================================================
import os
import asyncio
import stat
import shlex
from functools import partial
from docopt import docopt
from chaperone.cutil.servers import Server, ServerProtocol
from chaperone.cutil.misc import maybe_remove
from chaperone.cutil.logging import debug, warn, info
import chaperone.cutil.syslog_info as syslog_info
COMMAND_DOC = """
Usage: telchap status
telchap loglevel [<level>]
telchap stop [--force] [--wait] [--disable] [<servname> ...]
telchap start [--force] [--wait] [--enable] [<servname> ...]
telchap reset [--force] [--wait] [<servname> ...]
telchap enable [<servname> ...]
telchap disable [<servname> ...]
telchap dependencies
telchap shutdown [<delay>]
"""
CHAP_FIFO = "/dev/chaperone"
CHAP_SOCK = "/dev/chaperone.sock"
class _BaseCommand(object):
    """
    Base class for all telchap commands.

    Subclasses set 'command_name' to the docopt option key identifying the
    command (or a tuple of keys which must all be present) and implement
    the do_exec(opts, controller) coroutine.
    """

    command_name = "X"          # docopt key (str) or tuple of keys selecting this command
    interactive_only = False    # if True, only honored on the interactive socket
    interactive = False         # set per-invocation from the requesting protocol

    def match(self, opts):
        """Return True if the parsed docopt options select this command."""
        if isinstance(self.command_name, tuple):
            return all(opts.get(name, False) for name in self.command_name)
        return opts.get(self.command_name, False)

    @asyncio.coroutine
    def exec(self, opts, protocol):
        """
        Execute the command and return a result string.

        Any exception raised by do_exec() is converted into an error-message
        string rather than propagated, since the result is reported back to
        a remote client.
        """
        self.interactive = protocol.interactive
        try:
            result = yield from self.do_exec(opts, protocol.owner.controller)
            return str(result)
        except Exception as ex:
            return "Command error: " + str(ex)
STMSG = """
Running: {0.version}
Uptime: {0.uptime}
Managed processes: {1} ({2} enabled)
"""
class statusCommand(_BaseCommand):
    """Implements 'telchap status': report version, uptime and per-service state."""

    command_name = "status"
    interactive_only = True

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        services = controller.services
        enabled_count = len([s for s in services.values() if s.enabled])
        header = STMSG.format(controller, len(services), enabled_count)
        table = str(services.get_status_formatter().get_formatted_data())
        return header + "\nServices:\n\n" + table + "\n"
class dependenciesCommand(_BaseCommand):
    """Implements 'telchap dependencies': dump the service dependency graph."""

    command_name = "dependencies"
    interactive_only = True

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        lines = controller.services.services_config.get_dependency_graph()
        return "\n".join(lines)
class serviceReset(_BaseCommand):
    """Implements 'telchap reset': reset one or more (or all) services."""

    command_name = 'reset'

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        # Waiting for completion only makes sense for interactive clients;
        # FIFO writers never see a reply.
        do_wait = self.interactive and opts['--wait']
        yield from controller.services.reset(opts['<servname>'], force = opts['--force'], wait = do_wait)
        return "services reset."
class serviceEnable(_BaseCommand):
    """Implements 'telchap enable': mark the named services as enabled."""

    command_name = 'enable'

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        yield from controller.services.enable(opts['<servname>'])
        return "services enabled."
class serviceDisable(_BaseCommand):
    """Implements 'telchap disable': mark the named services as disabled."""

    command_name = 'disable'

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        yield from controller.services.disable(opts['<servname>'])
        return "services disabled."
class serviceStart(_BaseCommand):
    """Implements 'telchap start': start services, optionally enabling them."""

    command_name = 'start'

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        # Only interactive clients can usefully wait for start-up to finish.
        do_wait = self.interactive and opts['--wait']
        yield from controller.services.start(opts['<servname>'],
                                             force = opts['--force'],
                                             wait = do_wait,
                                             enable = opts['--enable'])
        return "services started." if do_wait else "service start-up queued."
class serviceStop(_BaseCommand):
    """Implements 'telchap stop': stop services, optionally disabling them."""

    command_name = 'stop'

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        # Only interactive clients can usefully wait for shutdown to finish.
        do_wait = self.interactive and opts['--wait']
        yield from controller.services.stop(opts['<servname>'],
                                            force = opts['--force'],
                                            wait = do_wait,
                                            disable = opts['--disable'])
        return "services stopped." if do_wait else "services stopping."
class loglevelCommand(_BaseCommand):
    """
    Implements 'telchap loglevel [<level>]'.

    With no argument, reports the currently forced minimum syslog level;
    with an argument, forces all logging to include that level or greater.
    """

    command_name = "loglevel"

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        level = opts['<level>']

        if level is not None:
            # Set mode — accept either "info" or the syslog-style "*.info".
            if level.startswith('*.'):
                level = level[2:]
            controller.force_log_level(level)
            return "All logging set to include priorities >= *." + level.lower()

        # Query mode — report the currently forced level, if any.
        current = controller.force_log_level()
        if current is None:
            return "Forced Logging Level: NOT SET"
        try:
            # NOTE(review): this branch returns just "*.<name>" without the
            # "Forced Logging Level:" prefix used by the other branches;
            # kept as-is to preserve the output format.
            return "*." + syslog_info.PRIORITY[current]
        except IndexError:
            return "Forced Logging Level: UNKNOWN"
class shutdownCommand(_BaseCommand):
    """Implements 'telchap shutdown [<delay>]': schedule orderly container shutdown."""

    command_name = "shutdown"

    @asyncio.coroutine
    def do_exec(self, opts, controller):
        raw = opts['<delay>']

        if raw is None or raw.lower() == "now":
            # Token delay so the reply can reach the client before we die.
            seconds = 0.1
            message = "Shutting down now"
        else:
            try:
                seconds = float(raw)
            except ValueError:
                return "Specified delay is not a valid decimal number: " + str(raw)
            message = "Shutting down in {0} seconds".format(seconds)

        info("requested shutdown scheduled to occur in {0} seconds".format(seconds))
        asyncio.get_event_loop().call_later(seconds, controller.kill_system)
        return message
##
## Register all commands here
##
## Each entry is a singleton instance; CommandProtocol scans this tuple in
## order and dispatches to the first command whose match() accepts the
## parsed docopt options.
COMMANDS = (
    loglevelCommand(),
    shutdownCommand(),
    statusCommand(),
    serviceStop(),
    serviceStart(),
    serviceReset(),
    serviceEnable(),
    serviceDisable(),
    dependenciesCommand(),
)
class CommandProtocol(ServerProtocol):
    """
    Protocol shared by the command FIFO (non-interactive) and the Unix
    domain socket (interactive).  Parses incoming telchap command lines
    with docopt and dispatches to the matching entry in COMMANDS.
    """

    interactive = False     # True for socket connections, which receive a reply

    @asyncio.coroutine
    def _interpret_command(self, msg):
        """Parse and execute one command line; return reply text, or None for blank input."""
        if not msg:
            return
        try:
            options = docopt(COMMAND_DOC, shlex.split(msg), help=False)
        except Exception as ex:
            result = "EXCEPTION\n" + str(ex)
        except SystemExit as ex:
            # docopt signals usage errors via SystemExit, which is NOT an
            # Exception subclass, so the clause above does not swallow it.
            result = "COMMAND-ERROR\n" + str(ex)
        else:
            result = "?"
            for c in COMMANDS:
                # interactive_only commands are ignored on the FIFO
                if c.match(options) and (not c.interactive_only or self.interactive):
                    result = yield from c.exec(options, self)
                    break
            result = "RESULT\n" + result
        return result

    @asyncio.coroutine
    def _command_task(self, cmd, interactive = False):
        result = yield from self._interpret_command(cmd)
        if interactive:
            # One command per connection: write the reply, then close.
            self.transport.write(result.encode())
            self.transport.close()

    def data_received(self, data):
        if self.interactive:
            # Socket clients send a single command per connection.
            asyncio.async(self._command_task(data.decode(), True))
        else:
            # FIFO writers may batch several newline-separated commands.
            commands = data.decode().split("\n")
            for c in commands:
                asyncio.async(self._command_task(c))
class _InteractiveServer(Server):
    """
    Unix-domain-socket server providing the interactive telchap channel on
    CHAP_SOCK; replies are written back to the connecting client.
    """

    def _create_server(self):
        # Clear out any stale socket left behind by a previous run.
        maybe_remove(CHAP_SOCK)
        factory = CommandProtocol.buildProtocol(self, interactive=True)
        loop = asyncio.get_event_loop()
        return loop.create_unix_server(factory, path=CHAP_SOCK)

    @asyncio.coroutine
    def server_running(self):
        # World-writable so any user in the container can issue commands.
        os.chmod(CHAP_SOCK, 0o777)

    def close(self):
        super().close()
        maybe_remove(CHAP_SOCK)
class CommandServer(Server):
    """
    Command server which owns both the world-writable command FIFO (default
    CHAP_FIFO) and the companion interactive domain socket server.
    """

    controller = None       # object commands operate upon (TopLevelProcess)
    _fifoname = None        # path of the command FIFO we created
    _iserve = None          # companion _InteractiveServer, created once running

    def __init__(self, controller, filename = CHAP_FIFO, **kwargs):
        """
        Creates a new command FIFO and socket. The controller is the object to which commands and interactions
        will occur, usually a chaperone.cproc.process_manager.TopLevelProcess.
        """
        super().__init__(**kwargs)
        self.controller = controller
        self._fifoname = filename

    @asyncio.coroutine
    def server_running(self):
        # Bring up the interactive socket alongside the FIFO.
        self._iserve = _InteractiveServer()
        self._iserve.controller = self.controller # share this with our domain socket
        yield from self._iserve.run()

    def _open(self):
        """Create (if needed) and open the FIFO read/write and non-blocking."""
        name = self._fifoname
        maybe_remove(name)
        if not os.path.exists(name):
            os.mkfifo(name)
        if not stat.S_ISFIFO(os.stat(name).st_mode):
            raise TypeError("File is not a fifo: " + str(name))
        os.chmod(name, 0o777)   # anyone in the container may write commands
        # O_RDWR keeps the FIFO open even when no writers are present.
        return open(os.open(name, os.O_RDWR|os.O_NONBLOCK))

    def _create_server(self):
        return asyncio.get_event_loop().connect_read_pipe(CommandProtocol.buildProtocol(self), self._open())

    def close(self):
        super().close()
        # Bug fix: remove the FIFO we actually created (self._fifoname may
        # differ from the default CHAP_FIFO if a custom filename was given).
        maybe_remove(self._fifoname)
        if self._iserve:
            self._iserve.close()
================================================
FILE: chaperone/cproc/process_manager.py
================================================
import os
import pwd
import errno
import asyncio
import shlex
import signal
import datetime
from functools import partial
from time import time, sleep
import chaperone.cutil.syslog_info as syslog_info
from chaperone.cproc.commands import CommandServer
from chaperone.cproc.version import DISPLAY_VERSION
from chaperone.cproc.watcher import InitChildWatcher
from chaperone.cproc.subproc import SubProcess, SubProcessFamily
from chaperone.cutil.config import ServiceConfig
from chaperone.cutil.env import Environment
from chaperone.cutil.notify import NotifySink
from chaperone.cutil.logging import warn, info, debug, error, set_log_level
from chaperone.cutil.misc import lazydict, objectplus
from chaperone.cutil.syslog import SyslogServer
from chaperone.cutil.errors import get_errno_from_exception
class CustomEventLoop(asyncio.SelectorEventLoop):
    """
    Selector event loop whose socket transports first offer an accepted
    socket to the protocol, enabling inetd-style socket hand-off.
    """

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """
        Supports a special protocol method 'acquire_socket' which accepts only a socket.
        If it returns True, then the passed socket has been detached and no further
        action will be taken. This is to support inetd-style processes.
        """
        if hasattr(protocol, 'acquire_socket') and protocol.acquire_socket(sock):
            if waiter:
                # Complete the waiter so callers awaiting the transport proceed.
                waiter.set_result(None)
            return None
        return super()._make_socket_transport(sock, protocol, waiter, extra=extra, server=server)

# Install our loop class as the factory used by the default event loop policy.
asyncio.DefaultEventLoopPolicy._loop_factory = CustomEventLoop
class TopLevelProcess(objectplus):
send_sighup = False
detect_exit = True
_shutdown_timeout = None
_ignore_signals = False
_services_started = False
_syslog = None
_command = None
_minimum_syslog_level = None
_start_time = None
_status_interval = None
_family = None
_exitcode = None
_all_killed = False
_killing_system = False
_kill_future = None
_config = None
_pending = None
_notify_enabled = False
notify = None
def __init__(self, config):
self._config = config
self._start_time = time()
self._pending = set()
self.notify = NotifySink() # whether or not we actually have a notify socket
# wait at least 0.5 seconds, zero is totally pointless
settings = config.get_settings()
self._shutdown_timeout = settings.get('shutdown_timeout', 8) or 0.5
self.detect_exit = settings.get('detect_exit', True)
self.enable_syslog = settings.get('enable_syslog', True)
policy = asyncio.get_event_loop_policy()
w = self._watcher = InitChildWatcher(onNoProcesses = self._queue_no_processes)
policy.set_child_watcher(w)
self.loop.add_signal_handler(signal.SIGTERM, self.kill_system)
self.loop.add_signal_handler(signal.SIGINT, self._got_sigint)
self._status_interval = settings.get('status_interval', 30)
@property
def debug(self):
return asyncio.get_event_loop().get_debug()
@debug.setter
def debug(self, val):
asyncio.get_event_loop().set_debug(val)
@property
def loop(self):
return asyncio.get_event_loop()
@property
def system_alive(self):
"""
Returns true if the system is considered "alive" and new processes, restarts, and other
normal operations should proceed. Generally, the system is alive until it is killed,
but the process of shutting down the system may be complex and time consuming, and
in the future there may be other factors which cause us to suspend
normal system operation.
"""
return not self._killing_system
@property
def version(self):
"Returns version identifier"
return "chaperone version {0}".format(DISPLAY_VERSION)
@property
def uptime(self):
return datetime.timedelta(seconds = time() - self._start_time)
@property
def services(self):
return self._family
def force_log_level(self, level = None):
"""
Specifies the *minimum* logging level that will be applied to all syslog entries.
This is primarily useful for debugging, where you want to override any limitations
imposed on log file entries.
As a (convenient) side-effect, if the level is DEBUG, then debug features of both
asyncio as well as chaperone will be enabled.
If level is not provided, then returns the current setting.
"""
if level is None:
return self._minimum_syslog_level
levid = syslog_info.PRIORITY_DICT.get(level.lower(), None)
if not levid:
raise Exception("Not a valid log level: {0}".format(level))
set_log_level(levid)
self._minimum_syslog_level = levid
self.debug = (levid == syslog_info.LOG_DEBUG)
if self._syslog:
self._syslog.reset_minimum_priority(levid)
info("Forcing all log output to '{0}' or greater", level)
def _queue_no_processes(self):
# Any output from dead processes won't get queued into the logs if we
# don't return to the event loop.
self.loop.call_later(0.05, self._no_processes)
def _no_processes(self, ignore_service_state = False):
if not (ignore_service_state or self._services_started):
return # do not react during system initialization
self._all_killed = True
if not self._killing_system:
if not self.detect_exit:
return
if self._family:
ss = self._family.get_scheduled_services()
if ss:
warn("system will remain active since there are scheduled services: " + ", ".join(s.name for s in ss))
return
# Passed all checks, now kill system
self.notify.stopping()
debug("Final termination phase.")
self._services_started = False
if self._kill_future and not self._kill_future.cancelled():
self._kill_future.cancel()
self.activate(self._final_system_stop())
@asyncio.coroutine
def _final_system_stop(self):
    # Last step of shutdown: give pending output a moment to drain, close
    # the syslog and command servers, cancel stragglers, stop the loop.
    yield from asyncio.sleep(0.1)
    if self._syslog:
        self._syslog.close()
    if self._command:
        self._command.close()
    self._cancel_pending()
    self.loop.stop()
def _got_sigint(self):
    """SIGINT handler: Ctrl-C forces an unconditional system kill."""
    print("\nCtrl-C ... killing chaperone.")
    self.kill_system(4, force=True)
def signal_ready(self):
    """
    Tells any notify listener that the system is ready. Does nothing if the system
    is dying due to errors, or if a kill is in progress.
    """
    if not self._services_started or self._killing_system:
        return
    self.notify.ready()
    # This is the time to set up the status monitor (only worthwhile if
    # there is a listener and a configured reporting interval).
    if self._status_interval and self._family and self._notify_enabled:
        self.activate(self._report_status())
@asyncio.coroutine
def _report_status(self):
    # Periodically push the family's status summary to the notify listener
    # until the interval is cleared.
    while self._status_interval:
        if self._family:
            self.notify.status(self._family.get_status())
        yield from asyncio.sleep(self._status_interval)
def kill_system(self, errno = None, force = False):
    """
    Systematically shuts down the system. With the 'force' argument set to true,
    does so even if a kill is already in progress.

    errno, if provided, is reported to the notify listener, and the process
    exit code defaults to 1 (unless already set).
    """
    if force:
        # mark services as started so _no_processes() will react
        self._services_started = True
    elif self._killing_system:
        return
    if self._exitcode is None and errno is not None:
        self._exitcode = 1      # default exit for an error
        self.notify.error(errno)
    warn("Request made to kill system." + ((force and " (forced)") or ""))
    self._killing_system = True
    self._kill_future = asyncio.async(self._kill_system_co())
def _cancel_pending(self):
    "Cancel any pending activated tasks"
    # Iterate a snapshot; cancellation callbacks may mutate the set.
    for task in list(self._pending):
        if not task.cancelled():
            task.cancel()
@asyncio.coroutine
def _kill_system_co(self):
    # Orderly shutdown sequence: stop all services, SIGTERM (and optionally
    # SIGHUP) everything remaining, then SIGKILL whatever survives.
    self.notify.stopping()
    self._cancel_pending()
    # Tell the family it's been nice. It's unlikely we won't have a process family, but
    # it's optional, so we should handle the situation.
    wait_done = False           # indicates if shutdown_timeout has expired
    if self._family:
        for f in self._family.values():
            yield from f.final_stop()
        # let normal shutdown happen
        if self._watcher.number_of_waiters > 0 and self._shutdown_timeout:
            debug("still have {0} waiting, sleeping for shutdown_timeout={1}".format(self._watcher.number_of_waiters, self._shutdown_timeout))
            yield from asyncio.sleep(self._shutdown_timeout)
            wait_done = True
    try:
        # kill(-1, ...) signals every process we can reach.
        os.kill(-1, signal.SIGTERM) # first try a sig term
        if self.send_sighup:
            os.kill(-1, signal.SIGHUP)
    except ProcessLookupError:
        debug("No processes remain when attempting to kill system, just stop.")
        self._no_processes(True)
        return
    if wait_done:               # give a short wait just so the signals fire
        yield from asyncio.sleep(1) # these processes are unknowns
    else:
        yield from asyncio.sleep(self._shutdown_timeout)
    if self._all_killed:
        return
    info("Some processes remain after {0}secs. Forcing kill".format(self._shutdown_timeout))
    try:
        os.kill(-1, signal.SIGKILL)
    except ProcessLookupError:
        debug("No processes when attempting to force quit")
        self._no_processes(True)
        return
def activate_result(self, future):
    """Done-callback: drop a completed future from the pending set."""
    self._pending.discard(future)
def activate(self, cr):
future = asyncio.async(cr)
future.add_done_callback(self.activate_result)
self._pending.add(future)
return future
def _system_coro_check(self, f):
    """If a system startup future failed, log the error and kill the system."""
    exc = f.exception()
    if exc:
        error("system startup cancelled due to error: {0}".format(exc))
        self.kill_system(get_errno_from_exception(exc))
def _system_started(self, startup, future=None):
    # Done-callback for _start_system_services(): abort on startup errors,
    # otherwise announce readiness and launch the user startup coroutine.
    if future and not future.cancelled() and future.exception():
        self._system_coro_check(future)
        return
    info(self.version + ", ready.")
    if startup:
        future = self.activate(startup)
        future.add_done_callback(self._system_coro_check)
@asyncio.coroutine
def _start_system_services(self):
    # Bring up chaperone's own services: the sd_notify connection, the
    # internal syslog server, and the command (telchap) server.
    self._notify_enabled = yield from self.notify.connect()
    if self.enable_syslog:
        self._syslog = SyslogServer()
        self._syslog.configure(self._config, self._minimum_syslog_level)
        try:
            yield from self._syslog.run()
        except PermissionError as ex:
            # Non-fatal: continue without the syslog service.
            self._syslog = None
            warn("syslog service cannot be started: {0}", ex)
        else:
            self._syslog.capture_python_logging()
            info("Switching all chaperone logging to /dev/log")
    self._command = CommandServer(self)
    try:
        yield from self._command.run()
    except PermissionError as ex:
        # Non-fatal: continue without the command service.
        self._command = None
        warn("command service cannot be started: {0}", ex)
def run_event_loop(self, startup_coro = None, exit_when_done = True):
    """
    Sets up the event loop and runs it, setting up basic services such as syslog
    as well as the command services sockets. Then, calls the startup coroutine (if any)
    to tailor the environment and start up other services as needed.

    If exit_when_done is true, the process exits with the accumulated exit
    code once the loop stops.
    """
    initfuture = asyncio.async(self._start_system_services())
    initfuture.add_done_callback(lambda f: self._system_started(startup_coro, f))
    self.loop.run_forever()
    self.loop.close()
    if exit_when_done:
        exit(self._exitcode or 0)
@asyncio.coroutine
def run_services(self, extra_services, disable_others = False):
    """
    Run all services.

    extra_services, if given, are added to the configured service set;
    when disable_others is True all configured services are disabled first
    so only the extras run.  After startup is attempted, exit detection
    kicks in if detect_exit is set.
    """
    # First, determine our overall configuration for the services environment.
    services = self._config.get_services()
    if extra_services:
        # Work on a copy so the underlying configuration is untouched.
        services = services.deepcopy()
        if disable_others:
            for s in services.values():
                s.enabled = False
        for s in extra_services:
            services.add(s)
    family = self._family = SubProcessFamily(self, services)
    tried_any = False
    # NOTE: removed an unused local ('errno = None') that was never read.
    try:
        tried_any = yield from family.run()
    except asyncio.CancelledError:
        pass
    finally:
        # Mark services started even on cancellation so that subsequent
        # exit detection behaves consistently.
        self._services_started = True
        if self.detect_exit:
            if not tried_any:
                warn("No service startups attempted (all disabled?) - exiting due to 'detect_exit=true'")
                self.kill_system()
            else:
                self._watcher.check_processes()
================================================
FILE: chaperone/cproc/pt/__init__.py
================================================
# Placeholder
from chaperone.cproc.process_manager import TopLevelProcess
================================================
FILE: chaperone/cproc/pt/cron.py
================================================
import asyncio
from aiocron import crontab
from chaperone.cutil.logging import error, warn, debug, info
from chaperone.cutil.syslog_info import LOG_CRON
from chaperone.cproc.subproc import SubProcess
from chaperone.cutil.errors import ChParameterError
# Translation table mapping Vixie-cron "special" schedule names to their
# standard five-field equivalents (see crontab(5)).
_CRON_SPECIALS = {
    '@yearly':   '0 0 1 1 *',
    '@annually': '0 0 1 1 *',
    '@monthly':  '0 0 1 * *',
    '@weekly':   '0 0 * * 0',
    '@daily':    '0 0 * * *',
    '@hourly':   '0 * * * *',
}
class CronProcess(SubProcess):
syslog_facility = LOG_CRON
_cron = None
_fut_monitor = None
def __init__(self, service, family=None):
super().__init__(service, family)
if not self.interval:
raise ChParameterError("interval= property missing, required for cron service '{0}'".format(self.name))
# Support specials with or without the @
real_interval = _CRON_SPECIALS.get(self.interval) or _CRON_SPECIALS.get('@'+self.interval) or self.interval
# make a status note
self.note = "{0} ({1})".format(self.interval, real_interval) if self.interval != real_interval else real_interval
self._cron = crontab(real_interval, func=self._cron_hit, start=False)
def default_status(self):
if self._cron.handle:
return 'waiting'
return None
@property
def scheduled(self):
return self._cron and self._cron.handle
@asyncio.coroutine
def start(self):
"""
Takes over startup and sets up our cron loop to handle starts instead.
"""
if not self.enabled or self._cron.handle:
return
self.start_attempted = True
# Start up cron
try:
self._cron.start()
except Exception:
raise ChParameterError("not a valid cron interval specification, '{0}'".format(self.interval))
self.loginfo("cron service {0} scheduled using interval spec '{1}'".format(self.name, self.interval))
@asyncio.coroutine
def _cron_hit(self):
if self.enabled:
if not self.family.system_alive:
return
if self.running:
self.logwarn("cron service {0} is still running when next interval expired, will not run again", self.name)
else:
self.loginfo("cron service {0} running CMD ( {1} )", self.name, self.command)
try:
yield from super().start()
except Exception as ex:
self.logerror(ex, "cron service {0} failed to start: {1}", self.name, ex)
yield from self.reset();
@property
def stoppable(self):
return self.scheduled
@asyncio.coroutine
def stop(self):
self._cron.stop()
yield from super().stop()
@asyncio.coroutine
def process_started_co(self):
if self._fut_monitor and not self._fut_monitor.cancelled():
self._fut_monitor.cancel()
self._fut_monitor = None
# We have a successful start. Monitor this service.
self._fut_monitor = asyncio.async(self._monitor_service())
self.add_pending(self._fut_monitor)
@asyncio.coroutine
def _monitor_service(self):
result = yield from self.wait()
if isinstance(result, int) and result > 0:
yield from self._abnormal_exit(result)
else:
yield from self.reset()
================================================
FILE: chaperone/cproc/pt/forking.py
================================================
import asyncio
from chaperone.cproc.subproc import SubProcess
from chaperone.cutil.errors import ChProcessError
class ForkingProcess(SubProcess):
    """
    Process type for services which fork and detach: the launched command is
    expected to exit promptly, leaving a daemon behind which is then located
    via its pidfile (see wait_for_pidfile in SubProcess).
    """

    # exit_kills must not trigger when the short-lived launcher exits; wait
    # for the real (pidfile-registered) process instead.
    defer_exit_kills = True

    @asyncio.coroutine
    def process_started_co(self):
        # The launcher must exit within process_timeout; a non-zero exit is
        # a startup failure unless failures are ignored.
        result = yield from self.timed_wait(self.process_timeout, self._exit_timeout)
        if result is not None and not result.normal_exit:
            if self.ignore_failures:
                self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result))
            else:
                raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result)
        yield from self.wait_for_pidfile()

    def _exit_timeout(self):
        # Invoked by timed_wait when the launcher never exited in time.
        service = self.service
        message = "forking service '{1}' did not exit after {2} second(s), {3}".format(
            service.type,
            service.name, self.process_timeout,
            "proceeding due to 'ignore_failures=True'" if service.ignore_failures else
            "terminating due to 'ignore_failures=False'")
        if not service.ignore_failures:
            self.terminate()
        # CONSISTENCY FIX: raise ChProcessError (already imported here) like
        # other process startup failures, instead of a bare Exception.
        raise ChProcessError(message)
================================================
FILE: chaperone/cproc/pt/inetd.py
================================================
import os
import asyncio
from copy import copy
from chaperone.cutil.logging import error, warn, debug, info
from chaperone.cproc.subproc import SubProcess
from chaperone.cutil.syslog_info import LOG_DAEMON
from chaperone.cutil.errors import ChParameterError
from chaperone.cutil.servers import Server, ServerProtocol
class InetdServiceProtocol(ServerProtocol):
    """
    Handles one inbound connection: detaches the accepted socket and runs
    the configured command with that socket as stdin/stdout/stderr
    (classic inetd behavior).
    """

    _fd = None          # detached file descriptor of the accepted socket

    def acquire_socket(self, sock):
        # Prepare the socket so it's inheritable
        sock.setblocking(True)
        self._fd = sock.detach()
        sock.close()
        future = asyncio.async(self.start_socket_process(self._fd))
        future.add_done_callback(self._done)
        self.process.counter += 1
        return True

    def _done(self, f):
        # Close the socket regardless
        if self._fd is not None:
            os.close(self._fd)

    @asyncio.coroutine
    def start_socket_process(self, fd):
        # Launch the service command with fd wired to all three std streams.
        process = self.process
        service = process.service
        if not process.family.system_alive:
            process.logdebug("{0} received connection on port {1}; ignored, system no longer alive".format(service.name, service.port))
            return
        process.logdebug("{0} received connection on port {2}; attempting start '{1}'... ".format(service.name, " ".join(service.exec_args),
                                                                                                  service.port))
        kwargs = {'stdout': fd,
                  'stderr': fd,
                  'stdin': fd}
        if service.directory:
            kwargs['cwd'] = service.directory
        env = process.get_expanded_environment().get_public_environment()
        if service.debug:
            if not env:
                process.logdebug("{0} environment is empty", service.name)
            else:
                process.logdebug("{0} environment:", service.name)
                for k,v in env.items():
                    process.logdebug(" {0} = '{1}'".format(k,v))
        create = asyncio.create_subprocess_exec(*service.exec_args, preexec_fn=process._setup_subprocess,
                                                env=env, **kwargs)
        proc = self._proc = yield from create
        self.pid = proc.pid
        process.logdebug("{0} instance connected to port {1}", service.name, service.port)
        process.add_process(proc)
        yield from proc.wait()
        process.remove_process(proc)
        # NOTE(review): '.normal_exit' presumes chaperone's asyncio patches
        # make returncode a ProcStatus (a plain int has no such attribute) —
        # confirm against chaperone.cutil.patches.
        if not proc.returncode.normal_exit:
            self.logerror("{2} exit status for pid={0} is '{1}'".format(proc.pid, proc.returncode, service.name))
class InetdService(Server):
    """Listening server which spawns an InetdServiceProtocol per connection."""

    def __init__(self, process):
        super().__init__()
        self.process = process

    def _create_server(self):
        # Listen on all interfaces at the service's configured port.
        loop = asyncio.get_event_loop()
        factory = InetdServiceProtocol.buildProtocol(self, process=self.process)
        return loop.create_server(factory, '0.0.0.0', self.process.port)
class InetdProcess(SubProcess):
    """
    Process type which listens on a TCP port and launches one command
    instance per accepted connection (inetd-style).
    """

    syslog_facility = LOG_DAEMON

    server = None       # InetdService while listening, else None
    counter = 0         # total connections accepted so far

    def __init__(self, service, family=None):
        super().__init__(service, family)
        self._proclist = set()      # per-connection children still running
        if not service.port:
            raise ChParameterError("inetd-type service {0} requires 'port=' parameter".format(self.name))

    def add_process(self, proc):
        self._proclist.add(proc)

    def remove_process(self, proc):
        self._proclist.discard(proc)

    @property
    def scheduled(self):
        # While listening, new processes may be started at any time.
        return self.server is not None

    @property
    def note(self):
        # Status annotation; None when not listening.
        if self.server:
            msg = "waiting on port " + str(self.port)
            if self.counter:
                msg += "; req recvd = " + str(self.counter)
            if len(self._proclist):
                msg += "; running = " + str(len(self._proclist))
            return msg

    @asyncio.coroutine
    def start_subprocess(self):
        """
        Takes over process startup and sets up our own server socket.
        """
        self.server = InetdService(self)
        yield from self.server.run()
        self.loginfo("inetd service {0} listening on port {1}".format(self.name, self.port))

    @asyncio.coroutine
    def reset(self, dependents = False, enable = False, restarts_ok = False):
        # Stop listening and terminate children still serving connections.
        if self.server:
            self.server.close()
            self.server = None
        plist = copy(self._proclist)
        if plist:
            self.logwarn("{0} terminating {1} processes on port {2} that are still running".format(self.name, len(plist), self.port))
            for p in plist:
                p.terminate()
        yield from super().reset(dependents, enable, restarts_ok)

    @asyncio.coroutine
    def final_stop(self):
        yield from self.reset()
================================================
FILE: chaperone/cproc/pt/notify.py
================================================
import asyncio
import socket
import re
from functools import partial
from chaperone.cutil.errors import ChProcessError
from chaperone.cutil.proc import ProcStatus
from chaperone.cutil.notify import NotifyListener
from chaperone.cproc.subproc import SubProcess
class NotifyProcess(SubProcess):
process_timeout = 300
defer_exit_kills = True
_fut_monitor = None
_listener = None
_ready_event = None
def _close_listener(self):
if self._listener:
self._listener.close()
self._listener = None
@asyncio.coroutine
def process_prepare_co(self, environ):
if not self._listener:
self._listener = NotifyListener('@/chaperone/' + self.service.name,
onNotify = self._notify_received)
yield from self._listener.run()
environ['NOTIFY_SOCKET'] = self._listener.socket_name
# Now, set up an event which is triggered upon ready
self._ready_event = asyncio.Event()
def _notify_timeout(self):
service = self.service
message = "notify service '{1}' did not receive ready notification after {2} second(s), {3}".format(
service.type,
service.name, self.process_timeout,
"proceeding due to 'ignore_failures=True'" if service.ignore_failures else
"terminating due to 'ignore_failures=False'")
if not service.ignore_failures:
self.terminate()
raise ChProcessError(message)
@asyncio.coroutine
def reset(self, dependents = False, enable = False, restarts_ok = False):
yield from super().reset(dependents, enable, restarts_ok)
self._close_listener()
@asyncio.coroutine
def final_stop(self):
yield from super().final_stop()
self._close_listener()
@asyncio.coroutine
def process_started_co(self):
if self._fut_monitor and not self._fut_monitor.cancelled():
self._fut_monitor.cancel()
self._fut_monitor = None
yield from self.do_startup_pause()
self._fut_monitor = asyncio.async(self._monitor_service())
self.add_pending(self._fut_monitor)
if self._ready_event:
try:
if not self.process_timeout:
raise asyncio.TimeoutError()
yield from asyncio.wait_for(self._ready_event.wait(), self.process_timeout)
except asyncio.TimeoutError:
self._ready_event = None
self._notify_timeout()
else:
if self._ready_event:
self._ready_event = None
rc = self.returncode
if rc is not None and not rc.normal_exit:
if self.ignore_failures:
warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, rc))
else:
raise ChProcessError("{0} failed with reported error {1}".format(self.name, rc), resultcode = rc)
@asyncio.coroutine
def _monitor_service(self):
"""
We only care about errors here. The rest is dealt with by having notifications
occur.
"""
result = yield from self.wait()
if isinstance(result, int) and result > 0:
self._setready() # simulate ready
self._ready_event = None
self._close_listener()
yield from self._abnormal_exit(result)
def _notify_received(self, which, var, value):
callfunc = getattr(self, "notify_" + var.upper(), None)
#print("NOTIFY RECEIVED", var, value)
if callfunc:
callfunc(value)
def _setready(self):
if self._ready_event:
self._ready_event.set()
return True
return False
def notify_MAINPID(self, value):
try:
pid = int(value)
except ValueError:
self.logdebug("{0} got MAINPID={1}, but not a valid pid#", self.name, value)
return
self.pid = pid
def notify_BUSERROR(self, value):
code = ProcStatus(value)
if not self._setready():
self.process_exit(code)
else:
self.returncode = code
def notify_ERRNO(self, value):
try:
intval = int(value)
except ValueError:
self.logdebug("{0} got ERROR={1}, not a valid error code", self.name, value)
return
code = ProcStatus(intval << 8)
if not self._setready():
self.process_exit(code)
else:
self.returncode = code
def notify_READY(self, value):
if value == "1":
self._setready()
def notify_STATUS(self, value):
self.note = value
@property
def status(self):
if self._ready_event:
return "activating"
return super().status
================================================
FILE: chaperone/cproc/pt/oneshot.py
================================================
import asyncio
from chaperone.cproc.subproc import SubProcess
from chaperone.cutil.errors import ChProcessError
class OneshotProcess(SubProcess):
    """
    Process type for one-off commands: the process runs once and must exit
    (normally) within process_timeout; nothing is monitored afterwards.
    """

    # default timeout for a oneshot, in seconds
    # (comment previously said "90 seconds", contradicting the value)
    process_timeout = 60.0

    @asyncio.coroutine
    def process_started_co(self):
        result = yield from self.timed_wait(self.process_timeout, self._exit_timeout)
        if result is not None and not result.normal_exit:
            if self.ignore_failures:
                # BUGFIX: this previously called the global warn(), which is
                # not imported in this module and raised NameError; use the
                # service logger (as ForkingProcess does).
                self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result))
            else:
                raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result)

    def _exit_timeout(self):
        # Invoked by timed_wait when the process is still running at timeout.
        service = self.service
        message = "oneshot service '{1}' did not exit after {2} second(s), {3}".format(
            service.type,
            service.name, self.process_timeout,
            "proceeding due to 'ignore_failures=True'" if service.ignore_failures else
            "terminating due to 'ignore_failures=False'")
        if not service.ignore_failures:
            self.terminate()
        # CONSISTENCY FIX: raise ChProcessError (already imported here) like
        # other process startup failures, instead of a bare Exception.
        raise ChProcessError(message)
================================================
FILE: chaperone/cproc/pt/simple.py
================================================
import asyncio
from chaperone.cproc.subproc import SubProcess
class SimpleProcess(SubProcess):
_fut_monitor = None
@asyncio.coroutine
def process_started_co(self):
if self._fut_monitor and not self._fut_monitor.cancelled():
self._fut_monitor.cancel()
self._fut_monitor = None
# We wait a short time just to see if the process errors out immediately. This avoids a retry loop
# and catches any immediate failures now.
yield from self.do_startup_pause()
# If there is a pidfile, sit here and wait for a bit
yield from self.wait_for_pidfile()
# We have a successful start. Monitor this service.
self._fut_monitor = asyncio.async(self._monitor_service())
self.add_pending(self._fut_monitor)
@asyncio.coroutine
def _monitor_service(self):
result = yield from self.wait()
if isinstance(result, int) and result > 0:
yield from self._abnormal_exit(result)
================================================
FILE: chaperone/cproc/subproc.py
================================================
import os
import asyncio
import shlex
import importlib
import signal
import errno
from functools import partial
from time import time, sleep
import chaperone.cutil.syslog_info as syslog_info
from chaperone.cutil.env import Environment, ENV_SERIAL, ENV_SERVTIME
from chaperone.cutil.logging import warn, info, debug, error
from chaperone.cutil.proc import ProcStatus
from chaperone.cutil.misc import lazydict, lookup_user, get_signal_name, executable_path
from chaperone.cutil.errors import ChNotFoundError, ChProcessError, ChParameterError
from chaperone.cutil.format import TableFormatter
@asyncio.coroutine
def _process_logger(stream, kind, service):
    """Forward one of a child's output streams to syslog, line by line."""
    name = service.name.replace('.service', '')
    # stderr maps to warning because stderr output is "to be considered" and
    # not strictly erroneous; stdout maps to info.
    emit = warn if kind == 'stderr' else info
    while True:
        data = yield from stream.readline()
        if not data:
            return
        line = data.decode('ascii', 'ignore').rstrip()
        if not line:
            continue        # ignore blank lines in stdout/stderr
        emit(line, program=name, pid=service.pid, facility=syslog_info.LOG_DAEMON)
class SubProcess(object):
    """
    A single managed service process.  Type-specific behavior (simple,
    oneshot, forking, notify, cron, inetd) lives in subclasses which are
    loaded on demand by __new__.
    """

    service = None              # service object
    family = None

    process_timeout = 30.0      # process_timeout will be set to this unless it is overridden by
                                # the service entry

    syslog_facility = None      # specifies any additional syslog facility to use when using
                                # logerror, logdebug, logwarn, etc...

    start_attempted = False     # used to determine if a service is truly dormant

    defer_exit_kills = False    # if true, then exit_kills will wait until a proper PID is returned
                                # from a subprocess, then will kill when the real process exits

    error_count = 0             # counts errors for informational purposes

    _proc = None
    _pid = None                 # the pid, often associated with _proc, but not necessarily in the
                                # case of notify processes
    _returncode = None          # an alternate returncode, set with returncode property
    _exit_event = None          # an event to be fired if an exit occurs, in the case of an
                                # attached PID
    _orig_executable = None     # original unexpanded exec_args[0]
    _pwrec = None               # the pwrec looked up for execution user/group
    _cond_starting = None       # a condition which, if present, indicates that this service is starting
    _cond_exception = None      # exception which was raised during startup (for other waiters)
    _started = False            # true if a start has occurred, either successful or not
    _restarts_allowed = None    # number of starts permitted before we give up (if None then restarts allowed according to service def)
    _prereq_cache = None        # cached tuple of prerequisite SubProcess objects
    _procenv = None             # process environment ready to be expanded
    _pending = None             # pending futures
    _note = None                # free-form status annotation

    # Class variables
    _cls_ptdict = lazydict()    # dictionary of process types
    _cls_serial = 0             # serial number for process creation
def __new__(cls, service, family=None):
    """
    New Subprocesses are managed by subclasses derived from SubProcess so that
    complex process behavior can be isolated and loaded only when needed. That
    keeps this basic superclass logic less convoluted.
    """
    # If we are trying to create a subclass, just inherit __new__ simply
    if cls is not SubProcess:
        return super(SubProcess, cls).__new__(cls)
    # Lookup and cache the class object used to create this type.
    stype = service.type
    ptcls = SubProcess._cls_ptdict.get(stype)
    if not ptcls:
        # e.g. type 'cron' -> chaperone.cproc.pt.cron.CronProcess
        mod = importlib.import_module('chaperone.cproc.pt.' + stype)
        ptcls = SubProcess._cls_ptdict[stype] = getattr(mod, stype.capitalize() + 'Process')
        assert issubclass(ptcls, cls)
    return ptcls(service, family)
def __init__(self, service, family=None):
    """
    Capture the service definition, validate its command, and (for enabled
    services) immediately verify the executable and user credentials.
    """
    self.service = service
    self.family = family
    self._pending = set()
    if service.process_timeout is not None:
        self.process_timeout = service.process_timeout
    if not service.environment:
        self._procenv = Environment()
    else:
        self._procenv = service.environment
    if not service.exec_args:
        raise ChParameterError("No command or arguments provided for service")
    # If the service is enabled, assure we check for the presence of the executable now. This is
    # to catch any start-up situations (such as cron jobs without their executables being present).
    # However, we don't check this if a service is disabled.
    self._orig_executable = service.exec_args[0]
    if service.enabled:
        self._try_to_enable()
def __getattr__(self, name):
    "Proxies value from the service description if we don't override them."
    # Only invoked when normal attribute lookup fails on this object.
    return getattr(self.service, name)
def __setattr__(self, name, value):
    """
    Any service object attribute supercedes our own except for privates or those we
    keep separately, in which case there is a distinction.
    """
    # BUGFIX: the original tested name[0:0] (always the empty string, never
    # equal to '_'), so the documented "except for privates" exclusion never
    # applied.  Test the first character instead.
    if name[:1] != '_' and hasattr(self.service, name) and not hasattr(self, name):
        setattr(self.service, name, value)
    else:
        object.__setattr__(self, name, value)
def _setup_subprocess(self):
    # preexec_fn: runs in the child after fork(), before exec().
    if self._pwrec:
        # Drop the group first, then the user (the reverse order would
        # lose the permission needed to change groups).
        os.setgid(self._pwrec.pw_gid)
        os.setuid(self._pwrec.pw_uid)
        if self.setpgrp:
            os.setpgrp()
        if not self.directory:
            try:
                os.chdir(self._pwrec.pw_dir)
            except Exception as ex:
                pass        # best-effort: stay in the current directory
    return
def _get_states(self):
    """Space-separated summary of this process's current state flags."""
    flags = (('started', self.started),
             ('failed', self.failed),
             ('ready', self.ready),
             ('running', self.running))
    return ' '.join(label for label, active in flags if active)
# pid and returncode management

@property
def pid(self):
    # May differ from _proc.pid (e.g. a notify service re-announcing MAINPID).
    return self._pid
@pid.setter
def pid(self, newpid):
    """
    Attach this service to a (possibly different) PID, verifying that the
    process exists first.  Raises ChProcessError if it does not.
    """
    # BUGFIX: compare by value, not identity.  Equal PIDs can be distinct
    # int objects, so the original 'is not' test could log a spurious
    # "changing PID to X (from X)" message.
    if self._pid is not None and newpid is not None and self._pid != newpid:
        self.logdebug("{0} changing PID to {1} (from {2})", self.name, newpid, self._pid)
    try:
        # Probe the process group just to confirm the process exists.
        pgid = os.getpgid(newpid)
    except ProcessLookupError as ex:
        raise ChProcessError("{0} attempted to attach the process with PID={1} but there is no such process".
                             format(self.name, newpid), errno = ex.errno)
    self._attach_pid(newpid)
    self._pid = newpid
@property
def returncode(self):
    # An explicitly-assigned code (e.g. reported via sd_notify) takes
    # precedence over the subprocess's own exit status.
    if self._returncode is not None:
        return self._returncode
    return self._proc and self._proc.returncode

@returncode.setter
def returncode(self, val):
    self._returncode = ProcStatus(val)
    self.logdebug("{0} got explicit return code '{1}'", self.name, self._returncode)
# Logging methods which may do special things for this service

def loginfo(self, *args, **kwargs):
    info(*args, facility=self.syslog_facility, **kwargs)

def logerror(self, *args, **kwargs):
    # Errors are also tallied for informational/status purposes.
    self.error_count += 1
    error(*args, facility=self.syslog_facility, **kwargs)

def logwarn(self, *args, **kwargs):
    warn(*args, facility=self.syslog_facility, **kwargs)

def logdebug(self, *args, **kwargs):
    debug(*args, facility=self.syslog_facility, **kwargs)
@property
def note(self):
    # Free-form one-line annotation surfaced in status displays.
    return self._note

@note.setter
def note(self, value):
    self._note = value
@property
def status(self):
    """
    One-word status string for display: 'starting', 'running', 'started',
    a brief exit description, 'disabled', or the subclass default.
    """
    serv = self.service
    proc = self._proc
    rs = ""
    # Suffix showing how many automatic restarts remain, e.g. "+r#2".
    if serv.restart and self._restarts_allowed is not None and self._restarts_allowed > 0:
        rs = "+r#" + str(self._restarts_allowed)
    if self._cond_starting:
        return "starting"
    if proc:
        # An explicitly-set returncode overrides the subprocess's own.
        rc = self._returncode if self._returncode is not None else proc.returncode
        if rc is None:
            return "running"
        elif rc.normal_exit and self._started:
            return "started"
        elif rc:
            return rc.briefly + rs
    if not serv.enabled:
        return "disabled"
    return self.default_status()
def default_status(self):
    # Fallback when no process or exit information applies.
    return 'ready' if self.ready else None
@property
def enabled(self):
    return self.service.enabled

@enabled.setter
def enabled(self, val):
    # Enabling is conditional: _try_to_enable() verifies the executable
    # and credentials before flipping the flag.  Disabling is direct.
    # BUGFIX: previously, assigning True to an already-enabled service fell
    # into the else branch and disabled it; make enabling idempotent.
    if val:
        if not self.service.enabled:
            self._try_to_enable()
    else:
        self.service.enabled = False
def _try_to_enable(self):
    # Resolve the executable path and user credentials; on a missing
    # executable either quietly disable the service (optional or
    # ignore_failures) or raise ChNotFoundError.
    service = self.service
    if self._orig_executable:
        try:
            service.exec_args[0] = executable_path(self._orig_executable, service.environment.expanded())
        except FileNotFoundError:
            if service.optional:
                service.enabled = False
                self.loginfo("optional service {0} disabled since '{1}' is not present".format(self.name, self._orig_executable))
                return
            elif service.ignore_failures:
                service.enabled = False
                self.logwarn("(ignored) service {0} executable '{1}' is not present".format(self.name, self._orig_executable))
                return
            raise ChNotFoundError("executable '{0}' not found".format(service.exec_args[0]))
    # Now we know this service is truly enabled, we need to assure its credentials
    # are correct.
    senv = service.environment
    if senv and senv.uid is not None and not self._pwrec:
        self._pwrec = lookup_user(senv.uid, senv.gid)
    service.enabled = True
@property
def scheduled(self):
    """
    True if this is a process which WILL fire up a process in the future.
    A "scheduled" process does not include one which will be started manually,
    nor does it include processes which will be started due to dependencies.
    Processes like "cron" and "inetd" return True if they are active and
    may start processes in the future.
    """
    return False
@property
def kill_signal(self):
    """Signal used to stop the service: the configured override, or SIGTERM."""
    configured = self.service.kill_signal
    return signal.SIGTERM if configured is None else configured
@property
def running(self):
    "True if this process has started, is running, and has a pid"
    # A live subprocess has no returncode yet.
    return self._proc and self._proc.returncode is None
@property
def started(self):
    """
    True if this process has started normally. It may have forked, or executed, or is scheduled.
    """
    return self._started
@property
def stoppable(self):
    """
    True if this process can be stopped. By default, returns True if the service is started,
    but some job types such as cron and inetd may be stoppable even when processes themselves
    are not running.
    """
    return self.started
@property
def failed(self):
    "True if this process has failed, either during startup or later."
    # Either an explicitly-reported abnormal code, or an abnormal exit
    # status from the subprocess itself.
    return ((self._returncode is not None and not self._returncode.normal_exit) or
            self._proc and (self._proc.returncode is not None and not self._proc.returncode.normal_exit))
@property
def ready(self):
    """
    True if this process is ready to run, or is running.  When not yet
    started, all enabled prerequisites must themselves be ready.
    """
    if not self.enabled or self.failed:
        return False
    if self.started:
        return True
    if any(p.enabled and not p.ready for p in self.prerequisites):
        return False
    return True
@property
def prerequisites(self):
    """
    Return a list of prerequisite objects. Right now, these must be within our family
    but this may change, so don't refer to the family or the prereq in services. Use this
    instead.
    """
    if self._prereq_cache is None:
        # Names with no matching family member are silently skipped.
        prereq = (self.family and self.service.prerequisites) or ()
        prereq = self._prereq_cache = tuple(self.family[p] for p in prereq if p in self.family)
    return self._prereq_cache
@asyncio.coroutine
def start(self):
    """
    Runs this service if it is enabled and has not already been started. Starts
    prerequisite services first. A service is considered started if
    a) It is enabled, and started up normally.
    b) It is disabled, and an attempt was made to start it.
    c) An error occurred, it did not start, but failures were an acceptable
       outcome and the service has not been reset since the errors occurred.
    """
    service = self.service
    if self._started:
        self.logdebug("service {0} already started. further starts ignored.", service.name)
        return
    if not service.enabled:
        self.logdebug("service {0} not enabled, will be skipped", service.name)
        return
    else:
        self.logdebug("service {0} enabled, queueing start request", service.name)
    # If this service is already starting, then just wait until it completes.
    cond_starting = self._cond_starting
    if cond_starting:
        yield from cond_starting.acquire()
        yield from cond_starting.wait()
        cond_starting.release()
        # This is an odd situation. Since every waiter expects start() to succeed, or
        # raise an exception, we need to be sure we raise the exception that happened
        # in the original start() request.
        if self._cond_exception:
            raise self._cond_exception
        return
    cond_starting = self._cond_starting = asyncio.Condition()
    self._cond_exception = None
    # Now we can procede
    self.start_attempted = True
    try:
        prereq = self.prerequisites
        if prereq:
            for p in prereq:
                yield from p.start()
            self.logdebug("service {0} prerequisites satisfied", service.name)
        if self.family:
            # idle only makes sense for families; only the first IDLE
            # service triggers the one-time delay (family._idle_hit).
            if "IDLE" in service.service_groups and service.idle_delay and not hasattr(self.family, '_idle_hit'):
                self.family._idle_hit = True
                self.logdebug("IDLE transition hit. delaying for {0} seconds", service.idle_delay)
                yield from asyncio.sleep(service.idle_delay)
            # STOP if the system is no longer alive because a prerequisite failed
            if not self.family.system_alive:
                return
        try:
            yield from self.start_subprocess()
        except Exception as ex:
            if service.ignore_failures:
                self.loginfo("service {0} ignoring failures. Exception: {1}", service.name, ex)
            else:
                # Record the exception so concurrent waiters re-raise it too.
                self._cond_exception = ex
                self.logdebug("{0} received exception during attempted start. Exception: {1}", service.name, ex)
                raise
    finally:
        # Always mark started and release anyone waiting on the condition.
        self._started = True
        yield from cond_starting.acquire()
        cond_starting.notify_all()
        cond_starting.release()
        self._cond_starting = None
        self.logdebug("{0} notified waiters upon completion", service.name)
def get_expanded_environment(self):
    """
    Build and return the fully-expanded environment for a new process launch.

    Each call bumps the class-wide launch serial counter, then stamps the
    serial number and the launch timestamp into the process environment
    before asking it to expand itself.
    """
    SubProcess._cls_serial += 1
    procenv = self._procenv
    procenv[ENV_SERIAL] = str(SubProcess._cls_serial)
    procenv[ENV_SERVTIME] = str(int(time()))
    return procenv.expanded()
@asyncio.coroutine
def start_subprocess(self):
    """
    Create and launch the actual OS process for this service.

    Builds the subprocess keyword arguments (output capture, working
    directory), expands the environment, gives process-type subclasses a
    chance to prepare (process_prepare_co) and to react once started
    (process_started_co), then launches via asyncio.create_subprocess_exec.
    Propagates any exception raised during launch.
    """
    service = self.service
    self.logdebug("{0} attempting start '{1}'... ".format(service.name, " ".join(service.exec_args)))
    kwargs = dict()
    # Capture stdout/stderr only when they are routed to the logging system;
    # otherwise the child inherits our own descriptors.
    if service.stdout == 'log':
        kwargs['stdout'] = asyncio.subprocess.PIPE
    if service.stderr == 'log':
        kwargs['stderr'] = asyncio.subprocess.PIPE
    if service.directory:
        kwargs['cwd'] = service.directory
    env = self.get_expanded_environment()
    # Hook for subclasses to adjust the environment before launch.
    yield from self.process_prepare_co(env)
    if env:
        env = env.get_public_environment()
    if service.debug:
        if not env:
            self.logdebug("{0} environment is empty", service.name)
        else:
            self.logdebug("{0} environment:", service.name)
            for k,v in env.items():
                self.logdebug(" {0} = '{1}'".format(k,v))
    create = asyncio.create_subprocess_exec(*service.exec_args, preexec_fn=self._setup_subprocess,
                                            env=env, **kwargs)
    if service.exit_kills:
        # Brief pause so the warning is visible before the process starts.
        self.logwarn("system will be killed when '{0}' exits", service.exec_args[0])
        yield from asyncio.sleep(0.2)
    proc = self._proc = yield from create
    self.pid = proc.pid
    # Attach logger tasks to any captured output streams.
    if service.stdout == 'log':
        self.add_pending(asyncio.async(_process_logger(proc.stdout, 'stdout', self)))
    if service.stderr == 'log':
        self.add_pending(asyncio.async(_process_logger(proc.stderr, 'stderr', self)))
    if service.exit_kills and not self.defer_exit_kills:
        self.add_pending(asyncio.async(self._wait_kill_on_exit()))
    yield from self.process_started_co()
    self.logdebug("{0} successfully started", service.name)
@asyncio.coroutine
def process_prepare_co(self, environment):
    """
    Hook: invoked just before the subprocess is created, with the expanded
    environment.  Subclasses may modify the environment or perform
    pre-launch setup.  Default implementation does nothing.
    """
    pass
@asyncio.coroutine
def process_started_co(self):
    """
    Hook: invoked after the subprocess has been created and registered.
    Subclasses override this to implement type-specific start-up handling
    (e.g. waiting for readiness).  Default implementation does nothing.
    """
    pass
@asyncio.coroutine
def wait_for_pidfile(self):
    """
    If the pidfile option was specified, then wait until we find a valid pidfile,
    and register the new PID.  This is not done automatically, but is implemented
    here as a utility for process types that need it.

    Raises ChProcessError if the file never appears before process_timeout
    expires, or if it appeared but never contained a valid integer.
    """
    if not self.pidfile:
        return
    self.logdebug("{0} waiting for PID file: {1}".format(self.name, self.pidfile))
    pidsleep = 0.02             # work incrementally up to no more than process_timeout
    maxsleep = 3                # ceiling for the exponential backoff interval
    expires = time() + self.process_timeout
    last_ex = None
    while time() < expires:
        if not self.family.system_alive:
            return
        yield from asyncio.sleep(pidsleep)
        # ramp up until we hit the backoff ceiling
        pidsleep = min(pidsleep*2, maxsleep)
        try:
            # use a context manager so the descriptor is closed promptly on
            # every retry rather than waiting for garbage collection
            with open(self.pidfile, 'r') as pf:
                newpid = int(pf.read().strip())
        except FileNotFoundError:
            continue
        except Exception:
            # Don't raise this immediately. The service may create the file before writing the PID.
            last_ex = ChProcessError("{0} found pid file '{1}' but contents did not contain an integer".format(
                self.name, self.pidfile), errno = errno.EINVAL)
            continue
        self.pid = newpid
        return
    if last_ex is not None:
        raise last_ex
    raise ChProcessError("{0} did not find pid file '{1}' before {2}sec process_timeout expired".format(
        self.name, self.pidfile, self.process_timeout), errno = errno.ENOENT)
@asyncio.coroutine
def _wait_kill_on_exit(self):
    # Used for exit_kills services: once this process terminates (for any
    # reason), bring the whole system down.
    yield from self.wait()
    self._kill_system()
def _attach_pid(self, newpid):
    """
    Attach this process to a new PID, creating a condition which will be used by
    the child watcher to determine when the PID has exited.

    Note: no asyncio subprocess object exists for an attached PID; exit is
    signalled solely through self._exit_event (set in process_exit).
    """
    with asyncio.get_child_watcher() as watcher:
        watcher.add_child_handler(newpid, self._child_watcher_callback)
    self._exit_event = asyncio.Event()
def _child_watcher_callback(self, pid, returncode):
    # May be invoked outside the event loop thread; marshal the exit
    # notification back onto the loop.
    asyncio.get_event_loop().call_soon_threadsafe(self.process_exit, returncode)
def process_exit(self, code):
    """
    Record process termination.  'code' is a status object exposing
    .normal_exit and .signal (see ProcStatus in cutil.proc).  Wakes any
    waiters, handles exit_kills, and schedules abnormal-exit recovery when
    the exit was neither normal nor our own kill signal.
    """
    self.returncode = code
    if self._exit_event:
        self._exit_event.set()
        self._exit_event = None
    if self.exit_kills:
        self.logwarn("{0} terminated with exit_kills enabled", self.service.name);
        # Since we're dead, and the system is going away, disable any process management
        self._proc = None
        self.pid = None
        self._kill_system();
    # A normal exit, or death by our own kill signal, needs no recovery.
    if code.normal_exit or self.kill_signal == code.signal:
        return
    asyncio.async(self._abnormal_exit(code))
@asyncio.coroutine
def _abnormal_exit(self, code):
    """
    Handle an abnormal termination: schedule a restart when permitted
    (honoring restart_limit and restart_delay), otherwise either ignore the
    failure (ignore_failures) or log an error.
    """
    service = self.service
    if service.exit_kills:
        self.logwarn("{0} terminated abnormally with {1}", service.name, code)
        return
    # A disabled service should not do recovery
    if not service.enabled:
        return
    if self._started and service.restart:
        # lazily initialize the remaining-restart counter from the limit
        if self._restarts_allowed is None:
            self._restarts_allowed = service.restart_limit
        if self._restarts_allowed > 0:
            self._restarts_allowed -= 1
            controller = self.family.controller
            if controller.system_alive:
                if service.restart_delay:
                    self.loginfo("{0} pausing between restart retries ({1} left)", service.name, self._restarts_allowed)
                    yield from asyncio.sleep(service.restart_delay)
                # re-check: the system may have died during the delay
                if controller.system_alive:
                    yield from self.reset()
                    #yield from self.start()
                    f = asyncio.async(self.start()) # queue it since we will just return here
                    f.add_done_callback(self._restart_callback)
            return
    if service.ignore_failures:
        self.logdebug("{0} abnormal process exit ignored due to ignore_failures=true", service.name)
        yield from self.reset()
        return
    self.logerror("{0} terminated abnormally with {1}", service.name, code)
def _restart_callback(self, fut):
    # Catches a restart result, reporting it as a warning, and either passing back to _abnormal_exit
    # or accepting glorious success.
    ex = fut.exception()
    if not ex:
        self.logdebug("{0} restart succeeded", self.name)
    else:
        self.logwarn("{0} restart failed: {1}", self.name, ex)
        # feed the failure back into recovery; _proc may already be gone,
        # in which case None is passed as the code
        asyncio.async(self._abnormal_exit(self._proc and self._proc.returncode))
def _kill_system(self):
    # Delegate a full system shutdown to the top-level controller.
    self.family.controller.kill_system()
def add_pending(self, future):
    """Track *future* in the pending set; it removes itself upon completion."""
    self._pending.add(future)
    future.add_done_callback(lambda fut: self._pending.discard(fut))
@asyncio.coroutine
def reset(self, dependents = False, enable = False, restarts_ok = False):
    """
    Return this service to its idle (not-started) state, terminating any
    running process first.

    dependents  -- also reset any prerequisite services which are not ready
    enable      -- mark the service (and qualifying dependents) enabled
    restarts_ok -- clear the restart counter so a fresh restart budget applies
    """
    self.logdebug("{0} received reset", self.name)
    if self._exit_event:
        # attached-PID case: we have no subprocess object to wait on
        self.terminate()
    elif self._proc:
        if self._proc.returncode is None:
            # still running: terminate and wait for it to actually exit
            self.terminate()
            yield from self.wait()
        self.pid = None
        self._proc = None
    self._started = False
    if restarts_ok:
        self._restarts_allowed = None
    if enable:
        self.enabled = True
    # If there is a pidfile, then remove it
    if self.pidfile:
        try:
            os.remove(self.pidfile)
        except Exception:
            # best-effort removal; the file may never have been created
            pass
    # Reset any non-ready dependents
    if dependents:
        for p in self.prerequisites:
            if not p.ready and (enable or p.enabled):
                yield from p.reset(dependents, enable, restarts_ok)
@asyncio.coroutine
def stop(self):
    "Stop the service by resetting it; also clears the restart counter."
    yield from self.reset(restarts_ok = True)
@asyncio.coroutine
def final_stop(self):
    "Called when the whole system is killed, but before drastic measures are taken."
    # Nobody will wait on an attached-PID exit event from here on.
    self._exit_event = None
    self.terminate()
    # Cancel anything still outstanding (loggers, exit watchers, ...).
    for pending_task in list(self._pending):
        if pending_task.cancelled():
            continue
        pending_task.cancel()
def terminate(self):
    """
    Send the configured kill signal (or a default terminate) to the managed
    subprocess, and to any separately-registered PID (e.g. one discovered
    via a pidfile) as well.
    """
    proc = self._proc
    otherpid = self.pid
    if proc:
        if otherpid == proc.pid:
            otherpid = None     # avoid signalling the same process twice
        if proc.returncode is None:
            if self.service.kill_signal is not None: # explicitly check service
                self.logdebug("using {0} to terminate {1}", get_signal_name(self.kill_signal), self.name)
                proc.send_signal(self.kill_signal)
            else:
                proc.terminate()
    if otherpid:
        self.logdebug("using {0} to terminate {1}", get_signal_name(self.kill_signal), self.name)
        try:
            os.kill(otherpid, self.kill_signal)
        except Exception as ex:
            # BUGFIX: the message previously interpolated the exception where
            # the service name belonged and dropped the exception text after
            # the trailing colon.
            warn("{0} could not be killed using PID={1}: {2}".format(self.name, otherpid, ex))
    # BUGFIX: was 'self._pid = None'; the rest of this class consistently
    # clears the public 'pid' attribute (see reset() and process_exit()).
    self.pid = None
@asyncio.coroutine
def do_startup_pause(self):
    """
    Wait a short time just to see if the process errors out immediately. This avoids a retry loop
    and catches any immediate failures now. Can be used by process implementations if needed.

    Raises ChProcessError when the process exits abnormally during the pause
    (unless ignore_failures is set, in which case only a warning is logged).
    """
    if not self.startup_pause:
        return
    try:
        result = yield from self.timed_wait(self.startup_pause)
    except asyncio.TimeoutError:
        # still running after the pause -- that is the success case
        result = None
    if result is not None and not result.normal_exit:
        if self.ignore_failures:
            warn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result))
        else:
            raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result),
                                 resultcode = result)
@asyncio.coroutine
def timed_wait(self, timeout, func = None):
    """
    Timed wait waits for process completion.  If process completion occurs normally, the
    returncode for process startup is returned.
    Upon timeout either:
    1.  asyncio.TimeoutError is raised if 'func' is not provided, or...
    2.  func is called and the result is returned from timed_wait().
    """
    try:
        if not timeout:
            raise asyncio.TimeoutError() # funny situation, but settings can cause this if users attempt it
        # shield() so that a timeout cancels only the wait_for wrapper,
        # not the underlying wait() on the process itself
        result = yield from asyncio.wait_for(asyncio.shield(self.wait()), timeout)
    except asyncio.TimeoutError:
        if not func:
            raise
        result = func()
    except asyncio.CancelledError:
        # our wait was cancelled externally; report the last known returncode
        result = self.returncode
    return result
@asyncio.coroutine
def wait(self):
    """
    Wait until the process terminates and return its exit status.

    Two cases: a normally-managed subprocess (self._proc) is awaited
    directly; an attached PID (self._exit_event, see _attach_pid) is awaited
    via the event set by process_exit().  Raises if neither applies.
    """
    proc = self._proc
    if self._exit_event:
        yield from self._exit_event.wait()
    elif proc:
        yield from proc.wait()
    else:
        raise Exception("Process not running (or attached), can't wait")
    # BUGFIX: in the attached-PID case there is no subprocess object, so
    # dereferencing proc.returncode/proc.pid raised AttributeError; the exit
    # status for that case is stored on self by process_exit().
    returncode = proc.returncode if proc else self.returncode
    pid = proc.pid if proc else self.pid
    if returncode is not None and returncode.normal_exit:
        self.logdebug("{2} exit status for pid={0} is '{1}'".format(pid, returncode, self.name))
    else:
        self.loginfo("{2} exit status for pid={0} is '{1}'".format(pid, returncode, self.name))
    return returncode
class SubProcessFamily(lazydict):
    """
    A dictionary of SubProcess objects keyed by service name, providing
    family-wide start/stop/reset/enable/disable operations which respect
    the services' dependency (startup) order.
    """

    controller = None           # top level system controller
    services_config = None      # the services configuration this family was built from
    _start_time = None          # time() recorded when run() completed; None until then

    def __init__(self, controller, services_config):
        """
        Given a pre-analyzed list of processes, complete with prerequisites, build a process
        family.
        """
        super().__init__()
        self.controller = controller
        self.services_config = services_config
        for s in services_config.get_startup_list():
            self[s.name] = SubProcess(s, family = self)

    def get_status_formatter(self):
        """Return a TableFormatter summarizing every service in the family."""
        df = TableFormatter('pid', 'name', 'enabled', 'status', 'note', sort='name')
        df.add_rows(self.values())
        return df

    @property
    def system_alive(self):
        # Mirrors the controller's liveness flag for convenience.
        return self.controller.system_alive

    def get_scheduled_services(self):
        """Return the list of services which run on a schedule (e.g. cron)."""
        return [s for s in self.values() if s.scheduled]

    def get_status(self):
        """Return a one-line human-readable status summary for the family."""
        if not self._start_time:
            return "Not yet started"
        secs = time() - self._start_time
        total = len(self.values())
        # tally per-service state counters
        scheduled = started = failed = errors = 0
        for s in self.values():
            if s.scheduled:
                scheduled += 1
            if s.started:
                started += 1
            if s.failed:
                failed += 1
            errors += s.error_count
        m,s = divmod(int(secs), 60)
        h,m = divmod(m, 60)
        msg = "Uptime {0:02}:{1:02}:{2:02}; {3} service{4} started".format(h, m, s, started or "No", started != 1 and 's' or '')
        if scheduled:
            msg += "; {0} scheduled".format(scheduled)
        if failed:
            msg += "; {0} failed".format(failed)
        if errors:
            msg += "; {0} total errors".format(errors)
        return msg

    @asyncio.coroutine
    def run(self, servicelist = None):
        """
        Runs the family, starting up services in dependency order.  If any problems
        occur, an exception is raised.  Returns True if any attempts were made to
        start services, otherwise False if the configuration contained no services
        that were enabled and ready to run.
        """
        # Note that all tasks are started simultaneously, but they resolve their
        # interdependencies themselves.
        if not servicelist:
            servicelist = self.values()
        yield from asyncio.gather(*[s.start() for s in servicelist])
        self._start_time = time()
        # Indicate if any attempts were made
        return any(s.start_attempted for s in servicelist)

    def _lookup_services(self, names):
        """
        Resolve service names to SubProcess objects, allowing the '.service'
        suffix to be omitted.  Raises ChParameterError for unknown names.
        """
        result = set()
        for name in names:
            serv = self.get(name)
            if not serv:
                serv = self.get(name + ".service")
            if not serv:
                raise ChParameterError("no such service: " + name)
            result.add(serv)
        return result

    @asyncio.coroutine
    def start(self, service_names, force = False, wait = False, enable = False):
        """
        Start the named services.  Unless 'force' is set, validates that the
        services are enabled (or 'enable' was requested), not already started,
        and ready.  With wait=False the actual start is queued in the background.
        """
        slist = self._lookup_services(service_names)
        not_enab = [s for s in slist if not s.enabled]
        if not force:
            if not_enab and not enable:
                raise Exception("can only start services which have been enabled: " + ", ".join([s.shortname for s in not_enab]))
            started = [s for s in slist if s.started]
            if started:
                raise Exception("can't restart services without stop/reset: " + ", ".join([s.shortname for s in started]))
            notready = [s for s in slist if not s.ready and (s.enabled and not enable)]
            if notready:
                raise Exception("services or their prerequisites are not ready: " + ", ".join([s.shortname for s in notready]))
        resets = ()
        if not_enab and enable:
            resets = not_enab
        # If forcing, then reset all services, as well as any non-ready dependents.
        if force:
            resets = [s for s in slist if (not s.ready or s.started)]
        for s in resets:
            yield from s.reset(dependents=True, enable=enable, restarts_ok=True)
        if not wait:
            asyncio.async(self._queued_start(slist, service_names))
        else:
            yield from self.run(slist)

    @asyncio.coroutine
    def _queued_start(self, slist, names):
        # Background variant of start: failures are logged, not raised.
        try:
            yield from self.run(slist)
        except Exception as ex:
            error("queued start (for {0}) failed: {1}", names, ex)

    @asyncio.coroutine
    def stop(self, service_names, force = False, wait = False, disable = False):
        """
        Stop the named services.  Unless 'force', every named service must be
        stoppable.  With wait=False the stop happens in the background.  When
        'disable' is set, each service is disabled after it stops.
        """
        slist = self._lookup_services(service_names)
        started = [s for s in slist if s.stoppable]
        if not force:
            if len(started) != len(slist):
                raise Exception("can't stop services which aren't started: " +
                                ", ".join([s.shortname for s in slist if not s.stoppable]))
        if not wait:
            asyncio.async(self._queued_stop(slist, service_names, disable))
        else:
            for s in slist:
                yield from s.stop()
                if disable:
                    s.enabled = False

    @asyncio.coroutine
    def _queued_stop(self, slist, names, disable):
        # Background variant of stop: failures are logged, not raised.
        try:
            for s in slist:
                yield from s.stop()
                if disable:
                    s.enabled = False
        except Exception as ex:
            error("queued stop (for {0}) failed: {1}", names, ex)

    @asyncio.coroutine
    def reset(self, service_names, force = False, wait = False):
        """
        Reset the named services to their idle state.  Unless 'force', refuses
        to reset services which are currently running.  With wait=False the
        reset happens in the background.
        """
        slist = self._lookup_services(service_names)
        if not force:
            running = [s for s in slist if s.running]
            if running:
                raise Exception("can't reset services which are running: " + ", ".join([s.shortname for s in running]))
        if not wait:
            asyncio.async(self._queued_reset(slist, service_names))
        else:
            for s in slist:
                yield from s.reset(restarts_ok = True)

    @asyncio.coroutine
    def _queued_reset(self, slist, names):
        # Background variant of reset: failures are logged, not raised.
        try:
            for s in slist:
                yield from s.reset(restarts_ok = True)
        except Exception as ex:
            error("queued reset (for {0}) failed: {1}", names, ex)

    @asyncio.coroutine
    def enable(self, service_names):
        """Mark the named services as enabled."""
        slist = self._lookup_services(service_names)
        for s in slist:
            s.enabled = True

    @asyncio.coroutine
    def disable(self, service_names):
        """Mark the named services as disabled."""
        slist = self._lookup_services(service_names)
        for s in slist:
            s.enabled = False
================================================
FILE: chaperone/cproc/version.py
================================================
# This file is designed to be used as a package module, but also as a main program runnable
# by Python2 or Python3 which will print the version. Used in setup.py

# Imports moved to the top of the module per PEP 8 (previously they appeared
# after the constant definitions; nothing below depends on import order).
import os
import sys

# Package version as a tuple; DISPLAY_VERSION is the dotted string form.
VERSION = (0,3,9)
DISPLAY_VERSION = ".".join([str(v) for v in VERSION])

LICENSE = "Apache License, Version 2.0"
MAINTAINER = "Gary Wisniewski <garyw@blueseastech.com>"

LINK_PYPI = "https://pypi.python.org/pypi/chaperone"
LINK_DOC = "http://garywiz.github.io/chaperone"
LINK_SOURCE = "http://github.com/garywiz/chaperone"
LINK_QUICKSTART = "http://github.com/garywiz/chaperone-baseimage"
LINK_LICENSE = "http://www.apache.org/licenses/LICENSE-2.0"

# Formatted against this module object so the constants above are interpolated.
VERSION_MESSAGE = """
This is '{1}' version {0.DISPLAY_VERSION}.
Documentation and source is available at {0.LINK_SOURCE}.
Licensed under the {0.LICENSE}.
""".format(sys.modules[__name__], os.path.basename(sys.argv[0]))

if __name__ == '__main__':
    print(DISPLAY_VERSION)
================================================
FILE: chaperone/cproc/watcher.py
================================================
import os
import asyncio
import threading
from functools import partial
from asyncio.unix_events import BaseChildWatcher
from chaperone.cutil.logging import warn, info, debug
from chaperone.cutil.proc import ProcStatus
from chaperone.cutil.misc import get_signal_name
from chaperone.cutil.events import EventSource
class InitChildWatcher(BaseChildWatcher):
    """An init-responsible child watcher.

    Plugs into the asyncio child watcher framework to allow harvesting of both known and unknown
    child processes.  Because chaperone runs as PID 1, waitpid(-1, ...) may reap
    children nobody registered for; such statuses are either parked in a zombie
    table (while forks are in flight) or logged.
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.events = EventSource(**kwargs)
        self._callbacks = {}            # pid -> (callback, args) for registered children
        self._lock = threading.Lock()   # guards _callbacks/_zombies/_forks
        self._zombies = {}              # pid -> returncode for reaped-but-unregistered children
        self._forks = 0                 # count of fork contexts currently open
        self._no_processes = None
        self._had_children = False      # True once at least one child has been seen

    def close(self):
        # Drop all bookkeeping before the base class detaches from the loop.
        self._callbacks.clear()
        self._zombies.clear()
        super().close()

    def __enter__(self):
        # Context manager protocol: brackets a fork so that a child which dies
        # before add_child_handler() is called is parked in _zombies.
        with self._lock:
            self._forks += 1
            return self

    def __exit__(self, a, b, c):
        with self._lock:
            self._forks -= 1
            # only report leftover zombies once the last fork context closes
            if self._forks or not self._zombies:
                return
            collateral_victims = str(self._zombies)
            self._zombies.clear()
        info(
            "Caught subprocesses termination from unknown pids: %s",
            collateral_victims)

    @property
    def number_of_waiters(self):
        # Number of children we are still expecting to exit.
        return len(self._callbacks)

    def add_child_handler(self, pid, callback, *args):
        """Register callback(pid, returncode, *args) for the given child PID."""
        assert self._forks, "Must use the context manager"
        with self._lock:
            try:
                returncode = self._zombies.pop(pid)
            except KeyError:
                # The child is running.
                self._callbacks[pid] = callback, args
                return
        # The child is dead already. We can fire the callback.
        callback(pid, returncode, *args)

    def remove_child_handler(self, pid):
        """Unregister the handler for pid; returns True if one was registered."""
        try:
            del self._callbacks[pid]
            return True
        except KeyError:
            return False

    def check_processes(self):
        # Checks to see if any processes terminated, and triggers onNoProcesses
        self._do_waitpid_all()

    def _do_waitpid_all(self):
        # Because of signal coalescing, we must keep calling waitpid() as
        # long as we're able to reap a child.
        while True:
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
                debug("REAP pid={0},status={1}".format(pid,status))
            except ChildProcessError:
                # No more child processes exist.
                if self._had_children:
                    debug("no child processes present")
                    self.events.onNoProcesses()
                return
            else:
                self._had_children = True
                if pid == 0:
                    # A child process is still alive.
                    return
                returncode = ProcStatus(status)
            with self._lock:
                try:
                    callback, args = self._callbacks.pop(pid)
                except KeyError:
                    # unknown child
                    if self._forks:
                        # It may not be registered yet.
                        self._zombies[pid] = returncode
                        continue
                    callback = None
            if callback is None:
                info(
                    "Caught subprocess termination from unknown pid: "
                    "%d -> %d", pid, returncode)
            else:
                callback(pid, returncode, *args)
================================================
FILE: chaperone/cutil/__init__.py
================================================
# Placeholder
================================================
FILE: chaperone/cutil/config.py
================================================
import os
import re
import pwd
import shlex
from operator import attrgetter
from copy import deepcopy
from itertools import chain
import yaml
import voluptuous as V
from chaperone.cutil.env import Environment, ENV_CONFIG_DIR, ENV_SERVICE
from chaperone.cutil.errors import ChParameterError
from chaperone.cutil.logging import info, warn, debug
from chaperone.cutil.misc import lazydict, lookup_user, get_signal_number
@V.message('not an executable file', cls=V.FileInvalid)
@V.truth
def IsExecutable(v):
    """Schema truth-test: *v* must name an existing, executable regular file."""
    if not os.path.isfile(v):
        return False
    return os.access(v, os.X_OK)
# Validation schema for chaperone configuration files.  Three kinds of
# top-level sections are accepted: "<name>.service" service definitions,
# a single "settings" section, and "<name>.logging" log-target sections.
# Raw strings are now used for the V.Match() patterns so backslash sequences
# such as \. are taken literally (fixes invalid escape sequence warnings).
_config_schema = V.Any(
    { V.Match(r'^.+\.service$'): {
        'after': str,
        'before': str,
        V.Required('command'): str,
        'directory': str,
        'debug': bool,
        'enabled': V.Any(bool, str),
        'env_inherit': [ str ],
        'env_set': { str: str },
        'env_unset': [ str ],
        'exit_kills': bool,
        'gid': V.Any(str, int),
        'ignore_failures': bool,
        'interval': str,
        'kill_signal': str,
        'optional': bool,
        'port': V.Any(str, int),
        'pidfile': str,
        'process_timeout': V.Any(float, int),
        'startup_pause': V.Any(float, int),
        'restart': bool,
        'restart_limit': int,
        'restart_delay': int,
        'service_groups': str,
        'setpgrp': bool,
        'stderr': V.Any('log', 'inherit'),
        'stdout': V.Any('log', 'inherit'),
        'type': V.Any('oneshot', 'simple', 'forking', 'notify', 'cron', 'inetd'),
        'uid': V.Any(str, int),
      },
      V.Match('^settings$'): {
        'debug': bool,
        'detect_exit': bool,
        'env_inherit': [ str ],
        'env_set': { str: str },
        'env_unset': [ str ],
        'gid': V.Any(str, int),
        'idle_delay': V.Any(float, int),
        'ignore_failures': bool,
        'process_timeout': V.Any(float, int),
        'startup_pause': V.Any(float, int),
        'shutdown_timeout': V.Any(float, int),
        'uid': V.Any(str, int),
        'logrec_hostname': str,
        'enable_syslog': bool,
        'status_interval': V.Any(float, int),
      },
      # NOTE(review): unlike the patterns above, this one has no trailing '$'
      # anchor, so any key that merely starts with "<name>.logging" matches.
      # Confirm whether that is intentional before tightening it.
      V.Match(r'^.+\.logging'): {
        'enabled': V.Any(bool, str),
        'extended': bool,
        'file': str,
        'syslog_host': str,
        'selector': str,
        'stderr': bool,
        'stdout': bool,
        'overwrite': bool,
        'uid': V.Any(str, int),
        'gid': V.Any(str, int),
        'logrec_hostname': str,
      },
    }
)

validator = V.Schema(_config_schema)

# Splits comma-separated list values, tolerating surrounding whitespace.
_RE_LISTSEP = re.compile(r'\s*,\s*')
def print_services(label, svlist):
    # Useful for debugging startup order
    print(label)
    for svc in svlist:
        print(svc)
        prereq = getattr(svc, 'prerequisites', None)
        if prereq:
            print(' prereq:', prereq)
# Note that we extend YAML by allowing an empty string to mean "false". This makes some macro
# expansions work better, such as ... enabled:"$(MYSQL_ENABLED:+true)"
# Matching is case-insensitive; the named group 'true' is set only for the
# truthy words, so bool(match.group('true')) yields the boolean value.
_RE_YAML_BOOL = re.compile(r'^\s*(?:(?P<true>y|true|yes|on)|(n|false|no|off|))\s*$', re.IGNORECASE)
class _BaseConfig(object):
    """
    Common base for configuration sections (services, logging).  Handles
    settings-derived defaults, environment construction and expansion, and
    declarative type checking of attributes.
    """

    name = None                 # section name, e.g. "foo.service"
    environment = None          # Environment built for this config during __init__
    env_set = None
    env_unset = None
    env_inherit = ['*']

    _repr_pat = None            # optional format pattern used by __repr__
    _expand_these = {}          # attribute names to expand against the environment
    _typecheck = {}             # attr -> typecheck method suffix ('assure_bool', ...)
    _settings_defaults = {}     # attrs whose defaults come from the settings section

    @classmethod
    def createConfig(cls, config=None, **kwargs):
        """
        Creates a new configuration given a system configuration object.  Initializes the
        environment as well as triggers any per-configuration attribute initialization.
        """
        return cls(kwargs,
                   env=config.get_environment(),
                   settings=config.get_settings())

    def _typecheck_assure_bool(self, attr):
        "Assures that the specified attribute is a legal boolean."
        val = getattr(self, attr)
        if val is None or isinstance(val, bool):
            return
        # First, try both 'true' and 'false' according to YAML conventions
        match = _RE_YAML_BOOL.match(str(val))
        if not match:
            raise ChParameterError("invalid boolean parameter for '{0}': '{1}'".format(attr, val))
        setattr(self, attr, bool(match.group('true')))

    def _typecheck_assure_int(self, attr):
        "Assures that the specified attribute is a legal integer."
        val = getattr(self, attr)
        if val is None or isinstance(val, int):
            return
        try:
            setattr(self, attr, int(val))
        except ValueError:
            raise ChParameterError("invalid integer parameter for '{0}': '{1}'".format(attr, val))

    def __init__(self, initdict, name = "MAIN", env = None, settings = None):
        """
        Build the config from a raw dict.  Order matters: settings-derived
        defaults first, then explicit values, then uid/gid expansion, then
        environment construction, attribute expansion and type checking.
        """
        self.name = name
        # apply settings-level defaults only where initdict doesn't override
        if settings:
            for sd in self._settings_defaults:
                if sd not in initdict:
                    val = settings.get(sd)
                    if val is not None:
                        setattr(self, sd, val)
        for k,v in initdict.items():
            setattr(self, k, v)
        # User names always have .xxx qualifier because of schema restrictions.  Otherwise, it's a user
        # defined name subject to restrictions.
        splitname = self.name.rsplit('.', 1)
        if len(splitname) == 2 and splitname[0] == splitname[0].upper():
            raise ChParameterError("all-uppercase names such as '{0}' are reserved for the system.".format(self.name))
        # UID and GID are expanded according to the incoming environment,
        # since the new environment depends upon these.
        if env:
            env.expand_attributes(self, 'uid', 'gid')
        uid = self.get('uid')
        gid = self.get('gid')
        if gid is not None and uid is None:
            raise Exception("cannot specify 'gid' without 'uid'")
        # We can now use 'self' as our config, with all defaults.
        env = self.environment = Environment(env, uid=uid, gid=gid, config=self,
                                             resolve_xid = not self.get('optional', False))
        self.augment_environment(env)
        if self._expand_these:
            env.expand_attributes(self, *self._expand_these)
        for attr,func in self._typecheck.items():
            getattr(self, '_typecheck_'+func)(attr)
        self.post_init()

    # NOTE(review): subclasses (ServiceConfig, LogConfig) define 'shortname'
    # as a @property, while here it is a plain method -- confirm whether any
    # caller uses the base class directly before unifying.
    def shortname(self):
        return self.name

    def post_init(self):
        # Hook for subclasses: called at the end of __init__.
        pass

    def augment_environment(self, env):
        # Hook for subclasses: add config-specific variables to 'env'.
        pass

    def get(self, attr, default = None):
        """getattr with a default, mirroring dict.get semantics."""
        return getattr(self, attr, default)

    def __repr__(self):
        if self._repr_pat:
            return self._repr_pat.format(self)
        return super().__repr__()
class ServiceConfig(_BaseConfig):
    """
    Configuration for one "<name>.service" section.  Attribute defaults below
    document the supported options; post_init() normalizes command, signal,
    and dependency declarations.
    """

    after = None                # service/group names this one starts after
    before = None               # service/group names this one starts before
    command = None              # command line; split into exec_args in post_init
    debug = None
    directory = None            # working directory for the process
    enabled = True
    exit_kills = False          # if true, process exit brings the system down
    gid = None
    interval = None
    ignore_failures = False
    kill_signal = None          # name resolved to a number in post_init
    optional = False
    pidfile = None              # the pidfile to monitor
    port = None                 # used for inetd processes
    process_timeout = None      # time to elapse before we decide a process has misbehaved
    startup_pause = 0.5         # time to wait momentarily to see if a service starts (if needed)
    restart = False
    restart_limit = 5           # number of times to invoke a restart before giving up
    restart_delay = 3           # number of seconds to delay between restarts
    setpgrp = True              # if this process should run in its own process group
    service_groups = "default"  # will be transformed into a tuple() upon construction
    stderr = "log"
    stdout = "log"
    type = 'simple'
    uid = None

    exec_args = None            # derived from bin/command/args, but may be preset using createConfig
    idle_delay = 1.0            # present, but mirrored from settings, not settable per-service
                                # since it is only triggered once when the first IDLE group item executes
    prerequisites = None        # a list of service names which are prerequisites to this one

    _repr_pat = "Service:{0.name}(service_groups={0.service_groups}, after={0.after}, before={0.before})"
    _expand_these = {'command', 'stdout', 'stderr', 'interval', 'directory', 'exec_args', 'pidfile', 'enabled', 'port'}
    _typecheck = {'enabled': 'assure_bool', 'port': 'assure_int'}
    _assure_bool = {'enabled'}
    _settings_defaults = {'debug', 'idle_delay', 'process_timeout', 'startup_pause', 'ignore_failures'}

    # Reserved all-uppercase identifiers recognized in dependency declarations.
    system_group_names = ('IDLE', 'INIT')
    system_service_names = ('CONSOLE', 'MAIN')

    @property
    def shortname(self):
        # Service name without its '.service' suffix.
        return self.name.replace('.service', '')

    def augment_environment(self, env):
        # Expose our own service name to the process environment.
        if self.name:
            env[ENV_SERVICE] = self.name

    def post_init(self):
        """Normalize command, kill signal, and dependency/group declarations."""
        # Assure that exec_args is set to the actual arguments used for execution
        if self.command:
            self.exec_args = shlex.split(self.command)
        # Lookup signal number
        if self.kill_signal is not None:
            self.kill_signal = get_signal_number(self.kill_signal)
        # Expand before, after and service_groups into sets/tuples
        self.before = set(_RE_LISTSEP.split(self.before)) if self.before is not None else set()
        self.after = set(_RE_LISTSEP.split(self.after)) if self.after is not None else set()
        self.service_groups = tuple(_RE_LISTSEP.split(self.service_groups)) if self.service_groups is not None else tuple()
        # All-uppercase names are reserved; validate any used in dependencies.
        for sname in chain(self.before, self.after):
            if sname.upper() == sname and sname not in chain(self.system_group_names, self.system_service_names):
                raise ChParameterError("{0} dependency reference not valid; '{1}' is not a recognized system name"
                                       .format(self.name, sname))
        for sname in self.service_groups:
            if sname.upper() == sname and sname not in self.system_group_names:
                raise ChParameterError("{0} contains an unrecognized system group name '{1}'".format(self.name, sname))
        # IDLE is always last and INIT always first, so these make no sense:
        if 'IDLE' in self.after:
            raise Exception("{0} cannot specify services which start *after* service_group IDLE".format(self.name))
        if 'INIT' in self.before:
            raise Exception("{0} cannot specify services which start *before* service_group INIT".format(self.name))
class LogConfig(_BaseConfig):
    """
    Configuration for one "<name>.logging" section, describing a log target
    (file, stdout/stderr, or remote syslog) and its selector.
    """

    selector = '*.*'            # syslog-style facility.priority selector
    file = None                 # logfile path, if logging to a file
    stderr = False              # if true, copy matching records to stderr
    stdout = False              # if true, copy matching records to stdout
    enabled = True
    overwrite = False           # overwrite (rather than append to) the logfile
    extended = False            # include facility/priority information
    uid = None                  # used to control permissions on logfile creation
    gid = None
    logrec_hostname = None      # hostname used to override hostname in syslog record
    syslog_host = None          # remote IP of syslog handler

    _expand_these = {'selector', 'file', 'enabled', 'logrec_hostname', 'syslog_host'}
    _typecheck = {'enabled': 'assure_bool'}
    _settings_defaults = {'logrec_hostname'}

    @property
    def shortname(self):
        # Section name without its '.logging' suffix.
        return self.name.replace('.logging', '')
class ServiceDict(lazydict):
    """
    A dictionary of ServiceConfig objects keyed by service name, capable of
    computing a dependency-ordered startup list from the services' before:,
    after: and service_groups declarations.
    """

    _ordered_startup = None     # cached result of get_startup_list()

    def __init__(self, servdict, env = None, settings = None):
        """
        Accepts a dictionary of values to be turned into services.
        """
        super().__init__(
            ((k,ServiceConfig(v,k,env,settings)) for (k,v) in servdict)
        )

    def add(self, service):
        """Add (or replace) a service by its name."""
        self[service.name] = service

    def clear(self):
        super().clear()
        # invalidate the cached startup ordering
        self._ordered_startup = None

    def get_dependency_graph(self):
        """
        Returns a set of dependency groups.  Each group represents a set of dependencies starting at the
        root of the dependency tree.  This is valuable for debugging dependencies.  The output graph
        is ascii-art which shows the earliest start times and latest stop times for each service,
        roughly in order of start-up.
        """
        sep = ' | '
        sulist = self.get_startup_list()
        # assign each service a column position in startup order
        curcol = 0
        maxwidth = 0
        for s in sulist:
            ourlen = len(s.shortname)
            s._column = curcol + ourlen - 1
            curcol += ourlen + len(sep)
            maxwidth = max(maxwidth, ourlen)
        def histogram(serv):
            # find the earliest prerequisite, or 0 if there is none
            pcols = tuple(s._column for s in sulist if s.name in serv.prerequisites)
            start = (pcols and max(pcols) + 1) or 0
            return (' ' * start) + ('=' * (serv._column - start + 1))
        lines = list()
        lines.append(' ' * (maxwidth + len(sep)) + sep.join(s.shortname for s in sulist))
        for s in sulist:
            lines.append(s.shortname.ljust(maxwidth) + sep + histogram(s))
        lines.append(('-' * (maxwidth)) + '-> depends on...')
        for s in sulist:
            lines.append(s.shortname.ljust(maxwidth) + sep + ', '.join(pr.replace('.service', '') for pr in s.prerequisites))
        return lines

    def get_startup_list(self):
        """
        Returns the list of start-up items in priority order by examining before: and after:
        attributes.
        """
        if self._ordered_startup is not None:
            return self._ordered_startup
        # work on copies so the dependency rewriting doesn't disturb originals
        services = self.deepcopy()
        groups = lazydict()
        for k,v in services.items():
            for g in v.service_groups:
                groups.setdefault(g, lambda: lazydict())[k] = v
        #print_services('initial', services.values())
        # The "IDLE" and "INIT" groups are special.  Revamp things so that any services in the "IDLE" group
        # have an implicit "after: 'all-others'" and any services in "INIT" have an implicit "before: 'all-others'
        # where all-others is an explicit list of all services NOT in the respective group
        if 'IDLE' in groups:
            nonidle = set(k for k,v in services.items() if "IDLE" not in v.service_groups)
            for s in groups['IDLE'].values():
                s.after.update(nonidle)
        if 'INIT' in groups:
            noninit = set(k for k,v in services.items() if "INIT" not in v.service_groups)
            for s in groups['INIT'].values():
                s.before.update(noninit)
        # We want to only look at the "after:" attribute, so we will eliminate the relevance
        # of befores...
        for k,v in services.items():
            for bef in v.before:
                if bef in groups:
                    for g in groups[bef].values():
                        g.after.add(v.name)
                elif bef in services:
                    services[bef].after.add(v.name)
            v.before = None
        # Before is now gone, make sure that all "after... groups" are translated into "after.... service"
        for group in groups.values():
            afters = set()
            for item in group.values():
                afters.update(item.after)
            for a in afters:
                if a in groups:
                    names = groups[a].keys()
                    for item in group.values():
                        item.after.update(names)
        # Now remove any undefined services or groups and turn the 'after' attribute into a definitive
        # graph.
        #
        # Note: sorted() occurs a couple times below.  The main reason is so that the results
        # are deterministic in cases where exact order is not defined.
        afters = set(services.keys())
        for v in services.values():
            v.refs = sorted(map(lambda n: services[n], v.after.intersection(afters)), key=attrgetter('name'))
        #print_services('before add nodes', services.values())
        svlist = list()         # this will be our final list, containing original items
        svseen = set()
        def add_nodes(items):
            # depth-first walk; 'active' marks the current path for cycle detection
            for item in items:
                if hasattr(item, 'active'):
                    raise Exception("circular dependency in service declaration")
                item.active = True
                add_nodes(item.refs)
                del item.active
                if item.name not in svseen:
                    svseen.add(item.name)
                    svlist.append(self[item.name])
                    # set startup prerequisite dependencies
                    svlist[-1].prerequisites = set(r.name for r in item.refs)
        add_nodes(sorted(services.values(), key=attrgetter('name')))
        #print_services('final service list', svlist)
        self._ordered_startup = svlist
        return svlist
class Configuration(object):
uid = None # specifies if a system-wide user was provided
gid = None
_conf = None
_env = None # calculated environment
@classmethod
def configFromCommandSpec(cls, spec, user = None, default = None, extra_settings = None, disable_console_log = False):
    """
    A command specification (typically specified with the --config=<file_or_dir> command
    line option) is used to create a configuration object.  The target may be either a file
    or a directory.  If it is a file, then the file itself will be the only configuration
    read.  If it is a directory, then a search is made for any top-level files which end in
    .conf or .yaml, and those will be combined according to lexicographic order.

    If the configuration path is a relative path, then it is relative to either the root
    directory, or the home directory of the given user.  This allows a user-specific
    configuration to automatically take effect if desired.
    """
    frombase = '/'
    if user:
        frombase = lookup_user(user).pw_dir
    trypath = os.path.join(frombase, spec)
    debug("TRY CONFIG PATH: {0}".format(trypath))
    if not os.path.exists(trypath):
        # nothing found: fall back entirely to the built-in default config
        return cls(default = default)
    else:
        # record the config directory in the environment for later expansion
        os.environ[ENV_CONFIG_DIR] = os.path.dirname(trypath)
    if os.path.isdir(trypath):
        # combine all .yaml/.conf files in the directory, lexicographically
        return cls(*[os.path.join(trypath, f) for f in sorted(os.listdir(trypath))
                     if f.endswith('.yaml') or f.endswith('.conf')],
                   default = default, uid = user, extra_settings = extra_settings, disable_console_log = disable_console_log)
    return cls(trypath, default = default, uid = user, extra_settings = extra_settings, disable_console_log = disable_console_log)
def __init__(self, *args, default = None, uid = None, extra_settings = None, disable_console_log = False):
"""
Given one or more files, load our configuration. If no configuration is provided,
then use the configuration specified by the default.
"""
debug("CONFIG INPUT (uid={1}): '{0}'".format(args, uid))
self.uid = uid
self._conf = lazydict()
for fn in args:
if os.path.exists(fn):
self._merge(yaml.load(open(fn, 'r').read().expandtabs()))
if not self._conf and default:
self._conf = lazydict(yaml.load(default))
validator(self._conf)
if extra_settings:
self.update_settings(extra_settings)
s = self.get_settings()
self.uid = s.get('uid', self.uid)
self.gid = s.get('gid', self.gid)
# Special case used by --no-console-log. It really was just easiest to do it this way
# rather than try to build some special notion of "console logging" into the log services
# backends.
if disable_console_log:
for k,v in self._conf.items():
if k.endswith('.logging'):
if 'stdout' in v:
del v['stdout']
if 'stderr' in v:
del v['stderr']
def _merge(self, items):
if type(items) == list:
items = {k:dict() for k in items}
conf = self._conf
for k,v in items.items():
if k in conf and not k.endswith('.service'):
conf.smart_update(k,v)
else:
conf[k] = v
def get_services(self):
env = self.get_environment()
return ServiceDict(
((k,v) for k,v in self._conf.items() if k.endswith('.service')),
env,
self._conf.get('settings')
)
def get_logconfigs(self):
env = self.get_environment()
settings = self._conf.get('settings')
return lazydict(
((k,LogConfig(v,k,env,settings)) for k,v in self._conf.items() if k.endswith('.logging'))
)
def get_settings(self):
return self._conf.get('settings') or {}
def update_settings(self, updates):
curset = self.get_settings()
curset.update(updates)
self._conf['settings'] = curset
def get_environment(self):
if not self._env:
self._env = Environment(config=self.get_settings(), uid=self.uid, gid=self.gid)
return self._env
def dump(self):
debug('FULL CONFIGURATION: {0}'.format(self._conf))
================================================
FILE: chaperone/cutil/env.py
================================================
import re
import os
import subprocess
from fnmatch import fnmatch
from chaperone.cutil.logging import error, debug, warn
from chaperone.cutil.misc import lookup_user, lazydict
from chaperone.cutil.errors import ChVariableError, ChParameterError, ChNotFoundError
##
## ALL chaperone configuration variables defined here for easy reference
ENV_CONFIG_DIR = '_CHAP_CONFIG_DIR' # directory which CONTAINS the config file *or* directory
ENV_INTERACTIVE = '_CHAP_INTERACTIVE' # if this session is interactive (has a ptty attached)
ENV_SERVICE = '_CHAP_SERVICE' # name of the current service
ENV_SERIAL = '_CHAP_SERVICE_SERIAL' # Contains a monotonic unique serial number for each started service, starting with 1
ENV_SERVTIME = '_CHAP_SERVICE_TIME' # Timestamp when service started running
ENV_TASK_MODE = '_CHAP_TASK_MODE' # if we are running in --task mode
ENV_CHAP_OPTIONS = '_CHAP_OPTIONS' # Preset before chaperone runs to set default options
# Technically IEEE 1003.1-2001 states env vars can contain anything except '=' and NUL but we need to
# obviously exclude the terminator!
#
# Minimal support is included for nested parenthesis when operators are used, as in:
# $(VAR:-$(VAL))
# However, more levels of nesting are not supported and will cause substitutions to be unrecognised.
_RE_BACKTICK = re.compile(r'`([^`]+)`', re.DOTALL)
# Parsing for operators within expansions
_RE_OPERS = re.compile(r'^(?:([^:]+):([-|?+_/])(.*)|(`.+`))$', re.DOTALL)
_RE_SLASHOP = re.compile(r'^(.+)(?<!\\)/(.*)(?<!\\)/([i]*)$', re.DOTALL)
_RE_BAREBAR = re.compile(r'(?<!\\)\|')
_DICT_CONST = dict() # a dict we must never change, just an optimisation
class EnvScanner:
    """
    A class which performs basic parsing of strings containing environment variables,
    with support for nested constructs.  No, you can't do this with regular expressions.
    """

    open_expansion = '({'       # characters which may open an expansion, e.g. $( or ${
    quotes = "\"`";             # we assume that single quotes may not be paired.  This prevents contractions
                                # from inhibiting expansions
    escape = "\\"
    variable_id = '$'

    nestlist = ')]}([{'         # arranged so that ending delimiters are first and positions match

    def __init__(self, variable_id = None, open_expansion = None):
        # Both the sigil and the set of opening delimiters may be customized.
        if variable_id:
            self.variable_id = variable_id
        if open_expansion:
            self.open_expansion = open_expansion
        # Start pattern: optional escape char, the sigil, then any opening delimiter.
        self._RE_START = re.compile('(' + re.escape(self.escape) + ')?' + re.escape(self.variable_id) +
                                    '(' + ('|'.join([re.escape(d[0]) for d in self.open_expansion])) + ')')

    def parse(self, buf, func, *args):
        """
        Parses buffer and expands variables using func(exp_data, exp_whole, *args)
        where, given $(xxx):
           exp_data is the actual contents of the variable, so 'xxx'
           exp_whole is the entire expression, so '$(xxx)'
        """
        # Quickly return if we don't have any expansions
        st = self._RE_START
        match = st.search(buf)
        if not match:
            return buf

        # Now do the hard work
        results = []
        buflen = len(buf)
        startpos = 0
        nestlen = len(self.nestlist)
        halfnest = nestlen // 2 # delims < halfnest are paired closing delimiters
        lookfor = self.nestlist + self.quotes

        while match:
            pos = match.start()
            if pos != startpos:
                # Copy the literal text which precedes this expansion.
                results.append(buf[startpos:pos])
            if match.group(1):
                # just escape the value: emit the bare sigil, keep the rest literal
                results.append(self.variable_id)
                startpos = match.start(2)
                pos = buflen
                match = st.search(buf, startpos)
            else:
                pos = match.start(2)
                startpos = pos + 1
                # Init the stack.  We know a push will come first
                stack = []
                # find the very end of the area, counting nested items
                while True:
                    ci = lookfor.find(buf[pos])
                    #print(pos, buf[pos], ci, stack, results)
                    if ci >= 0:
                        # s0 is the delimiter we are waiting to close (-1 when stack empty)
                        s0 = (not stack and -1) or stack[-1]
                        if s0 == ci:
                            stack.pop()
                            # We are totally done if the stack is empty
                            if not stack:
                                results.append(func(buf[startpos:pos], buf[match.start():pos+1], *args))
                                startpos = pos + 1
                                pos = buflen
                                match = st.search(buf, startpos)
                                break
                        elif ci >= halfnest and s0 < nestlen: # don't match within quotes
                            # at matching end delimiter, which may be nesting, or not
                            stack.append(ci-halfnest if ci < nestlen else ci)
                    pos += 1
                    if pos >= buflen:
                        # Ran off the end with an unterminated expansion; keep it literal.
                        startpos = match.start(0)
                        match = None
                        break

        if pos != startpos:
            results.append(buf[startpos:pos])

        return ''.join(results)
class Environment(lazydict):
    """
    A dictionary of environment variables supporting recursive $(VAR) expansion,
    per-user initialization (HOME/USER/LOGNAME), env_inherit/env_set/env_unset
    configuration directives, and optional backtick command expansion.
    """

    uid = None                  # numeric uid associated with this environment, if any
    gid = None                  # numeric gid associated with this environment, if any

    # This is a cached version of this environment, expanded
    _expanded = None

    # The _shadow Environment contains a pointer to the environment which contained
    # the LAST active value for each env_set item so that we can deal with self-referential
    # cases like:
    #    'PATH': '/usr/local:$(PATH)'
    _shadow = None

    # A class variable to keep track of backtick expansions so we don't do them more than once
    _cls_btcache = dict()
    _cls_use_btcache = True     # if shell expansions should be cached once or re-executed
    _cls_backtick = True        # indicates backticks are enabled

    # Default scanner
    _cls_scan = EnvScanner()

    @classmethod
    def set_parse_parameters(cls, variable_id = None, open_expansion = None):
        # Replace the class-wide scanner, changing the recognized sigil and/or
        # opening delimiters for every Environment instance.
        cls._cls_scan = EnvScanner(variable_id, open_expansion)

    @classmethod
    def set_backtick_expansion(cls, enabled = True, cache = True):
        # Globally enable/disable `cmd` expansion and whether results are cached.
        cls._cls_backtick = enabled
        cls._cls_use_btcache = cache

    def __init__(self, from_env = os.environ, config = None, uid = None, gid = None, resolve_xid = True):
        """
        Create a new environment.  An environment may have a user associated with it.  If so,
        then it will be pre-populated with the user's HOME, USER and LOGNAME so that expansions
        can reference these.

        Note that if resolve_xid is False, then credentials if they do not exist, but leave the uid/gid the same.
        This means that certain features, like HOME variables, will not be properly set, leading to possible
        interactions between the optional components and their actual specification.  However, this is better
        than having optional components trigger errors because uninstalled software did not create uid's
        needed for operation.  The onus is on the service itself (in cproc) to assure that checking
        is performed.

        Note also that environments which use backtick expansions will *still* fail, because the backticks
        must occur within the context of the specified user, and it would be a security violation to
        allow a default.
        """
        super().__init__()
        #print("\n--ENV INIT", config, uid, from_env, from_env and getattr(from_env, 'uid', None))

        userenv = dict()

        # Inherit user from passed-in environment
        self._shadow = getattr(from_env, '_shadow', None)
        shadow = None           # we don't bother to recreate this in any complex fashion unless we need to

        if uid is None:
            self.uid = getattr(from_env, 'uid', self.uid)
            self.gid = getattr(from_env, 'gid', self.gid)
        else:
            pwrec = None
            try:
                pwrec = lookup_user(uid, gid)
            except ChNotFoundError:
                if resolve_xid:
                    raise
                # Keep the unresolvable uid/gid as given (see docstring).
                self.uid = uid
                self.gid = gid
            if pwrec:
                self.uid = pwrec.pw_uid
                self.gid = pwrec.pw_gid
                userenv['HOME'] = pwrec.pw_dir
                userenv['USER'] = userenv['LOGNAME'] = pwrec.pw_name

        if not config:
            if from_env:
                self.update(from_env)
            self.update(userenv)
        else:
            # Inherit only variables matching env_inherit patterns (default: all).
            inherit = config.get('env_inherit') or ['*']
            if inherit and from_env:
                self.update({k:v for k,v in from_env.items() if any([fnmatch(k,pat) for pat in inherit])})
            self.update(userenv)
            add = config.get('env_set')
            unset = config.get('env_unset')
            if add or unset:
                self._shadow = shadow = (getattr(self, '_shadow') or _DICT_CONST).copy()
            if add:
                for k,v in add.items():
                    if from_env and k in from_env:
                        shadow[k] = from_env # we keep track of the environment where the predecessor originated
                    self[k] = v
            if unset:
                patmatch = lambda p: any([fnmatch(p,pat) for pat in unset])
                for delkey in [k for k in self.keys() if patmatch(k)]:
                    del self[delkey]
                for delkey in [k for k in shadow.keys() if patmatch(k)]:
                    del shadow[delkey]

        #print('  DONE (.uid={0}): {1}\n'.format(self.uid, self))

    def _get_shadow_environment(self, var):
        """
        Returns the environment where var existed before the specified variable was set, even
        if that occurred long ago.  Delays expansion of the parent environment until this point,
        since it is only rarely that self-referential environment variables need to consult the shadow.
        """
        try:
            shadow = self._shadow[var]
        except (TypeError, KeyError):
            # _shadow may be None (TypeError) or simply lack the key (KeyError).
            return None

        try:
            return shadow.expanded()
        except AttributeError:
            pass

        # Note shadow may be None at this point, or a dict()
        self._shadow[var] = shadow = Environment(shadow)
        return shadow.expanded()

    def __setitem__(self, key, value):
        # Any mutation invalidates the cached expanded() copy.
        super().__setitem__(key, value)
        self._expanded = None

    def __delitem__(self, key):
        super().__delitem__(key)
        self._expanded = None

    def clear(self):
        super().clear()
        self._expanded = None

    def _elookup(self, match):
        # re.sub() helper: replace "$(VAR)" with its value, or leave the whole
        # match untouched when the variable is undefined.
        whole = match.group(0)
        return self.get(whole[2:-1], whole)

    def expand(self, instr):
        """
        Expands an input string by replacing environment variables of the form ${ENV} or $(ENV).
        If an expansion is not found, the substitution is ignored and the original reference remains.

        Two bash features are employed to allow tests:
          $(VAR:-sub)    Expands to sub if VAR not defined
          $(VAR:+sub)    Expands to sub if VAR IS defined

        If a list is provided instead of a string, a list will be returned with each item
        separately expanded.
        """
        if isinstance(instr, list):
            return [self.expand(item) for item in instr]
        if not isinstance(instr, str):
            return instr
        return self._cls_scan.parse(instr, self._expand_into, self)

    def expand_attributes(self, obj, *args):
        """
        Given an object and a set of attributes, expands each and replaces the originals with
        expanded versions.  Implicitly expands the environment to assure all variable substitutions
        occur correctly.
        """
        # NOTE(review): 'explist' is a generator, which is always truthy, so this
        # guard can never trigger; it does not short-circuit when no attributes match.
        explist = (k for k in args if hasattr(obj, k))
        if not explist:
            return
        env = self.expanded()
        for attr in explist:
            setattr(obj, attr, env.expand(getattr(obj, attr)))

    def expanded(self):
        """
        Does a recursive expansion on all variables until there are no matches.  Circular recursion
        is halted rather than reported as an error.  Returns a version of this environment
        which has been expanded.  Asking an expanded() copy for another expanded() copy returns self
        unless the expanded copy has been modified.
        """
        if self._expanded is not None:
            return self._expanded

        result = Environment(None)

        for k in sorted(self.keys()): # sorted so outcome is deterministic
            self._expand_into(k, None, result, k)

        # Copy uid after we expand, since any user information is already present in our
        # own environment.
        result.uid = self.uid
        result.gid = self.gid
        result._shadow = self._shadow

        # Cache a copy, but also tell the cached copy that its expanded cached copy is itself.
        result._expanded = result
        self._expanded = result

        return result

    def _expand_into(self, k, wholematch, result, parent = None):
        """
        Internal workhorse that expands the variable 'k' INTO the given result dictionary.
        The result dictionary will contain the expanded values.  The result dictionary is
        also a cache for nested and recursive environment expansion.

        'wholematch' is None unless called from in an re.sub() (or similar context).
        If set, it indicates the complete expansion expression, including adornments.
        It is used as the default expansion when a variable is not defined.

        'parent' is the name of the variable which was being expanded in the last
        recursion, to catch the special case of self-referential variables.
        """
        match = _RE_OPERS.match(k)
        if match:
            (k, oper, repl, backtick) = match.groups()

        # Phase 1:  Base variable value.  Start by determining the value of variable
        # 'k' within the current context.

        # 1A: We have a backtick shortcut, such as $(`date`)
        if match and backtick:
            return self._recurse(result, backtick, parent)

        # 1B: We have an embedded self reference such as "PATH": "/bin:$(PATH)".  We use
        #     the last defined value in a prior environment as the value.
        elif parent == k and wholematch is not None:
            val = (self._get_shadow_environment(k) or _DICT_CONST).get(k) or ''

        # 1C: We have already calculated a result and will use that instead, but only
        #     in a nested expansion.  We re-evaluate top-levels all the time.
        elif wholematch is not None and k in result:
            val = result[k]

        # 1D: We have a variable which is not part of our environment at all, and
        #     either treat it as empty, or as the wholematch value for further
        #     processing
        elif k not in self:
            val = "" if match else wholematch

        # 1E: Finally, we will store this value and expand further.
        else:
            result[k] = self[k] # assure that recursion attempts stop with this value
            val = result[k] = self._recurse(result, self[k], k)

        # We now have, in 'val', the fully expanded contents of the variable 'k'

        if not match:
            return val

        # Phase 2:  Process any operators to return a possibly modified
        # value as the result of the complete expression.

        if oper == '?':
            # $(VAR:?msg) -- error out with 'msg' when VAR is empty/undefined
            if not val:
                raise ChVariableError(self._recurse(result, repl, parent))
        elif oper == '/':
            # $(VAR:/pat/repl/flags) -- regex substitution on the value
            smatch = _RE_SLASHOP.match(repl)
            if not smatch:
                raise ChParameterError("invalid regex replacement syntax in '{0}'".format(match.group(0)))
            val = self._recurse(result, re.sub((smatch.group(3) and "(?" + smatch.group(3) + ")") + smatch.group(1),
                                               smatch.group(2).replace('\/', '/'),
                                               val), parent)
        elif oper == '|':
            # $(VAR:|a), $(VAR:|a|b) or $(VAR:|glob|a|b) conditional forms
            vts = _RE_BAREBAR.split(repl, 3)
            if len(vts) == 1:   # same as +
                val = '' if not val else self._recurse(result, vts[0], parent)
            elif len(vts) == 2:
                val = self._recurse(result, vts[0] if val else vts[1], parent)
            elif len(vts) >= 3:
                editval = vts[1] if fnmatch(val.replace(r'\|', '|').lower(), vts[0].lower()) else vts[2]
                val = self._recurse(result, editval.replace(r'\|', '|'), parent)
        elif oper == "+":
            val = '' if not val else self._recurse(result, repl, parent)
        elif oper == "_":       # strict opposite of +
            val = '' if val else self._recurse(result, repl, parent)
        elif oper == "-":       # bash :-
            if not val:
                val = self._recurse(result, repl, parent)

        return val

    def _recurse(self, result, buf, parent_var = None):
        "Worker method to isolate recursive env variable expansion, with backtick support"
        return _RE_BACKTICK.sub(self._backtick_expand,
                                self._cls_scan.parse(buf, self._expand_into, result, parent_var))

    def _backtick_expand(self, cmd):
        """
        Performs rudimentary backtick expansion after all other environment variables have been
        expanded.  Because these are cached, the user should not expect results to differ
        for different environment contexts, nor should the environment itself be relied upon.
        """
        # Accepts either a string or match object
        if not isinstance(cmd, str):
            cmd = cmd.group(1)

        if not self._cls_backtick:
            # Backticks disabled: return the expression verbatim.
            return "`" + cmd + "`"

        # Cache key includes credentials so different users get separate results.
        key = '{0}:{1}:{2}'.format(self.uid, self.gid, cmd)
        result = self._cls_btcache.get(key)

        if result is None:
            # NOTE(review): 'if self.uid' treats uid 0 (root) the same as "no user"
            # and skips the setuid/setgid below -- confirm this is intentional.
            if self.uid:
                try:
                    pwrec = lookup_user(self.uid, self.gid)
                except ChNotFoundError as ex:
                    ex.annotate('(required for backtick expansion `{0}`)'.format(cmd))
                    raise ex
            else:
                pwrec = None

            def _proc_setup():
                # Drop privileges in the child process before the command runs.
                if pwrec:
                    os.setgid(pwrec.pw_gid)
                    os.setuid(pwrec.pw_uid)

            try:
                result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT,
                                                 preexec_fn=_proc_setup)
                result = result.decode()
            except Exception as ex:
                # Best-effort: an expansion failure logs and yields an empty string.
                error(ex, "Backtick expansion returned error: " + str(ex))
                result = ""

            result = result.strip().replace("\n", " ")

            if self._cls_use_btcache:
                self._cls_btcache[key] = result

        return result

    def get_public_environment(self):
        """
        Public variables are those which are exported to the application and do NOT start with an
        underscore.  All underscore names will be kept private.
        """
        return {k:v for k,v in self.expanded().items() if not (k.startswith('_') or v in (None, ''))}
================================================
FILE: chaperone/cutil/errors.py
================================================
import errno
class ChError(Exception):
    """
    Base class for chaperone errors.  Carries an optional errno and supports
    appending annotation text which is included in the string form.
    """

    # Named the same as OSError so that exception code can detect the presence
    # of an errno for reporting purposes
    errno = None
    annotation = None

    def __init__(self, message = None, errno = None):
        super().__init__(message)
        if errno is not None:
            self.errno = errno

    def annotate(self, text):
        "Append context text to this error; shown after the base message."
        self.annotation = (self.annotation + ' ' + text) if self.annotation else text

    def __str__(self):
        base = super().__str__()
        return base + ' ' + self.annotation if self.annotation else base
class ChParameterError(ChError):
    # Raised for invalid parameters/arguments; reports as EINVAL.
    errno = errno.EINVAL
class ChNotFoundError(ChError):
    # Raised when a user, group, file or similar entity is missing; reports as ENOENT.
    errno = errno.ENOENT
class ChSystemError(ChError):
    """Raised when a system-level operation (e.g. adding a user/group) fails."""
    pass
class ChProcessError(ChError):
    """
    Raised for process-related failures.  When a 'resultcode' object is given
    and no explicit errno, the resultcode's errno is adopted.
    """

    # Fix: the original named the first parameter 'Self' (capitalized), which
    # works only by accident; renamed to the conventional 'self' (PEP 8).
    def __init__(self, message = None, errno = None, resultcode = None):
        if resultcode is not None and errno is None:
            errno = resultcode.errno
        super().__init__(message, errno)
class ChVariableError(ChError):
    """Raised when an environment-variable expansion fails (e.g. the :? operator)."""
    pass
def get_errno_from_exception(ex):
    "Return the exception's 'errno' attribute, or None when it has none."
    return getattr(ex, 'errno', None)
================================================
FILE: chaperone/cutil/events.py
================================================
def IS_EVENT(e):
    "True when a keyword names an event: 'on' followed by a capitalized suffix."
    return e.startswith('on') and len(e) > 2 and e[2:3].isupper()

def SWALLOW_EVENT(*args, **kwargs):
    "No-op handler returned when no real handler is registered for an event."
    pass
class EventSource:
    """
    This is a elegant generic class to set up and handle events.

    Events are always identified by keyword arguments of the format
    onXxxxx.

       def __init__(self, **kwargs):
           events = EventSource()
           kwargs = events.add(**kwargs)

       def foo(self):
           self.events.onMiscEvent()
    """

    __events = None             # name -> handler (or multi-handler dispatcher)

    def __init__(self, **kwargs):
        self.__events = dict()
        if kwargs:
            self._exec_kwargs(self._do_add, kwargs)

    def __getattribute__(self, key):
        # Any attribute shaped like an event name resolves to its handler, or to
        # a no-op, so firing 'self.onFoo(...)' never needs a guard.
        if IS_EVENT(key):
            return self.__events.get(key, SWALLOW_EVENT)
        return object.__getattribute__(self, key)

    def _exec_kwargs(self, oper, kwargs):
        # Apply 'oper' to each onXxxx keyword and strip it from kwargs;
        # returns the kwargs which were NOT events.
        events = [e for e in kwargs.keys() if IS_EVENT(e)]
        if not events:
            return kwargs
        for e in events:
            oper(e, kwargs[e])
            del kwargs[e]
        return kwargs

    def clear(self):
        "Removes all event handlers."
        self.__events.clear()

    def reset(self, **kwargs):
        "Removes all event handlers and sets new ones."
        self.__events.clear()
        return self._exec_kwargs(self._do_add, kwargs)

    def add(self, **kwargs):
        """
        Adds one or more events:
           add(onError = handler, onExit = handler)
        Returns the kwargs not processed.
        """
        return self._exec_kwargs(self._do_add, kwargs)

    def remove(self, **kwargs):
        """
        Removes one or more events:
           remove(onError = handler, onExit = handler)
        Returns the kwargs not processed.
        """
        return self._exec_kwargs(self._do_remove, kwargs)

    def _do_add(self, name, value):
        assert callable(value)

        e = self.__events.get(name)

        # No such event, add a singleton
        if not e:
            self.__events[name] = value
            return

        # Add to multi-event dispatcher
        try:
            e.__eventlist.append(value)
            return
        except AttributeError:
            pass

        # Create multi-event dispatcher which calls each registered handler in order
        displist = [e, value]
        def dispatcher(*args, _displist = displist, **kwargs):
            for edisp in _displist:
                edisp(*args, **kwargs)
        dispatcher.__eventlist = displist
        self.__events[name] = dispatcher

    def _do_remove(self, name, value):
        e = self.__events.get(name)

        # Fix: the original tested 'if not name' here, which is always false for a
        # keyword argument name; the intent is to bail out when nothing is
        # registered (previously that case only "worked" by falling through the
        # AttributeError/KeyError handlers below).
        if not e:
            return

        try:
            e.__eventlist.remove(value)
        except ValueError:
            return # not in list, ignore
        except AttributeError:
            # 'e' is a singleton handler (no dispatcher list): remove it outright.
            try:
                del self.__events[name] # singleton
            except KeyError:
                return # no singleton, ignore
================================================
FILE: chaperone/cutil/format.py
================================================
def fstr(s):
    "Format a value for table display: None becomes '-', booleans are lowercased."
    if s is None:
        return '-'
    return str(s).lower() if isinstance(s, bool) else str(s)
class TableFormatter(list):
    """
    A quick formatting class which allows you to build a table, then output it
    neatly with columns and headings.
    """

    attributes = None
    headings = None
    _sortfield = None

    def __init__(self, *args, sort=None):
        # Each arg is an attribute name, or a (heading, attribute) pair.
        self.attributes = tuple(a[1] if isinstance(a, tuple) else a for a in args)
        self.headings = tuple(a[0] if isinstance(a, tuple) else a for a in args)
        self._hsize = [len(h) for h in self.headings]
        if sort in self.attributes:
            self._sortfield = self.attributes.index(sort)

    def add_rows(self, rows):
        # Record each row as a tuple of attribute values, widening columns as needed.
        for r in rows:
            values = tuple(getattr(r, attr, None) for attr in self.attributes)
            for i, v in enumerate(values):
                self._hsize[i] = max(self._hsize[i], len(fstr(v)))
            self.append(values)

    def get_formatted_data(self):
        # Optionally sort, then emit headings, a divider row, and the data rows,
        # each column left-justified to its computed width.
        if self._sortfield is None:
            rows = self
        else:
            rows = sorted(self, key=lambda r: r[self._sortfield])
        widths = self._hsize
        cols = range(len(widths))
        sep = " "
        dividers = tuple("-" * widths[i] for i in cols)
        all_rows = [self.headings, dividers] + list(rows)
        return "\n".join(sep.join(fstr(row[i]).ljust(widths[i]) for i in cols)
                         for row in all_rows)
================================================
FILE: chaperone/cutil/logging.py
================================================
import logging
import os
import sys
import traceback
from time import strftime
from logging.handlers import SysLogHandler
from functools import partial
import chaperone.cutil.syslog_info as syslog_info
logger = logging.getLogger(__name__)
_root_logger = logging.getLogger(None)
_stderr_handler = logging.StreamHandler()
_cur_level = logging.NOTSET
_format = logging.Formatter()
_stderr_handler.setFormatter(_format)
_root_logger.addHandler(_stderr_handler)
def set_log_level(lev):
    """
    Set the module logger's level, translating a syslog-style level via
    syslog_info.syslog_to_python_lev().  The translated value is remembered in
    _cur_level so set_custom_handler() can restore it later.
    """
    global _cur_level
    _cur_level = syslog_info.syslog_to_python_lev(lev)
    logger.setLevel(_cur_level)
def set_custom_handler(handler, enable = True):
    """
    Install (enable=True) or remove (enable=False) a custom root-logger handler
    in place of the default stderr handler.  While a custom handler is active
    the logger runs at DEBUG; removal restores the previously configured level.
    """
    if enable:
        _root_logger.addHandler(handler)
        _root_logger.removeHandler(_stderr_handler)
        logger.setLevel(logging.DEBUG)
    else:
        _root_logger.removeHandler(handler)
        _root_logger.addHandler(_stderr_handler)
        logger.setLevel(_cur_level)
def _versatile_logprint(delegate, fmt, *args,
                        facility=None, exceptions=False,
                        program=None, pid=None, **kwargs):
    """
    In addition to standard log formatting, the following two special cases are
    covered:

    1.  If there are no formatting characters (%), then simply concatenate repr() of *args
    2.  If there are '{' formatting arguments, then apply new-style .format using arguments
        provided.

    Additionally, you can pass an exception as the first argument:
    1.  If no other arguments are provided, then the exception message will be the
        log item.
    2.  A traceback will be printed in the case where the logger priority level is set to debug.
    """
    if isinstance(fmt, Exception):
        ex = fmt
        args = list(args)
        if len(args) == 0:
            # NOTE(review): this wraps the message in a list, so the record renders
            # as "['message']"; 'fmt = str(ex)' may have been intended.  Confirm
            # before changing, as output format may be relied upon.
            fmt = [str(ex)]
        else:
            fmt = args.pop(0)
    else:
        ex = None

    if facility is not None or program or pid:
        # Extra record attributes consumed by the syslog-style handlers.
        extra = kwargs['extra'] = {}
        if facility:
            extra['_facility'] = facility
        if program:
            extra['program_name'] = str(program)
        if pid:
            extra['program_pid'] = str(pid)

    if ex and (exceptions or logger.level == logging.DEBUG): # use python level here
        trace = "\n" + traceback.format_exc()
    else:
        trace = ""

    if not len(args):
        # No args: pass fmt through untouched (note: trace is not appended here).
        delegate(fmt, **kwargs)
    elif '%' not in fmt:
        if '{' in fmt:
            # New-style formatting requested via '{' placeholders.
            delegate('%s', fmt.format(*args) + trace, **kwargs)
        else:
            # No format characters at all: concatenate repr() of each argument.
            delegate('%s', " ".join([repr(a) for a in args]) + trace, **kwargs)
    else:
        # Classic %-style formatting passed straight to the stdlib logger.
        delegate(fmt, *args, **kwargs)
# Module-level logging entry points.  Each routes through _versatile_logprint
# bound to the corresponding stdlib logger method; debug() requests tracebacks
# for exception arguments (exceptions=True).
warn = partial(_versatile_logprint, logger.warning)
info = partial(_versatile_logprint, logger.info)
debug = partial(_versatile_logprint, logger.debug, exceptions=True)
error = partial(_versatile_logprint, logger.error)
================================================
FILE: chaperone/cutil/misc.py
================================================
import os
import pwd
import grp
import copy
import signal
import subprocess
from chaperone.cutil.errors import ChNotFoundError, ChParameterError, ChSystemError
class objectplus:
    """
    An object which provides some general-purpose useful patterns.
    """

    _cls_singleton = None       # lazily-created shared instance

    @classmethod
    def sharedInstance(cls):
        "Return a singleton object for this class."
        instance = cls._cls_singleton
        if not instance:
            instance = cls._cls_singleton = cls()
        return instance
class lazydict(dict):
    """
    A dict subclass providing lazy (callable) defaults, nested smart merging,
    and deep copying.
    """

    __slots__ = () # create no __dict__ overhead for a pure dict subclass

    def __init__(self, *args):
        """
        Allow a series of iterables as an initializer.
        """
        super().__init__()
        for a in args:
            self.update(a)

    def get(self, key, default = None):
        """
        A version of get() that accepts lazy defaults.  You can provide a callable which
        will be invoked only if necessary.
        """
        if key in self:
            return self[key]
        return default() if callable(default) else default

    def setdefault(self, key, default = None):
        """
        A version of setdefault that works the way it should, by having a lambda that is
        executed only in the case where the item does not exist.
        """
        if key in self:
            return self[key]
        self[key] = value = default() if callable(default) else default
        return value

    def smart_update(self, key, theirs):
        """
        Smart update replaces values in our dictionary with values from the other.  However,
        in the case where both dictionaries contain sub-dictionaries, the sub-dictionaries
        are updated rather than replaced.  (This makes things like env_set inheritance easier.)
        """
        ours = super().get(key)
        if ours is None:
            # Bug fix: the original did "ours[key] = theirs" here, which always
            # raises TypeError because 'ours' is None.  Store under our own key.
            self[key] = theirs
            return
        for k,v in theirs.items():
            oursub = ours.get(k)
            if isinstance(oursub, dict) and isinstance(v, dict):
                oursub.update(v)
            else:
                ours[k] = v

    def deepcopy(self):
        "Return a deep copy of this dictionary."
        return copy.deepcopy(self)
def maybe_remove(fn, strict = False):
    """
    Tries to remove a file but ignores a FileNotFoundError or Permission error.  If an
    exception would have been raised, returns the exception, otherwise None.

    If "strict" then the file must either be missing, or successfully removed.  Other
    errors will still raise exceptions.
    """
    # In strict mode only a missing file is tolerated; permission problems propagate.
    ignorable = (FileNotFoundError,) if strict else (FileNotFoundError, PermissionError)
    try:
        os.remove(fn)
    except ignorable as ex:
        return ex
    return None
def is_exe(p):
    "True when 'p' is a regular file carrying execute permission."
    return os.access(p, os.X_OK) and os.path.isfile(p)
def executable_path(fn, env = os.environ):
    """
    Returns the fully qualified pathname to an executable.  The PATH is searched, and
    any tilde expansions are performed.  Exceptions are raised as usual.
    """
    candidate = os.path.expanduser(fn)
    dirpart, prog = os.path.split(candidate)
    searchpath = env.get("PATH")
    # Bare program names are resolved against PATH; anything with a directory
    # component is used as-is.
    if not dirpart and searchpath:
        for d in searchpath.split(os.pathsep):
            trial = os.path.join(d, prog)
            if is_exe(trial):
                candidate = trial
                break
    if not os.path.isfile(candidate):
        raise FileNotFoundError(fn)
    if not os.access(candidate, os.X_OK):
        raise PermissionError(fn)
    return candidate
# Cache of (uid, gid) -> synthesized pw records (only gid-overridden lookups are cached).
_lookup_user_cache = {}

def lookup_user(uid, gid = None):
    """
    Looks up a user using either a name or integer user value.  If a group is
    specified, then set the group explicitly in the returned pwrec.
    """
    cache_key = (uid, gid)
    cached = _lookup_user_cache.get(cache_key)
    if cached:
        return cached

    # Accept either a numeric UID or a user name.
    try:
        numeric = int(uid)
    except ValueError:
        numeric = None

    try:
        pwrec = pwd.getpwnam(uid) if numeric is None else pwd.getpwuid(numeric)
    except KeyError:
        raise ChNotFoundError("specified user ('{0}') does not exist".format(uid))

    if gid is None:
        return pwrec

    # Build a copy of the pw record with the group replaced, then cache it.
    retval = _lookup_user_cache[cache_key] = type(pwrec)(
        (pwrec.pw_name,
         pwrec.pw_passwd,
         pwrec.pw_uid,
         lookup_group(gid, True),
         pwrec.pw_gecos,
         pwrec.pw_dir,
         pwrec.pw_shell)
    )
    return retval
def lookup_group(gid, optional = False):
    """
    Looks up a group using either a name or integer value.

    If 'optional' is true, then does not require that the group exist, and always
    returns the numeric value of 'gid', or the mapping from 'gid' if it is a name.
    Otherwise returns the group record.
    """
    # Accept either a numeric GID or a group name.
    try:
        numeric = int(gid)
    except ValueError:
        numeric = None

    if numeric is not None:
        if optional:
            return numeric
        finder = grp.getgrgid
    else:
        finder = grp.getgrnam

    try:
        grrec = finder(gid)
    except KeyError:
        raise ChNotFoundError("specified group ('{0}') does not exist".format(gid))
    return grrec.gr_gid if optional else grrec
def groupadd(name, gid):
    """
    Adds a group to the system with the specified name and GID.

    Tries the GNU 'groupadd' utility first; when that binary is not installed,
    falls back to busybox-style 'addgroup'.  Raises ChSystemError on failure.
    """
    # First, try the gnu tools way
    try:
        if subprocess.call(['groupadd', '-g', str(gid), name]) == 0:
            return
        raise ChSystemError("Unable to add a group with name={0} and GID={1}".format(name, gid))
    except FileNotFoundError:
        pass

    # Now, try using 'addgroup' with the busybox syntax
    # NOTE(review): shell=True with interpolated name/gid; values come from
    # configuration, but verify they are never attacker-controlled.
    if subprocess.call("addgroup -g {0} {1}".format(gid, name), shell=True) == 0:
        return

    raise ChSystemError("Unable to add a group with name={0} and GID={1}".format(name, gid))
def useradd(name, uid = None, gid = None, home = None):
    """
    Adds a user to the system given an optional UID and numeric GID.

    Tries the GNU 'useradd' utility first; when missing, falls back to
    busybox-style 'adduser'.  Raises ChSystemError if both fail, including
    the attempted command lines in the message.
    """
    ucmd = ['useradd', '--no-create-home']
    if uid is not None:
        ucmd += ['-u', str(uid)]
    if gid is not None:
        ucmd += ['-g', str(gid)]
    if home is not None:
        ucmd += ['--home-dir', home]
    ucmd += [name]

    tried = " ".join(ucmd)      # remembered for the failure message

    # try gnu tools first
    try:
        if subprocess.call(ucmd) == 0:
            return
        raise ChSystemError("Error while trying to add user: {0} ({1})".format(name, tried))
    except FileNotFoundError:
        pass

    # Build the equivalent busybox-style command as a shell string.
    # NOTE(review): shell=True with interpolated values; confirm inputs are trusted.
    ucmd = "adduser -D -H"
    if uid is not None:
        ucmd += " -u " + str(uid)
    if gid is not None:
        ucmd += " -G " + str(gid)
    if home is not None:
        ucmd += " -h '{0}'".format(home)
    ucmd += " " + name

    tried += "\n" + ucmd

    # try busybox-style adduser
    if subprocess.call(ucmd, shell=True) == 0:
        return

    raise ChSystemError("Error while trying to add user: {0}\ntried:\n{1}".format(name, tried))
def userdel(name):
    """
    Removes a user from the system.

    Tries the GNU 'userdel' utility first, then busybox-style 'deluser'.
    Raises ChSystemError when neither succeeds.
    """
    del_ex = ChSystemError("Error while trying to remove user: {0}".format(name))

    # try gnu tools first
    try:
        if subprocess.call(['userdel', name]) == 0:
            return
        raise del_ex
    except FileNotFoundError:
        pass

    # try busybox-style deluser
    if subprocess.call("deluser " + name, shell=True) == 0:
        return

    raise del_ex
# User Directories Directory cache
_udd = None

def get_user_directories_directory():
    """
    Determines the directory where user directories are stored.  This is actually
    not that easy, and different systems have different ways of doing it.  So,
    we try adding a user called '_chaptest_' just to see where the directory goes,
    and use that.
    """
    global _udd

    if _udd is not None:
        return _udd

    try:
        # Create a throwaway account, note where its home landed, then remove it.
        testuser = "_chaptest_"
        useradd(testuser)
        userinfo = lookup_user(testuser)
        _udd = os.path.dirname(userinfo.pw_dir)
        userdel(testuser)
    except Exception:
        _udd = "/" # default if any error occurs

    return _udd
def maybe_create_user(user, uid = None, gid = None, using_file = None, default_home = None):
    """
    If the user does not exist, then create one with the given name, and optionally
    the specified uid.  If a gid is specified, create a group with the same name as the
    user, and the given gid.

    If the user does exist, then confirm that the uid and gid match, if either
    or both are specified.

    If 'using_file' is specified, then uid/gid are ignored and replaced with the uid/gid
    of the specified file.  The file must exist and be readable.

    Raises ChParameterError on mismatched or malformed uid/gid values.
    """
    # Derive uid/gid from the ownership of a reference file, if one was given.
    if using_file:
        stat = os.stat(using_file)
        if uid is None:
            uid = stat.st_uid
        if gid is None:
            gid = stat.st_gid

    # uid must be numeric (gid, by contrast, may be a symbolic group name)
    if uid is not None:
        try:
            uid = int(uid)
        except ValueError:
            raise ChParameterError("Specified UID is not a number: {0}".format(uid))

    try:
        pwrec = lookup_user(user)
    except ChNotFoundError:
        pwrec = None

    # If the user exists, we do nothing, but we do validate that their UID and GID
    # exist.
    if pwrec:
        if uid is not None and uid != pwrec.pw_uid:
            raise ChParameterError("User {0} exists, but does not have expected UID={1}".format(user, uid))
        if gid is not None and lookup_group(gid).gr_gid != pwrec.pw_gid:
            raise ChParameterError("User {0} exists, but does not have expected GID={1}".format(user, gid))
        return

    # Now, we need to create the user, and optionally the group.
    if gid is not None:
        create_group = False
        try:
            newgid = lookup_group(gid).gr_name # always use name
        except ChNotFoundError:
            create_group = True
            try:
                newgid = int(gid) # must be a number at this point
            except ValueError:
                # We don't report the numeric error, because we *know* there is no such group
                # and we won't create a symbolic group with a randomly-created number.
                raise ChParameterError("Group does not exist: {0}".format(gid))
        if create_group:
            # Group is named after the user, created with the requested GID.
            groupadd(user, newgid)
            newgid = lookup_group(user).gr_name
        gid = newgid            # always will be the group name

    # Test to see if the user directory itself already exists, which should be the case.
    # If it doesn't, then use the default, if provided.
    home = None
    if default_home:
        udd = get_user_directories_directory()
        if not os.path.exists(os.path.join(udd, user)):
            home = default_home

    useradd(user, uid, gid, home)
def _assure_dir_for(path, pwrec, gid):
    # Recursively create 'path' and any missing parents (parent first).
    # 'gid' only signals whether group-write modes are wanted; the actual
    # ownership always comes from 'pwrec' when one is supplied.
    if os.path.exists(path):
        return
    _assure_dir_for(os.path.dirname(path), pwrec, gid)
    os.mkdir(path, 0o775 if gid else 0o755)
    if pwrec:
        os.chown(path, pwrec.pw_uid, pwrec.pw_gid if gid else -1)
def open_foruser(filename, mode = 'r', uid = None, gid = None, exists_ok = True):
    """
    Similar to open(), but assures all directories exist (similar to os.makedirs)
    and assures that all created objects are writable by the given user, and
    optionally by the given group (causing mode to be set accordingly).
    """
    # NOTE(review): 'exists_ok' is accepted but never used in this body.
    # NOTE(review): the truthiness test means uid=0 (root) behaves like
    # uid=None (no ownership changes) -- confirm this is intended.
    if uid:
        pwrec = lookup_user(uid, gid)
    else:
        pwrec = None
        gid = None      # no user means group modes are irrelevant too
    # Resolve symlinks so ownership and modes apply to the real target.
    rp = os.path.realpath(filename)
    _assure_dir_for(os.path.dirname(rp), pwrec, gid)
    fobj = open(rp, mode)
    if pwrec:
        os.chown(rp, pwrec.pw_uid, pwrec.pw_gid if gid else -1)
        # group-writable only when a group was requested
        os.chmod(rp, 0o644 if not gid else 0o664)
    return fobj
# Map signal numbers to their canonical "SIG*" names.  Iteration is sorted by
# name, so for aliased numbers (e.g. SIGCHLD/SIGCLD) the alphabetically-last
# name wins, exactly as before.
SIGDICT = {number: name
           for name, number in sorted(signal.__dict__.items())
           if name.startswith('SIG') and not name.startswith('SIG_')}
def remove_for_recreate(filename):
    """
    Indicates the intention to recreate the file at the given path.  This function
    can be used in advance to assure that:
      a) any existing file is gone, and
      b) full permissions and directories exist for creation of a new file in its place
    """
    # strict removal -- failures should propagate rather than be ignored
    # (the previous code captured the return value but never used it)
    maybe_remove(filename, strict = True)
    # Create then delete the file so that any missing parent directories are
    # created with the proper default ownership and modes.
    open_foruser(filename, mode='w').close()
    os.remove(filename)
def get_signal_name(signum):
    """Return the symbolic name for a signal number, e.g. 9 -> 'SIGKILL';
    unknown numbers are rendered as 'SIG<n>'."""
    try:
        return SIGDICT[signum]
    except KeyError:
        return "SIG%d" % signum
def get_signal_number(signame):
    """
    Convert a symbolic signal name (e.g. "SIGTERM", case-insensitive) or a
    decimal string into a signal number.

    Raises ChParameterError when the argument is neither a known signal
    name nor a number.
    """
    upper = signame.upper()
    num = None
    if upper.startswith('SIG') and not upper.startswith('SIG_'):
        num = getattr(signal, upper, None)
    else:
        try:
            num = int(signame)
        except ValueError:
            pass
    if num is None:
        raise ChParameterError("Invalid signal specifier: " + str(signame))
    return num
================================================
FILE: chaperone/cutil/notify.py
================================================
import asyncio
import socket
import os
import re
from chaperone.cutil.servers import Server, ServerProtocol
from chaperone.cutil.misc import maybe_remove
from chaperone.cutil.logging import debug
_RE_NOTIFY = re.compile(r'^([A-Za-z]+)=(.+)$')
class NotifyProtocol(ServerProtocol):
    """
    Datagram protocol which parses sd_notify-style "NAME=value" packets and
    forwards each recognized assignment to the owner's onNotify event.
    """

    def datagram_received(self, data, addr):
        for line in data.decode().split("\n"):
            match = _RE_NOTIFY.match(line)
            if match is None:
                continue        # silently skip malformed lines
            self.events.onNotify(self.owner, match.group(1), match.group(2))
class NotifyListener(Server):
    """
    A unix-datagram server which listens for sd_notify-style status packets.

    The socket name may be a filesystem path or an abstract-namespace name
    beginning with '@' (translated to a leading NUL byte, the Linux abstract
    socket convention used for NOTIFY_SOCKET).
    """

    def _create_server(self):
        loop = asyncio.get_event_loop()
        # The endpoint is created unbound/unconnected; binding (server) or
        # connecting (client) happens later in server_running().
        return loop.create_datagram_endpoint(NotifyProtocol.buildProtocol(self), family=socket.AF_UNIX)

    @property
    def is_client(self):
        # Overridden by NotifyClient.
        return False

    @property
    def socket_name(self):
        # The name exactly as configured (possibly with a leading '@').
        return self._socket_name

    @property
    def bind_name(self):
        # NOTE(review): replace() substitutes *every* '@', not only the
        # leading one -- assumes names contain at most one '@'.
        if self._socket_name.startswith('@'):
            return self._socket_name.replace('@', "\0")
        return self._socket_name

    def __init__(self, socket_name, **kwargs):
        super().__init__(**kwargs)
        self._socket_name = socket_name

    @asyncio.coroutine
    def send(self, message):
        # Lazily start the endpoint on first send.
        if not self.server:
            yield from self.run()
        self.server[0].sendto(message.encode(), self.bind_name)

    @asyncio.coroutine
    def server_running(self):
        (transport, protocol) = self.server
        bindname = self.bind_name
        # Clients connect to an existing socket
        if self.is_client:
            loop = asyncio.get_event_loop()
            yield from loop.sock_connect(transport._sock, bindname)
            return
        # Servers set up a binding to a new one
        transport._sock.bind(bindname)
        if not bindname.startswith("\0"):       # if not abstract socket
            # world-writable so any service may send notifications
            os.chmod(bindname, 0o777)

    def close(self):
        super().close()
        # Remove the filesystem socket node unless we are the client side or
        # the socket lives in the abstract namespace (nothing on disk).
        if not (self.is_client or self._socket_name.startswith('@')):
            maybe_remove(self._socket_name)
# A lot like a socket server; there are only subtle differences, so the client
# simply flips is_client and inherits everything else from NotifyListener.
class NotifyClient(NotifyListener):
    """Connecting (sending) side of the notify socket; see NotifyListener."""

    @property
    def is_client(self):
        return True
# A sink for specific notify messages.  Can operate with or without a client,
# and has multiple levels of support.
class NotifySink:

    # Support levels (indexes into _LEVS):
    #   0: nothing
    #   1: only READY notifications
    #   2: READY and STATUS
    #   3: adds ERRNO and STOPPING messages
    NSLEV = 3                   # number of the highest predefined level

    _LEVS = [
        set(),
        {'READY'},
        {'READY', 'STATUS'},
        {'READY', 'STATUS', 'ERRNO', 'STOPPING'},
    ]

    _client = None              # NotifyClient once connect() succeeds
    _lev = None                 # set of currently enabled message types
    _sent = None                # message types sent at least once

    def __init__(self):
        self.level = 99         # out-of-range values clamp to the highest level
        self._sent = set()

    @property
    def level(self):
        """The current level number, or None if enable()/disable() customized the set."""
        try:
            return self._LEVS.index(self._lev)
        except ValueError:
            return None

    @level.setter
    def level(self, val):
        # Clamp to the valid range.  (The previous test used 'val > len(_LEVS)',
        # which let val == len(_LEVS) slip through and raise IndexError, and
        # negative values silently selected the highest level.)
        if val >= len(self._LEVS):
            val = len(self._LEVS) - 1
        elif val < 0:
            val = 0
        self._lev = self._LEVS[val].copy()

    def enable(self, ntype):
        "Enable an individual message type (e.g. 'MAINPID')."
        self._lev.add(ntype.upper())

    def disable(self, ntype):
        "Disable an individual message type."
        self._lev.discard(ntype.upper())

    def error(self, val):
        # Only the first ERRNO is ever reported.
        if not self.sent("ERRNO"):
            self.send("ERRNO", int(val))

    def stopping(self):
        if not self.sent("STOPPING"):
            self.send("STOPPING", 1)

    def ready(self):
        if not self.sent("READY"):
            self.send("READY", 1)

    def status(self, statmsg):
        self.send("STATUS", statmsg)

    def mainpid(self):
        # Note: MAINPID is not in any predefined level, so this is a no-op
        # unless enable("MAINPID") has been called.
        self.send("MAINPID", os.getpid())

    def sent(self, name):
        "True if the given message type has been sent at least once."
        return name in self._sent

    def send(self, name, val):
        """Queue 'name=val' for transmission if that message type is enabled."""
        if name not in self._lev:
            return
        self._sent.add(name)
        if self._client:
            debug("queueing '{0}={1}' to notify socket '{2}'".format(name, val, self._client.socket_name))
            # asyncio.async() was renamed to ensure_future() in Python 3.4.4,
            # and 'async' became a keyword in 3.7, so the legacy name must be
            # fetched by string to keep this module importable everywhere.
            schedule = getattr(asyncio, "ensure_future", None) or getattr(asyncio, "async")
            schedule(self._do_send("{0}={1}".format(name, val)))

    @asyncio.coroutine
    def _do_send(self, msg):
        if self._client:
            yield from self._client.send(msg)

    @asyncio.coroutine
    def connect(self, socket = None):
        """
        Connects to the notify socket.  However, if we can't, it's not considered an error.
        We just return False.
        """
        self.close()

        if socket is None:
            if "NOTIFY_SOCKET" not in os.environ:
                return False
            socket = os.environ["NOTIFY_SOCKET"]

        self._client = NotifyClient(socket,
                                    onClose = lambda which,exc: self.close(),
                                    onError = lambda which,exc: debug("{0} error, notifications disabled".format(socket)))

        try:
            yield from self._client.run()
        except OSError as ex:
            # (fixed unbalanced quote in the original message)
            debug("could not connect to notify socket '{0}' ({1})".format(socket, ex))
            self.close()
            return False

        return True

    def close(self):
        if not self._client:
            return
        self._client.close()
        self._client = None
================================================
FILE: chaperone/cutil/patches.py
================================================
import inspect
import importlib
# This module contains patches to Python.  A patch wouldn't appear here if it
# didn't have major impact, and each is constructed and researched carefully.
# Avoid if possible, please.

# Patch routine for patching classes.  Ignores ALL exceptions, since there
# could be any number of reasons why a distribution may not allow such
# patching (though most do).  Exact source code is compared, so there is
# little chance of an error in deciding whether the patch is relevant.
def PATCH_CLASS(module, clsname, member, oldstr, newfunc):
    try:
        target = getattr(importlib.import_module(module), clsname)
        current_source = ''.join(inspect.getsourcelines(getattr(target, member))[0])
        # Only patch when the installed source matches character-for-character.
        if current_source == oldstr:
            setattr(target, member, newfunc)
    except Exception:
        pass
# PATCH for Issue23140: https://bugs.python.org/issue23140
# WHERE asyncio
# IMPACT Eliminates exceptions during process termination
# WHY There is no workround except upgrading to Python 3.4.3, which dramatically affects
# distro compatibility. Mostly, this benefits Ubuntu 14.04LTS.
OLD_process_exited = """ def process_exited(self):
# wake up futures waiting for wait()
returncode = self._transport.get_returncode()
while self._waiters:
waiter = self._waiters.popleft()
waiter.set_result(returncode)
"""
def NEW_process_exited(self):
    # wake up futures waiting for wait()
    returncode = self._transport.get_returncode()
    while self._waiters:
        waiter = self._waiters.popleft()
        # The actual fix: a waiter may already be cancelled during teardown,
        # and set_result() on a cancelled future raises InvalidStateError.
        if not waiter.cancelled():
            waiter.set_result(returncode)

# Apply the patch only if the installed implementation is the known-buggy one.
PATCH_CLASS('asyncio.subprocess', 'SubprocessStreamProtocol', 'process_exited', OLD_process_exited, NEW_process_exited)
================================================
FILE: chaperone/cutil/proc.py
================================================
import os
from chaperone.cutil.misc import get_signal_name
class ProcStatus(int):
    """
    An int subclass wrapping a waitpid()-style status word with convenient
    accessors.  A non-numeric value may also be given, in which case the
    integer value is 0 and the original text is kept as an "other error".
    """

    _other_error = None         # non-numeric status text, if any
    _errno = None               # explicitly-assigned errno, if any

    def __new__(cls, val):
        try:
            intval = int(val)
        except ValueError:
            # Not a number: remember the text and behave as status 0.
            rval = int.__new__(cls, 0)
            rval._other_error = str(val)
            return rval
        return int.__new__(cls, intval)

    @property
    def exited(self):
        "True if the process terminated with exit()."
        return os.WIFEXITED(self)

    @property
    def signaled(self):
        "True if the process was terminated by a signal."
        return os.WIFSIGNALED(self)

    @property
    def stopped(self):
        "True if the process was stopped (e.g. SIGSTOP)."
        return os.WIFSTOPPED(self)

    @property
    def continued(self):
        "True if the process was resumed by SIGCONT."
        return os.WIFCONTINUED(self)

    @property
    def exit_status(self):
        "The exit() status when the process exited, otherwise None."
        status = (os.WIFEXITED(self) or None) and os.WEXITSTATUS(self)
        if not status and self._errno:
            return 1            # default to exit_status = 1 in the case of an errno value
        return status

    @property
    def normal_exit(self):
        "True only for a clean exit(0) with no recorded error text."
        return self.exit_status == 0 and not self._other_error

    @property
    def errno(self):
        "Map situation to an errno, even if contrived, unless one was provided."
        if self._errno is not None:
            return self._errno
        if self.signal:
            return 4            # EINTR
        return 8                # ENOEXEC

    @errno.setter
    def errno(self, val):
        self._errno = val

    @property
    def exit_message(self):
        # NOTE(review): os.strerror() is applied to the *exit status*, which
        # matches errno numbers only by convention -- confirm intended.
        es = self.exit_status
        if es is not None:
            return os.strerror(es)
        return None

    @property
    def signal(self):
        "The stop or termination signal number, or None."
        if os.WIFSTOPPED(self):
            return os.WSTOPSIG(self)
        if os.WIFSIGNALED(self):
            return os.WTERMSIG(self)
        return None

    @property
    def briefly(self):
        "A short human-readable summary such as 'SIGTERM' or 'exit(1)'."
        if self.signaled or self.stopped:
            return get_signal_name(self.signal)
        if self.exited:
            return "exit({0})".format(self.exit_status)
        return '?'

    def __format__(self, spec):
        if spec:
            return int.__format__(self, spec)
        msg = "<ProcStatus"
        if self._errno:
            msg += " errno={0}".format(self._errno)
        if self.exited:
            msg += " exit_status={0}".format(self.exit_status)
        if self.signaled:
            msg += " signal=%d" % self.signal
        if self.stopped:
            # (fixed "stoppped" typo in the emitted text)
            msg += " stopped=%d" % self.signal
        return msg + ">"
================================================
FILE: chaperone/cutil/servers.py
================================================
import asyncio
from functools import partial
from chaperone.cutil.events import EventSource
class ServerProtocol(asyncio.Protocol):
    """
    Common protocol base used by chaperone servers.

    Keyword arguments given to buildProtocol() become attributes on each
    protocol instance, and connection lifecycle events are relayed to the
    owner's event source.
    """

    @classmethod
    def buildProtocol(cls, owner, **kwargs):
        """Return a zero-argument factory constructing this protocol."""
        return partial(cls, owner, **kwargs)

    def __init__(self, owner, **kwargs):
        super().__init__()
        self.owner = owner
        self.events = owner.events
        # Copy keywords directly into attributes so servers can pass
        # arbitrary information to their protocols.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def connection_made(self, transport):
        self.transport = transport
        self.events.onConnection(self.owner)

    def error_received(self, exc):
        self.events.onError(self.owner, exc)
        self.events.onClose(self.owner, exc)

    def connection_lost(self, exc):
        self.events.onClose(self.owner, exc)
class Server:
    """
    Base class for chaperone's asyncio servers.  Subclasses implement
    _create_server() and may override server_running() for post-start setup.
    """

    server = None               # endpoint, or a (transport, protocol) tuple

    def __init__(self, **kwargs):
        # Keyword arguments define event handlers (onConnection, onClose, ...).
        self.events = EventSource(**kwargs)

    @asyncio.coroutine
    def run(self):
        self.loop = asyncio.get_event_loop()
        self.server = yield from self._create_server()
        yield from self.server_running()

    @asyncio.coroutine
    def server_running(self):
        # Hook for subclasses; default does nothing.
        pass

    def close(self):
        endpoint = self.server
        if not endpoint:
            return
        if isinstance(endpoint, tuple):
            endpoint = endpoint[0]      # datagram endpoints are (transport, protocol)
        endpoint.close()
================================================
FILE: chaperone/cutil/syslog.py
================================================
import asyncio
import socket
import os
import re
import sys
import logging
from time import strftime
from functools import partial
from chaperone.cutil.logging import info, warn, debug, set_custom_handler
from chaperone.cutil.misc import lazydict, maybe_remove, remove_for_recreate
from chaperone.cutil.servers import ServerProtocol, Server
from chaperone.cutil.syslog_handlers import LogOutput
import chaperone.cutil.syslog_info as syslog_info
# Matches a single selector piece: an optional '!' prefix, then one of
#   /regex/ | [prog] | comma-separated-facilities | '*'
# followed by '.', an optional '!'/'=' prefix, and a priority keyword or '*'.
_RE_SPEC = re.compile(r'^(?P<fpfx>!?)(?:/(?P<regex>.+)/|\[(?P<prog>.+)\]|(?P<fac>[,*0-9a-zA-Z]+))\.(?P<pfx>!?=?)(?P<pri>[*a-zA-Z]+)$')

# Separates multiple selector pieces: "a.b ; c.d".
_RE_SPECSEP = re.compile(r' *; *')

# The following is based on RFC3164 with some tweaks to deal with anomalies.
# One anomaly worth mentioning is that some log sources append newlines (or whitespace) to their messages,
# or include embedded newlines.  Here is a good JIRA discussion about how Apache dealt with this, including some background:
#    https://issues.apache.org/jira/browse/LOG4NET-370
# At present we merely DISCARD whitespace from the end of messages, but don't attempt to break multiple
# messages into separate lines so that UDP syslog destinations don't have to deal with packet reordering,
# which is a real pain for some people, with an example here:
#    https://redmine.pfsense.org/issues/1938
_RE_RFC3164 = re.compile(r'^<(?P<pri>\d+)>(?P<date>\w{3} [ 0-9][0-9] \d\d:\d\d:\d\d) (?:(?P<host>[^ :\[]+) )?(?P<tag>[^ :\[]+)(?P<rest>[:\[ ].+?)\s*$', re.DOTALL)
class _syslog_spec_matcher:
    """
    This class supports matching a classic syslog.conf spec:
        <facility>.<priority>
    where:
        facility is a list of comma-separated facilities, or '*'
        priority is a priority (meaning >=priority) or =priority (meaning exactly that priority)
        either may be preceded by '!' to invert the match.

    And the extensions:
        /regex/.<priority>   where regex will match the entire message
        [prog].<priority>    where prog will match the program specifier, if any

    One or more of the above can be combined, separated by semicolons.

    Note that the syslogd semantics are hard to actually figure out, even if you scour the web.  So, here are
    some rules.

    The semicolon "joins" constraints by combining all negative constraints (those which omit facilities or priorities)
    and positive constraints separately.  The result will be logged ONLY if all the positive constraints are true
    and all of the negative constraints are false!

    So,
        *.!emerg            LOGS NOTHING (missing inclusions)
        *.*;*.!emerg        logs everything but .emerg
        *.info;![cron].*    logs all info or higher, but omits everything from program "cron"
        *.*;![cron].!=info  Omits the info messages from any program BUT cron
        [cron].*;*.!info    includes all cron messages except those of info and above

    More specifically:
        *.info      Includes info through emergency (6->0) but not Debug
        *.!info     Excludes info through emergency but does not exclude debug
        *.=info     Includes just info itself
        *.!=info    Excludes everything BUT info
        !f.!=info   Excludes everything BUT info from everything BUT f

    Why all this bother?
    1.  Basic cases are pretty easy to read and understand.
    2.  Negations can be understood if documented, and are useful.
    3.  I don't want to introduce a completely new syntax.
    3.  Somewhere out here, there is some nerdy OCD guy who will say "But wait, your selector format is so CLOSE
        to the syslog format that you MUST support it with the same semantics or you're going to alienate [me]."
        Just nipping that in the bud.
    """

    __slots__ = ('_regexes', '_match', 'debugexpr', 'selector')

    def __init__(self, selector, minimum_priority = None):
        self.selector = selector
        self._compile(minimum_priority)

    def reset_minimum_priority(self, minimum_priority = None):
        """
        Recompile the spec using a new minimum priority.  minimum_priority may be None to eliminate
        any such minimum from having an effect and reverting to the exact selectors.
        """
        self._compile(minimum_priority)

    def _compile(self, minimum_priority):
        # Translate each ';'-separated piece into a Python boolean expression,
        # then combine the negative and positive pieces as documented above.
        self._regexes = []
        pieces = _RE_SPECSEP.split(self.selector)
        # Build the list of negations and positive expressions
        neg = list()
        pos = list()
        for p in pieces:
            self._init_spec(p, neg, pos, minimum_priority)
        if not pos:
            self._buildex("False")
        elif not neg:
            self._buildex(" or ".join(pos))
        else:
            self._buildex("(" + (" and ".join(neg)) + ") and (" + (" or ".join(pos)) + ")")

    def _buildex(self, expr):
        # Perform some quick peephole optimization, then compile.
        # NOTE: eval() runs only on text assembled from validated selector
        # pieces in the trusted configuration -- never on log traffic itself.
        nexpr = expr.replace("True and ", "").replace(" and True", "")
        nexpr = nexpr.replace("not True", "False").replace(" and ((True))", "")
        nexpr = nexpr.replace("False or ", "").replace(" or False", "")
        self.debugexpr = nexpr
        # Lambda arguments: s=self, p=priority, f=facility, g=program, buf=message.
        self._match = eval("lambda s,p,f,g,buf: " + nexpr)

    def _init_spec(self, spec, neg, pos, minpri):
        # Parse one selector piece and append its compiled sub-expression to
        # either 'neg' (exclusions) or 'pos' (inclusions).
        match = _RE_SPEC.match(spec)
        if not match:
            raise Exception("Invalid log spec syntax: " + spec)
        # Compile an expression to match
        gdict = match.groupdict()
        if gdict['regex'] is not None:
            self._regexes.append(re.compile(gdict['regex'], re.IGNORECASE))
            c1 = 'bool(s._regexes[%d].search(buf))' % (len(self._regexes) - 1)
        elif gdict['prog'] is not None:
            c1 = '(g and "%s" == g.lower())' % gdict['prog'].lower()
        elif gdict['fac'] != '*':
            faclist = [syslog_info.FACILITY_DICT.get(f) for f in gdict.get('fac', '').lower().split(',')]
            if None in faclist:
                raise Exception("Invalid logging facility code, %s: %s" % (gdict['fac'], spec))
            c1 = '(' + ' or '.join(['f==%d' % f for f in faclist]) + ')'
        else:
            c1 = 'True'
        pri = gdict['pri']
        pfx = gdict.get('pfx', '')
        if pri == '*':
            c2 = 'True'
        else:
            prival = syslog_info.PRIORITY_DICT.get(pri.lower())
            if prival == None:
                raise Exception("Invalid logging priority, %s: %s" % (pri, spec))
            # A minimum priority raises the threshold of every selector piece.
            if minpri is not None and minpri > prival:
                prival = minpri
            if '=' in pfx:
                c2 = "p==%d" % prival
            else:
                # Lower numbers are MORE severe, hence '<='.
                c2 = "p<=%d" % prival
        fpfx = gdict.get('fpfx', '')
        # Assess negatives and positives.
        # neg will contain "EXCLUDE IF" and pos will contain "INCLUDE IF"
        if '!' in fpfx:
            # Double exclusion means to exclude everything except the given priority from
            # everything except the given facility
            if '!' in pfx:
                neg.append("(not %s and not %s)" % (c1, c2))
            else:
                neg.append("not (%s and %s)" % (c1, c2))
        elif '!' in pfx:
            neg.append("(not %s or not %s)" % (c1, c2))
        else:
            pos.append("(%s and %s)" % (c1, c2))

    def match(self, msg, prog = None, priority = syslog_info.LOG_ERR, facility = syslog_info.LOG_SYSLOG):
        """True when this selector matches the given message/program/priority/facility."""
        result = self._match(self, priority, facility, prog, msg)
        #print('MATCH', prog, result, self.debugexpr)
        return result
class SyslogServerProtocol(ServerProtocol):
    """
    Accepts syslog traffic (datagram or stream), splits NUL-separated
    records, and hands each record to the owning SyslogServer.
    """

    def datagram_received(self, data, addr):
        # Datagram traffic funnels through the same record handling.
        self.data_received(data)

    def data_received(self, data):
        try:
            message = data.decode('ascii', 'ignore')
        except Exception:
            # The original code called self._output() here, a method which
            # does not exist and would itself have raised AttributeError;
            # use the module's logging instead.
            warn("Could not decode SYSLOG record data")
            sys.stdout.flush()
            return
        # Multiple records may arrive in one packet, separated by NULs.
        for record in message.split("\0"):
            if record:
                self.owner.parse_to_output(record)
        sys.stdout.flush()
class SyslogServer(Server):
    """
    The internal syslog server.  Listens on a unix socket (datagram by
    default) and dispatches each record to the output handlers whose
    selectors match.
    """

    _loglist = list()           # list of (matcher, [LogOutput, ...]) pairs
    _server = None
    _log_socket = None
    _capture_handler = None     # our capture handler to redirect python logs

    def __init__(self, logsock = "/dev/log", datagram = True, **kwargs):
        super().__init__(**kwargs)
        self._datagram = datagram
        self._log_socket = logsock
        # Best-effort removal of a stale socket left over from a prior run.
        try:
            os.remove(logsock)
        except Exception:
            pass

    def _create_server(self):
        if not self._datagram:
            return self.loop.create_unix_server(
                SyslogServerProtocol.buildProtocol(self), path=self._log_socket)
        # Assure we will be able to bind later
        remove_for_recreate(self._log_socket)
        return self.loop.create_datagram_endpoint(
            SyslogServerProtocol.buildProtocol(self), family=socket.AF_UNIX)

    @asyncio.coroutine
    def server_running(self):
        # Bind the socket if it's a datagram
        if self._datagram:
            transport = self.server[0]
            transport._sock.bind(self._log_socket)
            # world-writable so any process can log
            os.chmod(self._log_socket, 0o777)

    def close(self):
        self.capture_python_logging(False)
        # Close every output handler before tearing down the socket.
        for logitem in self._loglist:
            for m in logitem[1]:
                m.close()
        super().close()
        maybe_remove(self._log_socket)

    def configure(self, config, minimum_priority = None):
        """Builds the (matcher, handlers) dispatch list from the configuration."""
        loglist = self._loglist = list()
        lc = config.get_logconfigs()
        for k,v in lc.items():
            matcher = _syslog_spec_matcher(v.selector or '*.*', minimum_priority)
            loglist.append( (matcher, LogOutput.getOutputHandlers(v)) )

    def reset_minimum_priority(self, minimum_priority = None):
        """
        Specifies a new minimum priority for logging.  Recompiles all selectors, so it's best
        to provide this when the configure is done, if possible.
        """
        for m in self._loglist:
            m[0].reset_minimum_priority(minimum_priority)

    def capture_python_logging(self, enable = True):
        # Install (or remove) a logging.Handler which feeds Python's own log
        # records through this syslog pipeline.
        if enable:
            if not self._capture_handler:
                self._capture_handler = CustomSysLog(self)
                set_custom_handler(self._capture_handler)
        elif self._capture_handler:
            set_custom_handler(self._capture_handler, False)
            self._capture_handler = None

    def parse_to_output(self, msg):
        # For a description of what a valid syslog line can look like, see:
        # http://www.rsyslog.com/doc/syslog_parsing.html
        match = _RE_RFC3164.match(msg)
        if not match:
            # Unparseable record: flag it and treat it as syslog.err.
            pri = syslog_info.LOG_SYSLOG * 8 + syslog_info.LOG_ERR
            logattrs = { 'tag': '?', 'format_error': True, 'host' : None }
        else:
            logattrs = match.groupdict()
            pri = int(logattrs['pri'])
            # Some sources use their full path as the tag; keep the basename.
            if logattrs['tag'][0] == '/':
                logattrs['tag'] = os.path.basename(logattrs['tag'])
        logattrs['raw'] = msg
        # The PRI field encodes facility*8 + priority.
        self.writeLog(logattrs, priority = pri & 7, facility = pri // 8)

    def writeLog(self, logattrs, priority, facility):
        #print("\nWRITELOG", priority, facility, logattrs)
        for m in self._loglist:
            if m[0].match(logattrs['raw'], logattrs['tag'], priority, facility):
                for logger in m[1]:
                    logger.writeLog(logattrs, priority, facility)
class SysLogFormatter(logging.Formatter):
    """
    Formats Python logging records the way a normal syslog daemon would:
    "MMM dd HH:MM:SS program[pid]: message".
    """

    def __init__(self, program, pid):
        self.default_program = program
        self.default_pid = pid
        super().__init__('{asctime} {program_name}[{program_pid}]: {message}', style='{')

    def format(self, record):
        # Fill in defaults for records which carry no program information.
        if not hasattr(record, 'program_name'):
            record.program_name = self.default_program
        if not hasattr(record, 'program_pid'):
            record.program_pid = self.default_pid
        return super().format(record)

    def formatTime(self, record, datefmt=None):
        stamp = strftime('%b %d %H:%M:%S', self.converter(record.created))
        # Strip the leading zero from the day-of-month; this may be picky,
        # but people parse syslogs, so let's not annoy them.
        if stamp[3:5] == ' 0':
            return stamp.replace(' 0', ' ', 1)
        return stamp
class CustomSysLog(logging.Handler):
    """
    A custom Python logging class that makes it easy to redirect Python output to our
    internal syslog capture handler.
    """

    # Python logging level names mapped to syslog priorities.
    PRIORITY_NAMES = {
        "ALERT": syslog_info.LOG_ALERT,
        "CRIT": syslog_info.LOG_CRIT,
        "CRITICAL": syslog_info.LOG_CRIT,
        "DEBUG": syslog_info.LOG_DEBUG,
        "EMERG": syslog_info.LOG_EMERG,
        "ERR": syslog_info.LOG_ERR,
        "ERROR": syslog_info.LOG_ERR,           # DEPRECATED
        "INFO": syslog_info.LOG_INFO,
        "NOTICE": syslog_info.LOG_NOTICE,
        "PANIC": syslog_info.LOG_EMERG,         # DEPRECATED
        "WARN": syslog_info.LOG_WARNING,        # DEPRECATED
        "WARNING": syslog_info.LOG_WARNING,
    }

    def __init__(self, owner):
        super().__init__(logging.DEBUG) # enable all levels since we manage filtering ourselves
        self._owner = owner
        self.setFormatter(SysLogFormatter(sys.argv[0] or '-', os.getpid()))

    def emit(self, record):
        # Records may carry an explicit '_facility'; default to LOCAL5.
        facility = getattr(record, '_facility', syslog_info.LOG_LOCAL5)
        priority = self.PRIORITY_NAMES.get(record.levelname, syslog_info.LOG_ERR)
        # Re-inject the record as a raw "<PRI>..." syslog line so that the
        # normal selector matching and output dispatch apply to it.
        self._owner.parse_to_output("<{0}>".format(facility << 3 | priority) + self.format(record))
================================================
FILE: chaperone/cutil/syslog_handlers.py
================================================
import sys
import os
import socket
import asyncio
from time import time, localtime, strftime
from chaperone.cutil.misc import lazydict, open_foruser
from chaperone.cutil.syslog_info import get_syslog_info
_our_hostname = socket.gethostname()
class LogOutput:
    """
    Base class for log output handlers.  Subclasses declare a 'config_match'
    predicate and register themselves via LogOutput.register(); matching
    handlers are then instantiated (at most once per name) through
    getOutputHandlers().
    """

    name = None
    config_match = lambda c: False      # subclasses override; called unbound with the config

    _cls_handlers = lazydict()          # shared cache: handler name -> instance
    _cls_reghandlers = list()           # all registered handler classes

    @classmethod
    def register(cls, handlercls):
        """Adds a handler class to the registry consulted by getOutputHandlers()."""
        cls._cls_reghandlers.append(handlercls)

    @classmethod
    def getOutputHandlers(cls, config):
        """Returns handler instances for every registered class matching 'config'."""
        return list(filter(None, [h.getHandler(config) for h in cls._cls_reghandlers]))

    @classmethod
    def getName(cls, config):
        # Cache key for this handler; None means "not applicable".
        return cls.name

    @classmethod
    def matchesConfig(cls, config):
        return config.enabled and cls.config_match(config)

    @classmethod
    def getHandler(cls, config):
        """Returns the (possibly cached) handler instance for 'config', or None."""
        if not cls.matchesConfig(config):
            return None
        name = cls.getName(config)
        if name is None:
            return None
        # lazydict.setdefault evaluates the lambda only on a cache miss.
        return cls._cls_handlers.setdefault(name, lambda: cls(config))

    def __init__(self, config):
        self.name = config.name
        self.config = config

    def close(self):
        # Subclasses override when they hold resources.
        pass

    def writeLog(self, logattrs, priority, facility):
        """Formats one parsed syslog record and hands it to write()."""
        if logattrs.get('format_error'):
            # Record could not be parsed; emit the raw text flagged with '??'.
            msg = "??" + logattrs['raw']
        else:
            # Note that 'rest' always starts with a ':', '[' or ' '.
            msg = (logattrs['date'] + ' ' +
                   (self.config.logrec_hostname or logattrs['host'] or _our_hostname) + ' ' +
                   logattrs['tag'] + logattrs['rest'])
        if self.config.extended:
            # Prefix with "facility.priority" when extended output is enabled.
            msg = get_syslog_info(facility, priority) + " " + msg
        self.write(msg)

    def write(self, data):
        h = self.handle
        h.write(data)
        h.write("\n")
        h.flush()
class StdoutHandler(LogOutput):
    """Writes matching log records to the process's stdout."""
    name = "sys:stdout"
    handle = sys.stdout
    config_match = lambda c: c.stdout

LogOutput.register(StdoutHandler)
class StderrHandler(LogOutput):
    """Writes matching log records to the process's stderr."""
    name = "sys:stderr"
    handle = sys.stderr
    config_match = lambda c: c.stderr

LogOutput.register(StderrHandler)
class RemoteClientProtocol:
    """
    Minimal datagram protocol used to ship syslog records to a remote host.
    Traffic is one-way: replies and errors are intentionally ignored.
    """

    def __init__(self, loop):
        self.loop = loop
        self.transport = None

    def connection_made(self, transport):
        self.transport = transport

    def connection_lost(self, exc):
        self.transport = None

    def send(self, message):
        self.transport.sendto(message.encode())

    def datagram_received(self, data, addr):
        pass                    # replies are ignored

    def error_received(self, exc):
        pass                    # best-effort delivery

    def close(self):
        transport = self.transport
        if transport:
            transport.close()
class RemoteHandler(LogOutput):
    """
    Sends log records to a remote syslog host via UDP port 514.  The endpoint
    is established asynchronously; records written before the connection
    completes are silently dropped (best-effort).
    """

    config_match = lambda c: c.syslog_host is not None

    _pending = None             # a pending future to setup this handler
    _protocol = None            # protocol for this logger

    @classmethod
    def getName(cls, config):
        return "syslog_host:" + config.syslog_host

    @asyncio.coroutine
    def setup_handler(self):
        loop = asyncio.get_event_loop()
        connect = loop.create_datagram_endpoint(lambda: RemoteClientProtocol(loop),
                                                remote_addr=(self.config.syslog_host, 514))
        (transport, protocol) = yield from connect
        self._pending = None
        self._protocol = protocol

    def __init__(self, config):
        super().__init__(config)
        # asyncio.async() was renamed to ensure_future() in Python 3.4.4, and
        # 'async' became a keyword in 3.7 (making the literal call a syntax
        # error); fetch whichever scheduling function exists by name.
        schedule = getattr(asyncio, "ensure_future", None) or getattr(asyncio, "async")
        self._pending = schedule(self.setup_handler())

    def write(self, data):
        # Drop records until the endpoint is up (best-effort semantics).
        if self._protocol:
            self._protocol.send(data)

    def close(self):
        if self._pending:
            if not self._pending.cancelled():
                self._pending.cancel()
            self._pending = None
        if self._protocol:
            self._protocol.close()
            self._protocol = None

LogOutput.register(RemoteHandler)
class FileHandler(LogOutput):
    """
    Writes log records to a file.  The configured filename may contain
    strftime() escapes, so the target is re-derived periodically and the
    file reopened when the name changes or the underlying inode disappears
    (external log rotation).
    """

    config_match = lambda c: c.file is not None

    CHECK_INTERVAL = 60         # seconds between rotation/rename checks

    _orig_filename = None       # configured filename, made absolute
    _cur_filename = None        # currently-open, strftime()-expanded filename
    _next_check = 0             # time() after which _maybe_reopen runs again
    _stat = None                # fstat of the open file; None when closed

    @classmethod
    def getName(cls, config):
        return 'file:' + config.file

    def __init__(self, config):
        super().__init__(config)
        self._orig_filename = os.path.abspath(config.file)
        self._maybe_reopen()

    def _maybe_reopen(self):
        # Recompute the target name, and reopen if it changed or if the file
        # we hold open was rotated away (different device or inode on disk).
        new_filename = strftime(self.config.file, localtime())
        if new_filename != self._cur_filename or not self._stat:
            reopen = True
        else:
            try:
                newstat = os.stat(new_filename)
            except FileNotFoundError:
                newstat = None
            reopen = not newstat or (newstat.st_dev != self._stat.st_dev or
                                     newstat.st_ino != self._stat.st_ino)
        if not reopen:
            return
        if self._stat:
            self.handle.flush()
            self.handle.close()
            self.handle = self._stat = None
        env = self.config.environment
        self._cur_filename = new_filename
        # open_foruser creates any missing directories and applies uid/gid modes.
        self.handle = open_foruser(new_filename, 'w' if self.config.overwrite else 'a', env.uid, env.gid)
        self._stat = os.fstat(self.handle.fileno())

    def close(self):
        if self._stat:
            self.handle.close()
            self._stat = None
        self._next_check = 0
        self._cur_filename = None

    def write(self, data):
        # Periodically re-check for rotation/renaming before writing.
        if self._next_check <= time():
            self._maybe_reopen()
            self._next_check = time() + self.CHECK_INTERVAL
        super().write(data)

LogOutput.register(FileHandler)
================================================
FILE: chaperone/cutil/syslog_info.py
================================================
import logging
from logging.handlers import SysLogHandler
# Copy every LOG_* facility and priority constant from SysLogHandler into
# this module's namespace (LOG_EMERG, LOG_ERR, LOG_KERN, LOG_LOCAL0, ...).
globals().update({name: value
                  for name, value in SysLogHandler.__dict__.items()
                  if name.startswith('LOG_')})
FACILITY = ('kern', 'user', 'mail', 'daemon', 'auth', 'syslog', 'lpr', 'news', 'uucp', 'cron', 'authpriv',
'ftp', 'ntp', 'audit', 'alert', 'altcron', 'local0', 'local1', 'local2', 'local3', 'local4',
'local5', 'local6', 'local7')
FACILITY_DICT = {FACILITY[i]:i for i in range(len(FACILITY))}
PRIORITY = ('emerg', 'alert', 'crit', 'err', 'warn', 'notice', 'info', 'debug')
PRIORITY_DICT = {PRIORITY[i]:i for i in range(len(PRIORITY))}
PRIORITY_DICT['warning'] = PRIORITY_DICT['warn']
PRIORITY_DICT['error'] = PRIORITY_DICT['err']
# Python equivalent for PRIORITY settings
PRIORITY_PYTHON = (logging.CRITICAL, logging.CRITICAL, logging.CRITICAL, logging.ERROR,
logging.WARNING, logging.INFO, logging.INFO, logging.DEBUG)
def get_syslog_info(facility, priority):
try:
f = FACILITY[facility]
except IndexError:
f = '?'
try:
return f + '.' + PRIORITY[priority]
except IndexError:
return f + '.?'
def syslog_to_python_lev(lev):
    """Map a syslog priority number to the closest Python logging level.

    Args:
        lev: syslog priority (0 = 'emerg' ... 7 = 'debug').

    Returns:
        The corresponding ``logging`` module level; any out-of-range
        value falls back to ``logging.DEBUG`` instead of raising.
    """
    # Bug fix: use >= here.  Valid indexes run 0 .. len(PRIORITY)-1 and
    # PRIORITY_PYTHON has the same length, so the previous '>' test let
    # lev == len(PRIORITY) through and raised IndexError below.
    if lev < 0 or lev >= len(PRIORITY):
        return logging.DEBUG
    return PRIORITY_PYTHON[lev]
================================================
FILE: chaperone/exec/__init__.py
================================================
# Placeholder
================================================
FILE: chaperone/exec/chaperone.py
================================================
"""
Lightweight process and service manager
Usage:
chaperone [--config=<file_or_dir>]
[--user=<name> | --create-user=<newuser>] [--default-home=<dir>]
[--exitkills | --no-exitkills] [--ignore-failures] [--log-level=<level>] [--no-console-log]
[--debug] [--force] [--disable-services] [--no-defaults] [--no-syslog]
[--version] [--show-dependencies]
[--task]
[<command> [<args> ...]]
Options:
--config=<file_or_dir> Specifies file or directory for configuration (default is /etc/chaperone.d)
--create-user=<newuser> Create a new user with an optional UID (name or name/uid),
then run as if --user was specified.
--default-home=<dir> If the --create-user home directory does not exist, then use this
directory as the default home directory for the new user instead.
--debug Turn on debugging features (same as --log-level=DEBUG)
--disable-services Does not run any services, only the given command (troubleshooting)
--exitkills When given command exits, kill the system (default if container running interactive)
--force If chaperone normally refuses, do it anyway and take the risk.
--ignore-failures Assumes that "ignore_failures:true" was specified on all services (troubleshooting)
--log-level=<level> Specify log level filtering, such as INFO, DEBUG, etc.
--no-console-log Disable all logging to stdout and stderr (useful when the container produces non-log output)
--no-exitkills           When given command exits, don't kill the system (default if container running daemon)
--no-defaults Ignores any default options in the CHAPERONE_OPTIONS environment variable
--no-syslog The internal syslog server will not be started (useful when a separate syslog
daemon is started later).
--user=<name> Start first process as user (else root)
--show-dependencies Shows a list of service dependencies then exits
--task Run in task mode (see below).
--version Display version and exit
Notes:
* If a user is specified, then the --config is relative to the user's home directory.
* Chaperone makes the assumption that an interactive command should shut down the system upon exit,
but a non-interactive command should not. You can reverse this assumption with options.
* --task is used in cases where you wish to execute a script in the container environment
for utility purposes, such as a script to extract data from the container, etc. This switch
is equivalent to "--log err --exitkills --disable-services" and also requires a command
to be specified as usual.
"""
# perform any patches first
import chaperone.cutil.patches
# regular code begins
import sys
import shlex
import os
import re
import asyncio
import subprocess
from functools import partial
from docopt import docopt
from chaperone.cproc import TopLevelProcess
from chaperone.cproc.version import VERSION_MESSAGE
from chaperone.cutil.config import Configuration, ServiceConfig
from chaperone.cutil.env import ENV_INTERACTIVE, ENV_TASK_MODE, ENV_CHAP_OPTIONS
from chaperone.cutil.misc import maybe_create_user
from chaperone.cutil.logging import warn, info, debug, error
MSG_PID1 = """Normally, chaperone expects to run as PID 1 in the 'init' role.
If you want to go ahead anyway, use --force."""
MSG_NOTHING_TO_DO = """There are no services configured to run, nor is there a command specified
on the command line to run as an application. You need to do one or the other."""
# We require usernames to start with a letter or underscore. This is consistent with default Linux
# rules. Yeah I know, regexes can get complicated, but they can also do a lot of work to make the
# rest of the code simpler. Note that <file> matches strings like /foo:bar as a path of "/foo" with a
# groupname of bar, but the colon can be escaped if you actualy have a filename that contains
# a colon like "/foo\:bar".
RE_CREATEUSER = re.compile(
r'''(?P<user>[a-z_][a-z0-9_-]*) # ALWAYS start with the username
(?::(?P<file>/(?:\\:|[^:])+))? # File is next if it's :/path
(?::(?P<uid>\d*))? # Either /uid or :uid introduces a uid (number may be missing)
(?::(?P<gid>[a-z_][a-z0-9_-]*|\d+)?)? # followed by an optional GID
$''',
re.IGNORECASE | re.X)
def main_entry():
# parse these first since we may disable the environment check
options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE)
if options['--task']:
options['--disable-services'] = True
options['--no-console-log'] = not options['--debug']
options['--exitkills'] = True
os.environ[ENV_TASK_MODE] = '1'
if not options['--no-defaults']:
envopts = os.environ.get(ENV_CHAP_OPTIONS)
if envopts:
try:
defaults = docopt(__doc__, argv=(shlex.split(envopts)), options_first=True)
except SystemExit as ex:
print("Error occurred in {0} environment variable: {1}".format(ENV_CHAP_OPTIONS, envopts))
raise
# Replace any "false" command option with the default version.
options.update({k:defaults[k] for k in options.keys() if not options[k]})
if options['--config'] is None:
options['--config'] = '/etc/chaperone.d'
if options['--debug']:
options['--log-level'] = "DEBUG"
print('COMMAND OPTIONS', options)
force = options['--force']
if not force and os.getpid() != 1:
print(MSG_PID1)
exit(1)
tty = sys.stdin.isatty()
os.environ[ENV_INTERACTIVE] = "1" if tty else "0"
kill_switch = options['--exitkills'] or (False if options['--no-exitkills'] else tty)
cmd = options['<command>']
if options['--task'] and not cmd:
error("--task can only be used if a shell command is specified as an argument")
exit(1)
# It's possible that BOTH --create-user and --user exist due to the way _CHAP_OPTIONS is overlaid
# with command line options. So, in such a case, note that we ignore --user.
create = options['--create-user']
if create is None:
user = options['--user']
else:
match = RE_CREATEUSER.match(create)
if not match:
print("Invalid format for --create-user argument: {0}".format(create))
exit(1)
udata = match.groupdict()
try:
maybe_create_user(udata['user'], udata['uid'] or None, udata['gid'] or None,
udata['file'] and udata['file'].replace(r'\:', ':'),
options['--default-home'])
except Exception as ex:
print("--create-user failure: {0}".format(ex))
exit(1)
user = udata['user']
extras = dict()
if options['--ignore-failures']:
extras['ignore_failures'] = True
if options['--no-syslog']:
extras['enable_syslog'] = False
try:
config = Configuration.configFromCommandSpec(options['--config'], user=user, extra_settings=extras,
disable_console_log=options['--no-console-log'])
services = config.get_services()
except Exception as ex:
error(ex, "Configuration Error: {0}", ex)
exit(1)
if not (services or cmd):
print(MSG_NOTHING_TO_DO)
exit(1)
if options['--show-dependencies']:
dg = services.get_dependency_graph()
print("\n".join(dg))
exit(0)
if not cmd and options['--disable-services']:
error("--disable-services not valid without specifying a command to run")
exit(1)
# Now, create the tlp and proceed
tlp = TopLevelProcess(config)
if options['--log-level']:
tlp.force_log_level(options['--log-level'])
if tlp.debug:
config.dump()
# Set proctitle and go
proctitle = "[" + os.path.basename(sys.argv[0]) + "]"
if cmd:
proctitle += " " + cmd
try:
from setproctitle import setproctitle
setproctitle(proctitle)
except ImportError:
pass
# Define here so we can share scope
@asyncio.coroutine
def startup_done():
if options['--ignore-failures']:
warn("ignoring failures on all service startups due to --ignore-failures")
if options['--disable-services'] and services:
warn("services will not be configured due to --disable-services")
extra_services = None
if cmd:
cmdsvc = ServiceConfig.createConfig(config=config,
name="CONSOLE",
exec_args=[cmd] + options['<args>'],
uid=user,
kill_signal=("SIGHUP" if tty else None),
setpgrp=not tty,
exit_kills=kill_switch,
service_groups="IDLE",
ignore_failures=not options['--task'],
stderr='inherit', stdout='inherit')
extra_ser
gitextract_pnlxl5op/
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── README
├── README.md
├── chaperone/
│ ├── __init__.py
│ ├── cproc/
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── commands.py
│ │ ├── process_manager.py
│ │ ├── pt/
│ │ │ ├── __init__.py
│ │ │ ├── cron.py
│ │ │ ├── forking.py
│ │ │ ├── inetd.py
│ │ │ ├── notify.py
│ │ │ ├── oneshot.py
│ │ │ └── simple.py
│ │ ├── subproc.py
│ │ ├── version.py
│ │ └── watcher.py
│ ├── cutil/
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── env.py
│ │ ├── errors.py
│ │ ├── events.py
│ │ ├── format.py
│ │ ├── logging.py
│ │ ├── misc.py
│ │ ├── notify.py
│ │ ├── patches.py
│ │ ├── proc.py
│ │ ├── servers.py
│ │ ├── syslog.py
│ │ ├── syslog_handlers.py
│ │ └── syslog_info.py
│ └── exec/
│ ├── __init__.py
│ ├── chaperone.py
│ ├── envcp.py
│ ├── sdnotify.py
│ ├── sdnotify_exec.py
│ └── telchap.py
├── doc/
│ ├── .gitignore
│ ├── Makefile
│ ├── docserver/
│ │ ├── README
│ │ ├── build/
│ │ │ ├── Dockerfile
│ │ │ └── install.sh
│ │ ├── build.sh
│ │ ├── chaperone.d/
│ │ │ ├── 010-start.conf
│ │ │ └── 120-apache2.conf
│ │ ├── etc/
│ │ │ ├── apache2.conf
│ │ │ └── init.sh
│ │ └── run.sh
│ └── source/
│ ├── _static/
│ │ └── custom.css
│ ├── _templates/
│ │ └── layout.html
│ ├── conf.py
│ ├── guide/
│ │ ├── chap-docker-simple.rst
│ │ ├── chap-docker-smaller.rst
│ │ ├── chap-docker.rst
│ │ ├── chap-intro.rst
│ │ ├── chap-other.rst
│ │ └── chap-using.rst
│ ├── includes/
│ │ ├── defs.rst
│ │ └── incomplete.rst
│ ├── index.rst
│ ├── ref/
│ │ ├── command-line.rst
│ │ ├── config-format.rst
│ │ ├── config-global.rst
│ │ ├── config-logging.rst
│ │ ├── config-service.rst
│ │ ├── config.rst
│ │ ├── env.rst
│ │ ├── index.rst
│ │ ├── utilities.rst
│ │ └── utility-envcp.rst
│ └── status.rst
├── samples/
│ ├── README
│ ├── chaperone-devbase/
│ │ ├── Dockerfile
│ │ ├── apps/
│ │ │ ├── bin/
│ │ │ │ └── README
│ │ │ ├── chaperone.d/
│ │ │ │ └── 010-start.conf
│ │ │ ├── etc/
│ │ │ │ ├── README
│ │ │ │ └── init.sh
│ │ │ └── init.d/
│ │ │ └── README
│ │ ├── build-image.sh
│ │ └── install.sh
│ ├── chaperone-lamp/
│ │ ├── Dockerfile
│ │ ├── apps/
│ │ │ ├── chaperone.d/
│ │ │ │ ├── 105-mysqld.conf
│ │ │ │ └── 120-apache2.conf
│ │ │ ├── etc/
│ │ │ │ ├── apache2.conf
│ │ │ │ └── mysql/
│ │ │ │ ├── my.cnf
│ │ │ │ └── start_mysql.sh
│ │ │ ├── init.d/
│ │ │ │ ├── mysql.sh
│ │ │ │ └── phpmyadmin.sh
│ │ │ └── www/
│ │ │ ├── default/
│ │ │ │ └── index.php
│ │ │ └── sites.d/
│ │ │ └── default.conf
│ │ ├── build-image.sh
│ │ └── install.sh
│ ├── docsample/
│ │ ├── Dockerfile
│ │ ├── README
│ │ └── chaperone.conf
│ └── setup-bin/
│ ├── build
│ ├── ct_setproxy
│ └── dot.bashrc
├── sandbox/
│ ├── .gitignore
│ ├── .shinit
│ ├── README
│ ├── bare_startup.sh
│ ├── bareimage/
│ │ ├── Dockerfile
│ │ └── install-bareimage.sh
│ ├── bash.bashrc
│ ├── bin/
│ │ ├── chaperone
│ │ ├── cps
│ │ ├── fakeentry
│ │ └── repeat
│ ├── centos.d/
│ │ ├── apache.conf
│ │ ├── app.conf
│ │ ├── cron.conf
│ │ └── sys1.conf
│ ├── distserv/
│ │ ├── chaperone.d/
│ │ │ ├── 005-config.conf
│ │ │ ├── 010-start.conf
│ │ │ └── 120-apache2.conf
│ │ ├── etc/
│ │ │ └── apache2.conf
│ │ └── run.sh
│ ├── etc/
│ │ ├── apache2.conf
│ │ └── makezombie.conf
│ ├── test.d/
│ │ ├── apache.conf
│ │ ├── cron.conf
│ │ └── sys1.conf
│ ├── testbare
│ ├── testcent
│ ├── testdock
│ ├── testimage
│ ├── testvar
│ └── user.d/
│ └── sys1.conf
├── setup.py
└── tests/
├── .gitignore
├── README.md
├── bin/
│ ├── chaperone
│ ├── daemon
│ ├── daemonutil.py
│ ├── envcp
│ ├── expect-lite-command-run
│ ├── expect-lite-image-run
│ ├── expect-test-command
│ ├── expect-test-image
│ ├── get-serial
│ ├── is-running
│ ├── kill-from-pidfile
│ ├── logecho
│ ├── proctool
│ ├── read_from_port
│ ├── sdnotify
│ ├── sdnotify-exec
│ ├── talkback
│ ├── telchap
│ └── test-driver
├── el-tests/
│ ├── basic-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ └── test-002.elt
│ ├── cron-1/
│ │ ├── chaperone.conf
│ │ ├── simulate-rotate.sh
│ │ ├── test-001.elt
│ │ ├── test-004.elt
│ │ ├── test-005.elt
│ │ ├── test-006.elt
│ │ ├── test-007.elt
│ │ └── test-008.elt
│ ├── exitkills-1/
│ │ ├── chaperone.conf
│ │ └── test-001.elt
│ ├── fork-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-001b.elt
│ │ ├── test-003.elt
│ │ └── test-004.elt
│ ├── inetd-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ └── test-002.elt
│ ├── notify-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-001b.elt
│ │ ├── test-001c.elt
│ │ ├── test-001d.elt
│ │ └── test-001e.elt
│ ├── simple-1/
│ │ ├── chaperone.conf
│ │ ├── test-001.elt
│ │ ├── test-002.elt
│ │ ├── test-003.elt
│ │ └── test-004.elt
│ └── simple-2/
│ ├── chaperone.conf
│ ├── test-001.elt
│ ├── test-002.elt
│ ├── test-003.elt
│ └── test-004.elt
├── env_expand.py
├── env_parse.py
├── events.py
├── prefix.py
├── run-all-tests.sh
├── run-el.sh
├── run-shell.sh
├── service_order.py
└── syslog_spec.py
SYMBOL INDEX (490 symbols across 38 files)
FILE: chaperone/cproc/client.py
class CommandClient (line 3) | class CommandClient(asyncio.Protocol):
method sendCommand (line 6) | def sendCommand(cls, cmd):
method __init__ (line 14) | def __init__(self, message, loop):
method connection_made (line 19) | def connection_made(self, transport):
method data_received (line 22) | def data_received(self, data):
method connection_lost (line 35) | def connection_lost(self, exc):
FILE: chaperone/cproc/commands.py
class _BaseCommand (line 28) | class _BaseCommand(object):
method match (line 34) | def match(self, opts):
method exec (line 40) | def exec(self, opts, protocol):
class statusCommand (line 57) | class statusCommand(_BaseCommand):
method do_exec (line 63) | def do_exec(self, opts, controller):
class dependenciesCommand (line 69) | class dependenciesCommand(_BaseCommand):
method do_exec (line 75) | def do_exec(self, opts, controller):
class serviceReset (line 79) | class serviceReset(_BaseCommand):
method do_exec (line 84) | def do_exec(self, opts, controller):
class serviceEnable (line 89) | class serviceEnable(_BaseCommand):
method do_exec (line 94) | def do_exec(self, opts, controller):
class serviceDisable (line 98) | class serviceDisable(_BaseCommand):
method do_exec (line 103) | def do_exec(self, opts, controller):
class serviceStart (line 107) | class serviceStart(_BaseCommand):
method do_exec (line 112) | def do_exec(self, opts, controller):
class serviceStop (line 121) | class serviceStop(_BaseCommand):
method do_exec (line 126) | def do_exec(self, opts, controller):
class loglevelCommand (line 135) | class loglevelCommand(_BaseCommand):
method do_exec (line 140) | def do_exec(self, opts, controller):
class shutdownCommand (line 156) | class shutdownCommand(_BaseCommand):
method do_exec (line 161) | def do_exec(self, opts, controller):
class CommandProtocol (line 195) | class CommandProtocol(ServerProtocol):
method _interpret_command (line 200) | def _interpret_command(self, msg):
method _command_task (line 219) | def _command_task(self, cmd, interactive = False):
method data_received (line 225) | def data_received(self, data):
class _InteractiveServer (line 233) | class _InteractiveServer(Server):
method _create_server (line 235) | def _create_server(self):
method server_running (line 241) | def server_running(self):
method close (line 244) | def close(self):
class CommandServer (line 249) | class CommandServer(Server):
method __init__ (line 255) | def __init__(self, controller, filename = CHAP_FIFO, **kwargs):
method server_running (line 266) | def server_running(self):
method _open (line 271) | def _open(self):
method _create_server (line 285) | def _create_server(self):
method close (line 288) | def close(self):
FILE: chaperone/cproc/process_manager.py
class CustomEventLoop (line 26) | class CustomEventLoop(asyncio.SelectorEventLoop):
method _make_socket_transport (line 27) | def _make_socket_transport(self, sock, protocol, waiter=None, *,
class TopLevelProcess (line 43) | class TopLevelProcess(objectplus):
method __init__ (line 68) | def __init__(self, config):
method debug (line 91) | def debug(self):
method debug (line 94) | def debug(self, val):
method loop (line 98) | def loop(self):
method system_alive (line 102) | def system_alive(self):
method version (line 113) | def version(self):
method uptime (line 118) | def uptime(self):
method services (line 122) | def services(self):
method force_log_level (line 125) | def force_log_level(self, level = None):
method _queue_no_processes (line 149) | def _queue_no_processes(self):
method _no_processes (line 154) | def _no_processes(self, ignore_service_state = False):
method _final_system_stop (line 181) | def _final_system_stop(self):
method _got_sigint (line 191) | def _got_sigint(self):
method signal_ready (line 195) | def signal_ready(self):
method _report_status (line 210) | def _report_status(self):
method kill_system (line 216) | def kill_system(self, errno = None, force = False):
method _cancel_pending (line 234) | def _cancel_pending(self):
method _kill_system_co (line 242) | def _kill_system_co(self):
method activate_result (line 288) | def activate_result(self, future):
method activate (line 291) | def activate(self, cr):
method _system_coro_check (line 297) | def _system_coro_check(self, f):
method _system_started (line 302) | def _system_started(self, startup, future=None):
method _start_system_services (line 312) | def _start_system_services(self):
method run_event_loop (line 337) | def run_event_loop(self, startup_coro = None, exit_when_done = True):
method run_services (line 354) | def run_services(self, extra_services, disable_others = False):
FILE: chaperone/cproc/pt/cron.py
class CronProcess (line 17) | class CronProcess(SubProcess):
method __init__ (line 24) | def __init__(self, service, family=None):
method default_status (line 37) | def default_status(self):
method scheduled (line 43) | def scheduled(self):
method start (line 47) | def start(self):
method _cron_hit (line 65) | def _cron_hit(self):
method stoppable (line 80) | def stoppable(self):
method stop (line 84) | def stop(self):
method process_started_co (line 89) | def process_started_co(self):
method _monitor_service (line 100) | def _monitor_service(self):
FILE: chaperone/cproc/pt/forking.py
class ForkingProcess (line 5) | class ForkingProcess(SubProcess):
method process_started_co (line 10) | def process_started_co(self):
method _exit_timeout (line 19) | def _exit_timeout(self):
FILE: chaperone/cproc/pt/inetd.py
class InetdServiceProtocol (line 10) | class InetdServiceProtocol(ServerProtocol):
method acquire_socket (line 14) | def acquire_socket(self, sock):
method _done (line 27) | def _done(self, f):
method start_socket_process (line 33) | def start_socket_process(self, fd):
class InetdService (line 77) | class InetdService(Server):
method __init__ (line 79) | def __init__(self, process):
method _create_server (line 83) | def _create_server(self):
class InetdProcess (line 88) | class InetdProcess(SubProcess):
method __init__ (line 94) | def __init__(self, service, family=None):
method add_process (line 101) | def add_process(self, proc):
method remove_process (line 104) | def remove_process(self, proc):
method scheduled (line 108) | def scheduled(self):
method note (line 112) | def note(self):
method start_subprocess (line 122) | def start_subprocess(self):
method reset (line 133) | def reset(self, dependents = False, enable = False, restarts_ok = False):
method final_stop (line 145) | def final_stop(self):
FILE: chaperone/cproc/pt/notify.py
class NotifyProcess (line 11) | class NotifyProcess(SubProcess):
method _close_listener (line 20) | def _close_listener(self):
method process_prepare_co (line 26) | def process_prepare_co(self, environ):
method _notify_timeout (line 37) | def _notify_timeout(self):
method reset (line 49) | def reset(self, dependents = False, enable = False, restarts_ok = False):
method final_stop (line 54) | def final_stop(self):
method process_started_co (line 59) | def process_started_co(self):
method _monitor_service (line 88) | def _monitor_service(self):
method _notify_received (line 100) | def _notify_received(self, which, var, value):
method _setready (line 106) | def _setready(self):
method notify_MAINPID (line 112) | def notify_MAINPID(self, value):
method notify_BUSERROR (line 120) | def notify_BUSERROR(self, value):
method notify_ERRNO (line 127) | def notify_ERRNO(self, value):
method notify_READY (line 139) | def notify_READY(self, value):
method notify_STATUS (line 143) | def notify_STATUS(self, value):
method status (line 147) | def status(self):
FILE: chaperone/cproc/pt/oneshot.py
class OneshotProcess (line 5) | class OneshotProcess(SubProcess):
method process_started_co (line 10) | def process_started_co(self):
method _exit_timeout (line 18) | def _exit_timeout(self):
FILE: chaperone/cproc/pt/simple.py
class SimpleProcess (line 4) | class SimpleProcess(SubProcess):
method process_started_co (line 9) | def process_started_co(self):
method _monitor_service (line 28) | def _monitor_service(self):
FILE: chaperone/cproc/subproc.py
function _process_logger (line 21) | def _process_logger(stream, kind, service):
class SubProcess (line 38) | class SubProcess(object):
method __new__ (line 75) | def __new__(cls, service, family=None):
method __init__ (line 95) | def __init__(self, service, family=None):
method __getattr__ (line 122) | def __getattr__(self, name):
method __setattr__ (line 126) | def __setattr__(self, name, value):
method _setup_subprocess (line 136) | def _setup_subprocess(self):
method _get_states (line 149) | def _get_states(self):
method pid (line 164) | def pid(self):
method pid (line 168) | def pid(self, newpid):
method returncode (line 180) | def returncode(self):
method returncode (line 186) | def returncode(self, val):
method loginfo (line 192) | def loginfo(self, *args, **kwargs):
method logerror (line 195) | def logerror(self, *args, **kwargs):
method logwarn (line 199) | def logwarn(self, *args, **kwargs):
method logdebug (line 202) | def logdebug(self, *args, **kwargs):
method note (line 206) | def note(self):
method note (line 209) | def note(self, value):
method status (line 213) | def status(self):
method default_status (line 238) | def default_status(self):
method enabled (line 244) | def enabled(self):
method enabled (line 247) | def enabled(self, val):
method _try_to_enable (line 253) | def _try_to_enable(self):
method scheduled (line 280) | def scheduled(self):
method kill_signal (line 291) | def kill_signal(self):
method running (line 298) | def running(self):
method started (line 303) | def started(self):
method stoppable (line 310) | def stoppable(self):
method failed (line 319) | def failed(self):
method ready (line 325) | def ready(self):
method prerequisites (line 339) | def prerequisites(self):
method start (line 351) | def start(self):
method get_expanded_environment (line 433) | def get_expanded_environment(self):
method start_subprocess (line 441) | def start_subprocess(self):
method process_prepare_co (line 493) | def process_prepare_co(self, environment):
method process_started_co (line 497) | def process_started_co(self):
method wait_for_pidfile (line 501) | def wait_for_pidfile(self):
method _wait_kill_on_exit (line 543) | def _wait_kill_on_exit(self):
method _attach_pid (line 547) | def _attach_pid(self, newpid):
method _child_watcher_callback (line 557) | def _child_watcher_callback(self, pid, returncode):
method process_exit (line 560) | def process_exit(self, code):
method _abnormal_exit (line 580) | def _abnormal_exit(self, code):
method _restart_callback (line 616) | def _restart_callback(self, fut):
method _kill_system (line 626) | def _kill_system(self):
method add_pending (line 629) | def add_pending(self, future):
method reset (line 634) | def reset(self, dependents = False, enable = False, restarts_ok = False):
method stop (line 669) | def stop(self):
method final_stop (line 673) | def final_stop(self):
method terminate (line 681) | def terminate(self):
method do_startup_pause (line 705) | def do_startup_pause(self):
method timed_wait (line 726) | def timed_wait(self, timeout, func = None):
method wait (line 749) | def wait(self):
class SubProcessFamily (line 767) | class SubProcessFamily(lazydict):
method __init__ (line 774) | def __init__(self, controller, services_config):
method get_status_formatter (line 787) | def get_status_formatter(self):
method system_alive (line 793) | def system_alive(self):
method get_scheduled_services (line 796) | def get_scheduled_services(self):
method get_status (line 799) | def get_status(self):
method run (line 831) | def run(self, servicelist = None):
method _lookup_services (line 849) | def _lookup_services(self, names):
method start (line 861) | def start(self, service_names, force = False, wait = False, enable = F...
method _queued_start (line 895) | def _queued_start(self, slist, names):
method stop (line 902) | def stop(self, service_names, force = False, wait = False, disable = F...
method _queued_stop (line 920) | def _queued_stop(self, slist, names, disable):
method reset (line 930) | def reset(self, service_names, force = False, wait = False):
method _queued_reset (line 945) | def _queued_reset(self, slist, names):
method enable (line 953) | def enable(self, service_names):
method disable (line 959) | def disable(self, service_names):
FILE: chaperone/cproc/watcher.py
class InitChildWatcher (line 13) | class InitChildWatcher(BaseChildWatcher):
method __init__ (line 19) | def __init__(self, **kwargs):
method close (line 29) | def close(self):
method __enter__ (line 34) | def __enter__(self):
method __exit__ (line 40) | def __exit__(self, a, b, c):
method number_of_waiters (line 55) | def number_of_waiters(self):
method add_child_handler (line 58) | def add_child_handler(self, pid, callback, *args):
method remove_child_handler (line 71) | def remove_child_handler(self, pid):
method check_processes (line 78) | def check_processes(self):
method _do_waitpid_all (line 82) | def _do_waitpid_all(self):
FILE: chaperone/cutil/config.py
function IsExecutable (line 19) | def IsExecutable(v):
function print_services (line 90) | def print_services(label, svlist):
class _BaseConfig (line 104) | class _BaseConfig(object):
method createConfig (line 118) | def createConfig(cls, config=None, **kwargs):
method _typecheck_assure_bool (line 127) | def _typecheck_assure_bool(self, attr):
method _typecheck_assure_int (line 138) | def _typecheck_assure_int(self, attr):
method __init__ (line 148) | def __init__(self, initdict, name = "MAIN", env = None, settings = None):
method shortname (line 193) | def shortname(self):
method post_init (line 196) | def post_init(self):
method augment_environment (line 199) | def augment_environment(self, env):
method get (line 202) | def get(self, attr, default = None):
method __repr__ (line 205) | def __repr__(self):
class ServiceConfig (line 211) | class ServiceConfig(_BaseConfig):
method shortname (line 255) | def shortname(self):
method augment_environment (line 258) | def augment_environment(self, env):
method post_init (line 262) | def post_init(self):
class LogConfig (line 291) | class LogConfig(_BaseConfig):
method shortname (line 310) | def shortname(self):
class ServiceDict (line 314) | class ServiceDict(lazydict):
method __init__ (line 318) | def __init__(self, servdict, env = None, settings = None):
method add (line 326) | def add(self, service):
method clear (line 329) | def clear(self):
method get_dependency_graph (line 333) | def get_dependency_graph(self):
method get_startup_list (line 372) | def get_startup_list(self):
class Configuration (line 460) | class Configuration(object):
method configFromCommandSpec (line 468) | def configFromCommandSpec(cls, spec, user = None, default = None, extr...
method __init__ (line 503) | def __init__(self, *args, default = None, uid = None, extra_settings =...
method _merge (line 541) | def _merge(self, items):
method get_services (line 551) | def get_services(self):
method get_logconfigs (line 559) | def get_logconfigs(self):
method get_settings (line 566) | def get_settings(self):
method update_settings (line 569) | def update_settings(self, updates):
method get_environment (line 574) | def get_environment(self):
method dump (line 579) | def dump(self):
FILE: chaperone/cutil/env.py
class EnvScanner (line 39) | class EnvScanner:
method __init__ (line 52) | def __init__(self, variable_id = None, open_expansion = None):
method parse (line 60) | def parse(self, buf, func, *args):
class Environment (line 134) | class Environment(lazydict):
method set_parse_parameters (line 157) | def set_parse_parameters(cls, variable_id = None, open_expansion = None):
method set_backtick_expansion (line 161) | def set_backtick_expansion(cls, enabled = True, cache = True):
method __init__ (line 165) | def __init__(self, from_env = os.environ, config = None, uid = None, g...
method _get_shadow_environment (line 241) | def _get_shadow_environment(self, var):
method __setitem__ (line 262) | def __setitem__(self, key, value):
method __delitem__ (line 266) | def __delitem__(self, key):
method clear (line 270) | def clear(self):
method _elookup (line 274) | def _elookup(self, match):
method expand (line 278) | def expand(self, instr):
method expand_attributes (line 296) | def expand_attributes(self, obj, *args):
method expanded (line 310) | def expanded(self):
method _expand_into (line 336) | def _expand_into(self, k, wholematch, result, parent = None):
method _recurse (line 427) | def _recurse(self, result, buf, parent_var = None):
method _backtick_expand (line 432) | def _backtick_expand(self, cmd):
method get_public_environment (line 479) | def get_public_environment(self):
FILE: chaperone/cutil/errors.py
class ChError (line 3) | class ChError(Exception):
method annotate (line 10) | def annotate(self, text):
method __str__ (line 16) | def __str__(self):
method __init__ (line 22) | def __init__(self, message = None, errno = None):
class ChParameterError (line 27) | class ChParameterError(ChError):
class ChNotFoundError (line 30) | class ChNotFoundError(ChError):
class ChSystemError (line 33) | class ChSystemError(ChError):
class ChProcessError (line 36) | class ChProcessError(ChError):
method __init__ (line 38) | def __init__(Self, message = None, errno = None, resultcode = None):
class ChVariableError (line 43) | class ChVariableError(ChError):
function get_errno_from_exception (line 46) | def get_errno_from_exception(ex):
FILE: chaperone/cutil/events.py
function SWALLOW_EVENT (line 4) | def SWALLOW_EVENT(*args, **kwargs):
class EventSource (line 8) | class EventSource:
method __init__ (line 27) | def __init__(self, **kwargs):
method __getattribute__ (line 32) | def __getattribute__(self, key):
method _exec_kwargs (line 38) | def _exec_kwargs(self, oper, kwargs):
method clear (line 49) | def clear(self):
method reset (line 53) | def reset(self, **kwargs):
method add (line 58) | def add(self, **kwargs):
method remove (line 67) | def remove(self, **kwargs):
method _do_add (line 76) | def _do_add(self, name, value):
method _do_remove (line 103) | def _do_remove(self, name, value):
FILE: chaperone/cutil/format.py
function fstr (line 1) | def fstr(s):
class TableFormatter (line 8) | class TableFormatter(list):
method __init__ (line 19) | def __init__(self, *args, sort=None):
method add_rows (line 26) | def add_rows(self, rows):
method get_formatted_data (line 33) | def get_formatted_data(self):
FILE: chaperone/cutil/logging.py
function set_log_level (line 24) | def set_log_level(lev):
function set_custom_handler (line 31) | def set_custom_handler(handler, enable = True):
function _versatile_logprint (line 42) | def _versatile_logprint(delegate, fmt, *args,
FILE: chaperone/cutil/misc.py
class objectplus (line 10) | class objectplus:
method sharedInstance (line 18) | def sharedInstance(cls):
class lazydict (line 25) | class lazydict(dict):
method __init__ (line 29) | def __init__(self, *args):
method get (line 37) | def get(self, key, default = None):
method setdefault (line 47) | def setdefault(self, key, default = None):
method smart_update (line 58) | def smart_update(self, key, theirs):
method deepcopy (line 76) | def deepcopy(self):
function maybe_remove (line 80) | def maybe_remove(fn, strict = False):
function is_exe (line 97) | def is_exe(p):
function executable_path (line 100) | def executable_path(fn, env = os.environ):
function lookup_user (line 124) | def lookup_user(uid, gid = None):
function lookup_group (line 167) | def lookup_group(gid, optional = False):
function groupadd (line 196) | def groupadd(name, gid):
function useradd (line 215) | def useradd(name, uid = None, gid = None, home = None):
function userdel (line 259) | def userdel(name):
function get_user_directories_directory (line 283) | def get_user_directories_directory():
function maybe_create_user (line 308) | def maybe_create_user(user, uid = None, gid = None, using_file = None, d...
function _assure_dir_for (line 384) | def _assure_dir_for(path, pwrec, gid):
function open_foruser (line 397) | def open_foruser(filename, mode = 'r', uid = None, gid = None, exists_ok...
function remove_for_recreate (line 424) | def remove_for_recreate(filename):
function get_signal_name (line 435) | def get_signal_name(signum):
function get_signal_number (line 438) | def get_signal_number(signame):
FILE: chaperone/cutil/notify.py
class NotifyProtocol (line 12) | class NotifyProtocol(ServerProtocol):
method datagram_received (line 14) | def datagram_received(self, data, addr):
class NotifyListener (line 22) | class NotifyListener(Server):
method _create_server (line 24) | def _create_server(self):
method is_client (line 29) | def is_client(self):
method socket_name (line 33) | def socket_name(self):
method bind_name (line 37) | def bind_name(self):
method __init__ (line 42) | def __init__(self, socket_name, **kwargs):
method send (line 47) | def send(self, message):
method server_running (line 54) | def server_running(self):
method close (line 71) | def close(self):
class NotifyClient (line 79) | class NotifyClient(NotifyListener):
method is_client (line 82) | def is_client(self):
class NotifySink (line 88) | class NotifySink:
method __init__ (line 106) | def __init__(self):
method level (line 111) | def level(self):
method level (line 118) | def level(self, val):
method enable (line 123) | def enable(self, ntype):
method disable (line 126) | def disable(self, ntype):
method error (line 129) | def error(self, val):
method stopping (line 133) | def stopping(self):
method ready (line 137) | def ready(self):
method status (line 141) | def status(self, statmsg):
method mainpid (line 144) | def mainpid(self):
method sent (line 147) | def sent(self, name):
method send (line 150) | def send(self, name, val):
method _do_send (line 159) | def _do_send(self, msg):
method connect (line 164) | def connect(self, socket = None):
method close (line 190) | def close(self):
FILE: chaperone/cutil/patches.py
function PATCH_CLASS (line 11) | def PATCH_CLASS(module, clsname, member, oldstr, newfunc):
function NEW_process_exited (line 35) | def NEW_process_exited(self):
FILE: chaperone/cutil/proc.py
class ProcStatus (line 4) | class ProcStatus(int):
method __new__ (line 9) | def __new__(cls, val):
method exited (line 20) | def exited(self):
method signaled (line 24) | def signaled(self):
method stopped (line 28) | def stopped(self):
method continued (line 32) | def continued(self):
method exit_status (line 36) | def exit_status(self):
method normal_exit (line 43) | def normal_exit(self):
method errno (line 47) | def errno(self):
method errno (line 55) | def errno(self, val):
method exit_message (line 59) | def exit_message(self):
method signal (line 66) | def signal(self):
method briefly (line 74) | def briefly(self):
method __format__ (line 81) | def __format__(self, spec):
FILE: chaperone/cutil/servers.py
class ServerProtocol (line 5) | class ServerProtocol(asyncio.Protocol):
method buildProtocol (line 8) | def buildProtocol(cls, owner, **kwargs):
method __init__ (line 11) | def __init__(self, owner, **kwargs):
method connection_made (line 25) | def connection_made(self, transport):
method error_received (line 29) | def error_received(self, exc):
method connection_lost (line 33) | def connection_lost(self, exc):
class Server (line 36) | class Server:
method __init__ (line 40) | def __init__(self, **kwargs):
method run (line 44) | def run(self):
method server_running (line 50) | def server_running(self):
method close (line 53) | def close(self):
FILE: chaperone/cutil/syslog.py
class _syslog_spec_matcher (line 33) | class _syslog_spec_matcher:
method __init__ (line 83) | def __init__(self, selector, minimum_priority = None):
method reset_minimum_priority (line 87) | def reset_minimum_priority(self, minimum_priority = None):
method _compile (line 94) | def _compile(self, minimum_priority):
method _buildex (line 112) | def _buildex(self, expr):
method _init_spec (line 120) | def _init_spec(self, spec, neg, pos, minpri):
method match (line 176) | def match(self, msg, prog = None, priority = syslog_info.LOG_ERR, faci...
class SyslogServerProtocol (line 182) | class SyslogServerProtocol(ServerProtocol):
method datagram_received (line 184) | def datagram_received(self, data, addr):
method data_received (line 187) | def data_received(self, data):
class SyslogServer (line 202) | class SyslogServer(Server):
method __init__ (line 210) | def __init__(self, logsock = "/dev/log", datagram = True, **kwargs):
method _create_server (line 221) | def _create_server(self):
method server_running (line 233) | def server_running(self):
method close (line 240) | def close(self):
method configure (line 248) | def configure(self, config, minimum_priority = None):
method reset_minimum_priority (line 255) | def reset_minimum_priority(self, minimum_priority = None):
method capture_python_logging (line 263) | def capture_python_logging(self, enable = True):
method parse_to_output (line 272) | def parse_to_output(self, msg):
method writeLog (line 290) | def writeLog(self, logattrs, priority, facility):
class SysLogFormatter (line 298) | class SysLogFormatter(logging.Formatter):
method __init__ (line 303) | def __init__(self, program, pid):
method format (line 310) | def format(self, record):
method formatTime (line 317) | def formatTime(self, record, datefmt=None):
class CustomSysLog (line 325) | class CustomSysLog(logging.Handler):
method __init__ (line 346) | def __init__(self, owner):
method emit (line 351) | def emit(self, record):
FILE: chaperone/cutil/syslog_handlers.py
class LogOutput (line 13) | class LogOutput:
method register (line 21) | def register(cls, handlercls):
method getOutputHandlers (line 25) | def getOutputHandlers(cls, config):
method getName (line 29) | def getName(cls, config):
method matchesConfig (line 33) | def matchesConfig(cls, config):
method getHandler (line 37) | def getHandler(cls, config):
method __init__ (line 45) | def __init__(self, config):
method close (line 49) | def close(self):
method writeLog (line 52) | def writeLog(self, logattrs, priority, facility):
method write (line 64) | def write(self, data):
class StdoutHandler (line 71) | class StdoutHandler(LogOutput):
class StderrHandler (line 80) | class StderrHandler(LogOutput):
class RemoteClientProtocol (line 89) | class RemoteClientProtocol:
method __init__ (line 90) | def __init__(self, loop):
method connection_made (line 94) | def connection_made(self, transport):
method send (line 97) | def send(self, message):
method datagram_received (line 100) | def datagram_received(self, data, addr):
method error_received (line 103) | def error_received(self, exc):
method connection_lost (line 106) | def connection_lost(self, exc):
method close (line 109) | def close(self):
class RemoteHandler (line 114) | class RemoteHandler(LogOutput):
method getName (line 122) | def getName(cls, config):
method setup_handler (line 126) | def setup_handler(self):
method __init__ (line 134) | def __init__(self, config):
method write (line 138) | def write(self, data):
method close (line 142) | def close(self):
class FileHandler (line 154) | class FileHandler(LogOutput):
method getName (line 166) | def getName(cls, config):
method __init__ (line 169) | def __init__(self, config):
method _maybe_reopen (line 174) | def _maybe_reopen(self):
method close (line 200) | def close(self):
method write (line 207) | def write(self, data):
FILE: chaperone/cutil/syslog_info.py
function get_syslog_info (line 24) | def get_syslog_info(facility, priority):
function syslog_to_python_lev (line 35) | def syslog_to_python_lev(lev):
FILE: chaperone/exec/chaperone.py
function main_entry (line 86) | def main_entry():
FILE: chaperone/exec/envcp.py
function check_canwrite (line 38) | def check_canwrite(flist, overok):
function main_entry (line 44) | def main_entry():
FILE: chaperone/exec/sdnotify.py
function _mkabstract (line 35) | def _mkabstract(socket_name):
function do_notify (line 41) | def do_notify(msg):
function main_entry (line 53) | def main_entry():
FILE: chaperone/exec/sdnotify_exec.py
function maybe_quote (line 71) | def maybe_quote(s):
class SDNotifyExec (line 76) | class SDNotifyExec:
method __init__ (line 97) | def __init__(self, options):
method info (line 145) | def info(self, msg):
method _got_sig (line 149) | def _got_sig(self):
method kill_program (line 152) | def kill_program(self, exitcode = None):
method _really_kill (line 157) | def _really_kill(self):
method _parent_closed (line 161) | def _parent_closed(self, which, ex):
method _do_proxy_send (line 167) | def _do_proxy_send(self, name, value):
method send_to_proxy (line 177) | def send_to_proxy(self, name, value):
method notify_received (line 180) | def notify_received(self, which, name, value):
method _notify_timeout (line 205) | def _notify_timeout(self):
method _run_process (line 212) | def _run_process(self):
method run (line 227) | def run(self):
function main_entry (line 247) | def main_entry():
FILE: chaperone/exec/telchap.py
function main_entry (line 21) | def main_entry():
FILE: doc/source/conf.py
function setup (line 261) | def setup(app):
FILE: setup.py
function read (line 12) | def read(fname):
function get_version (line 15) | def get_version():
function which (line 18) | def which(program):
FILE: tests/bin/daemonutil.py
class Daemon (line 13) | class Daemon:
method __init__ (line 18) | def __init__(self, pidfile = None):
method daemonize (line 21) | def daemonize(self, exitwith = 0):
method delpid (line 71) | def delpid(self):
method start (line 74) | def start(self, exitwith = 0):
method stop (line 96) | def stop(self):
method restart (line 128) | def restart(self):
method run (line 133) | def run(self):
FILE: tests/env_expand.py
function printdict (line 158) | def printdict(d, legend = "Dict:", compare = None):
function canonical (line 167) | def canonical(d, nl = False):
class TestEnvOrder (line 175) | class TestEnvOrder(unittest.TestCase):
method test_expand1 (line 179) | def test_expand1(self):
method test_expand2 (line 186) | def test_expand2(self):
method test_expand3 (line 193) | def test_expand3(self):
method test_expand4 (line 200) | def test_expand4(self):
method test_expand5 (line 207) | def test_expand5(self):
method test_expand6 (line 217) | def test_expand6(self):
method test_expand7 (line 229) | def test_expand7(self):
method test_expand8 (line 236) | def test_expand8(self):
FILE: tests/env_parse.py
class ScanTester (line 33) | class ScanTester:
method __init__ (line 35) | def __init__(self, test):
method run (line 39) | def run(self, tc):
method callback (line 45) | def callback(self, buf, whole):
class TestScanner (line 49) | class TestScanner(unittest.TestCase):
method test_parse1 (line 51) | def test_parse1(self):
FILE: tests/events.py
class handlers (line 5) | class handlers:
method __init__ (line 7) | def __init__(self):
method handler1 (line 10) | def handler1(self, val):
method handler2 (line 13) | def handler2(self, val):
method handler3 (line 16) | def handler3(self, val):
class TestEvents (line 19) | class TestEvents(unittest.TestCase):
method setUp (line 21) | def setUp(self):
method test_event1 (line 25) | def test_event1(self):
method test_event2 (line 39) | def test_event2(self):
method test_event3 (line 43) | def test_event3(self):
FILE: tests/service_order.py
function printlist (line 36) | def printlist(title, d):
function checkorder (line 42) | def checkorder(result, *series):
class TestServiceOrder (line 53) | class TestServiceOrder(unittest.TestCase):
method test_order1 (line 55) | def test_order1(self):
method test_order2 (line 63) | def test_order2(self):
method test_order3 (line 69) | def test_order3(self):
FILE: tests/syslog_spec.py
class TestSyslogSpec (line 28) | class TestSyslogSpec(unittest.TestCase):
method test_specs (line 30) | def test_specs(self):
Condensed preview — 201 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (514K chars).
[
{
"path": ".gitignore",
"chars": 30,
"preview": "build/*\nchaperone.egg*\ndist/*\n"
},
{
"path": "CHANGELOG.md",
"chars": 7238,
"preview": "## 0.3.00 (2015-10-04)\n\nThis is a major release that adds a number of important features and refinements. Most importa"
},
{
"path": "LICENSE",
"chars": 588,
"preview": "Copyright (c) 2015, Gary J. Wisniewski <garyw@blueseastech.com>\n\nLicensed under the Apache License, Version 2.0 (the \"Li"
},
{
"path": "README",
"chars": 638,
"preview": "Chaperone is a lean, full-featured top-level system manager, similar to init, systemd, and others,\nbut designed for lean"
},
{
"path": "README.md",
"chars": 4289,
"preview": "\n#  Chaperone\n\n[:\n\n @classmethod\n def sendCommand(cls, cmd):\n loop = a"
},
{
"path": "chaperone/cproc/commands.py",
"chars": 8785,
"preview": "import os\nimport asyncio\nimport stat\nimport shlex\nfrom functools import partial\nfrom docopt import docopt\n\nfrom chaperon"
},
{
"path": "chaperone/cproc/process_manager.py",
"chars": 13291,
"preview": "import os\nimport pwd\nimport errno\nimport asyncio\nimport shlex\nimport signal\nimport datetime\n\nfrom functools import parti"
},
{
"path": "chaperone/cproc/pt/__init__.py",
"chars": 75,
"preview": "# Placeholder\n\nfrom chaperone.cproc.process_manager import TopLevelProcess\n"
},
{
"path": "chaperone/cproc/pt/cron.py",
"chars": 3435,
"preview": "import asyncio\nfrom aiocron import crontab\nfrom chaperone.cutil.logging import error, warn, debug, info\nfrom chaperone.c"
},
{
"path": "chaperone/cproc/pt/forking.py",
"chars": 1193,
"preview": "import asyncio\nfrom chaperone.cproc.subproc import SubProcess\nfrom chaperone.cutil.errors import ChProcessError\n\nclass F"
},
{
"path": "chaperone/cproc/pt/inetd.py",
"chars": 4755,
"preview": "import os\nimport asyncio\nfrom copy import copy\nfrom chaperone.cutil.logging import error, warn, debug, info\nfrom chapero"
},
{
"path": "chaperone/cproc/pt/notify.py",
"chars": 4961,
"preview": "import asyncio\nimport socket\nimport re\nfrom functools import partial\n\nfrom chaperone.cutil.errors import ChProcessError\n"
},
{
"path": "chaperone/cproc/pt/oneshot.py",
"chars": 1185,
"preview": "import asyncio\nfrom chaperone.cproc.subproc import SubProcess\nfrom chaperone.cutil.errors import ChProcessError\n\nclass O"
},
{
"path": "chaperone/cproc/pt/simple.py",
"chars": 1003,
"preview": "import asyncio\nfrom chaperone.cproc.subproc import SubProcess\n\nclass SimpleProcess(SubProcess):\n\n _fut_monitor = None"
},
{
"path": "chaperone/cproc/subproc.py",
"chars": 34911,
"preview": "import os\nimport asyncio\nimport shlex\nimport importlib\nimport signal\nimport errno\nfrom functools import partial\n\nfrom ti"
},
{
"path": "chaperone/cproc/version.py",
"chars": 914,
"preview": "# This file is designed to be used as a package module, but also as a main program runnable\n# by Python2 or Python3 whic"
},
{
"path": "chaperone/cproc/watcher.py",
"chars": 3679,
"preview": "import os\nimport asyncio\nimport threading\n\nfrom functools import partial\nfrom asyncio.unix_events import BaseChildWatche"
},
{
"path": "chaperone/cutil/__init__.py",
"chars": 14,
"preview": "# Placeholder\n"
},
{
"path": "chaperone/cutil/config.py",
"chars": 21161,
"preview": "import os\nimport re\nimport pwd\nimport shlex\nfrom operator import attrgetter\nfrom copy import deepcopy\nfrom itertools imp"
},
{
"path": "chaperone/cutil/env.py",
"chars": 19531,
"preview": "import re\nimport os\nimport subprocess\nfrom fnmatch import fnmatch\n\nfrom chaperone.cutil.logging import error, debug, war"
},
{
"path": "chaperone/cutil/errors.py",
"chars": 1212,
"preview": "import errno\n\nclass ChError(Exception):\n\n # Named the same as OSError so that exception code can detect the presence\n"
},
{
"path": "chaperone/cutil/events.py",
"chars": 2966,
"preview": "\nIS_EVENT = lambda e: e.startswith('on') and len(e) > 2 and e[2:3].isupper()\n\ndef SWALLOW_EVENT(*args, **kwargs):\n pa"
},
{
"path": "chaperone/cutil/format.py",
"chars": 1488,
"preview": "def fstr(s):\n if s is None:\n return '-'\n if isinstance(s, bool):\n return str(s).lower()\n return s"
},
{
"path": "chaperone/cutil/logging.py",
"chars": 2857,
"preview": "import logging\nimport os\nimport sys\nimport traceback\nfrom time import strftime\n\nfrom logging.handlers import SysLogHandl"
},
{
"path": "chaperone/cutil/misc.py",
"chars": 12885,
"preview": "import os\nimport pwd\nimport grp\nimport copy\nimport signal\nimport subprocess\n\nfrom chaperone.cutil.errors import ChNotFou"
},
{
"path": "chaperone/cutil/notify.py",
"chars": 5227,
"preview": "import asyncio\nimport socket\nimport os\nimport re\n\nfrom chaperone.cutil.servers import Server, ServerProtocol\nfrom chaper"
},
{
"path": "chaperone/cutil/patches.py",
"chars": 1773,
"preview": "import inspect\nimport importlib\n\n# This module contains patches to Python. A patch wouldn't appear here if it didn't ha"
},
{
"path": "chaperone/cutil/proc.py",
"chars": 2416,
"preview": "import os\nfrom chaperone.cutil.misc import get_signal_name\n\nclass ProcStatus(int):\n\n _other_error = None\n _errno ="
},
{
"path": "chaperone/cutil/servers.py",
"chars": 1488,
"preview": "import asyncio\nfrom functools import partial\nfrom chaperone.cutil.events import EventSource\n\nclass ServerProtocol(asynci"
},
{
"path": "chaperone/cutil/syslog.py",
"chars": 13724,
"preview": "import asyncio\nimport socket\nimport os\nimport re\nimport sys\nimport logging\n\nfrom time import strftime\nfrom functools imp"
},
{
"path": "chaperone/cutil/syslog_handlers.py",
"chars": 5679,
"preview": "import sys\nimport os\nimport socket\nimport asyncio\n\nfrom time import time, localtime, strftime\n\nfrom chaperone.cutil.misc"
},
{
"path": "chaperone/cutil/syslog_info.py",
"chars": 1311,
"preview": "import logging\nfrom logging.handlers import SysLogHandler\n\n# Copy all syslog levels\nfor k,v in SysLogHandler.__dict__.it"
},
{
"path": "chaperone/exec/__init__.py",
"chars": 14,
"preview": "# Placeholder\n"
},
{
"path": "chaperone/exec/chaperone.py",
"chars": 9503,
"preview": "\"\"\"\nLightweight process and service manager\n\nUsage:\n chaperone [--config=<file_or_dir>]\n [--user=<name> "
},
{
"path": "chaperone/exec/envcp.py",
"chars": 5327,
"preview": "\"\"\"\nCopy text files and expand environment variables as you copy.\n\nUsage:\n envcp [options] FILE ...\n\nOptions:\n "
},
{
"path": "chaperone/exec/sdnotify.py",
"chars": 2762,
"preview": "\"\"\"\nSystemd notify tool (compatible with systemd-notify)\n\nUsage:\n sdnotify [options] [VARIABLE=VALUE ...]\n\nOptions:\n "
},
{
"path": "chaperone/exec/sdnotify_exec.py",
"chars": 9050,
"preview": "\"\"\"\nSystemd notify exec shell (compatible with systemd-notify)\nRuns a program and either proxies or simulates sd-notify "
},
{
"path": "chaperone/exec/telchap.py",
"chars": 764,
"preview": "\"\"\"\nInteractive command tool for chaperone\n\nUsage:\n telchap <command> [<args> ...]\n\"\"\"\n\n# perform any patches first\ni"
},
{
"path": "doc/.gitignore",
"chars": 22,
"preview": "build/*\ndocserver/var\n"
},
{
"path": "doc/Makefile",
"chars": 5585,
"preview": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHINXBUILD "
},
{
"path": "doc/docserver/README",
"chars": 191,
"preview": "This is a basic documentation webserver that runs on port 8088 and points to the Sphinx\ndocumentation located in ../buil"
},
{
"path": "doc/docserver/build/Dockerfile",
"chars": 77,
"preview": "FROM chapdev/chaperone-lamp:latest\nADD . /setup/\nRUN /setup/build/install.sh\n"
},
{
"path": "doc/docserver/build/install.sh",
"chars": 321,
"preview": "cd /setup\n# remove existing chaperone.d and init.d from /apps so none linger\nrm -rf /apps/chaperone.d /apps/init.d\n# cop"
},
{
"path": "doc/docserver/build.sh",
"chars": 439,
"preview": "#!/bin/bash\n#Created by chaplocal on `date`\n# the cd trick assures this works even if the current directory is not curre"
},
{
"path": "doc/docserver/chaperone.d/010-start.conf",
"chars": 2642,
"preview": "# 010-start.conf\n#\n# This is the first start-up file for the chaperone base images. Note that start-up files\n# are proc"
},
{
"path": "doc/docserver/chaperone.d/120-apache2.conf",
"chars": 1570,
"preview": "# 120-apache2.conf\n#\n# Start up apache. This is a \"simple\" service, so chaperone will monitor Apache and restart\n# it i"
},
{
"path": "doc/docserver/etc/apache2.conf",
"chars": 4432,
"preview": "# This is the main Apache server configuration file. It contains the\n# configuration directives that give the server it"
},
{
"path": "doc/docserver/etc/init.sh",
"chars": 1280,
"preview": "#!/bin/bash\n# A quick script to initialize the system\n\n# We publish two variables for use in startup scripts:\n#\n# CONT"
},
{
"path": "doc/docserver/run.sh",
"chars": 809,
"preview": "#!/bin/bash\n#Created by chaplocal on Wed Jun 10 16:08:42 EST 2015\n\ncd ${0%/*} # go to directory of this file\nAPPS=$PWD\nc"
},
{
"path": "doc/source/_static/custom.css",
"chars": 326,
"preview": ".wy-table-responsive table td, .wy-table-responsive table th {\n white-space: normal !important;\n}\n\n.wy-table-responsive"
},
{
"path": "doc/source/_templates/layout.html",
"chars": 494,
"preview": "{% extends \"!layout.html\" %}\n\n{% block footer %}\n{{ super() }}\n<script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObj"
},
{
"path": "doc/source/conf.py",
"chars": 8420,
"preview": "# -*- coding: utf-8 -*-\n#\n# chaperone documentation build configuration file, created by\n# sphinx-quickstart on Mon May "
},
{
"path": "doc/source/guide/chap-docker-simple.rst",
"chars": 4363,
"preview": "\n.. _chap.example-docker:\n\nA Simple Docker Example\n=======================\n\nThe following example creates a simple Docke"
},
{
"path": "doc/source/guide/chap-docker-smaller.rst",
"chars": 4673,
"preview": "\n.. _chap.small-docker:\n\nCreating Small Docker Images\n============================\n\nThe default official Docker images a"
},
{
"path": "doc/source/guide/chap-docker.rst",
"chars": 960,
"preview": ".. _chap.docker:\n\nUsing Chaperone with Docker\n===========================\n\nWhile Chaperone is a general-purpose program "
},
{
"path": "doc/source/guide/chap-intro.rst",
"chars": 2351,
"preview": "\n.. _intro:\n\nIntroduction to Chaperone\n=========================\n\nOverview\n--------\n\nContainer technologies like Docker "
},
{
"path": "doc/source/guide/chap-other.rst",
"chars": 906,
"preview": "\n.. include:: /includes/incomplete.rst\n\n.. _chap.other:\n\nOther Uses for Chaperone\n========================\n\nChaperone wa"
},
{
"path": "doc/source/guide/chap-using.rst",
"chars": 223,
"preview": ".. _chap.using:\n\nUsing Chaperone\n===============\n\nChaperone is a simple, but full-featured process manager. It is desig"
},
{
"path": "doc/source/includes/defs.rst",
"chars": 31,
"preview": ".. |ENV| replace:: :kbd:`$ENV`\n"
},
{
"path": "doc/source/includes/incomplete.rst",
"chars": 222,
"preview": ".. note:: \n\n This section is being worked on and is not yet complete. The :ref:`reference` is currently complete and "
},
{
"path": "doc/source/index.rst",
"chars": 2491,
"preview": ".. chaperone documentation master file, created by\n sphinx-quickstart on Mon May 6 17:19:12 2013.\n You can adapt th"
},
{
"path": "doc/source/ref/command-line.rst",
"chars": 24176,
"preview": ".. chaperone documentation n\n command line documentation\n\n.. _ref.chaperone:\n\nChaperone Command Reference\n============"
},
{
"path": "doc/source/ref/config-format.rst",
"chars": 3000,
"preview": ".. chaperone documentation\n configuration directives\n\n.. _config.file-format:\n\nConfiguration File Format\n============="
},
{
"path": "doc/source/ref/config-global.rst",
"chars": 10077,
"preview": ".. chaperone documentation\n configuration directives\n\n.. include:: /includes/defs.rst\n\n.. _config.settings:\n\nConfigura"
},
{
"path": "doc/source/ref/config-logging.rst",
"chars": 15621,
"preview": ".. chapereone documentation\n configuration directives\n\n.. include:: /includes/defs.rst\n\n.. _logging:\n\nConfiguration: L"
},
{
"path": "doc/source/ref/config-service.rst",
"chars": 37123,
"preview": ".. chaperone documentation\n configuration directives\n\n.. include:: /includes/defs.rst\n\n.. _service:\n\nConfiguration: Se"
},
{
"path": "doc/source/ref/config.rst",
"chars": 990,
"preview": ".. chaperone documentation\n configuration directives\n\n.. _config:\n\nChaperone Configuration\n=======================\n\nCh"
},
{
"path": "doc/source/ref/env.rst",
"chars": 15337,
"preview": ".. include:: /includes/defs.rst\n\n.. _ch.env:\n\nEnvironment Variables\n=====================\n\nOverview and Quick Reference\n"
},
{
"path": "doc/source/ref/index.rst",
"chars": 931,
"preview": ".. chaperone documentation master file, created by\n sphinx-quickstart on Mon May 6 17:19:12 2013.\n You can adapt th"
},
{
"path": "doc/source/ref/utilities.rst",
"chars": 353,
"preview": ".. chaperone documentation\n configuration directives\n\n.. _utilities:\n\nAdditional Utilities\n====================\n\nIn ad"
},
{
"path": "doc/source/ref/utility-envcp.rst",
"chars": 5718,
"preview": ".. chaperone documentation n\n command line documentation\n\n.. _ref.envcp:\n\nUtility: ``envcp``\n==================\n\nOverv"
},
{
"path": "doc/source/status.rst",
"chars": 1244,
"preview": ".. _status:\n\nChaperone Project Status\n========================================================================\n\nThe Chap"
},
{
"path": "samples/README",
"chars": 223,
"preview": "These are some early samples that may still be useful.\n\nHowever, it is much better to take a look at:\n\t https://github.c"
},
{
"path": "samples/chaperone-devbase/Dockerfile",
"chars": 328,
"preview": "FROM ubuntu:14.04\n\nADD setup-bin/* *.sh /setup-bin/\nADD apps/ /apps/\nADD chaperone/ /setup-bin/chaperone/\nRUN /setup-bin"
},
{
"path": "samples/chaperone-devbase/apps/bin/README",
"chars": 174,
"preview": "Put commands which need to be executed at the command line, or by application\nprograms here. This directory will automa"
},
{
"path": "samples/chaperone-devbase/apps/chaperone.d/010-start.conf",
"chars": 756,
"preview": "# General environmental settings\n\nsettings: {\n env_set: {\n 'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin"
},
{
"path": "samples/chaperone-devbase/apps/etc/README",
"chars": 784,
"preview": "This is a \"mini etc\" directory which, as much as possible, is where all normal application and service configuration\nfil"
},
{
"path": "samples/chaperone-devbase/apps/etc/init.sh",
"chars": 1280,
"preview": "#!/bin/bash\n# A quick script to initialize the system\n\n# We publish two variables for use in startup scripts:\n#\n# CONT"
},
{
"path": "samples/chaperone-devbase/apps/init.d/README",
"chars": 675,
"preview": "Files in this directory are executed upon container startup by the ../etc/init.sh script.\n\nThere are two modes:\n\n1. Whe"
},
{
"path": "samples/chaperone-devbase/build-image.sh",
"chars": 169,
"preview": "#!/bin/bash\n\n# the cd trick assures this works even if the current directory is not current.\ncd ${0%/*}\n./setup-bin/buil"
},
{
"path": "samples/chaperone-devbase/install.sh",
"chars": 1285,
"preview": "#!/bin/bash\n\n# Assumes there is an \"optional\" apt-get proxy running on our HOST\n# on port 3142. You can run one by look"
},
{
"path": "samples/chaperone-lamp/Dockerfile",
"chars": 113,
"preview": "FROM chapdev/chaperone-base:latest\n\nADD *.sh /setup-bin/\nADD apps/ /apps/\nRUN /setup-bin/install.sh\n\nEXPOSE 8080\n"
},
{
"path": "samples/chaperone-lamp/apps/chaperone.d/105-mysqld.conf",
"chars": 394,
"preview": "settings: {\n env_set: {\n 'MYSQL_HOME': '$(APPS_DIR)/etc/mysql',\n 'MYSQL_UNIX_PORT': '$(APPS_DIR)/var/run/mysqld.sock'"
},
{
"path": "samples/chaperone-lamp/apps/chaperone.d/120-apache2.conf",
"chars": 797,
"preview": "apache2.service: {\n command: \"/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND\",\n enabled: true,\n resta"
},
{
"path": "samples/chaperone-lamp/apps/etc/apache2.conf",
"chars": 4183,
"preview": "# This is the main Apache server configuration file. It contains the\n# configuration directives that give the server it"
},
{
"path": "samples/chaperone-lamp/apps/etc/mysql/my.cnf",
"chars": 3498,
"preview": "#\n# The MySQL database server configuration file.\n#\n# You can copy this to one of:\n# - \"/etc/mysql/my.cnf\" to set global"
},
{
"path": "samples/chaperone-lamp/apps/etc/mysql/start_mysql.sh",
"chars": 455,
"preview": "#!/bin/bash\n\n# For a general query log, include the following:\n# --general-log-file=$APPS_DIR/log/mysqld-query.log\n# "
},
{
"path": "samples/chaperone-lamp/apps/init.d/mysql.sh",
"chars": 625,
"preview": "#!/bin/bash\n\ndistdir=/var/lib/mysql\nappdbdir=$APPS_DIR/var/mysql\n\nfunction dolog() { logger -t mysql.sh -p info $*; }\n\ni"
},
{
"path": "samples/chaperone-lamp/apps/init.d/phpmyadmin.sh",
"chars": 398,
"preview": "#!/bin/bash\n\npuser=${USER:-www-data}\n\nfunction dolog() { logger -t mysql.sh -p info $*; }\n\nif [ $CONTAINER_INIT == 1 ]; "
},
{
"path": "samples/chaperone-lamp/apps/www/default/index.php",
"chars": 18,
"preview": "<?= phpinfo(); ?>\n"
},
{
"path": "samples/chaperone-lamp/apps/www/sites.d/default.conf",
"chars": 416,
"preview": "<VirtualHost *:8080>\n\n\t# The ServerName directive sets the request scheme, hostname and port that\n\t# the server uses to "
},
{
"path": "samples/chaperone-lamp/build-image.sh",
"chars": 125,
"preview": "#!/bin/bash\n\n# the cd trick assures this works even if the current directory is not current.\ncd ${0%/*}\n./setup-bin/buil"
},
{
"path": "samples/chaperone-lamp/install.sh",
"chars": 1389,
"preview": "#!/bin/bash\n\nMYSQL_ROOT_PW='ChangeMe'\n\n# Assumes there is an \"optional\" apt-get proxy running on our HOST\n# on port 3142"
},
{
"path": "samples/docsample/Dockerfile",
"chars": 354,
"preview": "FROM ubuntu:14.04\nMAINTAINER garyw@blueseastech.com\n\nRUN apt-get update && \\\n apt-get install -y openssh-server apach"
},
{
"path": "samples/docsample/README",
"chars": 219,
"preview": "This is a sample designed as a substitute for the Docker \"supervisor\" sample\nat: https://docs.docker.com/articles/using_"
},
{
"path": "samples/docsample/chaperone.conf",
"chars": 223,
"preview": "sshd.service: { \n command: \"/usr/sbin/sshd -D\"\n}\n\napache2.service: {\n command: \"bash -c 'source /etc/apache2/envvars &"
},
{
"path": "samples/setup-bin/build",
"chars": 1604,
"preview": "#!/bin/bash\n\n# This is a great little program to make it easy to share basic build components across\n# a set of docker f"
},
{
"path": "samples/setup-bin/ct_setproxy",
"chars": 379,
"preview": "#/bin/bash\n# If our host has an apt proxy container running at 3142, then use it for apt\n\ndefhost=`ip route | awk '/defa"
},
{
"path": "samples/setup-bin/dot.bashrc",
"chars": 881,
"preview": "# ~/.bashrc: executed by bash(1) for non-login shells.\n# This is a simpler, stripped-down version for containers\n\n# If n"
},
{
"path": "sandbox/.gitignore",
"chars": 15,
"preview": "apps-*\nvar-*\n\n\n"
},
{
"path": "sandbox/.shinit",
"chars": 28,
"preview": "echo THIS IS THE SHELL INIT\n"
},
{
"path": "sandbox/README",
"chars": 536,
"preview": "Files in this directory were created ad-hoc by me as a sandbox testing area. Typically, I create a docker\nimage and poi"
},
{
"path": "sandbox/bare_startup.sh",
"chars": 352,
"preview": "#!/bin/bash\n# Used to start up a bare chaperone test image using ubuntu:latest. Helps for streamlining installation\n# a"
},
{
"path": "sandbox/bareimage/Dockerfile",
"chars": 88,
"preview": "FROM ubuntu:14.04\n\nADD setup-bin/* *.sh /setup-bin/\nRUN /setup-bin/install-bareimage.sh\n"
},
{
"path": "sandbox/bareimage/install-bareimage.sh",
"chars": 130,
"preview": "#!/bin/bash\n\n/setup-bin/ct_setproxy\napt-get update\napt-get -y install --no-install-recommends python3-pip\npip3 install s"
},
{
"path": "sandbox/bash.bashrc",
"chars": 188,
"preview": "PS1=\"image:\\W$ \"\nif [ \"$EMACS\" == \"t\" ]; then\n stty -echo\nfi\ncd $APPS_DIR/..\nPATH=$PWD/bin:$PATH\ncd $APPS_DIR\necho \"\"\ne"
},
{
"path": "sandbox/bin/chaperone",
"chars": 226,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "sandbox/bin/cps",
"chars": 125,
"preview": "#!/bin/bash\n# Shortcut for more relevant PS for containers\n\nps --forest -weo 'user,pid,ppid,pgid,sid,%cpu,%mem,stat,comm"
},
{
"path": "sandbox/bin/fakeentry",
"chars": 152,
"preview": "#!/bin/bash\n# Useful for testing if you want to inject a shell BEFORE chaperone starts by changing the entry point.\n\nexp"
},
{
"path": "sandbox/bin/repeat",
"chars": 1076,
"preview": "#!/usr/bin/python3\n\n\"\"\"\nRepeat utility for testing\n\nUsage: repeat [--nosignals] [-n=<reps>] [-i=<interval>] [-e] <messag"
},
{
"path": "sandbox/centos.d/apache.conf",
"chars": 374,
"preview": "apache1.service: {\n type: notify,\n command: \"/usr/sbin/httpd -DFOREGROUND\",\n enabled: true,\n restart: true,\n env_se"
},
{
"path": "sandbox/centos.d/app.conf",
"chars": 35,
"preview": "main.logging: {\n stderr: false,\n}\n"
},
{
"path": "sandbox/centos.d/cron.conf",
"chars": 88,
"preview": "cron.service: {\n bin: /usr/sbin/cron,\n args: -f,\n optional: true,\n restart: true,\n}\n"
},
{
"path": "sandbox/centos.d/sys1.conf",
"chars": 2112,
"preview": "settings: {\n env_inherit: ['SANDBOX', '_*'],\n env_set: {'TERM': 'xpath-revisited',\n 'QUESTIONER': 'the-law'"
},
{
"path": "sandbox/distserv/chaperone.d/005-config.conf",
"chars": 1393,
"preview": "# 005-config.conf\n#\n# Put container configuration variables here. This should strictly be for configuration\n# variables"
},
{
"path": "sandbox/distserv/chaperone.d/010-start.conf",
"chars": 1854,
"preview": "# 010-start.conf\n#\n# This is the first start-up file for the chaperone base images. Note that start-up files\n# are proc"
},
{
"path": "sandbox/distserv/chaperone.d/120-apache2.conf",
"chars": 948,
"preview": "# 120-apache2.conf\n#\n# Start up apache. This is a \"simple\" service, so chaperone will monitor Apache and restart\n# it i"
},
{
"path": "sandbox/distserv/etc/apache2.conf",
"chars": 3846,
"preview": "# This is the main Apache server configuration file. It contains the\n# configuration directives that give the server it"
},
{
"path": "sandbox/distserv/run.sh",
"chars": 2040,
"preview": "#!/bin/bash\n#Developer's startup script\n#Created by chaplocal on Thu Oct 15 03:47:31 UTC 2015\n\nIMAGE=\"chapdev/chaperone-"
},
{
"path": "sandbox/etc/apache2.conf",
"chars": 7101,
"preview": "# This is the main Apache server configuration file. It contains the\n# configuration directives that give the server it"
},
{
"path": "sandbox/etc/makezombie.conf",
"chars": 160,
"preview": "# A chaperone.d configuration which will create a zombie process\n\nzombie.service: {\n command: \"$(APPS_DIR)/../bin/daemo"
},
{
"path": "sandbox/test.d/apache.conf",
"chars": 503,
"preview": "apache1.service: {\n command: \"/usr/sbin/apache2 -f $(SANDBOX)/etc/apache2.conf\",\n enabled: true,\n restart: false,\n o"
},
{
"path": "sandbox/test.d/cron.conf",
"chars": 85,
"preview": "cron.service: {\n command: '/usr/sbin/cron -f',\n restart: true,\n enabled: false,\n}\n"
},
{
"path": "sandbox/test.d/sys1.conf",
"chars": 2421,
"preview": "settings: {\n env_inherit: ['SANDBOX', '_*'],\n env_set: {'TERM': 'xpath-revisited',\n 'QUESTIONER': 'the-law'"
},
{
"path": "sandbox/testbare",
"chars": 434,
"preview": "#!/bin/bash\n\n# Used to test a bareimage, an image which was created \"as if\" chaperone was JUST installed from pip,\n# mos"
},
{
"path": "sandbox/testcent",
"chars": 236,
"preview": "#!/bin/bash\n\nSANDBOX=$PWD\n\ndocker run -t -i -e \"TERM=$TERM\" --rm=true -v /home:/home --entrypoint=$SANDBOX/chaperone -e "
},
{
"path": "sandbox/testdock",
"chars": 227,
"preview": "#!/bin/bash\n\nSANDBOX=$PWD\n\ndocker run -t -i -e \"TERM=$TERM\" --rm=true -v /home:/home --entrypoint=$SANDBOX/chaperone -e "
},
{
"path": "sandbox/testimage",
"chars": 1531,
"preview": "#!/bin/bash\n# Used to create an apps directory here in the sandbox which runs a\n# standard docker image, however uses th"
},
{
"path": "sandbox/testvar",
"chars": 1065,
"preview": "#!/bin/bash\n# Used to create an apps directory here in the sandbox which runs a\n# standard docker image, however uses th"
},
{
"path": "sandbox/user.d/sys1.conf",
"chars": 2269,
"preview": "settings: {\n env_inherit: ['SANDBOX', '_*'],\n env_set: {'TERM': 'xpath-revisited',\n 'QUESTIONER': 'the-law'"
},
{
"path": "setup.py",
"chars": 2362,
"preview": "import os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\n\nif sys.version_info < (3,):\n prin"
},
{
"path": "tests/.gitignore",
"chars": 10,
"preview": "test_logs\n"
},
{
"path": "tests/README.md",
"chars": 1276,
"preview": "This directory contains both Chaperone unit tests as well as more complex integration tests. The `run-all-tests.sh` scr"
},
{
"path": "tests/bin/chaperone",
"chars": 226,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "tests/bin/daemon",
"chars": 1307,
"preview": "#!/usr/bin/python3\n\n\"\"\"\nForks a process in a daemon-like fashion for testing.\n\nUsage:\n daemon [--wait=seconds] [--ign"
},
{
"path": "tests/bin/daemonutil.py",
"chars": 3762,
"preview": "\"\"\"\n\nGeneric linux daemon base class for python 3.x.\n\nFrom: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_da"
},
{
"path": "tests/bin/envcp",
"chars": 222,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "tests/bin/expect-lite-command-run",
"chars": 507,
"preview": "#!/bin/bash\n\nfunction RUNTASK() { \n expect-lite-image-run --task $*\n}\n\nfunction RUNIMAGE() { \n export CHTEST_DOCKE"
},
{
"path": "tests/bin/expect-lite-image-run",
"chars": 708,
"preview": "#!/bin/bash\n\noptions=\"\"\nif [ \"$CHTEST_CONTAINER_NAME\" != \"\" ]; then\n options=\"--name $CHTEST_CONTAINER_NAME\"\nfi\n\nif [[ "
},
{
"path": "tests/bin/expect-test-command",
"chars": 75,
"preview": "#!/bin/bash\n\nexport EL_SHELL=\"expect-lite-command-run\"\nexec expect-lite $1\n"
},
{
"path": "tests/bin/expect-test-image",
"chars": 73,
"preview": "#!/bin/bash\n\nexport EL_SHELL=\"expect-lite-image-run\"\nexec expect-lite $1\n"
},
{
"path": "tests/bin/get-serial",
"chars": 180,
"preview": "#!/bin/bash\n\nserfile=$CHTEST_HOME/serial.dat\nif [ ! -f $serfile ]; then\n current=0\nelse\n current=$(cat $serfile)\nfi\n\nl"
},
{
"path": "tests/bin/is-running",
"chars": 50,
"preview": "#!/bin/bash\n\nps -C $1 >/dev/null && exit 0\nexit 1\n"
},
{
"path": "tests/bin/kill-from-pidfile",
"chars": 74,
"preview": "#!/bin/bash\n\npidfile=$1\n\nif [ -f $pidfile ]; then\n sudo kill `cat $1`\nfi\n"
},
{
"path": "tests/bin/logecho",
"chars": 112,
"preview": "#!/bin/bash\n\nif [ \"$SERVICE_NAME\" == \"\" ]; then\n SERVICE_NAME=\"pid$$\"\nfi\n\nlogger -p info -t $SERVICE_NAME \"$*\"\n"
},
{
"path": "tests/bin/proctool",
"chars": 1414,
"preview": "#!/usr/bin/python3\n\n\"\"\"\nTool to create processes for various purposes.\n\nUsage:\n proctool [--dump] [--hang] [--wait=se"
},
{
"path": "tests/bin/read_from_port",
"chars": 175,
"preview": "#!/bin/bash\n\nif nc --version >/dev/null 2>&1; then\n # nmap.org accepts --version and has different syntax (lovely eh)\n"
},
{
"path": "tests/bin/sdnotify",
"chars": 225,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "tests/bin/sdnotify-exec",
"chars": 230,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "tests/bin/talkback",
"chars": 171,
"preview": "#!/usr/bin/python3\n# Simple echo script to test inetd\n\nimport sys\n\nfor line in sys.stdin:\n if \"EXIT\" in line:\n exit("
},
{
"path": "tests/bin/telchap",
"chars": 224,
"preview": "#!/usr/bin/python3\n\nimport sys\nimport os\n\n# Assure we use the local package for testing and development\nsys.path[0] = os"
},
{
"path": "tests/bin/test-driver",
"chars": 2434,
"preview": "#!/bin/bash\n# Assumes the current directory contains executable files and runs them all.\n\nfunction relpath() { python -c"
},
{
"path": "tests/el-tests/basic-1/chaperone.conf",
"chars": 199,
"preview": "settings: {\n env_set: {\n PATH: \"$(TESTHOME)/bin:$(PATH)\",\n }\n}\n\necho.service: {\n command: \"echo first output\",\n s"
},
{
"path": "tests/el-tests/basic-1/test-001.elt",
"chars": 152,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Test simplest possible commmand service\n>RUNIMAGE\n<first output\n<queueing 'RE"
},
{
"path": "tests/el-tests/basic-1/test-002.elt",
"chars": 130,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Test simplest possible task\n\n>RUNTASK proctool testing-123\n<proctool says: te"
},
{
"path": "tests/el-tests/cron-1/chaperone.conf",
"chars": 2770,
"preview": "settings: {\n env_set: { PATH: \"$(TESTHOME)/bin:$(PATH)\" }\n}\n\ncron1-echo.service: {\n type: cron,\n enabled: \"$(ENABLE_"
},
{
"path": "tests/el-tests/cron-1/simulate-rotate.sh",
"chars": 618,
"preview": "echo simulating rotation\necho SIMULATE-ROTATE SERIAL NUMBER: $(get-serial)\nservice=$1\ntelchap=$2\n$(is-running apache2) &"
},
{
"path": "tests/el-tests/cron-1/test-001.elt",
"chars": 226,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Cron services - Simple echo\n\n>(sleep 15; echo \"K\"\"ILL ME NOW\")&\n>RUNIMAGE -e "
},
{
"path": "tests/el-tests/cron-1/test-004.elt",
"chars": 466,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Complex Apache and background restart - timing tests for process termination\n"
},
{
"path": "tests/el-tests/cron-1/test-005.elt",
"chars": 462,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Complex Apache and background restart - process termination with PIDFILE\n\n@40"
},
{
"path": "tests/el-tests/cron-1/test-006.elt",
"chars": 287,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Oneshot job kills Apache - be sure Chaperone terminates\n\n>RUNIMAGE -e ENABLE_"
},
{
"path": "tests/el-tests/cron-1/test-007.elt",
"chars": 376,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Cron job killing apache keeps container running (cron scheduled)\n\n@30\n>(sleep"
},
{
"path": "tests/el-tests/cron-1/test-008.elt",
"chars": 260,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Cron job disables self plus kills Apache, container should die\n\n@30\n>RUNIMAGE"
},
{
"path": "tests/el-tests/exitkills-1/chaperone.conf",
"chars": 518,
"preview": "settings: {\n env_set: { PATH: \"$(TESTHOME)/bin:$(PATH)\" }\n}\n\ntest1-keeper.service: {\n command: \"bash -c 'logecho laggi"
},
{
"path": "tests/el-tests/exitkills-1/test-001.elt",
"chars": 229,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Forking service - combined with exit_kills\n\n@20\n>RUNIMAGE\n<: daemon running\n<"
},
{
"path": "tests/el-tests/fork-1/chaperone.conf",
"chars": 1408,
"preview": "settings: {\n env_set: { PATH: \"$(TESTHOME)/bin:$(PATH)\" }\n}\n\ntest1-exit1.service: {\n type: forking,\n enabled: \"$(ENAB"
},
{
"path": "tests/el-tests/fork-1/test-001.elt",
"chars": 201,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Forking service - spawn daemon normally\n\n>RUNIMAGE -e ENABLE_EXIT1=true\n<test"
},
{
"path": "tests/el-tests/fork-1/test-001b.elt",
"chars": 260,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Forking service - spawn daemon - error on spawn\n\n>RUNIMAGE -e ENABLE_EXIT1B=t"
},
{
"path": "tests/el-tests/fork-1/test-003.elt",
"chars": 327,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Forking services - no kill for untracked processes (using apache)\n\n>(sleep 8;"
},
{
"path": "tests/el-tests/fork-1/test-004.elt",
"chars": 353,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Forking services - track processes using pidfile (using apache)\n\n>RUNIMAGE -e"
},
{
"path": "tests/el-tests/inetd-1/chaperone.conf",
"chars": 609,
"preview": "settings: {\n env_set: { PATH: \"$(TESTHOME)/bin:$(PATH)\" }\n}\n\n# INETD1: simple test\n\ninetd1.service: {\n enabled: \"$(ENA"
},
{
"path": "tests/el-tests/inetd-1/test-001.elt",
"chars": 840,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: inetd - simple exit service, keeps running\n\n# Start the image and capture the"
},
{
"path": "tests/el-tests/inetd-1/test-002.elt",
"chars": 1050,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: inetd - second service disables both\n\n# Start the image and capture the conta"
},
{
"path": "tests/el-tests/notify-1/chaperone.conf",
"chars": 1102,
"preview": "settings: {\n env_set: { PATH: \"$(TESTHOME)/bin:$(PATH)\", SERVICE_NAME: \"$(_CHAP_SERVICE)\" },\n process_timeout: 5,\n}\n\nt"
},
{
"path": "tests/el-tests/notify-1/test-001.elt",
"chars": 272,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Notify service - spawn daemon normally - never gets notified\n\n>RUNIMAGE -e EN"
},
{
"path": "tests/el-tests/notify-1/test-001b.elt",
"chars": 262,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Notify service - spawn daemon - error during grace period\n\n>RUNIMAGE -e ENABL"
},
{
"path": "tests/el-tests/notify-1/test-001c.elt",
"chars": 244,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Notify service - spawn daemon - error while waiting for notify\n\n>RUNIMAGE -e "
},
{
"path": "tests/el-tests/notify-1/test-001d.elt",
"chars": 257,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Notify service - spawn daemon - error from notifying process\n\n>RUNIMAGE -e EN"
},
{
"path": "tests/el-tests/notify-1/test-001e.elt",
"chars": 266,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Notify service - spawn daemon - normal ready notification\n\n>RUNIMAGE -e ENABL"
},
{
"path": "tests/el-tests/simple-1/chaperone.conf",
"chars": 1114,
"preview": "test1-exit1.service: {\n type: simple,\n enabled: \"$(ENABLE_EXIT1:-false)\",\n command: \"echo exit immediately\",\n}\n\n# The"
},
{
"path": "tests/el-tests/simple-1/test-001.elt",
"chars": 239,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - exit immediately\n\n>RUNIMAGE -e ENABLE_EXIT1=true\n<test1-exi"
},
{
"path": "tests/el-tests/simple-1/test-002.elt",
"chars": 184,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - all processes disabled\n\n>RUNIMAGE\n<test1-exit1.service not "
},
{
"path": "tests/el-tests/simple-1/test-003.elt",
"chars": 360,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - no kill for untracked processes (using apache)\n\n>(sleep 8; "
},
{
"path": "tests/el-tests/simple-1/test-004.elt",
"chars": 339,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - track processes using pidfile (using apache)\n\n>RUNIMAGE -e "
},
{
"path": "tests/el-tests/simple-2/chaperone.conf",
"chars": 1151,
"preview": "settings: {\n detect_exit: false,\n}\n\ntest1-exit1.service: {\n type: simple,\n enabled: \"$(ENABLE_EXIT1:-false)\",\n comma"
},
{
"path": "tests/el-tests/simple-2/test-001.elt",
"chars": 334,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - exit immediately - no exit detection\n\n>(sleep 5; echo \"K\"\"I"
},
{
"path": "tests/el-tests/simple-2/test-002.elt",
"chars": 276,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - all processes disabled - no exit detection\n\n>(sleep 5; echo"
},
{
"path": "tests/el-tests/simple-2/test-003.elt",
"chars": 380,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - no kill for untracked processes (using apache) - no exit de"
},
{
"path": "tests/el-tests/simple-2/test-004.elt",
"chars": 410,
"preview": "#!/usr/bin/env expect-test-command\n#TITLE: Simple services - track processes using pidfile (using apache) - no exit dete"
},
{
"path": "tests/env_expand.py",
"chars": 14909,
"preview": "from prefix import *\n\nfrom chaperone.cutil.env import Environment\n\nENV1 = {\n \"HOME\": '/usr/garyw',\n \"APPS-DIR\": '$"
},
{
"path": "tests/env_parse.py",
"chars": 2237,
"preview": "from prefix import *\n\nfrom chaperone.cutil.env import EnvScanner\n\nTEST1 = (\n ('Nothing',),\n ('A normal $(expansion"
},
{
"path": "tests/events.py",
"chars": 2500,
"preview": "from prefix import *\n\nfrom chaperone.cutil.events import EventSource\n\nclass handlers:\n \n def __init__(self):\n "
},
{
"path": "tests/prefix.py",
"chars": 349,
"preview": "import sys\nimport os\nimport unittest\n\nif sys.version_info < (3,):\n print(\"You must run tests with Python 3 only. Pyt"
},
{
"path": "tests/run-all-tests.sh",
"chars": 195,
"preview": "#!/bin/bash\n# Runs both unit tests as well as process integration tests\n\npython3 env_expand.py\npython3 env_parse.py\npyth"
},
{
"path": "tests/run-el.sh",
"chars": 604,
"preview": "#!/bin/bash\n\nfunction relpath() { python -c \"import os,sys;print(os.path.relpath(*(sys.argv[1:])))\" \"$@\"; }\n\nexport PATH"
},
{
"path": "tests/run-shell.sh",
"chars": 264,
"preview": "#!/bin/bash\n\nif [ \"$1\" == \"\" ]; then\n echo 'usage: run_shell.sh <relative-test-subdir-path>'\n exit\nfi\n\nexport PATH=$PW"
},
{
"path": "tests/service_order.py",
"chars": 2480,
"preview": "from prefix import *\n\nfrom chaperone.cutil.config import ServiceDict\n\nOT1 = {\n 'one.service': { },\n 'two.service':"
}
]
// ... and 1 more file (download for full content)
About this extraction
This page contains the full source code of the garywiz/chaperone GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 201 files (473.5 KB), approximately 123.3k tokens, and a symbol index with 490 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.