[
  {
    "path": ".dockerignore",
    "content": ".tox\n.git\n.idea\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "---\nname: tron-ci\non:\n  push:\n    branches:\n      - master\n    tags:\n      - v*.*\n  pull_request:\n  release:\njobs:\n  tox:\n    runs-on: ubuntu-22.04\n    strategy:\n      fail-fast: false\n      matrix:\n        toxenv:\n          - py310,docs\n    steps:\n      - uses: actions/checkout@v2\n      - uses: actions/setup-python@v2\n        with:\n          python-version: '3.10'\n      # GHA won't setup tox for us\n      - run: pip install tox==3.2\n        # there are no pre-built wheels for bsddb3, so we need to install\n        # its dpkg dependencies so that we can build a wheel when we're\n        # creating our env. Once we get rid of bsddb3 as a Python dependency,\n        # then we can also get rid of this dpkg\n      - run: sudo apt-get install --quiet --assume-yes libdb5.3-dev\n        # we explictly attempt to import the C extensions for some PyYAML\n        # functionality, so we need the LibYAML bindings provided by this\n        # package\n      - run: sudo apt-get install --quiet --assume-yes libyaml-dev\n      - run: tox -e ${{ matrix.toxenv }}\n  build_debs:\n    runs-on: ubuntu-22.04\n    strategy:\n      fail-fast: false\n      matrix:\n        dist:\n          - jammy\n    steps:\n      - uses: actions/checkout@v2\n      - uses: actions/setup-python@v2\n        with:\n          python-version: '3.10'\n      # Update package lists to ensure we have the latest information\n      - run: sudo apt-get update\n      # the container provided by GitHub doesn't include utilities\n      # needed for dpkg building, so we need to install `devscripts`\n      # to bring those in\n      - run: sudo apt-get install --quiet --assume-yes devscripts\n      - run: make itest_${{ matrix.dist }}\n      - uses: actions/upload-artifact@v4\n        with:\n          name: deb-${{ matrix.dist }}\n          path: dist/tron_*.deb\n  cut_release:\n    runs-on: ubuntu-22.04\n    needs: build_debs\n    steps:\n      - uses: actions/checkout@v2\n      - run: mkdir -p dist/\n      - uses: actions/download-artifact@v4\n        with:\n          name: deb-jammy\n          path: dist/\n      - name: Release\n        uses: softprops/action-gh-release@v1\n        if: startsWith(github.ref, 'refs/tags/v')\n        with:\n          generate_release_notes: true\n          files: |\n            dist/tron_*.deb\n          fail_on_unmatched_files: true\n"
  },
  {
    "path": ".github/workflows/security-review.yml",
    "content": "# Managed by terraform, do not edit manually\nname: Security Review\npermissions:\n  pull-requests: write\n  contents: read\n  id-token: write\non:\n  pull_request:\njobs:\n  security:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          ref: ${{ github.event.pull_request.head.sha || github.sha }}\n          fetch-depth: 2\n      - name: Configure AWS credentials\n        uses: aws-actions/configure-aws-credentials@v4\n        with:\n          role-to-assume: arn:aws:iam::${{ secrets.AWS_DEV_ACCOUNT_ID }}:role/security-review-bot\n          aws-region: us-west-2\n      - uses: anthropics/claude-code-security-review@0c6a49f1fa56a1d472575da86a94dbc1edb78eda\n        with:\n          comment-pr: true\n          claude-api-key: \"github-actions\"\n          claude-model: \"us.anthropic.claude-opus-4-6-v1\"\n          run-every-commit: true\n        env:\n          CLAUDE_CODE_USE_BEDROCK: \"1\"\n          AWS_REGION: \"us-west-2\"\n"
  },
  {
    "path": ".gitignore",
    "content": "dist\nbuild\nMANIFEST\ntron.egg-info\n*.pyc\n*._*\n*.swp\n*.swo\ndocs/_build/\n.idea\n.vscode\n.fleet\ntron.iml\ndocs/images/\n*.dot\ntronweb/js/cs/*.js\nyarn.lock\ntronweb_tests/spec/*.js\ntronweb_tests/lib/\n.tox\n.tox-indocker\ntron.iml\n__pycache__/\n.pytest_cache/\ntron_state\ntron.lock\nmanhole.sock\nmanhole.sock.lock\nnode_modules/\n\n# Example cluster\nexample-cluster/config\nexample-cluster/MASTER.*\nexample-cluster/tron-repl.lock\nexample-cluster/tron_state*\nexample-cluster/manhole.sock*\nexample-cluster/_events/\n*.stdout\n*.stderr\ndev/manhole.sock.lock\ndev/tron.pid\ndev/_events/\n\n# Generated debian artifacts\ndebian/.debhelper/\ndebian/debhelper-build-stamp\ndebian/files\ndebian/tron\ndebian/tron.debhelper.log\ndebian/tron.postinst.debhelper\ndebian/tron.postrm.debhelper\ndebian/tron.preinst.debhelper\ndebian/tron.prerm.debhelper\ndebian/tron.substvars\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "---\ndefault_language_version:\n    python: python3.10\nrepos:\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v2.5.0\n    hooks:\n      - id: trailing-whitespace\n      - id: end-of-file-fixer\n        exclude: CHANGELOG.md\n      - id: check-docstring-first\n      - id: check-json\n      - id: check-yaml\n      - id: requirements-txt-fixer\n      - id: fix-encoding-pragma\n        args: [--remove]\n      - id: pretty-format-json\n        args: [--autofix, --indent, '4', --no-sort-keys]\n  - repo: https://github.com/PyCQA/flake8\n    rev: 5.0.4\n    hooks:\n      - id: flake8\n        exclude: ^docs/source/conf.py$\n  - repo: https://github.com/asottile/reorder_python_imports\n    rev: v1.9.0\n    hooks:\n      - id: reorder-python-imports\n        args: [--py3-plus]\n  - repo: https://github.com/asottile/pyupgrade\n    rev: v3.20.0\n    hooks:\n      - id: pyupgrade\n        args: [--py39-plus]\n  - repo: local\n    hooks:\n      - id: patch-enforce-autospec\n        name: mock.patch enforce autospec\n        description: |\n          This hook ensures all mock.patch invocations specify an autospec\n        entry: contrib/mock_patch_checker.py\n        language: script\n        files: ^tests/.*\\.py$\n  - repo: http://github.com/psf/black\n    rev: 22.3.0\n    hooks:\n      - id: black\n        args: [--target-version, py310]\n"
  },
  {
    "path": ".pyautotest",
    "content": "test_runner_name: \"testify\"\n"
  },
  {
    "path": ".readthedocs.yaml",
    "content": "# Read the Docs configuration file for Sphinx projects\n# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details\n\n# Required\nversion: 2\n\n# RTD defaults as of 2023-11-08\nbuild:\n  os: ubuntu-22.04\n  tools:\n    python: \"3.10\"\n    # You can also specify other tool versions:\n    # nodejs: \"20\"\n    # rust: \"1.70\"\n    # golang: \"1.20\"\n\n# Also provide downloadable zip\nformats: [htmlzip]\n\n# Build documentation in the \"docs/\" directory with Sphinx\nsphinx:\n  configuration: docs/source/conf.py\n  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs\n  # builder: \"dirhtml\"\n  # Fail on all warnings to avoid broken references\n  # fail_on_warning: true\n\n# Optionally build your docs in additional formats such as PDF and ePub\n# formats:\n#   - pdf\n#   - epub\n\n# Optional but recommended, declare the Python requirements required\n# to build your documentation\n# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html\npython:\n  install:\n    - requirements: requirements-docs.txt\n"
  },
  {
    "path": "AGENTS.md",
    "content": "# AGENTS.md\n\nThis file provides guidance to AI coding agents working with code in this repository.\n\n## What is Tron?\n\nTron is Yelp's centralized batch job scheduling system—a distributed alternative to cron for managing periodic batch processes across a cluster.\n\n### Core Concepts\n\n**Jobs** are DAGs (directed acyclic graphs) defined in YAML configuration. Each job contains one or more **Actions** (individual commands) with dependency relationships between them.\n\n**JobRuns** are instances of a job execution. If a job runs daily, each execution creates a new JobRun containing **ActionRuns** for each action. Tron tracks each run independently.\n\n### Lifecycle\n\n```mermaid\nflowchart LR\n    A[Startup] --> B[Restore state from DynamoDB]\n    B --> C[Schedule jobs]\n    C --> D[Track state changes]\n    D --> E[Save state to DynamoDB]\n    E --> C\n```\n\n### State Persistence\n\nTron persists all job and run state to DynamoDB. 101 consecutive save errors triggers intentional crash (prevents running degraded).\n\n```mermaid\nflowchart LR\n    A[State change] --> B[Buffer]\n    B -->|buffer full| C[Save queue]\n    C --> D[Pop from queue]\n    D --> E[Delete existing entries]\n    E --> F[Partition into chunks]\n    F --> G[Batch write to DynamoDB]\n    G -->|failure| C\n```\n\n### Execution Backends\n\n- **Kubernetes**: Primary execution backend\n- **SSH**: Legacy backend for remote command execution, do not extend\n- **Mesos**: Deprecated, do not extend\n\n## Project Structure\n\n```\ntron/\n├── core/           # Job, Action, JobRun, scheduling, dependency graphs\n├── config/         # YAML config parsing and schema definitions\n├── serialize/      # State persistence (DynamoDB, shelve backends)\n├── api/            # REST API endpoints and adapters\n├── kubernetes.py   # Kubernetes execution backend\n└── mesos.py        # Deprecated - do not modify\n\ntronweb/            # Web UI (CoffeeScript/Backbone.js)\nbin/                # CLI: trond, tronctl, tronview, tronfig\n```\n\n## Testing\n\nTox manages the virtualenv in `.tox/py310/`. Use `make test` for the full suite, or iterate with pytest directly:\n\n```bash\n.tox/py310/bin/pytest tests/path/to/test.py -x\n```\n\n## Development Guardrails\n\n### High-risk areas\n\n**DynamoDB/Persistence changes:**\n- Pickle deserialization is still active—deleting or renaming persisted classes/fields breaks restore\n- Reverting changes that add new persisted fields is NOT safe\n- Writes batch 8 partitions at a time; large jobs needing more can be partially written if a later batch fails\n\n**Job/action schema changes** require updates in two repos:\n- Tron: `tron/config/schema.py`, `tron/core/action.py` (including `from_json`/`to_json`)\n- PaaSTA: `paasta_tools/cli/schemas/tron_schema.json`, `paasta_tools/tron_tools.py`\n- Plus: tests in both repos, and any code that consumes the new field\n\n**MASTER config changes**:\n- `tron/config/schema.py` — Add field to config object\n- `tron/config/config_parse.py` — Add default value and validator\n- Plus: tests, and any code that consumes the new config value\n\nReverting config changes is risky: new params get written to MASTER.yaml on disk, so reverting code requires manual config cleanup on servers.\n\n### Do not modify\n\n- `tron/mesos.py` — Deprecated\n- `tron/ssh.py` - Deprecated\n- `tron/node.py` - Deprecated\n\n\n- DynamoDB schema without approval\n"
  },
  {
    "path": "CODEOWNERS",
    "content": "# NOTE: \"we\" in this file will refer to the Compute Infrastructure team at Yelp\n* @Yelp/paasta\n#\n# prevent cheeky modifications :)\nCODEOWNERS @Yelp/paasta\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "Copyright 2010-2012 Yelp\n\n  Licensed under the Apache License, Version 2.0 (the \"License\");\n  you may not use this file except in compliance with the License.\n  You may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n"
  },
  {
    "path": "MANIFEST.in",
    "content": "include *.txt\ninclude *.md\ninclude Makefile\ninclude tron/default_config.yaml\ninclude tron/logging.conf\ninclude tron/named_config_template.yaml\nrecursive-include tests *.py *.yaml\nrecursive-include docs *.rst *.yaml *.1 *.8\nrecursive-include tronweb *\nrecursive-exclude tronweb *.coffee\n"
  },
  {
    "path": "Makefile",
    "content": "# Edit this release and run \"make release\"\nRELEASE=3.10.0\n\nSHELL=/bin/bash\n\nDOCKER_RUN = docker run -t -v $(CURDIR):/work:rw -v $(CURDIR)/.tox-indocker:/work/.tox:rw\nUID:=$(shell id -u)\nGID:=$(shell id -g)\n\nifeq ($(findstring .yelpcorp.com,$(shell hostname -f)), .yelpcorp.com)\n\tPAASTA_ENV ?= YELP\nelse\n\tPAASTA_ENV ?= $(shell hostname --fqdn)\nendif\n\nNOOP = true\nifeq ($(PAASTA_ENV),YELP)\n\texport PIP_INDEX_URL ?= http://169.254.255.254:20641/$*/simple/\n\tADD_MISSING_DEPS_MAYBE:=-diff --unchanged-line-format= --old-line-format= --new-line-format='%L' ./requirements.txt ./yelp_package/extra_requirements_yelp.txt >> ./requirements.txt\nelse\n\texport PIP_INDEX_URL ?= https://pypi.python.org/simple\n\tADD_MISSING_DEPS_MAYBE:=$(NOOP)\nendif\n\n.PHONY : all clean tests docs dev\n\n-usage:\n\t@echo \"make test - Run tests\"\n\t@echo \"make deb_jammy - Generate jammy deb package\"\n\t@echo \"make itest_jammy - Run tests and integration checks\"\n\t@echo \"make _itest_jammy - Run only integration checks\"\n\t@echo \"make release - Prepare debian info for new release\"\n\t@echo \"make clean - Get rid of scratch and byte files\"\n\t@echo \"make dev - Get a local copy of trond running in debug mode in the foreground\"\n\ndocker_%:\n\t@echo \"Building docker image for $*\"\n\t[ -d dist ] || mkdir -p dist\n\tcd ./yelp_package/$* && docker build --build-arg PIP_INDEX_URL=${PIP_INDEX_URL} -t tron-builder-$* .\n\ndeb_%: clean docker_% coffee_%\n\t@echo \"Building deb for $*\"\n\t# backup these files so we can temp modify them\n\tcp requirements.txt requirements.txt.old\n\t$(ADD_MISSING_DEPS_MAYBE)\n\t$(DOCKER_RUN) -e PIP_INDEX_URL=${PIP_INDEX_URL} tron-builder-$* /bin/bash -c ' \\\n\t\tdpkg-buildpackage -d &&                  \\\n\t\tmv ../*.deb dist/ &&                     \\\n\t\trm -rf debian/tron                    \\\n\t'\n\t# restore the backed up files\n\tmv requirements.txt.old requirements.txt\n\ncoffee_%: docker_%\n\t@echo \"Building tronweb\"\n\t$(DOCKER_RUN) tron-builder-$* /bin/bash -c '       \\\n\t\trm -rf tronweb/js/cs &&                        \\\n\t\tmkdir -p tronweb/js/cs &&                      \\\n\t\tcoffee -o tronweb/js/cs/ -c tronweb/coffee/ \\\n\t'\n\ntest:\n\ttox -e py310\n\ntest_in_docker_%: docker_%\n\t$(DOCKER_RUN) tron-builder-$* python3.10 -m tox -vv -e py310\n\ntox_%:\n\ttox -e $*\n\n_itest_%:\n\t$(DOCKER_RUN) ubuntu:$* /work/itest.sh\n\ndebitest_%: deb_% _itest_%\n\t@echo \"Package for $* looks good\"\n\nitest_%: debitest_%\n\t@echo \"itest $* OK\"\n\ndev:\n\tSSH_AUTH_SOCK=$(SSH_AUTH_SOCK) .tox/py310/bin/trond --debug --working-dir=dev -l logging.conf --host=0.0.0.0\n\nexample_cluster:\n\ttox -e example-cluster\n\nyelpy:\n\t.tox/py310/bin/pip install -r yelp_package/extra_requirements_yelp.txt\n\n\n# 1. Bump version at the top of this file\n# 2. 
`make release`\n\nVERSION = $(firstword $(subst -, ,$(RELEASE) ))\nLAST_COMMIT_MSG = $(shell git log -1 --pretty=%B | sed -e 's/\\x27/\"/g')\nrelease:\n\t@if [[ \"$$(git status --porcelain --untracked-files=no :^/Makefile)\" != '' ]]; then echo \"Error: Working directory is not clean; only changes to Makefile are allowed when cutting a release.\"; exit 1; fi\n\t$(eval untracked_files_tmpfile=$(shell mktemp))\n\tgit status --porcelain --untracked-files=all :^./Makefile > $(untracked_files_tmpfile)\n\t@if [[ \"$$(git status --porcelain --untracked-files=normal :/docs/source/generated)\" != '' ]]; then echo \"Error: Untracked files found in docs/source/generated.\"; exit 1; fi\n\t@if existing_sha=$$(git rev-parse --verify --quiet v$(VERSION)); then echo \"Error: tag v$(VERSION) exists and points at $$existing_sha\"; exit 1; fi\n\t@read upstream_master junk <<<\"$$(git ls-remote -h origin master)\" && if ! git merge-base --is-ancestor $$upstream_master HEAD; then echo \"Error: HEAD is missing commits from origin/master ($$upstream_master).\"; exit 1; fi\n\tdch -v $(RELEASE) --distribution jammy --changelog ./debian/changelog $$'$(VERSION) tagged with \\'make release\\'\\rCommit: $(LAST_COMMIT_MSG)'\n\tsed -i -e \"s/__version__ = .*/__version__ = \\\"$(VERSION)\\\"/\" ./tron/__init__.py\n\tmake docs || true\n\tgit add ./Makefile ./debian/changelog ./tron/__init__.py ./docs/source/generated/\n\tgit commit -m \"Released $(RELEASE) via make release\"\n\tif [[ \"$$(git status --porcelain --untracked-files=all)\" != \"$$(<$(untracked_files_tmpfile))\" ]]; then echo \"Error: automatic git commit left some files uncommitted. Fix the git commit command in ./Makefile to include any automatically generated files that it is currently missing.\"; exit 1; fi\n\tgit tag v$(VERSION)\n\tgit push --atomic origin master v$(VERSION)\n\ndocs:\n\ttox -r -e docs\n\nman:\n\twhich $(SPHINXBUILD) >/dev/null && $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(DOCS_DIR) $(DOCS_DIR)/source/man || true\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(DOCS_BUILDDIR)/source/man.\"\n\nclean:\n\trm -rf tronweb/js/cs\n\tfind . -name '*.pyc' -delete\n"
  },
  {
    "path": "OWNERS",
    "content": "---\nteams:\n- Compute Infra <compute-infra@yelp.com>\n"
  },
  {
    "path": "README.md",
    "content": "Tron - Batch Scheduling System\n==============================\n\n[![Build Status](https://github.com/Yelp/Tron/actions/workflows/ci.yml/badge.svg?query=branch%3Amaster)](https://github.com/Yelp/Tron/actions/workflows/ci.yml)\n[![Documentation Status](https://readthedocs.org/projects/tron/badge/?version=latest)](http://tron.readthedocs.io/en/latest/?badge=latest)\n\nTron is a centralized system for managing periodic batch processes\nacross a cluster. If you find [cron](http://en.wikipedia.org/wiki/Cron) or\n[fcron](http://fcron.free.fr/) to be insufficient for managing complex work\nflows across multiple computers, Tron might be for you.\n\nInstall with:\n\n    > sudo pip install tron\n\nOr look at the [tutorial](http://tron.readthedocs.io/en/latest/tutorial.html).\n\nThe full documentation is available [on ReadTheDocs](http://tron.readthedocs.io/en/latest/).\n\nVersions / Roadmap\n------------------\n\nTron is changing and under active development.\n\nIt is being transformed from an ssh-based execution engine to be compatible with running on [Kubernetes\n](https://kubernetes.io/docs/concepts/overview/).\n\nTron development is specifically targeting Yelp's needs and not designed to be\na general solution for other companies.\n\n\nContributing\n------------\n\nRead [Working on Tron](http://tron.readthedocs.io/en/latest/developing.html) and\nstart sending pull requests!\n\nAny issues should be posted [on Github](http://github.com/Yelp/Tron/issues).\n\nBerkeleyDB on Mac OS X\n----------------------\n\n    $ brew install berkeley-db\n    $ export BERKELEYDB_DIR=$(brew --cellar)/berkeley-db/<installed version>\n    $ export YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION=1\n"
  },
  {
    "path": "bin/generate_tron_tab_completion_cache",
    "content": "#!/usr/bin/env python\n\"\"\"\nprint a list of all the tron jobs, to be saved as a cache for tab completion\n\"\"\"\nimport argcomplete\n\nfrom tron.commands import cmd_utils\nfrom tron.commands.client import Client\n\n\ndef main():\n    parser = cmd_utils.build_option_parser()\n    argcomplete.autocomplete(parser)\n    args = parser.parse_args()\n    cmd_utils.load_config(args)\n\n    client = Client(args.server)\n    for job in client.jobs(include_job_runs=True, include_action_runs=True):\n        print(job[\"name\"])\n        for run in job[\"runs\"]:\n            print(run[\"id\"])\n            for action in run[\"runs\"]:\n                print(action[\"id\"])\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "bin/tronctl",
    "content": "#!/usr/bin/env python\n\"\"\"Tron Control\n\nPart of the command line interface to the tron daemon. Provides the interface\nto controlling jobs and runs.\n\"\"\"\nimport argparse\nimport asyncio\nimport datetime\nimport logging\nimport pprint\nimport sys\nfrom collections import defaultdict\nfrom collections.abc import Callable\nfrom collections.abc import Generator\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport argcomplete  # type: ignore\n\nfrom tron import __version__\nfrom tron.commands import client\nfrom tron.commands import cmd_utils\nfrom tron.commands.backfill import BackfillRun\nfrom tron.commands.backfill import confirm_backfill\nfrom tron.commands.backfill import DEFAULT_MAX_PARALLEL_RUNS\nfrom tron.commands.backfill import get_date_range\nfrom tron.commands.backfill import LIMIT_MAX_PARALLEL_RUNS\nfrom tron.commands.backfill import print_backfill_cmds\nfrom tron.commands.backfill import print_backfill_runs_table\nfrom tron.commands.backfill import run_backfill_for_date_range\nfrom tron.commands.client import RequestError\nfrom tron.commands.client import TronObjectIdentifier\nfrom tron.commands.cmd_utils import COLOR_YELLOW\nfrom tron.commands.cmd_utils import ExitCode\nfrom tron.commands.cmd_utils import suggest_possibilities\nfrom tron.commands.cmd_utils import tron_jobs_completer\nfrom tron.commands.cmd_utils import warning_output\nfrom tron.commands.retry import parse_deps_timeout\nfrom tron.commands.retry import print_retries_table\nfrom tron.commands.retry import retry_actions\nfrom tron.commands.retry import RetryAction\n\nCOMMAND_HELP = (\n    (\n        \"start\",\n        \"job name, job run id, or action id\",\n        \"Start the selected job, job run, or action. Creates a new job run if starting a job.\",\n    ),\n    (\n        \"rerun\",\n        \"job run id\",\n        \"Start a new job run with the same start time command context as the given job run.\",\n    ),\n    (\n        \"retry\",\n        \"action id\",\n        \"Re-run a job action within an existing job run. Uses latest code/config except the command by default. Add --use-latest-command to use the latest command.\",\n    ),\n    (\"recover\", \"action id\", \"Ask Tron to start tracking an UNKNOWN action run again\"),\n    (\"cancel\", \"job run id\", \"Cancel the selected job run.\"),\n    (\n        \"backfill\",\n        \"job name\",\n        \"Start job runs for a particular date range\",\n    ),\n    (\n        \"disable\",\n        \"job name\",\n        \"Disable selected job and cancel any outstanding runs. WARNING: you *must* disable the job in yelpsoa-configs to guarantee it will not be re-enabled.\",\n    ),\n    (\"enable\", \"job name\", \"Enable the selected job and schedule the next run\"),\n    (\n        \"fail\",\n        \"job run or action id\",\n        \"Mark an UNKNOWN job or action as failed. Does not publish action triggers.\",\n    ),\n    (\n        \"success\",\n        \"job run or action id\",\n        \"Mark an UNKNOWN job or action as having succeeded. Will publish action triggers.\",\n    ),\n    (\n        \"skip\",\n        \"action id\",\n        \"Skip a failed action, unblocks dependent actions. Does *not* publish action triggers.\",\n    ),\n    (\n        \"skip-and-publish\",\n        \"action id\",\n        \"Skip a failed action, unblocks dependent actions. 
*Does* publish action triggers.\",\n    ),\n    (\"stop\", \"action id\", \"Stop the action run (SIGTERM)\"),\n    (\"kill\", \"action id\", \"Force kill the action run (SIGKILL)\"),\n    (\"move\", \"job name\", \"Rename a job\"),\n    (\"publish\", \"trigger id\", \"Publish actionrun trigger to kick off downstream jobs\"),\n    (\"discard\", \"trigger id\", \"Discard existing actionrun trigger\"),\n    (\"version\", None, \"Print tron client and server versions\"),\n)\n\nlog = logging.getLogger(\"tronctl\")\n\n\ndef parse_date(date_string):\n    return datetime.datetime.strptime(date_string, \"%Y-%m-%d\")\n\n\ndef parse_cli():\n    parser = cmd_utils.build_option_parser()\n    subparsers = parser.add_subparsers(dest=\"command\", title=\"commands\", help=\"Tronctl command to run\", required=True)\n\n    cmd_parsers = {}\n    for cmd_name, id_help_text, desc in COMMAND_HELP:\n        cmd_parsers[cmd_name] = subparsers.add_parser(cmd_name, help=desc, description=desc)\n        if id_help_text:\n            cmd_parsers[cmd_name].add_argument(\n                \"id\", nargs=\"*\", help=id_help_text\n            ).completer = cmd_utils.tron_jobs_completer\n\n        # HACK: this is slightly funky since we already add --verbose in cmd_utils.build_option_parser(),\n        # but that requires something like tronctl --verbose start JOB rather than tronctl start -v JOB\n        cmd_parsers[cmd_name].add_argument(\n            \"-v\",\n            \"--verbose\",\n            action=\"count\",\n            help=\"Verbose logging\",\n            default=None,\n        )\n\n    # start\n    cmd_parsers[\"start\"].add_argument(\n        \"--run-date\",\n        type=parse_date,\n        dest=\"run_date\",\n        help=\"What the run-date should be set to\",\n    )\n\n    # backfill\n    backfill_parser = cmd_parsers[\"backfill\"]\n    mutex_dates_group = backfill_parser.add_mutually_exclusive_group(required=True)\n    mutex_dates_group.add_argument(\n        \"--start-date\",\n        type=parse_date,\n        dest=\"start_date\",\n        help=\"First run-date to backfill\",\n    )\n    backfill_parser.add_argument(\n        \"--end-date\",\n        type=parse_date,\n        dest=\"end_date\",\n        help=(\n            \"Last run-date to backfill (note: many jobs operate on date-1), \"\n            \"assuming --start-date is set. This date is inclusive. Defaults to today.\"\n        ),\n    )\n    backfill_parser.add_argument(\n        \"--descending\",\n        action=\"store_true\",\n        default=False,\n        help=(\n            \"If set, backfill from end date to start date. Otherwise, \"\n            \"the default is to backfill from start date to end date.\"\n        ),\n    )\n    mutex_dates_group.add_argument(\n        \"-d\",\n        \"--dates\",\n        type=lambda v: [parse_date(date_str.strip()) for date_str in v.split(\",\")],\n        dest=\"dates\",\n        help=(\n            \"List of comma-separated dates to run backfills on. \"\n            \"Backfills will be executed for dates in the order they are presented.\"\n        ),\n    )\n    backfill_parser.add_argument(\n        \"-P\",\n        \"--max-parallel\",\n        type=int,\n        dest=\"max_parallel\",\n        default=DEFAULT_MAX_PARALLEL_RUNS,\n        help=(\n            \"The max number of dates that can be backfilled in parallel. \"\n            \"Before setting, consider how much in resources your job needs. 
\"\n            \"If it needs a lot, keep this number low, because there may not be \"\n            \"enough resources in the cluster too satisfy the demand, which can \"\n            \"adversely affect other jobs. \"\n            \"The default is %(default)s.\"\n        ),\n    )\n    backfill_parser.add_argument(\n        \"--fail-on-error\",\n        dest=\"fail_on_error\",\n        action=\"store_true\",\n        default=False,\n        help=(\n            \"If set, the overall backfill will fail immediately if a backfill \"\n            \"for a single date fails. All in-progress backfills will cancelled. \"\n            \"If a single backfill is still considered successful it was otherwise \"\n            \"cancelled or skipped by the user. \"\n            \"By default, individual backfill failures are ignored.\"\n        ),\n    )\n    backfill_parser.add_argument(\n        \"--dry-run\",\n        action=\"store_true\",\n        default=False,\n        help=\"Prints the equivalent `tronctl start` commands for the backfill\",\n    )\n\n    # retry\n    retry_parser = cmd_parsers[\"retry\"]\n    retry_parser.add_argument(\n        \"--use-latest-command\",\n        action=\"store_true\",\n        default=False,\n        help=\"Use the latest command in tronfig rather than the original command when the action run was created\",\n    )\n    retry_parser.add_argument(\n        \"--wait-for-deps\",\n        type=parse_deps_timeout,\n        default=0,\n        dest=\"deps_timeout\",\n        help=(\n            \"Max duration to wait for upstream dependencies (upstream triggers \"\n            \"and/or same job actions) before attempting to retry. \"\n            \"If all dependencies are not done when the timeout expires, \"\n            \"this command will exit with an error, and the action will NOT be retried. \"\n            \"Must be either an int number of seconds, a human-readable/\"\n            \"pytimeparse-parsable string, or 'infinity' to wait forever. 
\"\n            \"Defaults to 0 (don't wait).\"\n        ),\n    )\n\n    argcomplete.autocomplete(parser)\n    args = parser.parse_args()\n\n    return args\n\n\ndef request(url: str, data: dict[str, Any], headers=None, method=None) -> bool:\n    # We want every tronctl request to be attributable\n    response = client.request(url, data=data, headers=headers, method=method, user_attribution=True)\n    if response.error:\n        print(f\"Error: {response.content}\")\n        return False\n    print(response.content.get(\"result\", \"OK\"))\n    return True\n\n\ndef event_publish(args):\n    for event in args.id:\n        # trying to publish a job run/action run id will likely print multiple warnings\n        # since the conditions are somewhat overlapping - only print the first one\n        warning_printed = False\n        split_event = event.split(\".\")\n        # first, let's try to catch folks trying to publish a job run or action run id as a trigger\n        # these will look something like NAMESPACE.JOB_NAME.RUN_NUMBER or NAMESPACE.JOB_NAME.RUN_NUMBER.ACTION_NAME\n        # i.e., if the 3rd element is an integer, it's one of these\n        if len(split_event) >= 3:\n            try:\n                int(split_event[2])\n                print(\n                    warning_output(\n                        f\"\\nWarning: the event id '{event}' looks like a job run or action run id rather than a trigger id!\",\n                        color=COLOR_YELLOW,\n                    )\n                )\n                print(\n                    warning_output(\n                        \"This is almost certainly incorrect and you want something like `tronctl publish $SERVICE.$JOB_NAME.$ACTION_NAME.$TRIGGER_NAME.$TRIGGER_VALUE`\",\n                        color=COLOR_YELLOW,\n                    )\n                )\n                warning_printed = True\n            except ValueError:\n                pass\n\n        if len(split_event) != 5 and not warning_printed:\n            print(\n                warning_output(\n                    f\"\\nWarning: '{event}' is too {'long' if len(split_event) > 5 else 'short'} and does not match the expected trigger format!\",\n                    color=COLOR_YELLOW,\n                )\n            )\n            print(\n                warning_output(\n                    \"This is almost certainly incorrect and you want something like `tronctl publish $SERVICE.$JOB_NAME.$ACTION_NAME.$TRIGGER_NAME.$TRIGGER_VALUE`\",\n                    color=COLOR_YELLOW,\n                )\n            )\n\n        yield request(\n            urljoin(args.server, \"/api/events\"),\n            dict(command=\"publish\", event=event),\n        )\n\n\ndef event_discard(args):\n    for event in args.id:\n        yield request(\n            urljoin(args.server, \"/api/events\"),\n            dict(command=\"discard\", event=event),\n        )\n\n\ndef _get_triggers_for_action(server: str, action_identifier: str) -> tuple[str, ...] 
| None:\n    try:\n        namespace, job_name, run_number, action_name = action_identifier.split(\".\")\n    except ValueError:\n        print(\n            f\"Unable to fully decompose {action_identifier}: expected an identifier of the form (namespace).(job).(run).(action)\"\n        )\n        return None\n\n    trigger_response = client.request(\n        uri=urljoin(\n            server,\n            f\"/api/jobs/{namespace}.{job_name}/{run_number}/{action_name}\",\n        ),\n    )\n    if trigger_response.error:\n        print(f\"Unable to fetch downstream triggers for {action_identifier}: {trigger_response.error}\")\n        return None\n\n    # triggers are returned by the API as comma-separated values with a space after every comma, which is\n    # not automation-friendly - thus the non-standard multi-character split\n    triggers = trigger_response.content.get(\"trigger_downstreams\", \"\").split(\", \")\n\n    # the API will return an empty string for actions with no triggers to emit, but splitting '' yields [''],\n    # so we want to make sure that we return an empty iterable in this case\n    return tuple(f\"{namespace}.{job_name}.{action_name}.{trigger}\" for trigger in triggers if trigger)\n\n\ndef skip_and_publish(server: str, tron_id: TronObjectIdentifier, identifier: str) -> bool:\n    all_success = True\n    print(f\"Skipping {identifier}...\")\n    if request(\n        url=urljoin(server, tron_id.url),\n        data={\"command\": \"skip\"},\n    ):\n        print(f\"Successfully skipped {identifier}.\")\n        print(f\"\\nFetching triggers to publish for {identifier}...\")\n        # a single action can have 0..N triggers to publish and these can be arbitrarily named, so we need to\n        # query the API and figure out what triggers exist\n        triggers = _get_triggers_for_action(server=server, action_identifier=identifier)\n        if triggers is None:\n            print(f\"\\nEncountered error getting triggers to publish for {identifier}!\")\n            return False\n        elif not triggers:\n            print(f\"{identifier} has no triggers to publish - just skipping instead.\")\n            # TODO: should we check this up-front and refuse to skip if there are no triggers that will be\n            # published rather than carry on under the assumption that the user copy-pasted/typo'd the identifier?\n            return True\n\n        else:\n            # TODO: this loop should use event_publish(), but we'd need to refactor how the CLI works and stop passing\n            # around the full set of args everywhere to do so\n            print(\"\\nTriggers to publish:\")\n            print(\"\\n\".join(f\"    * {trigger}\" for trigger in triggers) + \"\\n\")\n            for trigger in triggers:\n                print(f\"Publishing trigger {trigger}...\")\n                if not request(\n                    url=urljoin(server, \"/api/events\"),\n                    data={\"command\": \"publish\", \"event\": trigger},\n                ):\n                    print(\n                        f\"Failed to publish trigger {trigger} - you may want to retry this command or manually publish the trigger!\"\n                    )\n                    all_success = False\n    else:\n        print(f\"\\nFailed to skip {identifier}!\")\n        return False\n\n    return all_success\n\n\ndef control_objects(args: argparse.Namespace):\n    tron_client = client.Client(args.server, user_attribution=True)\n    url_index = tron_client.index()\n    for identifier in args.id:\n   
     try:\n            tron_id = client.get_object_type_from_identifier(\n                url_index,\n                identifier,\n            )\n        except ValueError as e:\n            possibilities = list(\n                tron_jobs_completer(prefix=\"\", client=tron_client),\n            )\n            suggestions = suggest_possibilities(\n                word=identifier,\n                possibilities=possibilities,\n            )\n            raise SystemExit(f\"Error: {e}{suggestions}\")\n\n        if args.command == \"skip-and-publish\":\n            # this command is more of a pseudo-command - skip and publish are handled in two different resources\n            # and changing the API would be painful, so instead we call skip + publish separately from the client\n            # (i.e., this file) to implement this functionality\n            yield skip_and_publish(args.server, tron_id, identifier)\n\n        else:\n            data = dict(command=args.command)\n            if args.command == \"start\" and args.run_date:\n                data[\"run_time\"] = str(args.run_date)\n            yield request(urljoin(args.server, tron_id.url), data)\n            # NOTE: ideally we'd add this message in the JobController handle_command() function, but having the API return terminal escape codes\n            # sounds like a bad idea, so we're doing it here instead\n            if args.command == \"disable\":\n                print(\n                    warning_output(\n                        \"WARNING: jobs disabled with tronctl disable are *NOT* guaranteed to stay disabled. You must disable the job in yelpsoa-configs to guarantee it will not be re-enabled.\"\n                    )\n                )\n\n\ndef retry(args):\n    if args.deps_timeout != RetryAction.NO_TIMEOUT:\n        deps_timeout_str = \"forever\"  # timeout = -1 (RetryAction.WAIT_FOREVER)\n        if args.deps_timeout > 0:\n            deps_timeout_str = \"up to \" + str(datetime.timedelta(seconds=args.deps_timeout))\n        print(\n            f\"We will wait {deps_timeout_str} for all upstream triggers to be published \"\n            \"and required actions to finish successfully before issuing retries for the \"\n            \"following actions:\"\n        )\n        print()\n        pprint.pprint(args.id)\n        print()\n\n    retries = retry_actions(args.server, args.id, args.use_latest_command, args.deps_timeout)\n    print_retries_table(retries)\n    yield all([r.succeeded for r in retries])\n\n\ndef move(args):\n    try:\n        old_name = args.id[0]\n        new_name = args.id[1]\n    except IndexError as e:\n        raise SystemExit(f\"Error: Move command needs two arguments.\\n{e}\")\n\n    tron_client = client.Client(args.server, user_attribution=True)\n    url_index = tron_client.index()\n    job_index = url_index[\"jobs\"]\n    if old_name not in job_index.keys():\n        raise SystemExit(f\"Error: {old_name} doesn't exist\")\n    if new_name in job_index.keys():\n        raise SystemExit(f\"Error: {new_name} exists already\")\n\n    data = dict(command=\"move\", old_name=old_name, new_name=new_name)\n    yield request(urljoin(args.server, \"/api/jobs\"), data)\n\n\ndef backfill(args):\n    if not args.id:\n        print(\"Error: must provide at least one id argument\")\n        yield False\n    if args.max_parallel > LIMIT_MAX_PARALLEL_RUNS:\n        raise SystemExit(\n            f\"The flag --max-parallel exceeds the allowed limit of {LIMIT_MAX_PARALLEL_RUNS}. 
\"\n            + \"Please reach out to the Tron team if you need to run backfills with higher limits.\"\n        )\n\n    if args.start_date:\n        if args.end_date is None:\n            args.end_date = datetime.datetime.today()\n        dates = get_date_range(args.start_date, args.end_date, descending=args.descending)\n    else:\n        dates = args.dates\n    date_strs = [d.date().isoformat() for d in dates]\n\n    job_name = args.id[0]\n    if args.dry_run:\n        print_backfill_cmds(job_name, date_strs)\n        yield True\n    else:\n        if confirm_backfill(job_name, date_strs):\n            loop = asyncio.get_event_loop()\n            try:\n                backfill_runs = loop.run_until_complete(\n                    run_backfill_for_date_range(\n                        args.server,\n                        job_name,\n                        dates,\n                        max_parallel=args.max_parallel,\n                        ignore_errors=(not args.fail_on_error),\n                    ),\n                )\n            finally:\n                loop.close()\n\n            print_backfill_runs_table(backfill_runs)\n            yield all(br.run_state in BackfillRun.SUCCESS_STATES for br in backfill_runs)\n\n\ndef tron_version(args):\n    local_version = __version__\n    print(f\"Tron client version: {local_version}\")\n    response = client.request(urljoin(args.server, \"/api/status\"))\n    if response.error:\n        print(f\"Error: {response.content}\")\n        yield\n    server_version = response.content.get(\"version\", \"unknown\")\n    print(f\"Tron server version: {server_version}\")\n    if server_version != local_version:\n        print(\"Warning: client and server versions should match\")\n        yield\n    yield True\n\n\nCOMMANDS: dict[str, Callable[[argparse.Namespace], Generator[bool, None, None]]] = defaultdict(\n    lambda: control_objects,\n    publish=event_publish,\n    discard=event_discard,\n    backfill=backfill,\n    move=move,\n    retry=retry,\n    version=tron_version,\n)\n\n\ndef main():\n    \"\"\"run tronctl\"\"\"\n    args = parse_cli()\n    cmd_utils.load_config(args)\n\n    # NOTE: we do this after load_configs() since load_config() may set some logging defaults that we want to override\n    desired_level = cmd_utils.setup_logging(args)\n    logging.getLogger().setLevel(desired_level)\n\n    cmd = COMMANDS[args.command]\n    try:\n        for ret in cmd(args):\n            if not ret:\n                sys.exit(ExitCode.fail)\n    except RequestError as err:\n        print(\n            f\"Error connecting to the tron server ({args.server}): {err}\",\n            file=sys.stderr,\n        )\n        sys.exit(ExitCode.fail)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "bin/tronctl_tabcomplete.sh",
    "content": "if [[ -n ${ZSH_VERSION-} ]]; then\n\tautoload -U +X bashcompinit && bashcompinit\nfi\n\n# This magic eval enables tab-completion for tron commands\n# http://argcomplete.readthedocs.io/en/latest/index.html#synopsis\neval \"$(/opt/venvs/tron/bin/register-python-argcomplete tronctl)\"\n"
  },
  {
    "path": "bin/trond",
    "content": "#!/usr/bin/env python\n\"\"\" Start the Tron server daemon.\"\"\"\nimport argparse\nimport faulthandler\nimport logging\nimport os\nimport time\nimport traceback\n\nimport pkg_resources\n\nimport tron\nfrom tron import trondaemon\nfrom tron.commands import cmd_utils\nfrom tron.config import manager\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_CONF = \"default_config.yaml\"\nDEFAULT_CONF_PATH = \"config/\"\nDEFAULT_WORKING_DIR = \"/var/lib/tron/\"\nDEFAULT_LOCKFILE = \"tron.lock\"\nDEFAULT_LOCKPATH = \"/var/run/\" + DEFAULT_LOCKFILE\n\n\ndef parse_cli():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        \"--version\",\n        action=\"version\",\n        version=f\"{parser.prog} {tron.__version__}\",\n    )\n\n    parser.add_argument(\n        \"-w\",\n        \"--working-dir\",\n        default=DEFAULT_WORKING_DIR,\n        help=\"Working directory for the Tron daemon, default %(default)s\",\n    )\n\n    parser.add_argument(\n        \"-c\",\n        \"--config-path\",\n        default=DEFAULT_CONF_PATH,\n        help=\"File path to the Tron configuration file\",\n    )\n\n    parser.add_argument(\n        \"--nodaemon\",\n        action=\"store_true\",\n        default=False,\n        help=\"[DEPRECATED] Disable daemonizing, default %(default)s\",\n    )\n\n    parser.add_argument(  # for backwards compatibility\n        \"--pid-file\",\n        help=\"[DEPRECATED] File path to pid file. Use --lock-file instead.\",\n    )\n\n    parser.add_argument(\n        \"--lock-file\",\n        help=\"File path to lock file, defaults to %s if working directory \"\n        \"is default. Otherwise defaults to <working dir>/%s\" % (DEFAULT_LOCKPATH, DEFAULT_LOCKFILE),\n    )\n\n    logging_group = parser.add_argument_group(\"logging\", \"\")\n    logging_group.add_argument(\n        \"--log-conf\",\n        \"-l\",\n        help=\"File path to a custom logging.conf\",\n    )\n\n    logging_group.add_argument(\n        \"-v\",\n        \"--verbose\",\n        action=\"count\",\n        default=0,\n        help=\"Verbose logging. 
Repeat for more verbosity.\",\n    )\n\n    logging_group.add_argument(\n        \"--debug\",\n        action=\"store_true\",\n        help=\"Debug mode, extra error reporting, no daemonizing\",\n    )\n\n    api_group = parser.add_argument_group(\"Web Service API\", \"\")\n    api_group.add_argument(\n        \"--port\",\n        \"-P\",\n        dest=\"listen_port\",\n        type=int,\n        help=\"TCP port number to listen on, default %(default)s\",\n        default=cmd_utils.DEFAULT_PORT,\n    )\n\n    api_group.add_argument(\n        \"--host\",\n        \"-H\",\n        dest=\"listen_host\",\n        help=\"Hostname to listen on, default %(default)s\",\n        default=cmd_utils.DEFAULT_HOST,\n    )\n\n    requirement = pkg_resources.Requirement.parse(\"tron\")\n    api_group.add_argument(\n        \"--web-path\",\n        default=pkg_resources.resource_filename(\n            requirement,\n            \"tronweb\",\n        ),\n        help=\"Path to static web resources, default %(default)s.\",\n    )\n\n    args = parser.parse_args()\n    args.working_dir = os.path.abspath(args.working_dir)\n\n    if args.log_conf:\n        args.log_conf = os.path.join(args.working_dir, args.log_conf)\n        if not os.path.exists(args.log_conf):\n            parser.error(\"Logging config file not found: %s\" % args.log_conf)\n\n    if not args.lock_file:\n        if args.pid_file:  # for backwards compatibility\n            args.lock_file = args.pid_file\n        elif args.working_dir == DEFAULT_WORKING_DIR:\n            args.lock_file = DEFAULT_LOCKPATH\n        else:\n            args.lock_file = DEFAULT_LOCKFILE\n\n    args.lock_file = os.path.join(args.working_dir, args.lock_file)\n    args.config_path = os.path.join(\n        args.working_dir,\n        args.config_path,\n    )\n\n    return args\n\n\ndef create_default_config(config_path):\n    \"\"\"Create a default empty configuration for first time installs\"\"\"\n    default = pkg_resources.resource_string(tron.__name__, DEFAULT_CONF)\n    manager.create_new_config(config_path, default)\n\n\ndef setup_environment(args):\n    \"\"\"Setup the working directory and config environment.\"\"\"\n    if not os.path.exists(args.working_dir):\n        os.makedirs(args.working_dir)\n\n    if not os.path.isdir(args.working_dir) or not os.access(args.working_dir, os.R_OK | os.W_OK | os.X_OK):\n        msg = \"Error, can't access working directory %s\" % args.working_dir\n        raise SystemExit(msg)\n\n    # Attempt to create a default config if config is missing\n    if not os.path.exists(args.config_path):\n        try:\n            create_default_config(args.config_path)\n        except OSError as e:\n            msg = \"Error creating default configuration at %s: %s\"\n            log.debug(traceback.format_exc())\n            raise SystemExit(msg % (args.config_path, e))\n\n    if not os.access(args.config_path, os.R_OK | os.W_OK):\n        msg = \"Error opening configuration %s: Missing Permissions\"\n        raise SystemExit(msg % args.config_path)\n\n\ndef main():\n    args = parse_cli()\n    boot_time = time.time()\n    setup_environment(args)\n    trond = trondaemon.TronDaemon(args)\n    trond.run(boot_time)\n\n\nif __name__ == \"__main__\":\n    # print tracebacks on signals/faults\n    # NOTE: you likely want to read https://docs.python.org/3/library/faulthandler.html\n    # as these tracebacks will look slightly different\n    faulthandler.enable()\n\n    try:\n        main()\n    # this is a little weird, but every now and then we're 
seeing a mysterious tron exit\n    # that doesn't seem to correspond with anything else - let's catch BaseException\n    # (and therefore SystemExit) in case anything is calling sys.exit() since there's no\n    # traceback when we see this\n    except (\n        BaseException,\n        # technically, we really only need to catch BaseException - but let's be extra-paranoid\n        Exception,\n    ):\n        traceback.print_exc()\n        raise\n"
  },
  {
    "path": "bin/tronfig",
    "content": "#!/usr/bin/env python\nimport logging\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport traceback\n\nfrom tron.commands import cmd_utils\nfrom tron.commands.client import Client\nfrom tron.config import config_parse\nfrom tron.config import ConfigError\nfrom tron.config import manager\nfrom tron.config import schema\n\nlog = logging.getLogger(\"tronfig\")\n\n\ndef parse_cli():\n    parser = cmd_utils.build_option_parser()\n\n    parser.add_argument(\n        \"-p\",\n        \"--print\",\n        action=\"store_true\",\n        dest=\"print_config\",\n        help=\"Print config to stdout, rather than uploading\",\n    )\n    parser.add_argument(\n        \"-C\",\n        \"--check\",\n        action=\"store_true\",\n        dest=\"check\",\n        help=\"Upload and check configuration, don't apply, \"\n        \"useful when you want to verify if tron daemon \"\n        \"will accept your configuration.\",\n    )\n    parser.add_argument(\n        \"-d\",\n        \"--delete\",\n        action=\"store_true\",\n        help=\"Delete the configuration for this namespace\",\n    )\n    parser.add_argument(\n        \"-V\",\n        \"--validate\",\n        action=\"store_true\",\n        dest=\"validate\",\n        help=\"Only validate configuration, don't upload, \"\n        \"useful for verifying config locally. If namespace \"\n        \"is not specified, it will be derived from file \"\n        \"name, if any.\",\n    )\n    parser.add_argument(\n        \"-D\",\n        \"--validate-dir\",\n        action=\"store_true\",\n        dest=\"validate_dir\",\n        help=\"Full validation of a folder, don't upload, \" \"same as -V but checks for more edge-cases\",\n    )\n    parser.add_argument(\n        \"-n\",\n        \"--namespace\",\n        action=\"store\",\n        help=\"Alternate namespace to use\",\n    )\n    parser.add_argument(\n        \"-m\",\n        \"--master-config\",\n        action=\"store\",\n        dest=\"master_config\",\n        help=\"Source of master configuration file\",\n    )\n    parser.add_argument(\"source\")\n\n    return parser.parse_args()\n\n\ndef upload_config(client, config_name, contents, config_hash, check=False):\n    response = client.config(\n        config_name,\n        config_data=contents,\n        config_hash=config_hash,\n        check=check,\n    )\n\n    if \"error\" in response:\n        log.error(response[\"error\"])\n        return False\n\n    print(\"Configuration uploaded successfully\", file=sys.stderr)\n    return True\n\n\ndef validate(config_name, config_content, master_content=None):\n    try:\n        config_data = manager.from_string(config_content)\n        master_data = (\n            manager.from_string(\n                master_content,\n            )\n            if master_content\n            else None\n        )\n        config_parse.validate_fragment(\n            name=config_name,\n            fragment=config_data,\n            master_config=master_data,\n        )\n    except ConfigError as e:\n        return str(e)\n\n\ndef delete_config(client, config_name):\n    if config_name == schema.MASTER_NAMESPACE:\n        log.error(\n            \"Deleting MASTER namespace is not allowed. Name must be specified.\",\n        )\n        return\n\n    response = input(\n        f\"This will delete the configuration for the {config_name} namespace. Proceed? 
(y/n): \",\n    )\n    if response[:1].lower() != \"y\":\n        return\n\n    config_hash = client.config(config_name)[\"hash\"]\n    if upload_config(client, config_name, \"\", config_hash):\n        return\n    raise SystemExit(\"tronfig deletion failed\")\n\n\ndef validate_dir(path):\n    try:\n        manifest_dir = tempfile.mkdtemp()\n        manifest = manager.ManifestFile(manifest_dir)\n        manifest.create()\n        for fname in os.listdir(path):\n            name, ext = os.path.splitext(fname)\n            if ext == \".yaml\":\n                namespace = name\n                manifest.add(namespace, os.path.join(path, fname))\n\n        config_manager = manager.ConfigManager(path, manifest)\n        config_manager.load()\n    except ConfigError as e:\n        traceback.print_exc()\n        return str(e)\n    finally:\n        if manifest_dir:\n            shutil.rmtree(manifest_dir)\n\n\ndef get_config_input(namespace, source):\n    if source == \"-\":\n        source_io = sys.stdin\n        if not namespace:\n            namespace = schema.MASTER_NAMESPACE\n    else:\n        source_io = open(source)\n        if not namespace:\n            name, _ = os.path.splitext(os.path.basename(source))\n            namespace = name\n\n    content = source_io.read()\n\n    return namespace, content\n\n\nif __name__ == \"__main__\":\n    args = parse_cli()\n    cmd_utils.setup_logging(args)\n    cmd_utils.load_config(args)\n\n    if args.validate or args.validate_dir:\n        if args.validate:\n            name, content = get_config_input(args.namespace, args.source)\n            master_content = None\n            if args.master_config:\n                _, master_content = get_config_input(\n                    schema.MASTER_NAMESPACE,\n                    args.master_config,\n                )\n            result = validate(\n                config_name=name,\n                config_content=content,\n                master_content=master_content,\n            )\n        elif args.validate_dir:\n            result = validate_dir(args.source)\n\n        if not result:\n            print(\"OK\")\n            sys.exit(0)\n        else:\n            print(result)\n            sys.exit(1)\n\n    client = Client(args.server)\n\n    if args.print_config:\n        content = client.config(args.source)[\"config\"]\n        if type(content) is not bytes:\n            content = content.encode(\"utf8\")\n        os.write(sys.stdout.fileno(), content)\n    elif args.delete:\n        delete_config(client, args.source)\n    else:\n        namespace, content = get_config_input(args.namespace, args.source)\n        config_hash = client.config(namespace)[\"hash\"]\n        result = validate(namespace, content)\n        if result:\n            print(result)\n            sys.exit(1)\n\n        if upload_config(\n            client,\n            namespace,\n            content,\n            config_hash,\n            check=args.check,\n        ):\n            sys.exit(0)\n\n        print(\"Uploading failed\")\n        sys.exit(1)\n"
  },
  {
    "path": "bin/tronrepl",
    "content": "#!/usr/bin/env python\n\"\"\" Start the Tron server daemon.\"\"\"\nimport argparse\nimport logging\nimport os\nimport traceback\n\nimport IPython\nimport pkg_resources\n\nimport tron.mcp\nfrom tron import trondaemon\nfrom tron.commands import cmd_utils\nfrom tron.config import manager\n\nlog = logging.getLogger(__name__)\n\nDEFAULT_CONF = \"default_config.yaml\"\nDEFAULT_CONF_PATH = \"config/\"\nDEFAULT_WORKING_DIR = \"/var/lib/tron/\"\nDEFAULT_LOCKFILE = \"tron-repl.lock\"\nDEFAULT_LOCKPATH = \"/var/run/\" + DEFAULT_LOCKFILE\n\n\ndef parse_cli():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        \"--version\",\n        action=\"version\",\n        version=f\"{parser.prog} {tron.__version__}\",\n    )\n\n    parser.add_argument(\n        \"-w\",\n        \"--working-dir\",\n        default=DEFAULT_WORKING_DIR,\n        help=\"Working directory for the Tron daemon, default %(default)s\",\n    )\n\n    parser.add_argument(\n        \"-c\",\n        \"--config-path\",\n        default=DEFAULT_CONF_PATH,\n        help=\"File path to the Tron configuration file\",\n    )\n\n    parser.add_argument(\n        \"--nodaemon\",\n        action=\"store_true\",\n        default=False,\n        help=\"[DEPRECATED] Disable daemonizing, default %(default)s\",\n    )\n\n    parser.add_argument(  # for backwards compatibility\n        \"--pid-file\",\n        help=\"[DEPRECATED] File path to pid file. Use --lock-file instead.\",\n    )\n\n    parser.add_argument(\n        \"--lock-file\",\n        help=\"File path to lock file, defaults to %s if working directory \"\n        \"is default. Otherwise defaults to <working dir>/%s\" % (DEFAULT_LOCKPATH, DEFAULT_LOCKFILE),\n    )\n\n    logging_group = parser.add_argument_group(\"logging\", \"\")\n    logging_group.add_argument(\n        \"--log-conf\",\n        \"-l\",\n        help=\"File path to a custom logging.conf\",\n    )\n\n    logging_group.add_argument(\n        \"-v\",\n        \"--verbose\",\n        action=\"count\",\n        default=0,\n        help=\"Verbose logging. 
Repeat for more verbosity.\",\n    )\n\n    logging_group.add_argument(\n        \"--debug\",\n        action=\"store_true\",\n        help=\"Debug mode, extra error reporting, no daemonizing\",\n    )\n\n    api_group = parser.add_argument_group(\"Web Service API\", \"\")\n    api_group.add_argument(\n        \"--port\",\n        \"-P\",\n        dest=\"listen_port\",\n        type=int,\n        help=\"TCP port number to listen on, default %(default)s\",\n        default=cmd_utils.DEFAULT_PORT,\n    )\n\n    api_group.add_argument(\n        \"--host\",\n        \"-H\",\n        dest=\"listen_host\",\n        help=\"Hostname to listen on, default %(default)s\",\n        default=cmd_utils.DEFAULT_HOST,\n    )\n\n    requirement = pkg_resources.Requirement.parse(\"tron\")\n    api_group.add_argument(\n        \"--web-path\",\n        default=pkg_resources.resource_filename(\n            requirement,\n            \"tronweb\",\n        ),\n        help=\"Path to static web resources, default %(default)s.\",\n    )\n\n    args = parser.parse_args()\n    args.working_dir = os.path.abspath(args.working_dir)\n\n    if args.log_conf:\n        args.log_conf = os.path.join(args.working_dir, args.log_conf)\n        if not os.path.exists(args.log_conf):\n            parser.error(\"Logging config file not found: %s\" % args.log_conf)\n\n    if not args.lock_file:\n        if args.pid_file:  # for backwards compatibility\n            args.lock_file = args.pid_file\n        elif args.working_dir == DEFAULT_WORKING_DIR:\n            args.lock_file = DEFAULT_LOCKPATH\n        else:\n            args.lock_file = DEFAULT_LOCKFILE\n\n    args.lock_file = os.path.join(args.working_dir, args.lock_file)\n    args.config_path = os.path.join(\n        args.working_dir,\n        args.config_path,\n    )\n\n    return args\n\n\ndef create_default_config(config_path):\n    \"\"\"Create a default empty configuration for first time installs\"\"\"\n    default = pkg_resources.resource_string(tron.__name__, DEFAULT_CONF)\n    manager.create_new_config(config_path, default)\n\n\ndef setup_environment(args):\n    \"\"\"Setup the working directory and config environment.\"\"\"\n    if not os.path.exists(args.working_dir):\n        os.makedirs(args.working_dir)\n\n    if not os.path.isdir(args.working_dir) or not os.access(args.working_dir, os.R_OK | os.W_OK | os.X_OK):\n        msg = \"Error, can't access working directory %s\" % args.working_dir\n        raise SystemExit(msg)\n\n    # Attempt to create a default config if config is missing\n    if not os.path.exists(args.config_path):\n        try:\n            create_default_config(args.config_path)\n        except OSError as e:\n            msg = \"Error creating default configuration at %s: %s\"\n            log.debug(traceback.format_exc())\n            raise SystemExit(msg % (args.config_path, e))\n\n    if not os.access(args.config_path, os.R_OK | os.W_OK):\n        msg = \"Error opening configuration %s: Missing Permissions\"\n        raise SystemExit(msg % args.config_path)\n\n\ndef main():\n    args = parse_cli()\n\n    setup_environment(args)\n    trond = trondaemon.TronDaemon(args)  # noqa: F841\n\n    trond.mcp = tron.mcp.MasterControlProgram(\n        trond.options.working_dir,\n        trond.options.config_path,\n    )\n    trond.mcp._load_config()\n    # trond.mcp.restore_state(trond.mcp.config.load().get_master().action_runner)\n\n    # mcp = trond.mcp  # noqa: F841\n    # store = mcp.state_watcher.state_manager._impl  # noqa: F841\n\n    print(\"\")\n    
print(\"+---------------------+\")\n    print(\"| Tron REPL           |\")\n    print(\"|   Available locals: |\")\n    print(\"|   - trond           |\")\n    print(\"+---------------------+\")\n    print(\"\")\n    IPython.embed()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "bin/tronview",
    "content": "#!/usr/bin/env python\nimport os\nimport sys\n\nimport argcomplete\n\nfrom tron.commands import cmd_utils\nfrom tron.commands import display\nfrom tron.commands.client import Client\nfrom tron.commands.client import get_object_type_from_identifier\nfrom tron.commands.client import RequestError\nfrom tron.commands.client import TronObjectType\nfrom tron.commands.cmd_utils import ExitCode\nfrom tron.commands.cmd_utils import suggest_possibilities\nfrom tron.commands.cmd_utils import tron_jobs_completer\n\n\ndef parse_cli():\n    parser = cmd_utils.build_option_parser()\n    parser.add_argument(\n        \"--numshown\",\n        \"-n\",\n        type=int,\n        dest=\"num_displays\",\n        help=\"Max number of jobs/job-runs shown\",\n        default=10,\n    )\n    parser.add_argument(\n        \"--color\",\n        \"-c\",\n        action=\"store_true\",\n        dest=\"display_color\",\n        help=\"Display in color\",\n        default=None,\n    )\n    parser.add_argument(\n        \"--nocolor\",\n        action=\"store_false\",\n        dest=\"display_color\",\n        help=\"Display without color\",\n        default=None,\n    )\n    parser.add_argument(\n        \"--stdout\",\n        \"-o\",\n        action=\"count\",\n        dest=\"stdout\",\n        help=\"Solely displays stdout\",\n        default=0,\n    )\n    parser.add_argument(\n        \"--stderr\",\n        \"-e\",\n        action=\"count\",\n        dest=\"stderr\",\n        help=\"Solely displays stderr\",\n        default=0,\n    )\n    parser.add_argument(\n        \"--events\",\n        \"-E\",\n        action=\"store_true\",\n        dest=\"events\",\n        help=\"Display stored events\",\n        default=0,\n    )\n    parser.add_argument(\n        \"name\",\n        nargs=\"?\",\n        help=\"job name | job run id | action id\",\n    ).completer = cmd_utils.tron_jobs_completer\n\n    argcomplete.autocomplete(parser)\n    args = parser.parse_args()\n    return args\n\n\ndef console_height():\n    if not sys.stdout.isatty():\n        return 40\n    return int(os.popen(\"stty size\", \"r\").read().split()[0])\n\n\ndef view_all(args, client):\n    \"\"\"Retrieve jobs and display them.\"\"\"\n    return display.DisplayJobs().format(\n        client.jobs(\n            include_job_runs=False,\n            include_action_runs=False,\n            include_action_graph=False,\n            include_node_pool=False,\n        ),\n    )\n\n\ndef view_job(args, job_id, client):\n    \"\"\"Retrieve details of the specified job and display\"\"\"\n    job_content = client.job(job_id.url, count=args.num_displays)\n    return display.format_job_details(job_content)\n\n\ndef view_job_run(args, job_run_id, client):\n    actions = client.job_runs(job_run_id.url)\n    display_action = display.DisplayActionRuns()\n    return display_action.format(actions)\n\n\ndef view_action_run(args, act_run_id, client):\n    content = client.action_runs(\n        act_run_id.url,\n        num_lines=args.num_displays,\n    )\n    return display.format_action_run_details(content)\n\n\nobj_type_to_view_map = {\n    TronObjectType.job: view_job,\n    TronObjectType.job_run: view_job_run,\n    TronObjectType.action_run: view_action_run,\n}\n\n\ndef get_view_output(name, args, client):\n    url_index = client.index()\n    try:\n        tron_id = get_object_type_from_identifier(url_index, name)\n    except ValueError as e:\n        possibilities = list(tron_jobs_completer(prefix=\"\", client=client))\n        suggestions = 
suggest_possibilities(\n            word=name,\n            possibilities=possibilities,\n        )\n        raise SystemExit(f\"Error: {e}{suggestions}\")\n\n    if tron_id.type not in obj_type_to_view_map:\n        return\n\n    try:\n        return obj_type_to_view_map[tron_id.type](args, tron_id, client)\n    except RequestError as e:\n        raise SystemExit(f\"Error: {e}\")\n\n\ndef main():\n    \"\"\"run tronview\"\"\"\n    args = parse_cli()\n    cmd_utils.setup_logging(args)\n    cmd_utils.load_config(args)\n\n    display.Color.toggle(args.display_color)\n    client = Client(args.server)\n\n    try:\n        if args.events:\n            response = client.request(\"/api/events\")\n            error = response.get(\"error\")\n            if not error:\n                for evt in response.get(\"response\", [\"* no recorded events *\"]):\n                    print(evt)\n                sys.exit(ExitCode.success)\n\n        if not args.name:\n            output = view_all(args, client)\n        else:\n            output = get_view_output(args.name, args, client)\n\n        if not output:\n            print(\"Unrecognized identifier: %s\" % args.name, file=sys.stderr)\n            sys.exit(ExitCode.fail)\n\n        if sys.stdout.isatty() and len(output.split(\"\\n\")) > console_height():\n            display.view_with_less(output, args.display_color)\n        else:\n            print(output)\n\n    except RequestError as err:\n        print(\n            f\"Error connecting to the tron server ({args.server}): {err}\",\n            file=sys.stderr,\n        )\n        sys.exit(ExitCode.fail)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "bin/tronview_tabcomplete.sh",
    "content": "if [[ -n ${ZSH_VERSION-} ]]; then\n\tautoload -U +X bashcompinit && bashcompinit\nfi\n\n# This magic eval enables tab-completion for tron commands\n# http://argcomplete.readthedocs.io/en/latest/index.html#synopsis\neval \"$(/opt/venvs/tron/bin/register-python-argcomplete tronview)\"\n"
  },
  {
    "path": "contrib/migration_script.py",
    "content": "#!/usr/bin/env python\n\"\"\"\nThis script is for migrating jobs to another namespace\n\"\"\"\nimport argparse\nimport subprocess\nimport time\nfrom urllib.parse import urljoin\nfrom urllib.parse import urlparse\n\nfrom tron import yaml\nfrom tron.commands import client\n\n\nclass bcolors:\n    HEADER = \"\\033[95m\"\n    OKBLUE = \"\\033[94m\"\n    OKGREEN = \"\\033[92m\"\n    WARNING = \"\\033[93m\"\n    FAIL = \"\\033[91m\"\n    ENDC = \"\\033[0m\"\n    BOLD = \"\\033[1m\"\n    UNDERLINE = \"\\033[4m\"\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Migrate jobs to new namespace\",\n    )\n    parser.add_argument(\n        \"--server\",\n        required=True,\n        help=\"specify the location of tron master\",\n    )\n    parser.add_argument(\n        \"--old-ns\",\n        required=True,\n        help=\"Old namespace\",\n    )\n    parser.add_argument(\n        \"--new-ns\",\n        required=True,\n        help=\"New namespace\",\n    )\n    parser.add_argument(\n        \"source\",\n        help=\"source file to get list of jobs\",\n    )\n    parser.add_argument(\n        \"--job\",\n        help=\"Specify a single job to migrate\",\n    )\n    args = parser.parse_args()\n    return args\n\n\ndef check_job_if_running(jobs_status, job_name):\n    for job_status in jobs_status:\n        if job_status[\"name\"] == job_name:\n            status = job_status[\"status\"]\n            if status == \"running\":\n                print(bcolors.FAIL + f\"job {job_name} is still running, can not migrate\" + bcolors.ENDC)\n                return False\n            elif status == \"disabled\":\n                print(bcolors.WARNING + f\"job {job_name} is disabled, need to cancel it manually later\" + bcolors.ENDC)\n                return True\n            else:\n                print(bcolors.OKGREEN + f\"job {job_name} is not running, can migrate\" + bcolors.ENDC)\n                return True\n\n    print(bcolors.FAIL + f\"Can not find the job {job_name}\" + bcolors.ENDC)\n    return False\n\n\ndef command_jobs(command, jobs, args, ns=None):\n    \"\"\"This function run tronctl command for the jobs\n    command: the tronctl command it will run\n    jobs: a list of jobs\n    args: the args for this script\n    ns: the namespace to use as the prefix for each job, if None, the scrip would use args.old_ns instead\n    \"\"\"\n    data = {\"command\": command}\n    command_flag = True\n    for job in jobs:\n        if ns is not None:\n            job_name = ns + \".\" + job[\"name\"]\n        else:\n            job_name = args.old_ns + \".\" + job[\"name\"]\n\n        if command == \"move\":\n            data = {\n                \"command\": command,\n                \"old_name\": args.old_ns + \".\" + job[\"name\"],\n                \"new_name\": args.new_ns + \".\" + job[\"name\"],\n            }\n            uri = urljoin(args.server, \"api/jobs\")\n            job_name = args.new_ns + \".\" + job[\"name\"]\n        else:\n            data = {\"command\": command}\n            uri = urljoin(args.server, \"api/jobs/\" + job_name)\n\n        response = client.request(uri, data=data)\n        if response.error:\n            print(bcolors.FAIL + f\"Failed to {command} {job_name}\" + bcolors.ENDC)\n            command_flag = False\n        else:\n            print(bcolors.OKGREEN + f\"Succeed to {command} {job_name}\" + bcolors.ENDC)\n    return command_flag\n\n\ndef ssh_command(hostname, command):\n    print(bcolors.BOLD + f\"Executing the 
command: ssh -A {hostname} {command}\" + bcolors.ENDC)\n    ssh = subprocess.Popen(\n        [\"ssh\", \"-A\", hostname, command],\n        shell=False,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    exitcode = ssh.wait()\n    result = ssh.stdout.readlines()\n    error = ssh.stderr.readlines()\n    if exitcode != 0:\n        print(bcolors.FAIL + f\"Execute command {command} failed: {error}\" + bcolors.ENDC)\n        exit(exitcode)\n    return result\n\n\ndef main():\n    args = parse_args()\n    filename = args.source\n    hostname = urlparse(args.server).hostname\n    if filename.endswith(\".yaml\"):\n        tron_client = client.Client(args.server)\n        jobs_status = tron_client.jobs()\n\n        is_migration_safe = True\n        with open(filename) as f:\n            jobs = yaml.load(f)[\"jobs\"]\n            job_names = [job[\"name\"] for job in jobs]\n            if args.job is not None:  # only want to migrate specific job\n                # Overwrite existing jobs since only migrating one job\n                jobs = [job for job in jobs if job[\"name\"] == args.job]\n                if not jobs:\n                    raise ValueError(f\"Invalid job specified. Options were {job_names}\")\n                job_name_with_ns = args.old_ns + \".\" + args.job\n                is_migration_safe = is_migration_safe & check_job_if_running(jobs_status, job_name_with_ns)\n\n            else:  # Migrate all jobs in namespace\n                for job_name in job_names:\n                    job_name_with_ns = args.old_ns + \".\" + job_name\n                    is_migration_safe = is_migration_safe & check_job_if_running(jobs_status, job_name_with_ns)\n\n        if is_migration_safe is True:\n            print(bcolors.OKBLUE + \"Jobs are not running.\" + bcolors.ENDC)\n        else:\n            print(bcolors.WARNING + \"Some jobs are still running, abort this migration,\" + bcolors.ENDC)\n            return\n\n        # try stop cron\n        ssh_command(hostname, \"sudo service cron stop\")\n\n        # wait unitil yelpsoa-configs branch is merged\n        res = input(\"Merge and push yelpsoa-configs branch. Ready to continue? [y/n]\")\n        if res == \"y\":\n            # wait for 10 seconds after pushing the branch\n            time.sleep(30)\n            # rsyn yelpsoa-configs\n            command = \"sudo rsync -a --delay-updates --contimeout=10 --timeout=10 --chmod=Du+rwx,go+rx --port=8731 --delete yelpsoa-slave.local.yelpcorp.com::yelpsoa-configs /nail/etc/services\"\n            ssh_command(hostname, command)\n\n            # migrate jobs to new namespace\n            command_jobs(\"move\", jobs, args)\n\n            # update new namespace\n            ssh_command(hostname, \"sudo paasta_setup_tron_namespace \" + args.new_ns)\n\n            # update old namespace if only one job is moving\n            if args.job:\n                ssh_command(hostname, \"sudo paasta_setup_tron_namespace \" + args.old_ns)\n\n        # clean up namespace\n        ssh_command(hostname, \"sudo paasta_cleanup_tron_namespaces\")\n\n        # start cron\n        ssh_command(hostname, \"sudo service cron start\")\n\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "contrib/mock_patch_checker.py",
    "content": "#!/usr/bin/env python3.10\nimport ast\nimport sys\n\n\nclass MockChecker(ast.NodeVisitor):\n    def __init__(self):\n        self.errors = 0\n        self.init_module_imports()\n\n    def init_module_imports(self):\n        self.imported_patch = False\n        self.imported_mock = False\n\n    def check_files(self, files):\n        for file in files:\n            self.check_file(file)\n\n    def check_file(self, filename):\n        self.current_filename = filename\n        try:\n            with open(filename) as fd:\n                try:\n                    file_ast = ast.parse(fd.read())\n                except SyntaxError as error:\n                    print(\"SyntaxError on file %s:%d\" % (filename, error.lineno))\n                    return\n        except OSError:\n            print(\"Error opening filename: %s\" % filename)\n            return\n        self.init_module_imports()\n        self.visit(file_ast)\n\n    def _call_uses_patch(self, node):\n        try:\n            return node.func.id == \"patch\"\n        except AttributeError:\n            return False\n\n    def _call_uses_mock_patch(self, node):\n        try:\n            return node.func.value.id == \"mock\" and node.func.attr == \"patch\"\n        except AttributeError:\n            return False\n\n    def visit_Import(self, node):\n        if [name for name in node.names if \"mock\" == name.name]:\n            self.imported_mock = True\n\n    def visit_ImportFrom(self, node):\n        if node.module == \"mock\" and (name for name in node.names if \"patch\" == name.name):\n            self.imported_patch = True\n\n    def visit_Call(self, node):\n        try:\n            if (self.imported_patch and self._call_uses_patch(node)) or (\n                self.imported_mock and self._call_uses_mock_patch(node)\n            ):\n                if not any([keyword for keyword in node.keywords if keyword.arg == \"autospec\"]):\n                    print(\"%s:%d: Found a mock without an autospec!\" % (self.current_filename, node.lineno))\n                    self.errors += 1\n        except AttributeError:\n            pass\n        self.generic_visit(node)\n\n\ndef main(filenames):\n    checker = MockChecker()\n    checker.check_files(filenames)\n    if checker.errors == 0:\n        sys.exit(0)\n    else:\n        print(\"You probably meant to specify 'autospec=True' in these tests.\")\n        print(\"If you really don't want to, specify 'autospec=None'\")\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n"
  },
  {
    "path": "contrib/namespace_cleanup.sh",
    "content": "#/bin/bash\n\necosystem=\"stagef\"\n\nread -p \"Are you at tron-$ecosystem (y/n)?\" RES\necho\nif [ $RES = \"y\" ]; then\n        #load namespace from _manifest.yaml\n        for namespace in $(cat /nail/tron/config/_manifest.yaml | uq | jq -r 'keys[]')\n        do\n                file=$(cat /nail/tron/config/_manifest.yaml | uq | jq -r .\\\"$namespace\\\")\n                filename=$(basename $file)\n                if [ -f \"/nail/etc/services/tron/$ecosystem/$filename\" ]; then\n                        echo \"$namespace is up to date\"\n                elif [ $namespace == \"MASTER\" ]; then\n                        echo \"It is MASTER namepsace\"\n                else\n                        num_job=$(cat /nail/tron/config/$filename | uq | jq -r \".jobs | length\")\n                        echo \"========= $filename =========\"\n                        cat /nail/tron/config/$filename\n                        echo \"=============================\"\n                        if [ $num_job == 0 ]; then\n                            echo \"$namespace is left behind, deleting the namespace\"\n                            tronfig -d $namespace\n                        else\n                            echo \"Can't remove the namespace since it is not empty.\"\n                        fi\n                fi\n        done\nelse\n        echo \"Please change the ecosystem variable in this script or execute this script at tron-$ecosystem\"\nfi\n"
  },
  {
    "path": "contrib/patch-config-loggers.diff",
    "content": "--- a/debian/tron/opt/venvs/tron/lib/python3.10/site-packages/kubernetes/client/configuration.py\n+++ b/debian/tron/opt/venvs/tron/lib/python3.10/site-packages/kubernetes/client/configuration.py\n@@ -71,11 +71,11 @@\n     \"\"\"\n\n     _default = None\n-\n     def __init__(self, host=\"http://localhost\",\n                  api_key=None, api_key_prefix=None,\n                  username=None, password=None,\n                  discard_unknown_keys=False,\n+                 is_logger_used=False,\n                  ):\n         \"\"\"Constructor\n         \"\"\"\n@@ -106,26 +106,28 @@\n         \"\"\"Password for HTTP basic authentication\n         \"\"\"\n         self.discard_unknown_keys = discard_unknown_keys\n+        self.is_logger_used = is_logger_used\n         self.logger = {}\n-        \"\"\"Logging Settings\n-        \"\"\"\n-        self.logger[\"package_logger\"] = logging.getLogger(\"client\")\n-        self.logger[\"urllib3_logger\"] = logging.getLogger(\"urllib3\")\n-        self.logger_format = '%(asctime)s %(levelname)s %(message)s'\n-        \"\"\"Log format\n-        \"\"\"\n-        self.logger_stream_handler = None\n-        \"\"\"Log stream handler\n-        \"\"\"\n-        self.logger_file_handler = None\n-        \"\"\"Log file handler\n-        \"\"\"\n-        self.logger_file = None\n-        \"\"\"Debug file location\n-        \"\"\"\n-        self.debug = False\n-        \"\"\"Debug switch\n-        \"\"\"\n+        if self.is_logger_used:\n+            \"\"\"Logging Settings\n+            \"\"\"\n+            self.logger[\"package_logger\"] = logging.getLogger(\"client\")\n+            self.logger[\"urllib3_logger\"] = logging.getLogger(\"urllib3\")\n+            self.logger_format = '%(asctime)s %(levelname)s %(message)s'\n+            \"\"\"Log format\n+            \"\"\"\n+            self.logger_stream_handler = None\n+            \"\"\"Log stream handler\n+            \"\"\"\n+            self.logger_file_handler = None\n+            \"\"\"Log file handler\n+            \"\"\"\n+            self.logger_file = None\n+            \"\"\"Debug file location\n+            \"\"\"\n+            self.debug = False\n+            \"\"\"Debug switch\n+            \"\"\"\n\n         self.verify_ssl = True\n         \"\"\"SSL/TLS verification\n@@ -178,11 +180,12 @@\n         for k, v in self.__dict__.items():\n             if k not in ('logger', 'logger_file_handler'):\n                 setattr(result, k, copy.deepcopy(v, memo))\n-        # shallow copy of loggers\n-        result.logger = copy.copy(self.logger)\n-        # use setters to configure loggers\n-        result.logger_file = self.logger_file\n-        result.debug = self.debug\n+        if self.is_logger_used:\n+            # shallow copy of loggers\n+            result.logger = copy.copy(self.logger)\n+            # use setters to configure loggers\n+            result.logger_file = self.logger_file\n+            result.debug = self.debug\n         return result\n\n     @classmethod\n"
  },
  {
    "path": "contrib/sync-from-yelp-prod.sh",
    "content": "#!/bin/bash\nrsync --exclude=.stderr --exclude=.stdout -aPv tron-prod:/nail/tron/*  example-cluster/\ngit checkout example-cluster/logging.conf\n\necho \"\"\necho \"Now Run:\"\necho \"\"\necho \"    tox -e example-cluster\"\necho \"    ./example-cluster/start.sh\"\n"
  },
  {
    "path": "contrib/sync_namespaces_jobs.py",
    "content": "#!/usr/bin/env python\n\"\"\" This script is for load testing of Tron\n\nHistorically, Tronview and Tronweb were (are) slow. To better understand the performance\nbottleneck of Tron, we could use this script to  generate the fake namespaces and\njobs as many as we want to perform load testing. Ticket TRON-70 tracks the progress\nof speeding up Tronview and Tronweb.\n\"\"\"\nimport argparse\nimport os\n\nfrom tron import yaml\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Creating namespaces and jobs configuration for load testing\",\n    )\n    parser.add_argument(\n        \"--multiple\",\n        type=int,\n        default=1,\n        help=\"multiple workload of namespaces and jobs from source directory\",\n    )\n    parser.add_argument(\n        \"--src\",\n        default=\"/nail/etc/services/tron/prod\",\n        help=\"Directory to get Tron configuration files\",\n    )\n    parser.add_argument(\n        \"--dest\",\n        default=\"/tmp/tron-servdir\",\n        help=\"Directory to put Tron configuration files for load testing\",\n    )\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = parse_args()\n    for filename in os.listdir(args.src):\n        print(f\"filename = {filename}\")\n        filepath = os.path.join(args.src, filename)\n        if os.path.isfile(filepath) and filepath.endswith(\".yaml\"):\n            with open(filepath) as f:\n                config = yaml.load(f)\n\n            if filename == \"MASTER.yaml\":\n                for key in list(config):\n                    if key != \"jobs\":\n                        del config[key]\n\n            jobs = config.get(\"jobs\", [])\n            if jobs is not None:\n                for job in jobs:\n                    job[\"node\"] = \"localhost\"\n                    if \"monitoring\" in job:\n                        del job[\"monitoring\"]\n                    for action in job.get(\"actions\", []):\n                        action[\"command\"] = \"sleep 10s\"\n                        if \"node\" in action:\n                            action[\"node\"] = \"localhost\"\n            for i in range(args.multiple):\n                out_filepath = os.path.join(\n                    args.dest,\n                    \"load_testing_\" + str(i) + \"-\" + filename,\n                )\n                with open(out_filepath, \"w\") as outf:\n                    yaml.dump(config, outf, default_flow_style=False)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "debian/changelog",
    "content": "tron (3.10.0) jammy; urgency=medium\n\n  * 3.10.0 tagged with 'make release'\n    Commit: Merge pull request #1096 from Yelp/u/kkasp/unlearn-how-to-\n    read-pickles  U/kkasp/unlearn how to read pickles\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 16 Mar 2026 09:03:19 -0700\n\ntron (3.9.3) jammy; urgency=medium\n\n  * 3.9.3 tagged with 'make release'\n    Commit: Merge pull request #1098 from jhereth/u/jhereth/MLCOMPUTE-\n    6270/add-attempt-number-label  Add k8s label for attempt number\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 16 Mar 2026 07:40:42 -0700\n\ntron (3.9.2) jammy; urgency=medium\n\n  * 3.9.2 tagged with 'make release'\n    Commit: Merge pull request #1090 from Yelp/u/kkasp/TRON-2452-\n    compress-json  Compress json\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 09 Feb 2026 13:24:43 -0800\n\ntron (3.9.1) jammy; urgency=medium\n\n  * 3.9.1 tagged with 'make release'\n    Commit: Merge pull request #1084 from Yelp/jfong/TRON-2546-fix-spot-\n    terminatioons-not-ooms  TRON-2546: Fix spot termination\n    identification in current k8s version\n\n -- Jen Patague <jfong@yelp.com>  Thu, 08 Jan 2026 09:58:32 -0800\n\ntron (3.9.0) jammy; urgency=medium\n\n  * 3.9.0 tagged with 'make release'\n    Commit: Upgrade Tron to Python 3.10 - TRON-2435 (#1071)  * Upgrade\n    Tron to Python 3.10  * Upgrade zope.interface to a compatible python\n    3.10 version  * Moved moto to requirements dev instead and removed\n    mock from minimal  * lowering object_size for more room  * Update\n    classifier and path in itest\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Thu, 18 Dec 2025 06:24:50 -0800\n\ntron (3.8.9) jammy; urgency=medium\n\n  * 3.8.9 tagged with 'make release'\n    Commit: Assume we\"ll only see yaml files in logreader.py (#1077)  If\n    someone is using a .yml file - they\"ve already gone horribly off-\n    track :p\n\n -- Luis Perez <luisp@yelp.com>  Mon, 24 Nov 2025 13:42:34 -0800\n\ntron (3.8.8) jammy; urgency=medium\n\n  * 3.8.8 tagged with 'make release'\n    Commit: Try to warn folks about incorrect `tronctl publish` usage\n    (#1076)  It\"s absurdly easy to use this incorrectly (especially if\n    you\"ve been woken up by a page ;p) - so let\"s try to warn folks if\n    they\"re potentially using `tronctl publish` incorrectly  afaik, the\n    usual mistakes here are to use a job/action id (detectable by the\n    run number) OR to use a too long/short trigger id (either from copy-\n    pasting too much or too little)  I\"ve opted to not error out when\n    this happens since I\"m somewhat sure that it\"s possible to manually\n    construct triggers that don\"t match this format and depend on those\n    triggers in soaconfigs. If anyone with more time wants to vibe-code\n    something to verify this (and maybe even add some validation to the\n    tronfig schema ;)), then we can always error out immediately\n\n -- Luis Perez <luisp@yelp.com>  Thu, 20 Nov 2025 12:01:45 -0800\n\ntron (3.8.7) jammy; urgency=medium\n\n  * 3.8.7 tagged with 'make release'\n    Commit: Lightly refactor skip-and-publish (and add some more output)\n    (#1075)  The current output is pretty minimal and doesn\"t help us\n    (or users) debug particularly efficiently whenever something isn\"t\n    working as expected.  Output now looks something like: ``` Skipping\n    compute-infra-test-service.always_fail.5.fail... 
ActionRun: compute-\n    infra-test-service.always_fail.5.fail now in state skipped\n    Successfully skipped compute-infra-test-service.always_fail.5.fail.\n    Fetching triggers to publish for compute-infra-test-\n    service.always_fail.5.fail...  Triggers to publish:     * compute-\n    infra-test-service.always_fail.fail.shortdate.2025-11-15  Publishing\n    trigger compute-infra-test-service.always_fail.fail.shortdate.2025-\n    11-15... OK ```  I\"ve lightly refactored this since I don\"t really\n    know why I inlined all this logic in 2021 and it was bothering me.\n\n -- Luis Perez <luisp@yelp.com>  Wed, 12 Nov 2025 06:46:31 -0800\n\ntron (3.8.6) jammy; urgency=medium\n\n  * 3.8.6 tagged with 'make release'\n    Commit: Add another disk eviction substring (#1074)  We\"re also\n    seeing messages that look like: `Pod ephemeral local storage usage\n    exceeds the total limit of containers 1Gi.` which don\"t trigger the\n    existing check.  This adds \"ephemeral local storage\" as another\n    trigger without removing the existing one.  I\"m not quite sure how\n    k8s decides which message to use without reading through the k8s\n    code further, but it appears that both cases are possible:\n    https://github.com/kubernetes/kubernetes/blob/350481c4747da5c2ad4f24\n    e71e7edaabbd4cfe2e/pkg/kubelet/eviction/helpers.go#L53-L57\n\n -- Luis Perez <luisp@yelp.com>  Mon, 03 Nov 2025 09:56:04 -0800\n\ntron (3.8.5) jammy; urgency=medium\n\n  * 3.8.5 tagged with 'make release'\n    Commit: TRON-2480: Fix tron systemd unit to retry indefinitely & add\n    minor delay between restarts (#1070)  We saw in\n    https://yelp.slack.com/archives/CA53K7S68/p1755111528654579 and\n    https://yelp.slack.com/archives/CA53K7S68/p1752625385958589?thread_t\n    s=1752624525.373959&cid=CA53K7S68 my change from #1066 to add the\n    `ExecStartPre` , combined with the default `RestartSec=0` and\n    `StartLimitBurst=5` meant that tron in a cluster that required some\n    time to perform the shutdown process would still see `trond` still\n    running and immediately run through the 5 allowed attempts, then\n    fail to restart unless retriggered by a timer or socket:  > Note\n    that units which are configured for Restart=, and which reach the\n    start limit are not attempted to be restarted anymore; however, they\n    may still be restarted manually or from a timer or socket at a later\n    point, after the interval has passed.  This PR now disables the\n    restart limits entirely by setting both `StartLimitBurst` and\n    `StartLimitIntervalSec` to 0, as per the docs: > interval is a time\n    span with the default unit of seconds, but other units may be\n    specified, see\n    [systemd.time(7)](https://www.freedesktop.org/software/systemd/man/l\n    atest/systemd.time.html#). The special value \"infinity\" can be used\n    to limit the total number of start attempts, even if they happen at\n    large time intervals. Defaults to DefaultStartLimitIntervalSec= in\n    manager configuration file, and may be set to 0 to disable any kind\n    of rate limiting.  
(Note this indicates only setting\n    `StartLimitIntervalSec=0` is necessary to disable the rate limiting,\n    but chatgpt convinced me it makes it clearer to the reader if both\n    are set to 0, happy to undo this if we think we\"ll be confused\n    later)  These settings seem to work on tron-infrastage after\n    modifying it to cause an artificial 60s delay when shutting down, as\n    we can see restarts are delayed 10s between the next attempt, and\n    eventually tron is able to start cleanly on its own with no user\n    intervention:\n    https://fluffy.yelpcorp.com/i/qnk5kPRNXsMPRbpLv32KvKDLFk1gl72p.html\n\n -- Luis Perez <luisp@yelp.com>  Mon, 18 Aug 2025 09:22:27 -0700\n\ntron (3.8.4) jammy; urgency=medium\n\n  * 3.8.4 tagged with 'make release'\n    Commit: Default sliders to show 50 items (#1067)  Defaulting to 10\n    doesn\"t actually save any time since we\"ve fetched all the data\n    anyway.  Let\"s instead default to 50 since that should should be the\n    most common run_limit  This will make pages longer, but it will be\n    less confusing to folks when they get alerts for failed/unknown/etc\n    jobs that are past the visible runs with the previous slider\n    configuration.\n\n -- Luis Perez <luisp@yelp.com>  Wed, 23 Jul 2025 14:10:18 -0700\n\ntron (3.8.3) jammy; urgency=medium\n\n  * 3.8.3 tagged with 'make release'\n    Commit: Merge pull request #1066 from Yelp/jfong/TRON-2340-systemd-\n    sigterm-handling  TRON-2340: Update tron unit file to ensure proper\n    sigterm handling\n\n -- Jen Patague <jfong@yelp.com>  Mon, 14 Jul 2025 13:51:48 -0700\n\ntron (3.8.2) jammy; urgency=medium\n\n  * 3.8.2 tagged with 'make release'\n    Commit: Released 3.8.1 via make release\n\n -- Jen Patague <jfong@yelp.com>  Mon, 14 Jul 2025 12:42:31 -0700\n\ntron (3.8.1) jammy; urgency=medium\n\n  * 3.8.1 tagged with 'make release'\n    Commit: Enable disallow_untyped_decorators mypy option (#1064)  For\n    once, a small mypy PR :p  There was just a single untyped decorator\n    and I went ahead and wrote some inline comments \"cause I don\"t think\n    I\"ll remember how to read this after this gets merged :deepfried-\n    loljpg:\n\n -- Luis Perez <luisp@yelp.com>  Wed, 02 Jul 2025 10:14:27 -0700\n\ntron (3.8.0) jammy; urgency=medium\n\n  * 3.8.0 tagged with 'make release'\n    Commit: Allow transitioning to success/fail from any terminal state\n    (#1057)  This is helpful when folks have handled failures outside of\n    Tron (or have done something outside of Tron that nominally means\n    that the Tron status is now incorrect) OR to work-around any\n    weirdness with Tron states (e.g., something funky happened in k8s\n    and Tron never saw the event)  NOTE: this PR changes a bunch of\n    extra files - but that\"s just due to mypy following more code paths\n    after I typed the ActionRunController `__init__` so that I could go-\n    to-def in my editor :p Additionally, kkasp and I have occasionally\n    been runing into an state where `check-requirements` says a\n    mismatched version of `cryptography` is installed - I finally got\n    around to debugging this and realized this stems from our very hacky\n    extra_requirements_yelp.txt shenanigans: pyopenssl has lower/upper-\n    bounds on cryptography but, due to the ordering we install things\n    in, `check-requirements` might run before we\"ve installed pyopenssl\n    and downgraded cryptography (and pass) or after (and fail)  The real\n    changes are confined to\n    `tron/core/action.py::ActionRun::STATE_MACHINE` 
(and a now-deleted\n    ActionRun tests that are no longer valid)\n\n -- Luis Perez <luisp@yelp.com>  Mon, 30 Jun 2025 10:16:53 -0700\n\ntron (3.7.3) jammy; urgency=medium\n\n  * 3.7.3 tagged with 'make release'\n    Commit: more gracefully handle errors during backfill monitoring\n    (#1062)\n\n -- Matteo Piano <mpiano@yelp.com>  Thu, 26 Jun 2025 05:54:02 -0700\n\ntron (3.7.2) jammy; urgency=medium\n\n  * 3.7.2 tagged with 'make release'\n    Commit: Merge pull request #1056 from Yelp/u/kkasp/rest-in-peace-\n    tronweb2  Delete tronweb2\n\n -- Kevin Kaspari <kkasp@yelp.com>  Wed, 25 Jun 2025 10:25:34 -0700\n\ntron (3.7.1) jammy; urgency=medium\n\n  * 3.7.1 tagged with 'make release'\n    Commit: Merge pull request #1055 from Yelp/u/kkasp/TRON-2007-tron-\n    disk-evictions  Add disk eviction error code and check logic\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 20 Jun 2025 10:11:01 -0700\n\ntron (3.7.0) jammy; urgency=medium\n\n  * 3.7.0 tagged with 'make release'\n    Commit: Save idempotent to json - TRON-2154 (#1051)  * Save\n    idempotent to json  * set idempotent to false if it doesnt exist  *\n    address review\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Wed, 18 Jun 2025 13:09:02 -0700\n\ntron (3.6.3) jammy; urgency=medium\n\n  * 3.6.3 tagged with 'make release'\n    Commit: ignore cached okta token in backfills (#1054)\n\n -- Matteo Piano <mpiano@yelp.com>  Mon, 16 Jun 2025 01:38:02 -0700\n\ntron (3.6.2) jammy; urgency=medium\n\n  * 3.6.2 tagged with 'make release'\n    Commit: cache auth tokens before starting backfill jobs (#1053)\n\n -- Matteo Piano <mpiano@yelp.com>  Fri, 13 Jun 2025 01:23:13 -0700\n\ntron (3.6.1) jammy; urgency=medium\n\n  * 3.6.1 tagged with 'make release'\n    Commit: return auth error that clients can parse (#1052)\n\n -- Matteo Piano <mpiano@yelp.com>  Mon, 09 Jun 2025 05:24:25 -0700\n\ntron (3.6.0) jammy; urgency=medium\n\n  * 3.6.0 tagged with 'make release'\n    Commit: Merge pull request #1046 from Yelp/u/kkasp/TRON-2391-\n    aggregate-metrics  U/kkasp/tron 2391 aggregate metrics\n\n -- Kevin Kaspari <kkasp@yelp.com>  Wed, 04 Jun 2025 10:43:44 -0700\n\ntron (3.5.2) jammy; urgency=medium\n\n  * 3.5.2 tagged with 'make release'\n    Commit: use oidc token method from vault-tools (#1050)\n\n -- Matteo Piano <mpiano@yelp.com>  Wed, 28 May 2025 07:36:00 -0700\n\ntron (3.5.1) jammy; urgency=medium\n\n  * 3.5.1 tagged with 'make release'\n    Commit: fix some annoying warnings (#1049)\n\n -- Matteo Piano <mpiano@yelp.com>  Tue, 27 May 2025 10:22:44 -0700\n\ntron (3.5.0) jammy; urgency=medium\n\n  * 3.5.0 tagged with 'make release'\n    Commit: add support for api auth via vault (#1048)  add support for\n    api auth via vault\n\n -- Matteo Piano <mpiano@yelp.com>  Tue, 27 May 2025 08:47:48 -0700\n\ntron (3.4.10) jammy; urgency=medium\n\n  * 3.4.10 tagged with 'make release'\n    Commit: Merge pull request #1045 from Yelp/u/kkasp/TRON-2414-fix-\n    broken-config-parse  U/kkasp/tron 2414 fix broken config parse\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 08 May 2025 09:51:28 -0700\n\ntron (3.4.9) jammy; urgency=medium\n\n  * 3.4.9 tagged with 'make release'\n    Commit: Merge pull request #1043 from Yelp/u/kkasp/TRON-2414-set-\n    extra-safe-transaction-limit  Drop under the limit a bit more for\n    maximum safety\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 01 May 2025 10:54:54 -0700\n\ntron (3.4.8) jammy; urgency=medium\n\n  * 3.4.8 tagged with 'make release'\n    Commit: fix build_url_request when method is None (#1041)\n\n -- Luis Perez 
<luisp@yelp.com>  Mon, 14 Apr 2025 10:52:51 -0700\n\ntron (3.4.7) jammy; urgency=medium\n\n  * 3.4.7 tagged with 'make release'\n    Commit: Merge pull request #1042 from Yelp/u/kkasp/DAR-2637-lower-\n    max-transact-items  Reduce transact size to avoid cap\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 10 Apr 2025 08:24:26 -0700\n\ntron (3.4.6) jammy; urgency=medium\n\n  * 3.4.6 tagged with 'make release'\n    Commit: Merge pull request #1039 from Yelp/u/kkasp/KKASP-0001-\n    download-more-time  Upgrade moment and get the latest tz info\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 07 Apr 2025 11:10:20 -0700\n\ntron (3.4.5) jammy; urgency=medium\n\n  * 3.4.5 tagged with 'make release'\n    Commit: Bump task_processing from 1.3.4 to 1.3.5 (#1040)  I made a\n    silly mistake and didn\"t increase the attempt counter\n\n -- Luis Perez <luisp@yelp.com>  Thu, 03 Apr 2025 15:37:22 -0700\n\ntron (3.4.4) jammy; urgency=medium\n\n  * 3.4.4 tagged with 'make release'\n    Commit: Update task_processing to 1.3.4 for watch backoff (#1038)\n    This includes https://github.com/Yelp/task_processing/pull/225,\n    which should add some backoff to watch restarts to avoid slamming\n    the apiserver\n\n -- Luis Perez <luisp@yelp.com>  Thu, 03 Apr 2025 14:01:31 -0700\n\ntron (3.4.3) jammy; urgency=medium\n\n  * 3.4.3 tagged with 'make release'\n    Commit: Bump kubernetes clientlib to the latest supported version\n    (#1037)  See https://github.com/kubernetes-client/python?tab=readme-\n    ov-file#compatibility\n\n -- Luis Perez <luisp@yelp.com>  Thu, 03 Apr 2025 10:46:44 -0700\n\ntron (3.4.2) jammy; urgency=medium\n\n  * 3.4.2 tagged with 'make release'\n    Commit: Merge pull request #1036 from Yelp/u/kkasp/TRON-2396-lower-\n    min-zoom-for-big-ol-charts  Halve minZoom for some of our big charts\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 27 Mar 2025 10:42:14 -0700\n\ntron (3.4.1) jammy; urgency=medium\n\n  * 3.4.1 tagged with 'make release'\n    Commit: Bump task_processing to v1.3.3 for Watch restart fix (#1035)\n    This brings in https://github.com/Yelp/task_processing/pull/223,\n    which should ensure that we don\"t get stuck in a funky restart loop\n    if k8s tells us our watch\"s resourceVersion is too old\n\n -- Luis Perez <luisp@yelp.com>  Wed, 26 Mar 2025 11:46:03 -0700\n\ntron (3.4.0) jammy; urgency=medium\n\n  * 3.4.0 tagged with 'make release'\n    Commit: Merge pull request #1032 from Yelp/u/kkasp/TRON-2370-do-ya-\n    like-dags  U/kkasp/tron 2370 do ya like dags\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 21 Mar 2025 13:17:02 -0700\n\ntron (3.3.2) jammy; urgency=medium\n\n  * 3.3.2 tagged with 'make release'\n    Commit: Merge pull request #1034 from Yelp/u/mpiano/SEC-\n    19862_fix_service_param  fix service name extraction logic\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 21 Mar 2025 11:28:33 -0700\n\ntron (3.3.1) jammy; urgency=medium\n\n  * 3.3.1 tagged with 'make release'\n    Commit: Merge pull request #1033 from Yelp/u/mpiano/SEC-19862_fix\n    fix auth middleware integration\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 17 Mar 2025 12:47:36 -0700\n\ntron (3.3.0) jammy; urgency=medium\n\n  * 3.3.0 tagged with 'make release'\n    Commit: Merge pull request #1005 from Yelp/u/mpiano/SEC-19555  auth\n    support for Tron APIs\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 13 Mar 2025 11:30:42 -0700\n\ntron (3.2.12) jammy; urgency=medium\n\n  * 3.2.12 tagged with 'make release'\n    Commit: Merge pull request #1030 from Yelp/jfong/TRON-1797-tronweb-\n    path  TRON-1797: Put tronweb in a 
python-version-agnostic location\n\n -- Jen Patague <jfong@yelp.com>  Mon, 03 Mar 2025 12:15:00 -0800\n\ntron (3.2.11) jammy; urgency=medium\n\n  * 3.2.11 tagged with 'make release'\n    Commit: Try to load config_name_mapping from disk just once (#1013)\n    There\"s enough files (and enough YAML in these files) that the\n    IO/YAML parsing takes a significant amount of time.  While it\"s nice\n    to always load from the source-of-truth (i.e., the files on-disk) -\n    it\"s not worth paying the performance penalty (especially at the\n    scale we\"re seeing internally) for a negligible benefit.  Locally\n    (with a large test config), this results in a ~5x improvement in\n    timings. More concretely, my test configs were taking ~30ish seconds\n    *without* this diff and ~6ish seconds *with* this diff.\n\n -- Luis Perez <luisp@yelp.com>  Mon, 24 Feb 2025 09:08:41 -0800\n\ntron (3.2.10) jammy; urgency=medium\n\n  * 3.2.10 tagged with 'make release'\n    Commit: Merge pull request #1028 from Yelp/jfong/TRON-2333  TRON-\n    2333: Bump task_processing for pod truncation fix\n\n -- Jen Patague <jfong@yelp.com>  Fri, 21 Feb 2025 11:08:55 -0800\n\ntron (3.2.9) jammy; urgency=medium\n\n  * 3.2.9 tagged with 'make release'\n    Commit: Merge pull request #1023 from Yelp/u/kkasp/TRON-2342-\n    exponential-backoff-dynamo-get  Add dynamodb retry config for\n    throttling and other errors. Add exponential backoff and jitter for\n    unprocessed keys. Fix edge case where we succesfully process keys on\n    our last attempt but still fail\n\n -- Kevin Kaspari <kkasp@yelp.com>  Wed, 12 Feb 2025 13:03:36 -0800\n\ntron (3.2.8) jammy; urgency=medium\n\n  * 3.2.8 tagged with 'make release'\n    Commit: Merge pull request #1025 from Yelp/u/kkasp/DAR-2558-fix-\n    overstep  Fix loop boundary on getting partitions\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 30 Jan 2025 11:59:51 -0800\n\ntron (3.2.7) jammy; urgency=medium\n\n  * 3.2.7 tagged with 'make release'\n    Commit: Merge pull request #1027 from Yelp/u/emanelsabban/TRON-2354\n    Handling timezone aware jobs\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Fri, 24 Jan 2025 12:38:21 -0800\n\ntron (3.2.6) jammy; urgency=medium\n\n  * 3.2.6 tagged with 'make release'\n    Commit: Add default behaviour for jobs that dont have some keys and\n    handle the non-existent of some json_vals (#1017)  * Add default\n    behaviour for jobs that dont have some keys and handle the non-\n    existent of some json_vals  * Remove get from command  * cap add and\n    drop should have values  * removing the try/except since it was\n    added in a separate pr  * maybe not all gets are necessary  * Adding\n    comments\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Fri, 17 Jan 2025 11:47:48 -0800\n\ntron (3.2.5) jammy; urgency=medium\n\n  * 3.2.5 tagged with 'make release'\n    Commit: Merge pull request #1026 from Yelp/u/kkasp/fix-timezones\n    Use localize instead of replace when writing tz info\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 17 Jan 2025 11:00:55 -0800\n\ntron (3.2.4) jammy; urgency=medium\n\n  * 3.2.4 tagged with 'make release'\n    Commit: Merge pull request #1024 from Yelp/u/kkasp/empty-val  Add\n    check for empty val. 
Merge json correctly\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 16 Jan 2025 08:49:56 -0800\n\ntron (3.2.3) jammy; urgency=medium\n\n  * 3.2.3 tagged with 'make release'\n    Commit: Merge pull request #1016 from Yelp/u/kkasp/TRON-2239-migrate-\n    pickle-only-state-to-JSON  U/kkasp/tron 2239 migrate pickle only\n    state to json\n\n -- Kevin Kaspari <kkasp@yelp.com>  Tue, 14 Jan 2025 12:05:21 -0800\n\ntron (3.2.2) jammy; urgency=medium\n\n  * 3.2.2 tagged with 'make release'\n    Commit: Merge pull request #1014 from Yelp/u/kkasp/fix-tooltip-lol-\n    its-bootstrap-2.3.1  Disable the tooltip animation that is causing a\n    strange problem where you can\"t get a tooltip when entering from the\n    top of an object\n\n -- Kevin Kaspari <kkasp@yelp.com>  Mon, 13 Jan 2025 06:59:56 -0800\n\ntron (3.2.1) jammy; urgency=medium\n\n  * 3.2.1 tagged with 'make release'\n    Commit: Merge pull request #1022 from Yelp/u/kkasp/pass-json-back-to-\n    save-queue  Pass json_val back to the save queue on failure\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 10 Jan 2025 07:27:17 -0800\n\ntron (3.2.0) jammy; urgency=medium\n\n  * 3.2.0 tagged with 'make release'\n    Commit: Merge pull request #1018 from Yelp/u/kkasp/update-delete-\n    item  Update delete_item logic to handle json partitions\n\n -- Kevin Kaspari <kkasp@10-40-19-131-uswest1cdevc.dev.yelpcorp.com>  Thu, 09 Jan 2025 12:29:29 -0800\n\ntron (3.1.2) jammy; urgency=medium\n\n  * 3.1.2 tagged with 'make release'\n    Commit: Bump task_processing to v1.3.1 for capability fix (#1020)\n    Just like Yelp/paasta#3972 and Yelp/paasta#3973, we need to ensure\n    that there are no duplicates between cap_add and cap_drop -\n    otherwise, the cap_drop entry will \"win\" and the duplicate\n    capability will not be added.\n\n -- Luis Perez <luisp@yelp.com>  Thu, 09 Jan 2025 09:21:33 -0800\n\ntron (3.1.1) jammy; urgency=medium\n\n  * 3.1.1 tagged with 'make release'\n    Commit: Merge pull request #1019 from Yelp/u/emanelsabban/capture-\n    json_val  Capturing exception when json_val doesnt exist\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Wed, 08 Jan 2025 13:04:07 -0800\n\ntron (3.1.0) jammy; urgency=medium\n\n  * 3.1.0 tagged with 'make release'\n    Commit: Add read_json to be a part of master.yaml config (#1015)  *\n    Add read_json to be a part of master.yaml config  * Fix the mypy\n    error & other failed tests  * Addressing reviews  * Add reas_json to\n    restore in shelve function\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Thu, 02 Jan 2025 12:51:55 -0800\n\ntron (3.0.0) jammy; urgency=medium\n\n  * 3.0.0 tagged with 'make release'\n    Commit: [TRON-2238] Reading from JSON to restore jobs\" state instead\n    of pickles (#1010)  * Reading from Json to restore jobs\" state\n    instead of pickles  * Fixing some bugs through testing  * Deleting\n    some comments  * mocking get_config_watcher  * Try mocking one more\n    time  * Toggling off read_json since we want to merge that way  *\n    Removing some comments  * Addressed reviews  * Toggling read_json\n    back off  * Addressing more reviews  * one more review plz  *\n    includes reviews except tz\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Mon, 23 Dec 2024 07:56:01 -0800\n\ntron (2.8.1) jammy; urgency=medium\n\n  * 2.8.1 tagged with 'make release'\n    Commit: Merge pull request #1004 from Yelp/u/kkasp/TRON-2237-\n    timedeltas-cant-json  Use total_seconds for timedeltas. 
Log job and\n    jobrun names/runnums on serialization\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 31 Oct 2024 09:11:35 -0700\n\ntron (2.8.0) jammy; urgency=medium\n\n  * 2.8.0 tagged with 'make release'\n    Commit: Merge pull request #997 from Yelp/u/kkasp/TRON-2237-write-\n    json-state  Write JSON state\n\n -- Kevin Kaspari <kkasp@yelp.com>  Tue, 29 Oct 2024 14:14:38 -0700\n\ntron (2.7.0) jammy; urgency=medium\n\n  * 2.7.0 tagged with 'make release'\n    Commit: Merge pull request #1001 from\n    Yelp/u/cuza/getting_namespace_logs_from_yelpsoa  Fixing Tron logs\n    for jobs using other services images\n\n -- Kevin Kaspari <kkasp@yelp.com>  Tue, 29 Oct 2024 13:31:16 -0700\n\ntron (2.6.0) jammy; urgency=medium\n\n  * 2.6.0 tagged with 'make release'\n    Commit: Use logreader rathern than vector-reader (#1002)  This CLI\n    is being renamed, so let\"s account for that\n\n -- Luis Perez <luisp@yelp.com>  Thu, 24 Oct 2024 09:34:05 -0700\n\ntron (2.5.3) jammy; urgency=medium\n\n  * 2.5.3 tagged with 'make release'\n    Commit: Merge pull request #1000 from Yelp/jfong/TRON-2208-\n    nonretryable-to-unknown  TRON-2208: Update non_retryable_exit_code\n    behavior to treat as UNKNOWN\n\n -- Jen Patague <jfong@yelp.com>  Mon, 30 Sep 2024 14:12:37 -0700\n\ntron (2.5.2) jammy; urgency=medium\n\n  * 2.5.2 tagged with 'make release'\n    Commit: Use vector-reader cli if new logging pipeline is enabled\n    (#999)  Thankfully, this was a pretty easy change - we really only\n    needed to parametrize the command name being used and the location\n    selector is now a lot simpler as it\"ll always be the superregion\n\n -- Luis Perez <luisp@yelp.com>  Mon, 30 Sep 2024 09:32:22 -0700\n\ntron (2.5.1) jammy; urgency=medium\n\n  * 2.5.1 tagged with 'make release'\n    Commit: Merge pull request #998 from Yelp/jfong/TRON-2277-fix-\n    nonretryable-exit-codes  TRON-2277: Pass along\n    non_retryable_exit_codes to KubernetesCluster objects\n\n -- Jen Patague <jfong@yelp.com>  Thu, 26 Sep 2024 10:21:53 -0700\n\ntron (2.5.0) jammy; urgency=medium\n\n  * 2.5.0 tagged with 'make release'\n    Commit: Attempt to batch config loading for deployments (#996)\n    Right now we make at most 2N calls to the Tron API during config\n    deployments: N to get the current configs and at most N if all\n    services have changes.  To start, I\"d like to reduce this to N by\n    allowing GET /api/config to return all the configs so that the only\n    requests needed are POSTs for changed configs.  Depending on how\n    this goes, we can look into batching up the POSTs so that we can\n    also do that in a single request.  
In terms of speed, it looks like\n    loading all the configs from pnw-prod (on my devbox) with this new\n    behavior takes ~3s - which isn\"t great, but there\"s a decent bit of\n    file IO going on here :(\n\n -- Luis Perez <luisp@yelp.com>  Tue, 17 Sep 2024 08:25:15 -0700\n\ntron (2.4.2) jammy; urgency=medium\n\n  * 2.4.2 tagged with 'make release'\n    Commit: Merge pull request #995 from Yelp/u/cuza/making-non-\n    retryable-exit-code-accept-negative-numbers  Fix negative value\n    check for non-retryable exit codes in Kubernetes configuration\n\n -- Dave Cuza <dcuza@yelp.com>  Mon, 09 Sep 2024 12:23:49 -0700\n\ntron (2.4.1) jammy; urgency=medium\n\n  * 2.4.1 tagged with 'make release'\n    Commit: Merge pull request #994 from Yelp/StefanoChiodino-patch-1\n    Update tronweb.less\n\n -- Luis Perez <luisp@yelp.com>  Tue, 03 Sep 2024 11:24:45 -0700\n\ntron (2.4.0) jammy; urgency=medium\n\n  * 2.4.0 tagged with 'make release'\n    Commit: TRON-2208: Add toggle in tron config to disable retries on\n    LOST k8s jobs (#988)  Given that \"LOST\" means Tron has lost track of\n    a pod it already thought it had started for a job, attempting to\n    retry/start a replacement can be dangerous for non-idempotent jobs.\n    In the current state, these will consume retries, but with some of\n    our EKS migration methods, LOST tasks are more likely. Therefore, we\n    should have a way to temporarily pause retries on these.  ## Related\n    Issues - TRON-2208: Add toggle in Tron config to disable retries on\n    LOST k8s jobs\n\n -- Luis Perez <luisp@yelp.com>  Wed, 28 Aug 2024 09:19:14 -0700\n\ntron (2.3.0) jammy; urgency=medium\n\n  * 2.3.0 tagged with 'make release'\n    Commit: Merge pull request #989 from Yelp/jfong/TRON-2195-old-\n    kubeconfig-paths  TRON-2195: Support watcher_kubeconfig_paths\n\n -- Jen Patague <jfong@yelp.com>  Thu, 11 Jul 2024 14:25:42 -0700\n\ntron (2.2.7) jammy; urgency=medium\n\n  * 2.2.7 tagged with 'make release'\n    Commit: Only show disable warning on tronctl disable (#990)  I\"m not\n    sure I was thinking here since this ended up unconditionally\n    printing the disable warning - but we can chalk this up to an\n    intentional PR campaign to warn folks and only show the warnings on\n    tronctl disable from now on :p\n\n -- Luis Perez <luisp@yelp.com>  Thu, 11 Jul 2024 11:49:18 -0700\n\ntron (2.2.6) jammy; urgency=medium\n\n  * 2.2.6 tagged with 'make release'\n    Commit: Update scribereader tests (#983)\n\n -- Yaroslav Liakhovskyi <yaro@yelp.com>  Fri, 28 Jun 2024 01:41:29 -0700\n\ntron (2.2.5) jammy; urgency=medium\n\n  * 2.2.5 tagged with 'make release'\n    Commit: Update yelp_clog and use datetime range for S3 logs (#979)\n\n -- Yaroslav Liakhovskyi <yaro@yelp.com>  Wed, 26 Jun 2024 08:27:10 -0700\n\ntron (2.2.4) jammy; urgency=medium\n\n  * 2.2.4 tagged with 'make release'\n    Commit: Merge pull request #981 from Yelp/revert-980-\n    u/jfong/revert_to_2.1.1  Revert \"Reverts all changes back through\n    2.1.1 and retains urgent unprocessedkeys fix\"\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Thu, 20 Jun 2024 14:52:09 -0700\n\ntron (2.2.3) jammy; urgency=medium\n\n  * 2.2.3 tagged with 'make release'\n    Commit: Reverts all changes back through 2.1.1 and retains urgent\n    unprocessedkeys fix (#980)  * Revert \"pass around projected SA\n    configs properly\"  This reverts commit\n    12b1bf27e7b9e9f3fd0963f9d073d2552a58115f.  
* Revert \"Merge remote-\n    tracking branch \"origin/u/mpiano/SEC-18955\"\"  This reverts commit\n    ea6376d72ffdd269e11cb9338d4f0c656bcd6f66, reversing changes made to\n    4038f1e173aeff932b6e060fc2690f7aa502a85d.  * Revert \"Use\n    S3LogsReader with superregion and UTC timezone (#972)\"  This reverts\n    commit af73799363549a082eea11f0f98d5c7f4810abe4.\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Thu, 20 Jun 2024 10:07:42 -0700\n\ntron (2.2.2) jammy; urgency=medium\n\n  * 2.2.2 tagged with 'make release'\n    Commit: Merge pull request #977 from Yelp/u/jfong/tron_quick_fix\n    Fix UnprocessedKeys bug when restoring from dynamodb\n\n -- Jen Patague <jfong@yelp.com>  Mon, 17 Jun 2024 17:50:59 -0700\n\ntron (2.2.1) jammy; urgency=medium\n\n  * 2.2.1 tagged with 'make release'\n    Commit: Merge pull request #976 from Yelp/u/mpiano/SEC-18955_fix\n    pass around projected SA configs properly\n\n -- Jen Patague <jfong@yelp.com>  Mon, 17 Jun 2024 14:52:24 -0700\n\ntron (2.2.0) jammy; urgency=medium\n\n  * 2.2.0 tagged with 'make release'\n    Commit: Merge remote-tracking branch \"origin/u/mpiano/SEC-18955\"\n\n -- Matteo Piano <mpiano@yelp.com>  Tue, 11 Jun 2024 02:27:25 -0700\n\ntron (2.1.2) jammy; urgency=medium\n\n  * 2.1.2 tagged with 'make release'\n    Commit: Use S3LogsReader with superregion and UTC timezone (#972)\n\n -- Yaroslav Liakhovskyi <yaro@yelp.com>  Wed, 05 Jun 2024 02:17:04 -0700\n\ntron (2.1.1) jammy; urgency=medium\n\n  * 2.1.1 tagged with 'make release'\n    Commit: Handling exceptions thrown from threads span by\n    ThreadPoolExecutor - TRON-2202 (#969)  * Handling exceptions thrown\n    from threads span by ThreadPoolExecutor  * Addressing reviews  *\n    Addressing more reviews  * Removed Exit code\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Tue, 04 Jun 2024 11:24:00 -0700\n\ntron (2.1.0) jammy; urgency=medium\n\n  * 2.1.0 tagged with 'make release'\n    Commit: Revert the Mesos code deletions (#970)  This turned out to\n    be unsafe to to our use of pickles as a serialization format.  *\n    Revert \"Use the latest task_proc (#966)\"  This reverts commit\n    01003a980854bc25ed2764c880e1fb69db296fb1.  * Revert \"Delete\n    remaining Mesos code (#961)\"  This reverts commit\n    1f71d0fa406e530ac943f1fcaf312224015f392c.  * Revert \"Delete Mesos\n    related exit codes and docker files - TRON-2187 (#959)\"  This\n    reverts commit 33ad2a1657aeea61f899380dada42825e137083b.  * Revert\n    \"Delete Mesos logging config (#962)\"  This reverts commit\n    640362424f1b1b6bb1c959a04b0843ca1e846c10.  * Revert \"Merge pull\n    request #953 from Yelp/u/emanelsabban/TRON-2183\"  This reverts\n    commit 10353457221d11f2a587c665bae05b16f3cba447, reversing changes\n    made to e4114088fefaf6a3d3f5be27e5caf41d7c1c9973.  
* Revert\n    \"Deleting Mesos code from the Master Control Program\"  This reverts\n    commit 18f48ee1db23e1e7f91e0e9e7846eb345d171534.\n\n -- Luis Perez <luisp@yelp.com>  Mon, 03 Jun 2024 13:33:05 -0700\n\ntron (2.0.0) jammy; urgency=medium\n\n  * 2.0.0 tagged with 'make release'\n    Commit: Use the latest task_proc (#966)  There\"s no real changes\n    here other than dropping all the Mesos-related code from task_proc\n\n -- Luis Perez <luisp@yelp.com>  Fri, 17 May 2024 13:49:55 -0700\n\ntron (1.32.5) jammy; urgency=medium\n\n  * 1.32.5 tagged with 'make release'\n    Commit: Merge pull request #965 from Yelp/u/emanelsabban/fix-make-\n    release  Remove /docs/source/generated from gitignore file\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Fri, 17 May 2024 09:10:54 -0700\n\ntron (1.32.4) jammy; urgency=medium\n\n  * 1.32.4 tagged with 'make release'\n    Commit: Delete Mesos related exit codes and docker files - TRON-2187\n    (#959)  * Delete Mesos related exit codes and docker files  * Adding\n    a try/except for validating extra keys  * Testing deleted extra keys\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Fri, 17 May 2024 07:51:23 -0700\n\ntron (1.32.3) jammy; urgency=medium\n\n  * 1.32.3 tagged with 'make release'\n    Commit: Automate make release in Tron (#960)\n\n -- Jon Lee <jonlee@yelp.com>  Mon, 13 May 2024 15:36:34 -0700\n\ntron (1.32.2) jammy; urgency=medium\n\n  * 1.32.2 tagged with 'make release'\n    Commit: Merge pull request #953 from Yelp/u/emanelsabban/TRON-2183\n    Delete Mesos code from statemanager - TRON-2183\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Mon, 13 May 2024 12:00:06 -0700\n\ntron (1.32.1) jammy; urgency=medium\n\n  * 1.32.1 tagged with 'make release'\n    Commit: Merge pull request #952 from Yelp/u/emanelsabban/TRON-2182\n    Deleting Mesos code from the Master Control Program - TRON-2182\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Mon, 13 May 2024 11:43:05 -0700\n\ntron (1.32.0) jammy; urgency=medium\n\n  * 1.32.0 tagged with 'make release'\n    Commit: Update task_proc to better handle killing tasks (#951)\n    We've seen that task_proc/k8s will sometimes not correctly send\n    events for pods that we try to kill ourselves (either because the\n    pods are already gone or because the event is somehow missing data),\n    so this task_proc version will send synthetic events when we call\n    kill() to ensure that tron is in the correct state :)\n    Co-authored-by: Jen Patague <jfong@yelp.com>\n\n -- Luis Perez <luisp@yelp.com>  Thu, 09 May 2024 09:16:24 -0700\n\ntron (1.31.0) jammy; urgency=medium\n\n  * 1.31.0 tagged with 'make release'\n    Commit: Parallelizing execution of restoring in Tron - TRON-2161\n    (#950)  * Parallelizing execution of restoring in Tron  * Addressing\n    reviews and fixing unit tests  * Adding sorting for runs state\n    otherwise runs will be out of order  * Addressing more reviews and\n    adding exception for exceeding max_attempts  * Address typing stuff\n    and timers  * Fixing the typing error  * fixing incomplete sentence\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Wed, 01 May 2024 12:54:10 -0700\n\ntron (1.30.0) jammy; urgency=medium\n\n  * 1.30.0 tagged with 'make release'\n    Commit: Adding yelp_clog S3LogsReader (#949)  * Upgrade yelp\n    scribereader deps  * Enable S3LogsReader for action run logs  *\n    Upgrade boto requirements  * Upgrade mypy and update type ignores  *\n    Pin more internal requirements\n\n -- Yaroslav Liakhovskyi <yaro@yelp.com>  Mon, 29 Apr 2024 04:57:37 -0700\n\ntron 
(1.29.5) jammy; urgency=medium\n\n  * 1.29.5 tagged with 'make release'\n    Commit: Merge pull request #940 from Yelp/jfong/TRON-1850-starting-\n    jobs-stuck  TRON-1850: Include 'starting' pods in check for stuck jobs\n\n -- Jen Patague <jfong@yelp.com>  Tue, 09 Apr 2024 11:37:15 -0700\n\ntron (1.29.4) jammy; urgency=medium\n\n  * 1.29.4 tagged with 'make release'\n    Commit: Adding logs to Tron to indicate start and scheduling times -\n    TRON-2152 (#948)  * Adding logs to Tron to indicate start and\n    scheduling times  * Addressed reviews on the PR  * Address reviews\n    2.0  * Deleting the start_schedule_jobs flag  * Making boot_time a\n    required param  * bring back time.time in duration\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Mon, 08 Apr 2024 11:15:48 -0700\n\ntron (1.29.3) jammy; urgency=medium\n\n  * 1.29.3 tagged with 'make release'\n    Commit: Add a top-level exception handler (#947)  As the comment\n    says: let's see if adding a top-level handler (that then re-raises)\n    and catching BaseException will give us more info as to what's\n    sometimes causing Tron to exit.  I've also added the usual base\n    exception (Exception) just to be extra-safe  (h/t to krall for the\n    idea)\n\n -- Luis Perez <luisp@yelp.com>  Wed, 27 Mar 2024 15:00:00 -0700\n\ntron (1.29.2) jammy; urgency=medium\n\n  * 1.29.2 tagged with 'make release'\n    Commit: Merge pull request #946 from Yelp/u/emanelsabban/edit-\n    messages  Editing some of the messages from previous prs\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Wed, 20 Mar 2024 08:33:45 -0700\n\ntron (1.29.1) jammy; urgency=medium\n\n  * 1.29.1 tagged with 'make release'\n    Commit: Merge pull request #942 from Yelp/u/kkasp/TRON-1970-escape-\n    html-tronweb  Use Underscores HTML escaped interpolation in template\n    for log and command outputs\n\n -- Kevin Kaspari <kkasp@yelp.com>  Fri, 08 Mar 2024 08:03:14 -0800\n\ntron (1.29.0) jammy; urgency=medium\n\n  * 1.29.0 tagged with 'make release'\n    Commit: Merge pull request #944 from Yelp/u/emanelsabban/TRON-2124-\n    expose-prom-metrics  Adding prometheus endpoint in tron - TRON-2124\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Tue, 05 Mar 2024 12:55:14 -0800\n\ntron (1.28.5) jammy; urgency=medium\n\n  * 1.28.5 tagged with 'make release'\n    Commit: Stop keeping old copies of eventbus files (#941)  We've\n    never restored from these 'backups', and as long as we ensure that\n    the current file points to a fully written file, it's fine to only\n    keep at most 2 files around: the actual current file and a temporary\n    'new' file.  
To ensure that we're not pointing at a half-written\n    file, we continue using the age-old pattern for atomic file updates:\n    write to another file and swap a symlink once the write is complete\n\n -- Luis Perez <luisp@yelp.com>  Tue, 20 Feb 2024 11:55:45 -0800\n\ntron (1.28.4) jammy; urgency=medium\n\n  * 1.28.4 tagged with 'make release'\n    Commit: Merge pull request #939 from Yelp/u/wilmerrafael/COMPINFRA-\n    3601_adding_tron_run_number_label  Adding tron run id to pod labels\n    for k8s\n\n -- Wilmer Bandres <wilmerrafael@yelp.com>  Thu, 15 Feb 2024 06:43:40 -0800\n\ntron (1.28.3) jammy; urgency=medium\n\n  * 1.28.3 tagged with 'make release'\n    Commit: Merge pull request #938 from Yelp/u/kkasp/TRON-2112-lock-\n    start  Add lock to tron start to mitigate the risk of running\n    duplicate jobs…\n\n -- Kevin Kaspari <kkasp@yelp.com>  Thu, 08 Feb 2024 11:24:39 -0800\n\ntron (1.28.2) jammy; urgency=medium\n\n  * 1.28.2 tagged with 'make release'\n    Commit: Remove unnecessary mocks (#937)  This is what I get for\n    trusting GHA rather than also running the tests internally :)  These\n    mocks are no longer required and are actually causing test failures\n    internally.\n\n -- Luis Perez <luisp@yelp.com>  Thu, 01 Feb 2024 09:10:42 -0800\n\ntron (1.28.1) jammy; urgency=medium\n\n  * 1.28.1 tagged with 'make release'\n    Commit: Downpin yelp-clog (#936)  We had bumped this a couple major\n    versions since we were also bumping scribereader - but we reverted\n    the scribereader bump before merging the jammy/py38 branch and\n    forgot to also revert the yelp-clog bump :)\n\n -- Luis Perez <luisp@yelp.com>  Thu, 01 Feb 2024 08:51:40 -0800\n\ntron (1.28.0) jammy; urgency=medium\n\n  * 1.28.0 tagged with 'make release'\n    Commit: Upgrading Tron to py3.8 + patching it with the fix (#934)  *\n    Monkeypatch SimpleQueue back to PySimpleQueue  We have a hunch that\n    this is what is causing our pod event loop to have wildly delayed\n    items  * Revert 'Revert python/jammy upgrades (#907)'  This reverts\n    commit 483da5c0fb258b01b8e47912ad034d43554ada7d.  * new formatting\n    * Bump pyyaml  * Rm sad test that is not relevant for validation  *\n    added stuff to run tron locally  * matching the python version with\n    whats currently running in infrastage  * adding also changelog  *\n    This commit includes some requirements for clog and try/except block\n    for handle_events  * This commit adds the patch fix  * Revert\n    'matching the python version with whats currently running in\n    infrastage'  This reverts commit\n    1f81a6d4805d742a7a7a28b1d7d8eef39522a896.  * Revert 'adding also\n    changelog'  This reverts commit\n    951fcc8fc31114bde340bad4b82ced0529e03b65.  
* precommit fixes the\n    patch  * Fixing mypy issues and tests failing  * removing return and\n    adding comment for handling defer being none  * addressing wording\n    * fix InvariantException back to Exception  ---------  Co-authored-\n    by: Luis Perez <luisp@yelp.com> Co-authored-by: Vincent Thibault\n    <vit@yelp.com>\n\n -- Eman Elsabban <emanelsabban@yelp.com>  Wed, 31 Jan 2024 09:25:13 -0800\n\ntron (1.27.5) jammy; urgency=medium\n\n  * 1.27.5 tagged with 'make release'\n    Commit: Allow Tron to use more than 10k file descriptors (#923)\n    There appears to be an fd leak somewhere in tron, we're not in\n    danger of hitting any host-level limits - so let's update the unit\n    file to have a bigger limit in the meantime\n\n -- Luis Perez <luisp@yelp.com>  Wed, 11 Oct 2023 12:11:22 -0700\n\ntron (1.27.4) jammy; urgency=medium\n\n  * 1.27.4 tagged with 'make release'\n    Commit: Revert 'Link tronweb_url ' (#928)  Reverts #911 as it did\n    not actually result in a clickable link in Slack\n\n -- Luis Perez <luisp@yelp.com>  Tue, 10 Oct 2023 12:26:07 -0700\n\ntron (1.27.3) jammy; urgency=medium\n\n  * 1.27.3 tagged with 'make release'\n    Commit: Catch all exceptions in k8s submit_command() (#926)  It's\n    entirely possible that creating a task_processing task (and/or\n    submitting one) can result in an exception. At the moment, this\n    results in the affected ActionRun getting stuck in the Starting\n    state - but this is a lie and means that the normal\n    monitoring/alerting on failed runs does not kick in.\n\n -- Luis Perez <luisp@yelp.com>  Mon, 21 Aug 2023 12:27:26 -0700\n\ntron (1.27.2) jammy; urgency=medium\n\n  * 1.27.2 tagged with 'make release'\n    Commit: Merge pull request #924 from Yelp/u/vit/fix-\n    configsecretvolume  Fix item assignment issue with\n    ConfigSecretVolume\n\n -- Vincent Thibault <vit@yelp.com>  Mon, 14 Aug 2023 13:36:37 -0700\n\ntron (1.27.1) jammy; urgency=medium\n\n  * 1.27.1 tagged with 'make release'\n    Commit: Update to the latest task_proc (#915)  This version pulls in\n    a fix that stops pods from being launched with a null request. This\n    has been running fine in infrastage where I've verified that Pods\n    are indeed being launched with the correct metadata  I've verified\n    this as well with unit tests in task_proc :)\n\n -- Luis Perez <luisp@yelp.com>  Mon, 10 Jul 2023 08:28:15 -0700\n\ntron (1.27.0) jammy; urgency=medium\n\n  * 1.27.0 tagged with 'make release'\n    Commit: Remove tronweb2 code (#914)  This isn't currently being used\n    and is complicating the internal build process\n\n -- Luis Perez <luisp@yelp.com>  Tue, 13 Jun 2023 08:49:02 -0700\n\ntron (1.26.0) jammy; urgency=medium\n\n  * 1.26.0 tagged with 'make release'\n    Commit: Merge pull request #909 from Yelp/u/vit/tron-secret-volume\n    TRON-1636: Pass secret_volumes to taskproc from kubernetes\n\n -- Vincent Thibault <vit@yelp.com>  Mon, 12 Jun 2023 09:03:39 -0700\n\ntron (1.25.1) jammy; urgency=low\n\n  * 1.25.1 tagged with 'make release'\n    Commit: Merge pull request #911 from Yelp/tchen/link-tronweb-url\n    Link tronweb_url\n\n -- Tianle Chen <tchen@yelp.com>  Mon, 05 Jun 2023 09:21:09 -0700\n\ntron (1.25.0) jammy; urgency=medium\n\n  * 1.25.0 tagged with 'make release'\n    Commit: COMPINFRA-2565: enforce an upper limit on backfill\n    concurrency (#906)  Problem  ----- The Kubernetes control plane can\n    be overwhelmed if a high number of concurrent backfills are\n    executed. 
We currently set a relatively high default of  and enforce\n    no upper limit at all.  Solution ----- * Reduce the default  from\n    to  to reduce the baseline pressure of Tron backfill jobs. *\n    Introduce and enforce a hard limit of  concurrent backfills per\n    tronctl invocation  In a future PR, we might want to track the total\n    count of running backfills per user in Zookeeper but this change\n    should already safeguard against overwhelming Tron with backfills in\n    the meantime.  Signed-off-by: Max Falk <gfalk@yelp.com>\n\n -- Luis Perez <luisp@10-40-27-179-uswest1cdevc.dev.yelpcorp.com>  Thu, 18 May 2023 09:06:24 -0700\n\ntron (1.24.5) jammy; urgency=medium\n\n  * 1.24.5 tagged with 'make release'\n    Commit: Revert python/jammy upgrades (#907)  * Revert 'Upgrades\n    requirements and adds caching on get loggers (#900)'  This reverts\n    commit 0ac2b69d648bb3b915a80637a4f3051e06a16296.  * Revert 'Adds\n    Python 3.8; build for Jammy (#896)'  This reverts commit\n    f7679b9a281d026b4d956c5735875eded5602310.\n\n -- Luis Perez <luisp@10-40-27-179-uswest1cdevc.dev.yelpcorp.com>  Wed, 17 May 2023 08:44:16 -0700\n\ntron (1.24.4) jammy; urgency=medium\n\n  * 1.24.4 tagged with 'make release'\n    Commit: Updates date arithmetic to handle whitespace (#903)\n\n -- Jon Lee <jonlee@yelp.com>  Thu, 09 Mar 2023 14:23:44 -0800\n\ntron (1.24.3) jammy; urgency=medium\n\n  * 1.24.3 tagged with 'make release'\n    Commit: Upgrades kubernetes version to support 1.21 (#902)\n\n -- Jon Lee <jonlee@yelp.com>  Thu, 09 Mar 2023 12:15:55 -0800\n\ntron (1.24.2) jammy; urgency=medium\n\n  * 1.24.2 tagged with 'make release'\n    Commit: Ignores CryptographyDeprecationWarning (#901)  * Ignores\n    CryptographyDeprecationWarning\n\n -- Jon Lee <jonlee@yelp.com>  Thu, 09 Mar 2023 11:46:44 -0800\n\ntron (1.24.1) jammy; urgency=medium\n\n  * 1.24.1 tagged with 'make release'\n    Commit: Upgrades requirements and adds caching on get loggers (#900)\n    * updates axe-core  * Upgrade twisted  * Stop locking on getting\n    loggers  * Adds better description and commit suggestions  *\n    Upgrades Werkzeug dependency  * Upgrades future dependency  *\n    Upgrades ipython dependency  * Upgrades cryptography dependency  *\n    Upgrades setuptools dependency  * Upgrades certifi dependency  *\n    Upgrades urllib3 dependency  * Upgrades rsa dependency  * Upgrades\n    Pygments dependency  * Upgrades jinja2 for docs dependency  *\n    Upgrades docs dependencies  * Update tron/trondaemon.py  Co-authored-\n    by: Luis Pérez <luisp@yelp.com>  ---------  Co-authored-by: Luis\n    Pérez <luisp@yelp.com>\n\n -- Jon Lee <jonlee@yelp.com>  Wed, 08 Mar 2023 10:13:38 -0800\n\ntron (1.24.0) jammy; urgency=medium\n\n  * 1.24.0 tagged with 'make release'\n    Commit: Fixes make release for jammy (#899)\n\n -- Jon Lee <jonlee@yelp.com>  Fri, 03 Mar 2023 05:39:51 -0800\n\ntron (1.23.10) bionic; urgency=medium\n\n  * 1.23.10 tagged with 'make release'\n    Commit: Merge pull request #890 from\n    Yelp/u/cuza/logging_tron_exit_status  Adding ExecStopPost to systemd\n    unit file to log tron exit status\n\n -- root <root@ba896c01f138>  Tue, 31 Jan 2023 18:27:19 +0000\n\ntron (1.23.9) bionic; urgency=medium\n\n  * 1.23.9 tagged with 'make release'\n    Commit: Merge pull request #887 from Yelp/u/jfong/TRON-1723  TRON-\n    1723: Add user attribution into user agent when invoking tronctl\n    commands\n\n -- root <root@35f519810749>  Wed, 07 Dec 2022 20:29:42 +0000\n\ntron (1.23.8) bionic; urgency=medium\n\n  * 1.23.8 
tagged with 'make release'\n    Commit: TRON-1825: Add additional failure handling of specific\n    errors (#886)  Adds additional failure handling of specific errors\n    Adds handling of spot interruptions and k8s scaling down Captures\n    real exit codes from failures.\n\n -- root <root@b5061e28e264>  Mon, 05 Dec 2022 15:43:23 +0000\n\ntron (1.23.7) bionic; urgency=medium\n\n  * 1.23.7 tagged with 'make release'\n    Commit: Use correct dict casing for k8s metadata (#885)  The data\n    we'd been dumping into our logging streams was using different key\n    names than what task_proc was actually sending k8s: we were logging\n    the python-ified names, but task_proc was sending tron the literal\n    k8s payloads (i.e., keynames being camelCased).  We didn't notice\n    this 'cause until the initial attempt at working around this weird\n    k8s bug we weren't actually logging what tron was seeing and our\n    test data in the unit tests was based on what we'd been seeing in\n    our logging streams\n\n -- root <root@93f5efd72e85>  Wed, 23 Nov 2022 19:44:24 +0000\n\ntron (1.23.6) bionic; urgency=medium\n\n  * 1.23.6 tagged with 'make release'\n    Commit: Detect abnormal successful exits in k8s (#884)  this is\n    kinda wild: we're seeing that a kubelet will sometimes fail to start\n    a container (usually due to what appear to be race conditions like\n    those mentioned in\n    https://github.com/kubernetes/kubernetes/issues/100047#issuecomment-\n    797624208 and then decide that these Pods should be phase=Succeeded\n    with an exit code of 0 - even though the container never actually\n    started.  So far, we've noticed that when this happens, the  and\n    fields will be  - thus we'll check for at least one of these\n    conditions to detect an abnormal exit and actually 'fail' the\n    affected action\n\n -- root <root@4c9236847e02>  Tue, 22 Nov 2022 19:28:02 +0000\n\ntron (1.23.5) bionic; urgency=medium\n\n  * 1.23.5 tagged with 'make release'\n    Commit: Merge pull request #882 from Yelp/DAR-1739-revert-rookout\n    Revert 'This adds rookout to tron (TRON-1764) (#875)'\n\n -- root <root@b023a97c11d6>  Wed, 26 Oct 2022 00:24:33 +0000\n\ntron (1.23.4) bionic; urgency=medium\n\n  * 1.23.4 tagged with 'make release'\n    Commit: Bail out earlier on large tron logs (#881)  It's possible\n    for there to be multiple gigabytes of logs in our logging streams\n    and we don't want Tron sitting there processing these for ages on\n    end. 
This isn't a silver bullet, but this should help in the\n    meantime\n\n -- root <root@d4d7abb7b249>  Mon, 24 Oct 2022 16:02:12 +0000\n\ntron (1.23.3) bionic; urgency=medium\n\n  * 1.23.3 tagged with 'make release'\n    Commit: TRON-1806 Adds new function in check_tron_jobs to skip\n    failed logging queries for a specific superregion (#880)  Adds new\n    function in check_tron_jobs --skip-sensu-failure-logging\n\n -- root <root@277e452fcc16>  Fri, 07 Oct 2022 19:53:47 +0000\n\ntron (1.23.2) bionic; urgency=medium\n\n  * 1.23.2 tagged with 'make release'\n    Commit: fixes incorrect test\n\n -- root <root@561dbdc505d9>  Thu, 06 Oct 2022 15:24:16 +0000\n\ntron (1.23.1) bionic; urgency=medium\n\n  * 1.23.1 tagged with 'make release'\n    Commit: Merge pull request #877 from Yelp/jammy-3.7  Python 3.7;\n    build for Jammy\n\n -- root <root@7225e396580b>  Thu, 15 Sep 2022 22:42:37 +0000\n\ntron (1.23.0) bionic; urgency=medium\n\n  * 1.23.0 tagged with 'make release'\n    Commit: This adds rookout to tron (TRON-1764) (#875)  Adds rookout\n    to tron (TRON-1764)\n\n -- root <root@fd6a57b542b3>  Mon, 12 Sep 2022 19:54:31 +0000\n\ntron (1.22.0) bionic; urgency=medium\n\n  * 1.22.0 tagged with 'make release'\n    Commit: Merge pull request #873 from Yelp/u/emanelsabban/tronUI-1737\n    Limiting amount of output displayed in Tron UI - TRON-1737\n\n -- root <root@3923dfd1fe65>  Tue, 23 Aug 2022 19:35:57 +0000\n\ntron (1.21.0) bionic; urgency=medium\n\n  * 1.21.0 tagged with 'make release'\n    Commit: Merge pull request #874 from Yelp/u/jfong/TRON-1777-update-\n    taskproc  TRON-1777: bump taskproc to get kubeconfig reload fix\n\n -- root <root@6bf33780de09>  Mon, 15 Aug 2022 18:29:12 +0000\n\ntron (1.20.0) bionic; urgency=medium\n\n  * 1.20.0 tagged with 'make release'\n    Commit: bumped up taskproc ver pass tron version to tskprc\n\n -- root <root@d63f2c9cda1b>  Mon, 25 Jul 2022 20:44:50 +0000\n\ntron (1.19.0) bionic; urgency=medium\n\n  * 1.19.0 tagged with 'make release'\n    Commit: Pass port/field selector envvars to task_proc (#869)  we\n    need this for spark drivers in k8s to work - and without the field\n    selector env vars we're not able to fully adhere to the paasta\n    workload contract\n\n -- root <root@9be8d1790a61>  Mon, 18 Apr 2022 16:10:56 +0000\n\ntron (1.18.0) bionic; urgency=medium\n\n  * 1.18.0 tagged with 'make release'\n    Commit: Merge pull request #867 from Yelp/u/kawaiwan/retry-honors-\n    triggers  Add arg to  for waiting for dependencies\n\n -- root <root@27a2aed6edd5>  Wed, 13 Apr 2022 18:41:37 +0000\n\ntron (1.17.1) bionic; urgency=medium\n\n  * 1.17.1 tagged with 'make release'\n    Commit: Create KubernetesActionRuns for executor: spark (#866)  For\n    now, we should be able to just use a KubernetesActionRun for Spark\n    drivers - worst case, I think we'll just have a couple places where\n    we branch on  if we absolutely need to do something Spark-specific\n    and can't just pass that down from paasta-tools  This PR also\n    removes spark_driver_service_account_name - I'm not sure what I was\n    thinking initially - we don't really need anything to distinguish\n    between the driver and executor SA since the executor SA is\n    configured by the arguments used to start the driver (and thus we\n    can just start the driver pod as we normally would by using the\n    service account payload that we'd use for a normal tron-launched\n    pod)\n\n -- root <root@46477e094be3>  Mon, 21 Mar 2022 15:03:15 +0000\n\ntron (1.17.0) bionic; urgency=medium\n\n  
* 1.17.0 tagged with 'make release'\n    Commit: Bump pysensu-yelp to pull in issuetype support (#864)\n    https://github.com/Yelp/pysensu-yelp/pull/30 was released in pysensu-\n    yelp==0.4.4 and allows users to set the issuetype for a ticket (so\n    that sensu-created tickets aren't always of type  (the default))\n\n -- root <root@cef92af84f05>  Thu, 10 Mar 2022 18:47:54 +0000\n\ntron (1.16.3) bionic; urgency=medium\n\n  * 1.16.3 tagged with 'make release'\n    Commit: Respect ActionRunAdapter max_lines param (#862)  Callers can\n    request a specific number of lines for logs - before this change we\n    were ignoring that (except for the k8s metadata logs) and always\n    returning all the data available.\n\n -- root <root@0d7972f57edb>  Thu, 10 Feb 2022 22:45:16 +0000\n\ntron (1.16.3) bionic; urgency=medium\n\n  * 1.16.3 tagged with 'make release'\n    Commit: Respect ActionRunAdapter max_lines param (#862)  Callers can\n    request a specific number of lines for logs - before this change we\n    were ignoring that (except for the k8s metadata logs) and always\n    returning all the data available.\n\n -- root <root@9dc0edf8a8df>  Thu, 10 Feb 2022 19:46:18 +0000\n\ntron (1.16.2) bionic; urgency=medium\n\n  * 1.16.2 tagged with 'make release'\n    Commit: Merge pull request #861 from Yelp/u/kawaiwan/always-restart-\n    trond  Always restart trond if it goes down\n\n -- root <root@1f8824f30d27>  Thu, 03 Feb 2022 19:20:28 +0000\n\ntron (1.16.1) xenial; urgency=medium\n\n  * 1.16.1 tagged with 'make release'\n    Commit: Silence yelp_clog.StreamTailer logs + k8s event guard (#855)\n    This should cleanup any non-fatal exceptions that we see in Tron\n    logs.\n\n -- root <root@87fe59aafb66>  Tue, 07 Dec 2021 19:39:33 +0000\n\ntron (1.16.0) xenial; urgency=medium\n\n  * 1.16.0 tagged with 'make release'\n    Commit: Allow users to use runid for triggers (#853)  Some users\n    have jobs whose output depends on the output of the previous run -\n    for critical jobs, it's useful to explicitly declare this dependency\n    so that these jobs never run unless Tron knows that the previous\n    output is ready/complete (using successful completion as a proxy for\n    this)  For additional context:\n    https://yelp.slack.com/archives/CA4K8PBLG/p1638402768180500\n\n -- root <root@81bd90063f15>  Mon, 06 Dec 2021 19:06:11 +0000\n\ntron (1.15.0) xenial; urgency=medium\n\n  * 1.15.0 tagged with 'make release'\n    Commit: Add Service Account support for k8s jobs (#852)  We'll use\n    this for Pod Identity - there's a webhook that will inject a secret\n    token + some environment variables if a Pod has a Service Account\n    set up.  
paasta-tools will be in charge of creating the Service\n    Account that we'll use if it doesn't exist (as well as setting up\n    the annotations on that Service Account).\n\n -- root <root@6959115743ac>  Fri, 03 Dec 2021 20:41:48 +0000\n\ntron (1.14.5) xenial; urgency=medium\n\n  * 1.14.5 tagged with 'make release'\n    Commit: Allow passing Pod annotations to task_processing (#850)\n    We'll need this to toggle certain behaviors (e.g., disable Pods\n    getting routable IPs internally).\n\n -- root <root@2239f09987e7>  Thu, 28 Oct 2021 18:11:55 +0000\n\ntron (1.14.4) xenial; urgency=medium\n\n  * 1.14.4 tagged with 'make release'\n    Commit: Update scribereader to v0.14.1 (#847)  v0.14.0 has a bug\n    that points to the wrong hosts (corpdev uses dev infra, not its own)\n\n -- root <root@b36588a41db7>  Wed, 06 Oct 2021 19:44:17 +0000\n\ntron (1.14.3) xenial; urgency=medium\n\n  * 1.14.3 tagged with 'make release'\n    Commit: Upgrade scribereader so that we can read logs in corpdev\n    (#846)  We duplicate the ecosystems that we can point scribereader\n    at in the package and that list was missing corpdev  This causes a\n    KeyError when trying to get the tailer host:port tuple for tron in\n    corpdev\n\n -- root <root@f3aeb35abc97>  Fri, 01 Oct 2021 19:57:14 +0000\n\ntron (1.14.2) xenial; urgency=medium\n\n  * 1.14.2 tagged with 'make release'\n    Commit: Support passing Pod labels to task_processing (#841)  We'll\n    need this to set labels required by the PaaSTA Workload Contract\n    (which is needed to ensure that internal tooling will work as\n    expected with our Pods)\n\n -- root <root@f3474d567429>  Fri, 10 Sep 2021 17:32:09 +0000\n\ntron (1.14.1) xenial; urgency=medium\n\n  * 1.14.1 tagged with 'make release'\n    Commit: Show duration in ActionRun history table (#840)  This was\n    missed when this view was initially created and is helpful for users\n    to determine how run times for a given action are changing over time\n    at a glance.\n\n -- root <root@5cd3c8fa4dd6>  Thu, 02 Sep 2021 20:42:19 +0000\n\ntron (1.14.0) xenial; urgency=medium\n\n  * 1.14.0 tagged with 'make release'\n    Commit: Support node selectors/affinity for k8s tasks (#837)  This\n    additionally adds some error handling when things fail any\n    invariants for a KubernetesTaskConfig - previously, an\n    InvariantException meant that we would never realize that a\n    KubernetesTaskConfig was invalid and that the ActionRun would never\n    work.\n\n -- root <root@126e6e1cddcf>  Thu, 26 Aug 2021 16:00:51 +0000\n\ntron (1.13.2) xenial; urgency=medium\n\n  * 1.13.2 tagged with 'make release'\n    Commit: Use start and end times from ActionRunAttempts (#839)  It\n    turns out that we reset the start time for an ActionRun on any\n    retries, so let's use the start time from the first attempt and the\n    end time from the last attempt to figure out what timespan to ask\n    for logs from scribereader\n\n -- root <root@302b18891663>  Tue, 24 Aug 2021 17:55:59 +0000\n\ntron (1.13.1) xenial; urgency=medium\n\n  * 1.13.1 tagged with 'make release'\n    Commit: Merge pull request #836 from Yelp/jfong/TRON-1658-\n    kubernetesactionrunfromstate  TRON-1658: Fix issue restoring\n    KubernetesActionRuns\n\n -- root <root@ac0cf0844867>  Wed, 18 Aug 2021 17:28:33 +0000\n\ntron (1.13.0) xenial; urgency=medium\n\n  * 1.13.0 tagged with 'make release'\n    Commit: Merge pull request #830 from Yelp/u/kawaiwan/automate-\n    backfills  Make tronctl backfill actually run backfills\n\n -- root 
<root@e0578145385c>  Tue, 17 Aug 2021 16:38:44 +0000\n\ntron (1.12.0) xenial; urgency=medium\n\n  * 1.12.0 tagged with 'make release'\n    Commit: Actually kill k8s Pods on KubernetesActionRun::kill() (#828)\n    We had left this method stubbed out until we implemented kill() in\n    task_processing, but now we can actually finish implementation here.\n\n -- root <root@1aef58341f12>  Mon, 16 Aug 2021 22:03:29 +0000\n\ntron (1.11.0) xenial; urgency=medium\n\n  * 1.11.0 tagged with 'make release'\n    Commit: Support adding/dropping capabilities (#827)  We'll need this\n    for parity with the Mesos implementation and because it's a good\n    security practice :p\n\n -- root <root@d986f96cb5d1>  Tue, 03 Aug 2021 19:32:46 +0000\n\ntron (1.10.3) xenial; urgency=medium\n\n  * 1.10.3 tagged with 'make release'\n    Commit: Merge pull request #824 from Yelp/u/kawaiwan/turn-on-\n    taskproc-metrics  Install yelp-meteorite when building yelp env\n\n -- root <root@63ec31ec3c18>  Mon, 02 Aug 2021 20:22:57 +0000\n\ntron (1.10.2) xenial; urgency=medium\n\n  * 1.10.2 tagged with 'make release'\n    Commit: Use un-typo'd failed platform_type for k8s events (#825)\n    see https://github.com/Yelp/task_processing/pull/173 :p\n\n -- root <root@931eacd3331e>  Mon, 02 Aug 2021 18:56:46 +0000\n\ntron (1.10.1) xenial; urgency=medium\n\n  * 1.10.1 tagged with 'make release'\n    Commit: Merge pull request #822 from Yelp/jfong/TRON-1627-\n    secret_env_to_taskproc  TRON-1627: Pass secret_env to taskproc from\n    kubernetes\n\n -- root <root@e151da5deeea>  Wed, 28 Jul 2021 22:43:21 +0000\n\ntron (1.10.0) xenial; urgency=medium\n\n  * 1.10.0 tagged with 'make release'\n    Commit: Enable submitting a task to k8s using task_proc (#818)  *\n    Enable submitting a task to k8s using task_proc  We aren't yet\n    reading any of the events that task_proc is bubbling back up to us,\n    but that'll come next.  With this, we should be giving task_proc all\n    the information it needs to actually create a usable Pod.\n\n -- root <root@346e4b8d8ad9>  Tue, 27 Jul 2021 20:24:40 +0000\n\ntron (1.9.1) xenial; urgency=medium\n\n  * 1.9.1 tagged with 'make release'\n    Commit: Release v1.9.0\n\n -- root <root@b17d2081ca9e>  Mon, 26 Jul 2021 19:30:29 +0000\n\ntron (1.8.1) xenial; urgency=medium\n\n  * 1.8.1 tagged with 'make release'\n    Commit: Add black for formatting + some additional linters (#805)\n    We don't have any automatically enforced formatting (yapf is\n    optional and not part of our pre-commit), so let's use black since\n    that seems to be what we've settled on.  Additionally, I've added\n    some other 'standard' pre-commit hooks (as well as reformatted the\n    pre-commit config file.)\n\n -- root <root@4602d2a09117>  Thu, 10 Jun 2021 18:52:48 +0000\n\ntron (1.8.0) xenial; urgency=medium\n\n  * 1.8.0 tagged with 'make release'\n    Commit: Add initial k8s config + global k8s toggle + job opt-out\n    (#802)  We'll be adding two toggles to control Tron's usage of k8s:\n    * a global killswitch (k8s_options['enabled']) for when we want to\n    go   back to Mesos for an entire cluster (and, in the future, for\n    when we   want to quickly stop all Tronjobs). * a per-job opt-out ()\n    for any jobs that encounter issues with   k8s or that we want to\n    migrate at a specific time.  
Since I was adding a k8s config section\n    to the Tron master config, I also went ahead and added a way to\n    configure what the k8s API address should be (i.e., the 'master'\n    address).\n\n -- root <root@74d959c3dc13>  Wed, 02 Jun 2021 16:01:28 +0000\n\ntron (1.7.0) xenial; urgency=medium\n\n  * 1.7.0 tagged with 'make release'\n    Commit: Implementation of skip-and-publish (#789)  Skipping an\n    action that has downstream triggers is error-prone as it's easy to\n    forget that you have triggers to emit and what triggers to emit. You\n    probably need to go check your tronfig definitions to see what\n    triggers exist and converting the trigger name to a trigger command\n    can be a little tricky (with date magic).  To solve this, we add a\n    new tronctl command that will skip an action and then publish all\n    triggers for that action.\n\n -- root <root@ac177ec48467>  Thu, 15 Apr 2021 17:15:50 +0000\n\ntron (1.6.1) xenial; urgency=medium\n\n  * 1.6.1 tagged with 'make release'\n    Commit: Merge pull request #780 from Yelp/TRON-1570-remove-old-state\n    Stop saving command config and fields replaced by command config and\n    …\n\n -- root <root@413c12b9e90f>  Fri, 30 Oct 2020 16:39:41 +0000\n\ntron (1.6.0) xenial; urgency=medium\n\n  * 1.6.0 tagged with 'make release'\n    Commit: Merge pull request #778 from Yelp/TRON-1161-retry-configs\n    TRON-1161: update configs for retries\n\n -- root <root@d2aa77443876>  Wed, 07 Oct 2020 20:23:20 +0000\n\ntron (1.5.1) xenial; urgency=medium\n\n  * 1.5.1 tagged with 'make release'\n    Commit: Merge pull request #777 from Yelp/only-reconfigure-namespace\n    Only reconfigure jobs in that namespace when a namespace is updated\n\n -- root <root@9dd8ddacff60>  Thu, 01 Oct 2020 18:18:18 +0000\n\ntron (1.5.0) xenial; urgency=medium\n\n  * 1.5.0 tagged with 'make release'\n    Commit: Merge pull request #775 from Yelp/TRON-1563-retries-separate\n    TRON-1563: save state and configs for retries independently\n\n -- root <root@a4d896662a63>  Tue, 15 Sep 2020 00:33:05 +0000\n\ntron (1.4.6) xenial; urgency=medium\n\n  * 1.4.6 tagged with 'make release'\n    Commit: Merge pull request #774 from Yelp/drmorr/TRON-\n    1566/redact_aws_keys  redact AWS credentials from the logs\n\n -- root <root@60f4e0ad0a16>  Wed, 09 Sep 2020 17:44:42 +0000\n\ntron (1.4.5) xenial; urgency=medium\n\n  * 1.4.5 tagged with 'make release'\n    Commit: Merge pull request #773 from Yelp/TRON-1564-allow-check-oom-\n    events  Skip check_oom_events key from monitoring, which is only\n    used by paasta\n\n -- root <root@0e092e6866b4>  Thu, 03 Sep 2020 17:30:12 +0000\n\ntron (1.4.4) xenial; urgency=medium\n\n  * 1.4.4 tagged with 'make release'\n    Commit: Merge pull request #769 from Yelp/try-react  Prototype React\n    version of tronweb\n\n -- root <root@a921747b1922>  Fri, 28 Aug 2020 18:20:16 +0000\n\ntron (1.4.3) xenial; urgency=medium\n\n  * 1.4.3 tagged with 'make release'\n    Commit: Merge pull request #770 from Yelp/TRON-1561-no-overlap-alert-\n    if-queueing  Do not alert for overlapping runs if queueing is\n    disabled\n\n -- root <root@afcdad7311e7>  Wed, 26 Aug 2020 21:02:42 +0000\n\ntron (1.4.2) xenial; urgency=medium\n\n  * 1.4.2 tagged with 'make release'\n    Commit: Merge pull request #765 from Yelp/drmorr/TRON-\n    1554/fix_pypi_url  don't use public pypi for internal builds\n\n -- root <root@ae9f03491631>  Tue, 04 Aug 2020 18:23:42 +0000\n\ntron (1.4.1) xenial; urgency=medium\n\n  * 1.4.1 tagged with 'make release'\n    Commit: v1.4.1\n\n 
-- root <root@f04de09d1fa0>  Thu, 30 Jul 2020 23:56:44 +0000\n\ntron (1.4.0) xenial; urgency=medium\n\n  * 1.4.0 tagged with 'make release'\n    Commit: Merge branch 'TRON-1527-separate-job-run-state'\n\n -- root <root@faa832e0502e>  Tue, 28 Jul 2020 22:24:08 +0000\n\ntron (1.3.15) xenial; urgency=medium\n\n  * 1.3.15 tagged with 'make release'\n    Commit: Merge pull request #757 from Yelp/dedup-save-queue  De-\n    duplicate items in the DynamoDB save queue\n\n -- root <root@5a7c409e5d89>  Thu, 23 Jul 2020 20:01:54 +0000\n\ntron (1.3.14) xenial; urgency=medium\n\n  * 1.3.14 tagged with 'make release'\n    Commit: Merge pull request #756 from Yelp/TRON-1539-dynamo-metrics\n    Tron 1539 dynamo metrics\n\n -- root <root@07c5254e0dd9>  Wed, 01 Jul 2020 21:26:52 +0000\n\ntron (1.3.13) xenial; urgency=medium\n\n  * 1.3.13 tagged with 'make release'\n    Commit: Merge pull request #755 from Yelp/drmorr/COMPINFRA-\n    333/bump_requirements  Bump requirements to pick up new task_proc\n    version\n\n -- root <root@d26de9c44c56>  Wed, 06 May 2020 22:55:55 +0000\n\ntron (1.3.12) xenial; urgency=medium\n\n  * 1.3.12 tagged with 'make release'\n    Commit: Merge pull request #754 from Yelp/u/dpopes/TRON-1531-\n    keyboard-interactive-suppress-tty-if-no-prompt  TRON-1531: Suppress\n    the need to prompt the user via tty\n\n -- root <root@c1df7d685229>  Tue, 05 May 2020 18:26:53 +0000\n\ntron (1.3.11) xenial; urgency=medium\n\n  * 1.3.11 tagged with 'make release'\n    Commit: Merge pull request #753 from Yelp/u/dpopes/SEC-12778-support-\n    keyboard-interactive-ssh  Support keyboard-interactive as an\n    authentication method for ssh\n\n -- root <root@9879ae11933f>  Mon, 27 Apr 2020 18:01:41 +0000\n\ntron (1.3.10) xenial; urgency=medium\n\n  * 1.3.10 tagged with 'make release'\n    Commit: block new saves if save queue is too big\n\n -- root <root@7fe1902868f5>  Tue, 14 Apr 2020 16:51:46 +0000\n\ntron (1.3.9) xenial; urgency=medium\n\n  * 1.3.9 tagged with 'make release'\n    Commit: fix tests, fix issue where stopping condition can't be\n    reached\n\n -- root <root@17ff16d29c37>  Tue, 14 Apr 2020 14:46:43 +0000\n\ntron (1.3.8) xenial; urgency=medium\n\n  * 1.3.8 tagged with 'make release'\n    Commit: consume save queue in predefined chunks, count errors\n    correctly\n\n -- root <root@56162b61acd9>  Tue, 14 Apr 2020 14:07:12 +0000\n\ntron (1.3.7) xenial; urgency=medium\n\n  * 1.3.7 tagged with 'make release'\n    Commit: Merge pull request #749 from Yelp/u/maksym/fix-redirect  Fix\n    redirect, comment out stuff in tronrepl\n\n -- root <root@f48cdfb768ba>  Wed, 08 Apr 2020 23:17:14 +0000\n\ntron (1.3.6) xenial; urgency=medium\n\n  * 1.3.6 tagged with 'make release'\n    Commit: Merge pull request #745 from Yelp/dependabot/pip/psutil-\n    5.6.6  Bump psutil from 5.6.3 to 5.6.6\n\n -- root <root@c1e0136e3477>  Thu, 02 Apr 2020 23:56:54 +0000\n\ntron (1.3.5) xenial; urgency=medium\n\n  * 1.3.5 tagged with 'make release'\n    Commit: Merge pull request #744 from Yelp/mbehrens-TRON-1144-add-\n    uptime-version  Add tron version and uptime to frontend navbar\n\n -- root <root@d1e49d6d57c9>  Mon, 16 Mar 2020 17:38:19 +0000\n\ntron (1.3.4) xenial; urgency=medium\n\n  * 1.3.4 tagged with 'make release'\n    Commit: Merge pull request #741 from Yelp/fix-tz-forward  Fix bug\n    from fall forward last year\n\n -- root <root@2b5268deb6de>  Fri, 06 Mar 2020 18:59:59 +0000\n\ntron (1.3.3) xenial; urgency=medium\n\n  * 1.3.3 tagged with 'make release'\n    Commit: Merge pull request #737 from 
Yelp/u/siruitan/TRON-385-\n    skip_validate_in_write_config  Skip job graph validation in\n    write_config\n\n -- root <root@8b441532ebab>  Thu, 06 Feb 2020 18:59:31 +0000\n\ntron (1.3.2) xenial; urgency=medium\n\n  * 1.3.2 tagged with 'make release'\n    Commit: Merge branch 'drmorr/TRON-\n    1369/fixing_the_clusterman_invocation'\n\n -- root <root@d12c97f27fb7>  Wed, 05 Feb 2020 23:32:42 +0000\n\ntron (1.3.1) xenial; urgency=medium\n\n  * 1.3.1 tagged with 'make release'\n    Commit: Merge pull request #735 from Yelp/drmorr/TRON-\n    1454/shorten_tron_directory_names  tron output dir names shortened\n\n -- root <root@24a5021f338d>  Tue, 04 Feb 2020 18:35:32 +0000\n\ntron (1.3.0) xenial; urgency=medium\n\n  * 1.3.0 tagged with 'make release'\n    Commit: Merge branch 'drmorr/TRON-1369/clusterman_env_var'\n\n -- root <root@bd7d68bcdb7a>  Thu, 16 Jan 2020 18:59:08 +0000\n\ntron (1.2.5) xenial; urgency=medium\n\n  * 1.2.5 tagged with 'make release'\n    Commit: Merge branch 'u/kawaiwan/bump-task-proc-to-0.1.8'\n\n -- root <root@308ec6e1aa22>  Tue, 12 Nov 2019 01:01:46 +0000\n\ntron (1.2.4) xenial; urgency=medium\n\n  * 1.2.4 tagged with 'make release'\n    Commit: Merge pull request #723 from Yelp/drmorr/TRON-\n    1329/fix_time_interval_construction  fix time interval construction\n    in check_tron_jobs\n\n -- root <root@1a1c533ec7a5>  Wed, 06 Nov 2019 21:56:14 +0000\n\ntron (1.2.3) xenial; urgency=medium\n\n  * 1.2.3 tagged with 'make release'\n    Commit: Merge pull request #717 from Yelp/monitoring-backfill-\n    buckets  Try to handle backfills better in monitoring precious runs\n\n -- root <root@9bc878dfcb57>  Tue, 15 Oct 2019 23:27:13 +0000\n\ntron (1.2.2) xenial; urgency=medium\n\n  * 1.2.2 tagged with 'make release'\n    Commit: Merge branch 'drmorr/TRON-\n    1303/action_runner_sets_env_variables'\n\n -- root <root@b45f42581c03>  Mon, 14 Oct 2019 19:47:54 +0000\n\ntron (1.2.1) xenial; urgency=medium\n\n  * 1.2.1 tagged with 'make release'\n    Commit: Merge pull request #716 from Yelp/retry-check-state  Do not\n    auto retry action if state is already succeeded\n\n -- root <root@bf4306e0c13c>  Wed, 09 Oct 2019 16:57:32 +0000\n\ntron (1.2.0) xenial; urgency=medium\n\n  * 1.2.0 tagged with 'make release'\n    Commit: Merge pull request #715 from Yelp/auto-recovery\n    Automatically try to recover on unknown ssh actions\n\n -- root <root@2ceaed9bdb6b>  Wed, 09 Oct 2019 00:17:59 +0000\n\ntron (1.1.0) xenial; urgency=medium\n\n  * 1.1.0 tagged with 'make release'\n    Commit: Merge pull request #714 from Yelp/recover-command  Add\n    manual recovery command to api\n\n -- root <root@badb29026ca2>  Thu, 03 Oct 2019 16:54:12 +0000\n\ntron (1.0.4) xenial; urgency=medium\n\n  * 1.0.4 tagged with 'make release'\n    Commit: Merge pull request #708 from Yelp/recovery-check-finished\n    Put back check for action runners that have completed in recovery\n\n -- root <root@1a6c525a4bb8>  Tue, 24 Sep 2019 00:50:16 +0000\n\ntron (1.0.3) xenial; urgency=medium\n\n  * 1.0.3 tagged with 'make release'\n    Commit: Merge pull request #706 from\n    Yelp/fix_check_tron_jobs_job_run_ids  fix job run ids reported by\n    check_tron_jobs\n\n -- root <root@9d73659bb267>  Thu, 19 Sep 2019 21:56:23 +0000\n\ntron (1.0.2) xenial; urgency=medium\n\n  * 1.0.2 tagged with 'make release'\n    Commit: Don't do a retry_delay when manually retrying a failed\n    command. 
(#704)\n\n -- root <root@de1d559b7b81>  Wed, 18 Sep 2019 20:28:54 +0000\n\ntron (1.0.1) xenial; urgency=medium\n\n  * 1.0.1 tagged with 'make release'\n    Commit: Merge pull request #692 from Yelp/taskproc_bump  Bump\n    taskproc to handle weird staging offers\n\n -- root <root@f6149bf8cd6c>  Wed, 07 Aug 2019 13:37:07 +0000\n\ntron (1.0.0) xenial; urgency=medium\n\n  * 1.0.0 tagged with 'make release'\n    Commit: Released 0.9.14.6 via make release\n\n -- root <root@0be694a64085>  Fri, 19 Jul 2019 23:23:34 +0000\n\ntron (0.9.14.6) xenial; urgency=medium\n\n  * 0.9.14.6 tagged with 'make release'\n    Commit: Merge pull request #688 from Yelp/sort_order_fixes  Make\n    string sorting only in ActionRun displaying.\n\n -- root <root@2cdb1ed0c9dc>  Tue, 16 Jul 2019 21:46:48 +0000\n\ntron (0.9.14.5) xenial; urgency=medium\n\n  * 0.9.14.5 tagged with 'make release'\n    Commit: Released 0.9.14.4 via make release\n\n -- root <root@aeda6aa81577>  Tue, 16 Jul 2019 18:23:28 +0000\n\ntron (0.9.14.4) xenial; urgency=medium\n\n  * 0.9.14.4 tagged with 'make release'\n    Commit: Merge pull request #687 from Yelp/fix_tron_sorting  Fix\n    sorting for DisplayActionRuns when fields are None\n\n -- root <root@73cada4088c5>  Tue, 16 Jul 2019 18:21:47 +0000\n\ntron (0.9.14.4) xenial; urgency=medium\n\n  * 0.9.14.4 tagged with 'make release'\n    Commit: Merge pull request #682 from Yelp/stderr_fix  Fix stderr\n    output on check_tron_jobs\n\n -- root <root@88db47311341>  Fri, 12 Jul 2019 16:56:55 +0000\n\ntron (0.9.14.3) xenial; urgency=medium\n\n  * 0.9.14.3 tagged with 'make release'\n    Commit: Merge pull request #681 from Yelp/refix-scheduling  Use last\n    run time to get next run time before scheduling\n\n -- root <root@e6eae9e0f565>  Thu, 11 Jul 2019 18:11:28 +0000\n\ntron (0.9.14.2) xenial; urgency=medium\n\n  * 0.9.14.2 tagged with 'make release'\n    Commit: Merge pull request #678 from Yelp/ssh-timeouts  Improve\n    logging for deferred errors and apply timeout config to trans…\n\n -- root <root@182fad930a3e>  Fri, 05 Jul 2019 20:32:44 +0000\n\ntron (0.9.14.1) xenial; urgency=medium\n\n  * 0.9.14.1 tagged with 'make release'\n    Commit: Merge branch 'master' of github.com:Yelp/Tron\n\n -- root <root@c66730966e5d>  Mon, 03 Jun 2019 19:01:33 +0000\n\ntron (0.9.14.0) xenial; urgency=medium\n\n  * 0.9.14.0 tagged with 'make release'\n    Commit: Merge pull request #668 from Yelp/another-waiting-fix  Runs\n    with triggers should start in scheduled state\n\n -- root <root@714976665f09>  Thu, 16 May 2019 17:30:06 +0000\n\ntron (0.9.13.4) xenial; urgency=medium\n\n  * 0.9.13.4 tagged with 'make release'\n    Commit: Merge pull request #667 from Yelp/revert-665-revert-654-\n    u/mingqiz/disable_shelvestore  removed mirror store\n\n -- root <root@c5eb8fe59bbf>  Tue, 14 May 2019 23:09:44 +0000\n\ntron (0.9.13.3) xenial; urgency=medium\n\n  * 0.9.13.3 tagged with 'make release'\n    Commit: Merge pull request #666 from Yelp/u/kawaiwan/improve-\n    recovery  Make recovery batch script also check if action runner\n    suddenly goes away\n\n -- root <root@1a4df1f9e377>  Tue, 14 May 2019 00:08:31 +0000\n\ntron (0.9.13.2) xenial; urgency=medium\n\n  * 0.9.13.2 tagged with 'make release'\n    Commit: fix guess_realert when next_run is the same as previous_run\n    (#658)  fix guess_realert when next_run is the same as previous_run\n\n -- root <root@b58ae690f313>  Thu, 02 May 2019 17:48:01 +0000\n\ntron (0.9.13.1) xenial; urgency=medium\n\n  * 0.9.13.1 tagged with 'make release'\n    Commit: Merge pull 
request #656 from Yelp/u/mingqiz/fix_dynamodb_bug\n    Changed dynamodb partition index to int so it is sorted correctly\n\n -- root <root@05c17c2d745e>  Tue, 30 Apr 2019 20:05:19 +0000\n\ntron (0.9.13.0) xenial; urgency=medium\n\n  * 0.9.13.0 tagged with 'make release'\n    Commit: Merge pull request #649 from Yelp/drmorr/TRON-\n    1095/add_duration_to_tronweb  duration field added to tronweb\n\n -- root <root@d03d1d3762a0>  Mon, 22 Apr 2019 23:16:52 +0000\n\ntron (0.9.12.6) xenial; urgency=medium\n\n  * 0.9.12.6 tagged with 'make release'\n    Commit: Merge pull request #648 from Yelp/u/mingqiz/TRON-1088-bin\n    Removed interval scheduler\n\n -- root <root@265b3f04ba55>  Fri, 19 Apr 2019 18:11:18 +0000\n\ntron (0.9.12.5) xenial; urgency=medium\n\n  * 0.9.12.5 tagged with 'make release'\n    Commit: Merge branch 'drmorr/TRON-605/fix_monitoring_for_stuck_jobs'\n\n -- root <root@546a411b1e3e>  Mon, 15 Apr 2019 21:02:28 +0000\n\ntron (0.9.12.4) xenial; urgency=medium\n\n  * 0.9.12.4 tagged with 'make release'\n    Commit: Merge pull request #640 from Yelp/u/mingqiz/TRON-638-\n    improve_read_speed  Improve dynamodb read/write speed\n\n -- root <root@0e05cd00bd1c>  Wed, 10 Apr 2019 22:59:47 +0000\n\ntron (0.9.12.3) xenial; urgency=medium\n\n  * 0.9.12.3 tagged with 'make release'\n    Commit: Merge pull request #639 from Yelp/u/mingqiz/TRON-1041-\n    open_file_limit  changed max number of open files to 10000\n\n -- root <root@8634a3efac61>  Wed, 03 Apr 2019 00:39:28 +0000\n\ntron (0.9.12.2) xenial; urgency=medium\n\n  * 0.9.12.2 tagged with 'make release'\n    Commit: tron/__init__.py\n\n -- root <root@f0ee26eb2d46>  Tue, 02 Apr 2019 18:10:28 +0000\n\ntron (0.9.12.1) xenial; urgency=medium\n\n  * 0.9.12.1 tagged with 'make release'\n    Commit: Merge pull request #631 from Yelp/u/mingqiz/TRON-638-\n    fix_validation  fixed data validation of dynamodb migration\n\n -- root <root@62efe760e39e>  Fri, 22 Mar 2019 18:06:11 +0000\n\ntron (0.9.12.0) xenial; urgency=medium\n\n  * 0.9.12.0 tagged with 'make release'\n    Commit: Merge branch 'drmorr/TRON-390/xjob_dep_viz'\n\n -- root <root@778dfeb4d872>  Wed, 13 Mar 2019 18:39:40 +0000\n\ntron (0.9.11.1) xenial; urgency=medium\n\n  * 0.9.11.1 tagged with 'make release'\n    Commit: Merge pull request #628 from Yelp/u/mingqiz/TRON-661  Fixed\n    trusty build\n\n -- root <root@5270842b166b>  Tue, 12 Mar 2019 00:32:40 +0000\n\ntron (0.9.11.0) xenial; urgency=medium\n\n  * 0.9.11.0 tagged with 'make release'\n    Commit: Merge pull request #617 from Yelp/u/mingqiz/TRON-638-\n    migration  Migrating from Berkley DB to DynamoDB (TRON-638)\n\n -- root <root@24e1b7483b5b>  Fri, 08 Mar 2019 17:27:53 +0000\n\ntron (0.9.10.0) xenial; urgency=medium\n\n  * 0.9.10.0 tagged with 'make release'\n    Commit: Merge pull request #622 from Yelp/waiting-state  Waiting\n    state if an action is waiting for normal or cross-job dependencies\n\n -- root <root@fbc31cafd48e>  Mon, 04 Mar 2019 19:48:20 +0000\n\ntron (0.9.9.15) xenial; urgency=medium\n\n  * 0.9.9.15 tagged with 'make release'\n    Commit: Merge pull request #624 from Yelp/add-waiting-state  Add\n    waiting state first for rollback safety\n\n -- root <root@4aecdcf190e2>  Mon, 04 Mar 2019 17:53:07 +0000\n\ntron (0.9.9.14) trusty; urgency=medium\n\n  * 0.9.9.14 tagged with 'make release'\n    Commit: Merge pull request #615 from Yelp/fix-check-job-duration\n    Ignore duration=None for jobs waiting on external dependency\n\n -- root <root@0651e12fe496>  Fri, 08 Feb 2019 12:31:54 +0000\n\ntron (0.9.9.13) 
trusty; urgency=medium\n\n  * 0.9.9.13 tagged with 'make release'\n    Commit: fix disk=none error when restoring from state\n\n -- root <root@347a7c4195b5>  Thu, 07 Feb 2019 16:18:18 +0000\n\ntron (0.9.9.12) trusty; urgency=medium\n\n  * 0.9.9.12 tagged with 'make release'\n    Commit: Merge pull request #613 from Yelp/fix-jobrun-state-proxy\n    Fix inconsistency between JobRun state attribute and is_<state>\n    checks\n\n -- root <root@a8f427620200>  Thu, 07 Feb 2019 15:14:29 +0000\n\ntron (0.9.9.11) trusty; urgency=medium\n\n  * 0.9.9.11 tagged with 'make release'\n    Commit: Merge pull request #610 from Yelp/disk_support  Added disk\n    support to tron on mesos\n\n -- root <root@15e900d9051e>  Mon, 04 Feb 2019 18:50:33 +0000\n\ntron (0.9.9.10) trusty; urgency=medium\n\n  * 0.9.9.10 tagged with 'make release'\n    Commit: Merge pull request #611 from Yelp/fix-job-appearing-pending\n    Fix job appearing pending when waiting on trigger requirement\n\n -- root <root@1158d4960025>  Thu, 31 Jan 2019 11:44:45 +0000\n\ntron (0.9.9.9) trusty; urgency=medium\n\n  * 0.9.9.9 tagged with 'make release'\n    Commit: tron.config.schema: use enum types values explicitly to fix\n    regression after migrating to native enums\n\n -- root <root@721323e97d41>  Mon, 07 Jan 2019 13:46:43 +0000\n\ntron (0.9.9.8) trusty; urgency=medium\n\n  * 0.9.9.8 tagged with 'make release'\n    Commit: Merge pull request #603 from Yelp/recovery-none-action-runs\n    Skip job runs with no action runs during recovery\n\n -- root <root@59681c79eb69>  Thu, 20 Dec 2018 19:19:02 +0000\n\ntron (0.9.9.7) trusty; urgency=medium\n\n  * 0.9.9.7 tagged with 'make release'\n  * Fix type bug in creating tasks during recovery\n  * Only recover Mesos actions with no end time\n\n -- root <root@c190ab3d065f>  Tue, 18 Dec 2018 19:27:22 +0000\n\ntron (0.9.9.6) trusty; urgency=medium\n\n  * 0.9.9.6 tagged with 'make release'\n  * Fixes for inactive framework\n\n -- root <root@1ade25b89bcb>  Thu, 06 Dec 2018 00:59:04 +0000\n\ntron (0.9.9.5) trusty; urgency=medium\n\n  * 0.9.9.5 tagged with 'make release'\n  * Allow passing extra options to systemd-based distros.\n  * Update end times for actions\n  * Fix bug related to Mesos offer timeouts\n  * Do not reschedule disabled job if it is reconfigured\n\n -- root <root@ca07d1ca6c45>  Tue, 04 Dec 2018 21:38:20 +0000\n\ntron (0.9.9.4) trusty; urgency=medium\n\n  * 0.9.9.4 tagged with 'make release'\n    - Fix signal handling so that Tron gracefully shuts down\n    - Add more information to log lines\n\n -- root <root@ec5f977bc63f>  Mon, 19 Nov 2018 23:59:43 +0000\n\ntron (0.9.9.3) trusty; urgency=medium\n\n  * 0.9.9.3 tagged with 'make release'\n    Commit: Merge pull request #587 from Yelp/TRON-536  Removed spaces\n    in action run html\n\n -- root <root@df98fb91b710>  Thu, 15 Nov 2018 00:20:45 +0000\n\ntron (0.9.9.2) trusty; urgency=medium\n\n  * 0.9.9.2 tagged with 'make release'\n    Commit: Merge pull request #583 from Yelp/u/kawaiwan/add-cluster-to-\n    tron-metrics  Add cluster option to metrics script\n\n -- root <root@56ea502bead2>  Mon, 12 Nov 2018 18:25:34 +0000\n\ntron (0.9.9.1) trusty; urgency=medium\n\n  * 0.9.9.1 tagged with 'make release'\n    Commit: modify action_id during job migration (#580)  * modify\n    action_id during job migration  * pin pre-commit 1.11.2\n\n -- root <root@fe8465f9b037>  Tue, 30 Oct 2018 21:42:21 +0000\n\ntron (0.9.9.0) trusty; urgency=medium\n\n  * 0.9.9.0 tagged with 'make release'\n  * Tronctl publish/discard events\n  * Update DST fall back 
behavior to be consistent\n  * Update requirements\n  * Add script for fetching metrics\n\n -- root <root@0dab49ce8592>  Tue, 30 Oct 2018 17:54:04 +0000\n\ntron (0.9.8.4) trusty; urgency=medium\n\n  * 0.9.8.4 tagged with 'make release'\n    Commit: Revert 'Merge pull request #570 from Yelp/pin-requirements'\n    This reverts commit 33fb19df2d2ec508d254f209c1515bbffabc1523,\n    reversing changes made to b3c7ab8ae8ffa2f7a08e275175989ea5d8d54a8a.\n\n -- root <root@073ed3171e5d>  Fri, 26 Oct 2018 21:36:49 +0000\n\ntron (0.9.8.3) trusty; urgency=medium\n\n  * 0.9.8.3 tagged with 'make release'\n    Commit: Merge pull request #576 from Yelp/yapf-it  yapf -rip\n\n -- root <root@ee73a07491c1>  Fri, 26 Oct 2018 17:16:09 +0000\n\ntron (0.9.8.2) trusty; urgency=medium\n\n  * 0.9.8.2 tagged with 'make release'\n    Commit: Merge pull request #576 from Yelp/yapf-it  yapf -rip\n\n -- root <root@7405d22d2ed3>  Fri, 26 Oct 2018 17:14:02 +0000\n\ntron (0.9.8.2) trusty; urgency=medium\n\n  * 0.9.8.2 tagged with 'make release'\n    Commit: Merge pull request #574 from\n    Yelp/u/chl/rename_all_job_name_after_migration  rename all job_name\n    after migration\n\n -- root <root@0e8e0abfa7bf>  Thu, 25 Oct 2018 22:38:54 +0000\n\ntron (0.9.8.1) trusty; urgency=medium\n\n  * 0.9.8.1 tagged with 'make release'\n    Commit: Merge pull request #569 from Yelp/signal-none  Don't log\n    other signals\n\n -- root <root@451d5f0d3ffd>  Tue, 23 Oct 2018 23:30:47 +0000\n\ntron (0.9.8.0) trusty; urgency=medium\n\n  * 0.9.8.0 tagged with 'make release'\n    Commit: Merge pull request #566 from Yelp/monitoring-allow-overlap\n    If runs are allowed to overlap, don't consider that case stuck\n\n -- root <root@7d14a7a2764c>  Tue, 23 Oct 2018 20:51:02 +0000\n\ntron (0.9.7.0) trusty; urgency=medium\n\n  * 0.9.7.0 tagged with 'make release'\n    Commit: fix typo in systemd unit file\n\n -- root <root@e8a748ec4fd2>  Wed, 17 Oct 2018 17:37:58 +0000\n\ntron (0.9.6.5) trusty; urgency=medium\n\n  * 0.9.6.5 tagged with 'make release'\n    Commit: Merge pull request #561 from Yelp/fix-dependent  Fix\n    actiongraph adapter failing to render dependent actions\n\n -- root <root@473767c09859>  Tue, 16 Oct 2018 17:37:45 +0000\n\ntron (0.9.6.4) trusty; urgency=medium\n\n  * 0.9.6.4 tagged with 'make release'\n    Commit: Released 0.9.6.3 via make release\n\n -- root <root@01f20dd7c1de>  Mon, 15 Oct 2018 18:27:07 +0000\n\ntron (0.9.6.3) trusty; urgency=medium\n\n  * 0.9.6.3 tagged with 'make release'\n    Commit: Merge pull request #555 from Yelp/u/chl/TRON-\n    442_create_migrate_job_tool  add tronctl move command\n\n -- root <root@ba6f1cb3e4b9>  Mon, 15 Oct 2018 17:52:20 +0000\n\ntron (0.9.6.2) trusty; urgency=medium\n\n  * 0.9.6.2 tagged with 'make release'\n    Commit: Merge pull request #548 from Yelp/skip-docs-diagram  Skip\n    state diagram in docs\n\n -- root <root@ee50de3acebd>  Tue, 02 Oct 2018 12:23:05 +0000\n\ntron (0.9.6.1) trusty; urgency=medium\n\n  * 0.9.6.1 tagged with 'make release'\n    Commit: fix format string bugs (#538)  * fix format string bugs\n\n -- root <root@3b2deac05b19>  Fri, 21 Sep 2018 23:46:13 +0000\n\ntron (0.9.6.0) trusty; urgency=medium\n\n  * 0.9.6.0 tagged with 'make release'\n  * Remove percent string support\n  * Fix bug with fail and retries\n  * Remove headers from tronfig\n  * Emit triggers for cross-job dependencies\n\n -- root <root@6267c3201084>  Thu, 20 Sep 2018 01:20:15 +0000\n\ntron (0.9.5.1) trusty; urgency=medium\n\n  * 0.9.5.1 tagged with 'make release'\n  * Update taskproc to 0.1.2 for 
Mesos fixes\n  * Save jobs when re-configured\n  * Fix tronweb CSS\n  * Make scheme optional for Mesos master address\n  * More feedback on killing Mesos actions\n\n -- root <root@df7a4a1e435e>  Mon, 10 Sep 2018 21:14:06 +0000\n\ntron (0.9.5.0) trusty; urgency=medium\n\n  * 0.9.5.0 tagged with 'make release'\n  * Increase upstart timeout\n  * Remove enableall/disableall from jobs controller\n  * Deprecate --nodaemon\n\n -- root <root@dd45ed9b4f6d>  Wed, 05 Sep 2018 22:04:23 +0000\n\ntron (0.9.4.0) trusty; urgency=medium\n\n  * 0.9.4.0 tagged with 'make release'\n  * add string format support (#490)\n  * recover Mesos action runs on restart\n  * run reactor on separate thread\n  * job trigger configs and eventbus\n\n -- root <root@a3969702bcb5>  Tue, 04 Sep 2018 22:23:25 +0000\n\ntron (0.9.3.0) trusty; urgency=medium\n\n  * 0.9.3.0 tagged with 'make release'\n    Commit: make master address optional (#513)\n\n -- root <root@bb6820bc1706>  Fri, 24 Aug 2018 22:42:36 +0000\n\ntron (0.9.2.1) trusty; urgency=medium\n\n  * 0.9.2.1 tagged with 'make release'\n    Commit: Merge pull request #508 from Yelp/nix_testify  Nix testify\n\n -- root <root@c42f36e81d9b>  Wed, 22 Aug 2018 10:53:38 +0000\n\ntron (0.9.2.0) trusty; urgency=medium\n\n  * 0.9.2.0 tagged with 'make release'\n    Commit: catch command rendering type error (#488)  * catch command\n    rendering type error\n\n -- root <root@76f71f6f3016>  Thu, 09 Aug 2018 20:46:45 +0000\n\ntron (0.9.1.9) trusty; urgency=medium\n\n  * 0.9.1.9 tagged with 'make release'\n    Commit: Merge pull request #483 from Yelp/retries-delay-kill\n    Retries delay: kill delayed action correctly\n\n -- root <root@230c51e7feb1>  Tue, 24 Jul 2018 11:00:12 +0000\n\ntron (0.9.1.8) trusty; urgency=medium\n\n  * 0.9.1.8 tagged with 'make release'\n  * fix _get_seconds_from_duration bug in monitoring\n\n -- root <root@7cb213fae950>  Tue, 10 Jul 2018 20:49:53 +0000\n\ntron (0.9.1.7) trusty; urgency=medium\n\n  * 0.9.1.7 tagged with 'make release'\n  * Make unknown alerts critical instead of warning\n  * Add manhole for debugging\n  * Fix validation of full tronfig directory\n\n -- root <root@4a9f80473a39>  Mon, 09 Jul 2018 17:35:38 +0000\n\ntron (0.9.1.6) trusty; urgency=medium\n\n  * 0.9.1.6 tagged with 'make release'\n  * Bug fixes\n  * Remove graceful shutdown\n  * Mesos: Add default volume configs, implement kill/stop commands\n\n -- root <root@7406049997a9>  Tue, 03 Jul 2018 17:08:51 +0000\n\ntron (0.9.1.5) trusty; urgency=medium\n\n  * 0.9.1.5 tagged with 'make release'\n    Commit: Merge pull request #470 from Yelp/fix-output-dir  Check\n    output dir first\n\n -- root <root@1b6e39a2b125>  Mon, 25 Jun 2018 19:27:00 +0000\n\ntron (0.9.1.4) trusty; urgency=medium\n\n  * 0.9.1.4 tagged with 'make release'\n    Commit: Merge pull request #469 from Yelp/u/robj/improve-action-\n    runner  action_runner logs to the output_dir; add timestamps to logs\n\n -- root <root@8d64f3affc91>  Mon, 25 Jun 2018 17:00:57 +0000\n\ntron (0.9.1.3) trusty; urgency=medium\n\n  * 0.9.1.3 tagged with 'make release'\n    Commit: Merge pull request #468 from Yelp/u/robj/keep-fs-reasders-\n    alive  handle failures streaming  to stdout/stderr\n\n -- root <root@0ab0eb2c4985>  Fri, 22 Jun 2018 17:21:54 +0000\n\ntron (0.9.1.2) trusty; urgency=medium\n\n  * 0.9.1.2 tagged with 'make release'\n    Commit: Merge pull request #462 from Yelp/missing-deps  add missing\n    requests and psutil deps\n\n -- root <root@c9cde1a47f3b>  Wed, 20 Jun 2018 17:56:19 +0000\n\ntron (0.9.1.1) trusty; 
urgency=medium\n\n  * 0.9.1.1 tagged with 'make release'\n    Commit: bump version\n\n -- root <root@7f9302ada75f>  Fri, 15 Jun 2018 09:34:03 +0000\n\ntron (0.9.1.0) trusty; urgency=medium\n\n  * 0.9.1.0 tagged with 'make release'\n    Commit: Merge pull request #452 from Yelp/mesos-logging  Get output\n    from Mesos tasks\n\n -- root <root@614825dc5b50>  Wed, 13 Jun 2018 13:48:43 +0000\n\ntron (0.9.0.0) trusty; urgency=medium\n\n  * 0.9.0.0 tagged with 'make release'\n    Commit: Merge pull request #451 from Yelp/fix-machine-state-during-\n    recovery  set the machine state to running before recovery\n\n -- root <root@7686eebd0261>  Tue, 05 Jun 2018 16:48:52 +0000\n\ntron (0.8.0.6) trusty; urgency=medium\n\n  * 0.8.0.6 tagged with 'make release'\n  * Support for expected runtime alerts\n  * Pre calculate state machine transitions\n  * Bug fixes\n\n -- root <root@71b346549edd>  Wed, 16 May 2018 18:31:23 +0000\n\ntron (0.8.0.5) trusty; urgency=medium\n\n  * 0.8.0.5 tagged with 'make release'\n    Commit: Merge pull request #435 from Yelp/encode-stdout\n    maybe_encode all data in the file serializer\n\n -- root <root@0ac7a8ef93a0>  Tue, 24 Apr 2018 02:46:08 +0000\n\ntron (0.8.0.4) trusty; urgency=medium\n\n  * 0.8.0.4 tagged with 'make release'\n    Commit: friendlier output from tronctl retry\n\n -- root <root@dafe28d81983>  Fri, 20 Apr 2018 14:15:05 +0000\n\ntron (0.8.0.3) trusty; urgency=medium\n\n  * 0.8.0.3 tagged with 'make release'\n    Feature: Job actions can now be re-tried using cli command\n    `tronctl retry <action reference>`. This will automatically\n    trigger dependent actions upon success of re-tried action.\n\n -- root <root@21bbc7053b21>  Thu, 19 Apr 2018 13:14:04 +0000\n\ntron (0.8.0.2) trusty; urgency=medium\n\n  * 0.8.0.2 tagged with 'make release'\n    Fix: regression in tronweb jobs list\n\n -- root <root@aa16172c0160>  Wed, 18 Apr 2018 11:09:19 +0000\n\ntron (0.8.0.1) trusty; urgency=medium\n\n  * 0.8.0.1 tagged with 'make release'\n    Commit: Merge pull request #370 from Yelp/python3-deb  Python3 deb\n\n -- root <root@ef2489238ddf>  Mon, 16 Apr 2018 14:38:29 +0000\n\ntron (0.8.0.0) trusty; urgency=medium\n\n  * 0.8.0.0 tagged with 'make release'\n    Commit: remove duplicate ignore for debian/debhelper-build-stamp\n\n -- root <root@7a84f15cf87d>  Wed, 14 Mar 2018 11:14:08 +0000\n\ntron (0.7.8.3) trusty; urgency=medium\n\n  * 0.7.8.3 tagged with 'make release'\n    Commit: Merge pull request #426 from Yelp/maybe-decode-all-the-\n    things  Maybe decode all the things\n\n -- root <root@134c326c0301>  Thu, 12 Apr 2018 13:29:50 +0000\n\ntron (0.7.8.2) trusty; urgency=medium\n\n  * 0.7.8.2 tagged with 'make release'\n    Commit: Merge pull request #424 from Yelp/cleanup-retries-validation\n    Fix validation of cleanup action\n\n -- root <root@bdc822939744>  Tue, 10 Apr 2018 11:52:29 +0000\n\ntron (0.7.8.1) trusty; urgency=medium\n\n  * 0.7.8.1 tagged with 'make release'\n  * Retries attribute for action runs\n  * Preparing for Python 3 upgrade\n\n -- root <root@29a498a76bfe>  Fri, 06 Apr 2018 14:45:12 +0000\n\ntron (0.7.8.0) trusty; urgency=medium\n\n  * 0.7.8.0 tagged with 'make release'\n  * Script to clean up namespaces\n  * Improve check_tron_jobs logging\n  * Fix bug in date context math with timezones\n  * Improve tab completion\n  * Remove more service code\n  * Use config values to create PaaSTA action run that prints\n\n -- root <root@d2932ccaf8e4>  Tue, 03 Apr 2018 18:51:45 +0000\n\ntron (0.7.7.1) trusty; urgency=medium\n\n  * 0.7.7.1 tagged with 
'make release'\n    Commit: Preparing 0.7.7.1 release  - fix shelve regression - use\n    bsddb3 directly - remove service functionality from tronweb - add\n    CORS header - dockerize itests - test debian package in travis - tab\n    completion improvements\n\n -- root <root@98871b48f7e4>  Fri, 23 Mar 2018 21:11:20 +0000\n\ntron (0.7.7.0) trusty; urgency=medium\n\n  * 0.7.7.0 tagged with 'make release'\n  * Cache job names in tab completion\n  * Remove core service class (services are deprecated now)\n  * Add backward compatible shelve\n  * Add initial config fields for actions on PaaSTA\n  * Bug fixes in tronfig and DST time resolution\n\n -- root <root@bc3b0b62294a>  Thu, 22 Mar 2018 18:33:40 +0000\n\ntron (0.7.6.1) trusty; urgency=medium\n\n  * 0.7.6.1 tagged with 'make release'\n    Commit: bump version, fix package building issues\n\n -- root <root@d28ba802797d>  Mon, 12 Mar 2018 17:08:34 +0000\n\ntron (0.7.6.0) trusty; urgency=medium\n\n  * 0.7.6.0 tagged with 'make release'\n    Commit: version bump 0.7.6.0\n\n -- root <root@c77afe41973b>  Mon, 12 Mar 2018 15:22:58 +0000\n\ntron (0.7.5.3) trusty; urgency=medium\n\n  * 0.7.5.3 tagged with 'make release'\n    Commit: Merge pull request #372 from Yelp/u/jgl/TRON-\n    212_upgrade_to_argparse  Upgrade to argparse\n\n -- root <root@401cb8cd587b>  Thu, 08 Mar 2018 00:39:13 +0000\n\ntron (0.7.5.2) trusty; urgency=medium\n\n  * 0.7.5.2 tagged with 'make release'\n    Commit: Merge pull request #373 from Yelp/fix_tz_naive_localization\n    Only localize datetimes when they lack tzinfo\n\n -- root <root@28c49fd423bf>  Fri, 02 Mar 2018 20:49:06 +0000\n\ntron (0.7.5.1) trusty; urgency=medium\n\n  * 0.7.5.1 tagged with 'make release'\n    Commit: Merge pull request #367 from\n    Yelp/u/jgl/better_pidfile_error_message  Make tron pidfile error\n    message more clear\n\n -- root <root@b5305b0b6d16>  Wed, 28 Feb 2018 23:12:22 +0000\n\ntron (0.7.5.0) trusty; urgency=medium\n\n  * 0.7.5.0 tagged with 'make release'\n    Commit: Merge pull request #360 from Yelp/per_job_tz  Allow jobs to\n    override the default timezone\n\n -- root <root@d339239f37b4>  Wed, 28 Feb 2018 02:07:09 +0000\n\ntron (0.7.4.2) trusty; urgency=medium\n\n  * 0.7.4.2 tagged with 'make release'\n    Commit: added xenial building support\n\n -- root <root@cd05b3203eb4>  Tue, 27 Feb 2018 01:23:33 +0000\n\ntron (0.7.4.1) trusty; urgency=medium\n\n  * 0.7.4.1 tagged with 'make release'\n    Commit: Released 0.7.4.1 via make release\n\n -- root <root@209774aa33a5>  Fri, 23 Feb 2018 23:35:28 +0000\n\ntron (0.7.4.0) trusty; urgency=medium\n\n  * 0.7.4.0 tagged with 'make release'\n  * Remove support for mongodb in state serialization\n  * Remove deprecated restart_interval option for services\n  * Fix unicode bug in root URL\n\n -- root <root@ed515052fdab>  Tue, 13 Feb 2018 20:57:16 +0000\n\ntron (0.7.3.2) trusty; urgency=medium\n\n  * 0.7.3.2 tagged with 'make release'\n    Commit: Merge pull request #347 from Yelp/twisted-twisted  Twisted\n    fix, example cluster and itest improvements\n\n -- root <root@065c430c301e>  Fri, 09 Feb 2018 14:39:45 +0000\n\ntron (0.7.3.1) trusty; urgency=medium\n\n  * 0.7.3.1 tagged with 'make release'\n    Commit: Merge branch dont-start-on-boot\n\n -- root <root@4d7566959515>  Fri, 09 Feb 2018 09:27:12 +0000\n\ntron (0.7.3.0) trusty; urgency=medium\n\n  * $0.7.3.0 tagged with \\make release'rCommit: Merge pull request\n\n -- root <root@185112144f8b>  Thu, 08 Feb 2018 22:42:17 +0000\n\ntron (0.7.2.0) trusty; urgency=medium\n\n  * 0.7.2.0 tagged with 
make release'\n  * Use upstart instead of sysv-init\n  * Added prototype check_tron_jobs and monitoring configs\n  * Add --delete option for tronfig namespaces\n\n -- root <root@16fb9840b464>  Thu, 01 Feb 2018 11:07:08 +0000\n\ntron (0.7.1.0) trusty; urgency=medium\n\n  * 0.7.1.0 tagged with 'make release'\n    Commit: dont assume the USER env var  use the more reliable\n    getpass.getuser() instead of expecting a USER env var to be present.\n\n -- root <root@ac0c082f7689>  Tue, 10 Oct 2017 12:47:50 +0000\n\ntron (0.7.0.0) trusty; urgency=medium\n\n  * 0.7.0.0 tagged with 'make release'\n    Commit: fix init.d script\n\n -- root <root@a3aeebf1169f>  Fri, 25 Aug 2017 14:22:47 +0000\n\ntron (0.6.2.1) lucid; urgency=low\n\n  * Only keep last buffer from ssh connection\n\n -- Federico Giraud <fgiraud@yelp.com>  Mon, 15 Aug 2016 10:15:56 -0700\n\ntron (0.6.1.12) lucid; urgency=low\n\n  * Fix memory leaks from event recorder and twisted\n\n -- Yejun Yang <yejun@yelp.com>  Wed, 06 Jan 2016 18:07:37 -0800\n\ntron (0.6.1.11) lucid; urgency=low\n\n  * Add job and service support fields: owner, summary, notes\n\n -- Yejun Yang <yejun@yelp.com>  Fri, 02 Oct 2015 11:38:26 -0700\n\ntron (0.6.1.10) lucid; urgency=low\n\n  * Optimize tronweb dashboard performance\n\n -- Yejun Yang <yejun@yelp.com>  Fri, 19 Dec 2014 13:45:23 -0800\n\ntron (0.6.1.9) lucid; urgency=low\n\n  * Log some known exceptions\n  * Check overlapped run id with instance\n\n -- Yejun Yang <yejun@yelp.com>  Thu, 11 Dec 2014 11:54:32 -0800\n\ntron (0.6.1.8) lucid; urgency=low\n\n  * Fix service instance restore state, run monitor instead of queue.\n\n -- Yejun Yang <yejun@yelp.com>  Tue, 04 Nov 2014 14:14:29 -0800\n\ntron (0.6.1.7) lucid; urgency=low\n\n  * Display error message when instance start fail\n\n -- Yejun Yang <yejun@yelp.com>  Tue, 04 Nov 2014 10:18:12 -0800\n\ntron (0.6.1.6) lucid; urgency=low\n\n  * Ignore service instance start error\n  * Ignore duplicated run id\n\n -- Yejun Yang <yejun@yelp.com>  Mon, 03 Nov 2014 17:21:24 -0800\n\ntron (0.6.1.5) lucid; urgency=low\n\n  * Increase channel start timeout\n  * Fix service monitor restart too soon\n\n -- Yejun Yang <yejun@yelp.com>  Thu, 03 Jul 2014 10:28:45 -0700\n\ntron (0.6.1.4) lucid; urgency=low\n\n  * Remove incorrectly fixed dead code\n  * Service monitor task always notify failed instead of down\n\n -- Yejun Yang <yejun@yelp.com>  Tue, 01 Apr 2014 10:43:48 -0700\n\ntron (0.6.1.3) lucid; urgency=low\n\n  * Fix bug in node service stop\n\n -- Yejun Yang <yejun@yelp.com>  Thu, 27 Feb 2014 17:43:57 -0800\n\ntron (0.6.1.2) lucid; urgency=low\n\n  * Fix bug prevent reconnection\n  * Add new config monitor_retries\n\n -- Yejun Yang <yejun@yelp.com>  Wed, 05 Feb 2014 10:29:59 -0800\n\ntron (0.6.1) unstable; urgency=low\n\n  * tronweb was replaced with a clientside version\n  * more ssh options are now configurable\n  * adding an experimental feature to support a max_runtime on jobs\n  * adding tronctl kill to SIGKILL a service\n  * add a `--no-header` option to tronfig\n\n -- Daniel Nephin <dnephin@yelp.com>  Thu, 02 May 2013 17:34:46 -0700\n\ntron (0.6.0.2) unstable; urgency=low\n\n  * Allow serviceinstances to transition from unknown to down\n  * Better handling for serviceinstance monitor task failing\n\n -- Daniel Nephin <dnephin@yelp.com>  Thu, 04 Apr 2013 12:47:14 -0700\n\ntron (0.6.0.1) unstable; urgency=low\n\n  * minor visual improvements to tronview\n\n -- Daniel Nephin <dnephin@yelp.com>  Tue, 26 Mar 2013 12:22:38 -0700\n\ntron (0.6.0) unstable; urgency=low\n\n  * 
action.requires must be a list (string has been deprecated since 0.3.3)\n  * tronctl zap has been removed (it shouldn't be necessary anymore)\n  * service monitoring code has been re-written (services should not longer get stuck in a stopping state)\n  * hosts can not be validated by specifying a known_hosts file\n  * additional validation for ssh options and context variables has been moved into configuration validation\n  * tronview now displays additional details about jobs and services\n\n -- Daniel Nephin <dnephin@yelp.com>  Mon, 25 Mar 2013 11:14:13 -0700\n\ntron (0.5.2.3) unstable; urgency=low\n\n  * Fix a bug that was preventing nodes from connecting with provided username\n  * Patched an issue with the SSH connection that could cause exceptions on channel close\n\n -- Daniel Nephin <dnephin@yyelp.com>  Fri, 15 Feb 2013 11:19:25 -0800\n\ntron (0.5.2) unstable; urgency=low\n\n  * Tron now supports the ability to use different users per node connection.\n  * Fragmented configuration is now possible by using namespaced config files.\n  * Additional cleanup and stability patches have been applied.\n  * State persistence configuration can now be changed without restarting trond\n  * State saving now includes a namespace, you will need to run `tools/migration/migrate_state.py` to migrate old state.\n\n -- Thomas Robinson <trobinso@yelp.com>  Wed, 9 Jan 2013 16:25:53 -0700\n\ntron (0.5.1) unstable; urgency=low\n\n  * Jobs which are disabled will no longer be re-enabled when part of their configuration changes.\n  * Individual actions for a Job can no longer be started independently before a job is started. This was never intentionally supported.\n  * Adding a new configuration option `allow_overlap` for Jobs, which allows job runs to overlap each other.\n  * Jobs can now be configured using crontab syntax.\n\n -- Daniel Nephin <dnephin@yyelp.com>  Wed, 25 Jul 2012 16:25:53 -0700\n\ntron (0.5.0.2) unstable; urgency=low\n\n  * Fix a bug with daemonizing and some versions of twisted reactor.\n\n -- Daniel Nephin <dnephin@yelp.com>  Tue, 17 Jul 2012 19:21:39 -0700\n\ntron (0.5.0) unstable; urgency=low\n\n  * Names for nodes, jobs, actions and service can now contain underscore characters but are restricted to 255 characters.\n  * trond now supports a graceful shutdown. Send trond SIGINT to have it wait for all currently running jobs to complete before shutting down. SIGTERM also performs some cleanup before terminating.\n  * State serialization has changed.  See :ref:`config_state` for configuration options.  `tools/migration/migrate_state.py` is included to migrate your existing Tron state to a new store.  
YAML store is now deprecated.\n  * Old style config, which was deprecated in 0.3 will no longer work.\n\n -- Daniel Nephin <dnephin@yelp.com>  Tue, 05 Jun 2012 18:47:34 -0700\n\ntron (0.4.1) unstable; urgency=low\n\n  * tronview will once again attempt to find the tty width even when stdout is not a tty.\n  * Fixed last_success for job context.\n  * Job runs which are manually cancelled will now continue to schedule new runs.\n\n -- Daniel Nephin <dnephin@yelp.com>  Wed, 30 May 2012 16:35:44 -0700\n\ntron (0.4.0) unstable; urgency=low\n\n  * Jobs now continue to run all possible actions after one of its actions fail\n  * Enabling a disabled job now schedules the next run using current time instead of the last successful run (which could cause many runs to be scheduled in the past if the job had been disabled for a while)\n  * Resolved many inconsistencies and bugs around Job scheduling.\n\n -- Daniel Nephin <dnephin@yelp.com>  Fri, 11 May 2012 18:00:00 -0800\n\ntron (0.3.3-1) unstable; urgency=low\n\n  * Remove logrotate script from debian packaging\n  * Add logging.conf to debian packaging\n\n -- James Brown <jbrown@yelp.com>  Thu, 19 Apr 2012 14:33:17 -0700\n\ntron (0.3.3) unstable; urgency=low\n\n  * Adding a configuration migration script for porting 0.2.x configs to the new 0.3.x\n  * Remove working_dir from the configuration and replace with output_stream_dir\n  * Remove logging confiruation from the general config.  Logging is now configured using python standaring logging\n\n -- Daniel Nephin <dnephin@yelp.com>  Wed, 18 Apr 2012 18:00:00 -0800\n\ntron (0.3.2) unstable; urgency=low\n\n  * Fixes a bug when there are multiple node pools\n  * Adds more unit tests\n\n -- Daniel Nephin <dnephin@yelp.com>  Wed, 11 Apr 2012 11:35:05 -0800\n\ntron (0.3.1) unstable; urgency=low\n\n  * Bug fix release\n  * Adding state diagrams to documentation\n\n -- Daniel Nephin <dnephin@yelp.com>  Tue, 27 Mar 2012 11:35:05 -0800\n\ntron (0.3.0) unstable; urgency=low\n\n  * !Tags, *references, and &anchors are now deprecated in the trond\n    configuration file.  
Support will be removed for them in 0.5.\n  * Adding an enabled option for jobs, so they can be configured as disabled by default\n  * tron commands (tronview, tronfig, tronctl) now support a global config (defaults to /etc/tron/tron.yaml)\n  * tronview will now pipe its output through 'less' if appropriate\n\n -- Daniel Nephin <dnephin@yelp.com>  Mon, 19 Feb 2012 11:35:05 -0800\n\ntron (0.2.10-1) unstable; urgency=low\n\n  * ssh_options is actually optional (sjohnson)\n  * Cleanup actions no longer cause jobs using an interval scheduler to stop being scheduled if an action fails (sjohnson)\n  * Failed actions can be skipped, causing dependent actions to run (dnephin)\n  * Tests have been moved from test/ to tests/ (sjohnson)\n  * Everything under tron/ web/ and bin/ should now pass pyflakes\n\n -- Daniel Nephin <dnephin@yelp.com>  Fri, 17 Feb 2012 11:35:05 -0800\n\ntron (0.2.9-1) unstable; urgency=low\n\n  * tronweb works and is documented (mowings-iseatz)\n  * Daylight Savings Time behavior is more well-defined (sjohnson)\n  * Jobs that fail after running over their next scheduled time are no longer forgotten (sjohnson)\n  * Reconfiguring syslog no longer requires restarting trond to take effect (jbrown)\n\n -- Steve Johnson <sjohnson@yelp.com>  Mon, 6 Feb 2012 16:26:05 -0800\n\ntron (0.2.8.1-1) unstable; urgency=low\n\n  * Set a meaningful Formatter when logging to syslog (jbrown)\n  * Included prebuilt man pages in distribution so Sphinx isn't required to\n    have them\n\n -- James Brown <jbrown@yelp.com>  Mon, 12 Dec 2011 16:26:05 -0800\n\ntron (0.2.8-1) unstable; urgency=low\n\n  * Now on PyPI (irskep)\n  * New HTML documentation at http://packages.python.org/tron (irskep)\n  * Cleanup actions: run a command after the success or failure of a job (irskep)\n  * Logging to syslog with syslog_address config field (irskep)\n  * \"zap\" command for services (irskep)\n  * simplejson is no longer a dependency for Python 2.6 and up (irskep)\n  * Fix weekday-specified jobs (mon, tues, ...) running a day late (irskep)\n  * Fix services being allowed in jobs list and causing weird crashes (irskep)\n  * Fix missing import in www.py (irskep)\n  * Better resilience to subtlely bad tronfigs (jbrown)\n\n -- Steve Johnson <sjohnson@yelp.com>  Fri, 25 Nov 2011 23:27:00 -0400\n\ntron (0.2.7-1) unstable; urgency=low\n\n  * Really fix date parsing (rhettg)\n  * Revert instant service monitor so we wait a while before checking our services (rhettg)\n  * Clean up some logging (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 14 Sep 2011 16:23:00 -0700\n\ntron (0.2.6-2) unstable; urgency=low\n\n  * Fix date parsing\n\n -- James Brown <jbrown@yelp.com>  Wed, 14 Sep 2011 15:36:30 -0700\n\ntron (0.2.6-1) unstable; urgency=low\n\n  * Support for functional testing. Fixes #49 (irskep)\n  * Context variables for year, month and day. Fixes #57 (irskep)\n  * Integrate Google App Engine Cron scheduling syntax. Fixes #71 (irskep)\n  * Fix crash during service monitoring because of node connect failures. Fixes #77 (rhettg)\n  * Make action runs explicitly not re-startable. Fixes #78 (rhettg)\n  * Flush and fsync state file. Fixes #74 (rhettg)\n  * Handle node disconnect while waiting for channel to start. Fixes #75 (rhettg)\n  * Replace an aggressive assert with a log message for monitor inconsistency. Fixes #73 (rhettg)\n  * Handle tronview event listing issue with garbage collected entites. 
Fixes #70 (rhettg)\n  * Prevent SSH stampedes by delaying some node EXEC calls (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 14 Sep 2011 14:30:15 -0700\n\n\ntron (0.2.5-1) unstable; urgency=low\n\n  * Introduce event collection system (rhettg)\n  * Fix a crash in rebuilding all services under certain reconfig scenarios. Fixes #67 (rhettg)\n  * Fix potential service situation where monitors would stop running after failures (rhettg)\n  * Additional logging around startup failures (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 22 June 2011 13:24:00 -0800\n\ntron (0.2.4-1) unstable; urgency=low\n\n  * Final tronfig fix for stdout/stdin behavior (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Tue, 12 Apr 2011 10:50:00 -0800\n\ntron (0.2.3-2) unstable; urgency=low\n\n  * Made tronfig work with non-interactive uploads again. (jbrown)\n\n -- James Brown <jbrown@yelp.com>  Mon, 11 Apr 2011 22:06:56 -0700\n\ntron (0.2.3-1) unstable; urgency=low\n\n  * Resolved an issue where tronfig via stdin wouldn't catch all errors. (rhettg)\n  * Provided additional config time validation to catch bad configurations. (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Thu, 7 Apr 2011 10:50:00 -0800\n\ntron (0.2.2-1) unstable; urgency=low\n\n  * Resolved an issue where certain service reconfigurations would\n    cause the service to be stuck in the DOWN state (rhettg)\n  * Reworked service to keep consistant instance numbers across restarts\n    and reconfigs (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 23 Mar 2011 18:26:00 -0800\n\ntron (0.2.1-1) unstable; urgency=low\n\n  * Resolve an issue where run_time wasn't set for manually started jobs (rhettg)\n  * Support for multiple arguments to tronctl (for starting things in bulk) (rhettg)\n  * Support for starting a job with a specific run_time (rhettg)\n  * Resolved an issue where services, after a reconfig, wouldn't cause state changes (rhettg)\n  * Updated man pages (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 09 Feb 2011 15:20:00 -0800\n\ntron (0.2.0-1) unstable; urgency=low\n\n  * New services system (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Mon, 06 Feb 2011 15:15:00 -0800\n\n\ntron (0.1.10-1) unstable; urgency=low\n\n  * Remove use of deprecated twisted timeout calls. Fixes #9 (rhettg)\n  * Handle newer versions of twisted (rhettg)\n  * Dynamic column widths in tronview and better overflow (ebaum)\n  * Command now displayed in tronview for an action Fixes #32 (ebaum)\n  * Respect tronview -n option for stdout/stderr output. Fixes #41 (ebaum)\n  * Show warnings option for tronview. Fixes #46 (ebaum)\n  * Suppress headers option for tronview. (ebaum)\n  * Fix an issue where default empty config failed to apply. (rhettg)\n  * Add versioning to both tron module, command and state file (rhettg)\n  * Set umask on daemon to allow proper pid-file control (rhettg)\n  * Fix issue with command context not propogating on live reconfigs. Fixes\n    #53 (rhettg)\n\n\n -- Rhett Garber <rhettg@gmail.com>  Fri, 14 Jan 2011 13:10:00 -0800\n\ntron (0.1.9-1) unstable; urgency=low\n\n  * Fix issue with config changes causing previous job runs to be in an\n    unstable state. #42 (ebaum)\n\n -- Rhett Garber <rhettg@gmail.com>  Mon, 14 Dec 2010 14:12:00 -0800\n\ntron (0.1.8-1) unstable; urgency=low\n\n  * Address issue with bad format strings in commands causing untold\n    disasters. 
#45 (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Mon, 06 Dec 2010 17:38:00 -0800\n\ntron (0.1.7-1) unstable; urgency=low\n\n  * Improve log rotation scripts under Debian (jbrown)\n  * Fix an issue where removing a job with a live reconfig caused the job not to actually be removed. #44 (rhettg)\n  * Some logging changes to make debugging issues easier (rhettg)\n  * Some cleanup and better error/delay handling around process control for state writing. (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Mon, 22 Nov 2010 15:52:00 -0800\n\ntron (0.1.6-1) unstable; urgency=low\n\n  * Fix issue with live reconfigs causing intervals to be skipped (rhettg)\n  * Added log file re-opening on SIGHUP (fixes #37) (rhettg)\n  * Fix some issues with cmp functions for jobs that caused incorrect\n    reconfigs (#38) (mtytel)\n  * Fix issue with manually starting all_node jobs/services (mtytel)\n\n -- Rhett Garber <rhettg@gmail.com>  Fri, 15 Oct 2010 15:05:00 -0700\n\ntron (0.1.5-1) unstable; urgency=low\n\n  * Fixed crash due to config bug where SSH options were sometimes missing (rhettg)\n  * Tweaks to command line interface (rhettg)\n\n -- Rhett Garber <rhettg@gmail.com>  Wed, 14 Sep 2010 10:14:00 -0700\n\ntron (0.1.4-1) UNRELEASED; urgency=low\n\n  * Simpler default options and config for trond (rhettg)\n  * Trond daemonizing for proper init.d start/stop behavior (rhettg)\n  * Fixes to reduce state file writing (matthewtytel)\n  * Better pre-validation for tronfig (matthewtytel)\n  * Updates to man pages (matthewtytel)\n\n -- Rhett Garber <rhettg@gmail.com>  Tue, 7 Sep 2010 16:51:00 -0700\n\ntron (0.1.3-3) UNRELEASED; urgency=low\n\n  * Use /var/lib/tron/ for a working directory (roguelazer)\n  * Fix bug in Node configuration with services (rhettg)\n\n -- James Brown <roguelazer@gmail.com>  Mon, 2 Sep 2010 11:35:00 -0700\n\ntron (0.1.3-2) UNRELEASED; urgency=low\n\n  * No longer depend on libyaml\n\n -- James Brown <roguelazer@gmail.com>  Mon, 2 Sep 2010 11:00:46 -0700\n\ntron (0.1.3-1) UNRELEASED; urgency=low\n\n  * Better debian packaging (roguelazer)\n  * Cleaner configuration (rhettg)\n  * SIGHUP handling for reconfiguration (matthewtytel)\n  * Command Context (environment variables for command execution) (rhettg)\n  * Show job duration, alphabetize job list and direct stdout/stderr access (matthewtytel)\n\n -- James Brown <roguelazer@gmail.com>  Mon, 30 Aug 2010 18:33:00 -0700\n\ntron (0.1.2) UNRELEASED; urgency=low\n\n  * Services (matthewtytel)\n  * Smarter node pools (run all nodes) (matthewtytel)\n  * Randomized node pool selection (matthewtytel)\n\n -- Rhett Garber <rhettg@gmail.com>  Thu, 19 Aug 2010 11:05:00 -0700\n\ntron (0.1.1) UNRELEASED; urgency=low\n\n  * On the fly reconfiguration (matthewtytel)\n  * Saving state (matthewtytel)\n  * job enable/disable (matthewtytel)\n\n -- Rhett Garber <rhettg@gmail.com>  Thu, 19 Aug 2010 11:05:00 -0700\n\ntron (0.1.0) UNRELEASED; urgency=low\n\n  * Initial release. (Closes: #XXXXXX)\n\n -- Rhett Garber <rhett@yelp.com>  Tue, 23 Mar 2010 07:34:36 -0700\n"
  },
  {
    "path": "debian/compat",
    "content": "10\n"
  },
  {
    "path": "debian/control",
    "content": "Source: tron\nSection: admin\nPriority: optional\nMaintainer: Daniel Nephin <dnephin@yelp.com>\nBuild-Depends: debhelper (>= 7), python3.10-dev, libdb5.3-dev, libyaml-dev, libssl-dev, libffi-dev, dh-virtualenv\nStandards-Version: 3.8.3\n\nPackage: tron\nArchitecture: all\nHomepage: http://github.com/yelp/Tron\nDepends: bsdutils, python3.10, libdb5.3, libyaml-0-2, ${shlibs:Depends}, ${misc:Depends}\nDescription: Tron is a job scheduling, running and monitoring package.\n  Designed to replace Cron for complex scheduling and dependencies.\n  Provides:\n    Centralized configuration for running jobs across multiple machines\n    Dependencies on jobs and resources\n    Monitoring of jobs\n"
  },
  {
    "path": "debian/copyright",
    "content": "This package was debianized by Steve Johnson <sjohnson@yelp.com>\n on Sat, 26 Nov 2011 15:13:00 -0400.\n\nIt was downloaded from http://github.com/yelp/Tron\n\nUpstream Author:\n\n    Rhett Garber <rhettg@gmail.com>\n    Matt Tytel <matthewtytel@gmail.com>\n\nCopyright:\n\n    Copyright 2010 Yelp\n\nLicense:\n\n  Licensed under the Apache License, Version 2.0 (the \"License\");\n  you may not use this file except in compliance with the License.\n  You may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n"
  },
  {
    "path": "debian/docs",
    "content": "LICENSE.txt\nREADME.md\n"
  },
  {
    "path": "debian/install",
    "content": "tron/logging.conf var/lib/tron\ntronweb/ opt/venvs/tron/\n"
  },
  {
    "path": "debian/pycompat",
    "content": "2\n"
  },
  {
    "path": "debian/pyversions",
    "content": "2.5-2.6\n"
  },
  {
    "path": "debian/rules",
    "content": "#!/usr/bin/make -f\n# -*- makefile -*-\n\nDH_VERBOSE := 1\n\n%:\n\tdh $@ --with python-virtualenv\n\n# do not call `make clean` as part of packaging\noverride_dh_auto_clean:\n\ttrue\n\noverride_dh_auto_build:\n\ttrue\n\n# do not call `make test` as part of packaging\noverride_dh_auto_test:\n\ttrue\n\noverride_dh_virtualenv:\n\techo $(PIP_INDEX_URL)\n\tdh_virtualenv --index-url $(PIP_INDEX_URL) \\\n\t            --extra-pip-arg  --trusted-host=169.254.255.254 \\\n\t            --extra-pip-arg  --only-binary=cryptography \\\n                --python=/usr/bin/python3.10 \\\n                --preinstall cython==0.29.36 \\\n                --preinstall pip==24.3.1 \\\n                --preinstall setuptools==65.5.1\n\t@echo patching k8s client lib for configuration class\n\tpatch debian/tron/opt/venvs/tron/lib/python3.10/site-packages/kubernetes/client/configuration.py contrib/patch-config-loggers.diff\noverride_dh_installinit:\n\tdh_installinit --noscripts\n"
  },
  {
    "path": "debian/tron.conffiles",
    "content": "/var/lib/tron/logging.conf\n"
  },
  {
    "path": "debian/tron.default",
    "content": "# Defaults for tron initscript\n# sourced by /etc/init.d/tron\n# installed at /etc/default/tron by the maintainer scripts\n\n#\n# This is a POSIX shell fragment\n#\n\n# Additional options that are passed to the Daemon.\nDAEMON_OPTS=\"--log-conf /var/lib/tron/logging.conf\"\n\nLISTEN_HOST=\"0.0.0.0\"\nLISTEN_PORT=\"8089\"\n\n# User the daemon will run as. Needs to have appropriate credentials to SSH into your working nodes.\n# You should take care in setting permissions appropriately for log and working directories on /var\nDAEMONUSER=\"\"\n\n# Enable this when you have configured tron to your liking.\nRUN=\"no\"\n"
  },
  {
    "path": "debian/tron.dirs",
    "content": "var/lib/tron/\nvar/log/tron/\n"
  },
  {
    "path": "debian/tron.example",
    "content": "sample_config.yaml\n"
  },
  {
    "path": "debian/tron.links",
    "content": "opt/venvs/tron/bin/check_tron_jobs usr/bin/check_tron_jobs.py\nopt/venvs/tron/bin/tronctl usr/bin/tronctl\nopt/venvs/tron/bin/trond usr/bin/trond\nopt/venvs/tron/bin/tronfig usr/bin/tronfig\nopt/venvs/tron/bin/tronview usr/bin/tronview\nopt/venvs/tron/bin/generate_tron_tab_completion_cache usr/bin/generate_tron_tab_completion_cache\nopt/venvs/tron/bin/tronctl_tabcomplete.sh usr/share/bash-completion/completions/tronctl\nopt/venvs/tron/bin/tronview_tabcomplete.sh usr/share/bash-completion/completions/tronview\n"
  },
  {
    "path": "debian/tron.manpages",
    "content": "docs/source/man/tronctl.1\ndocs/source/man/trond.8\ndocs/source/man/tronview.1\ndocs/source/man/tronfig.1\n"
  },
  {
    "path": "debian/tron.postinst",
    "content": "#!/bin/sh -e\n#\n# Post-installation script for tron\n\n#DEBHELPER#\n\nexit 0\n"
  },
  {
    "path": "debian/tron.service",
    "content": "[Unit]\nDescription=trond\nAfter=network.target\n# Attempt restarts indefinitely (If omitted, systemd attempts max 5x within StartLimitIntervalSec)\nStartLimitIntervalSec=0\nStartLimitBurst=0\n\n[Service]\nUser=tron\nEnvironmentFile=/etc/default/tron\nExecStartPre=/bin/bash -c 'if pgrep -x trond >/dev/null; then echo \"ERROR: trond process already running\" >&2; exit 1; fi'\nExecStart=/usr/bin/zk-flock -k 60 tron_master_${CLUSTER_NAME} \"/usr/bin/trond --lock-file=${LOCKFILE:-$PIDFILE} --working-dir=${WORKINGDIR} --host ${LISTEN_HOST} --port ${LISTEN_PORT} ${DAEMON_OPTS}\"\nExecStopPost=/usr/bin/logger -t tron_exit_status \"SERVICE_RESULT:${SERVICE_RESULT} EXIT_CODE:${EXIT_CODE} EXIT_STATUS:${EXIT_STATUS}\"\n# This is generally not recommended, but we need to not send SIGKILL to the child trond process and instead let the SIGTERM from zk-flock propagate down\nKillMode=process\nTimeoutStopSec=20\nRestart=always\n# Wait between restart attempts\nRestartSec=10\nLimitNOFILE=100000\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "debian/tron.upstart",
    "content": "description \"trond\"\n\nstart on filesystem and (started networking)\nstop on shutdown\n\nrespawn\nkill timeout 20\n\nscript\n  set -a\n  if [ -f /etc/default/tron ] ; then\n      . /etc/default/tron\n  fi\n  if [ \"x$RUN\" != \"xyes\" ]; then\n      log_failure_msg \"$NAME disabled, please adjust the configuration to your needs \"\n      log_failure_msg \"and then set RUN to 'yes' in /etc/default/$NAME to enable it.\"\n      exit 0\n  fi\n  exec start-stop-daemon --start -c $DAEMONUSER --exec /usr/bin/trond -- $DAEMON_OPTS\nend script\n"
  },
  {
    "path": "debian/watch",
    "content": "# Example watch control file for uscan\n# Rename this file to \"watch\" and then you can run the \"uscan\" command\n# to check for upstream updates and more.\n# See uscan(1) for format\n\n# Compulsory line, this is a version 3 file\nversion=3\n\nhttp://githubredir.debian.net/github/Yelp/Tron\n"
  },
  {
    "path": "dev/config/MASTER.yaml",
    "content": "# Please visit y/tron-development for a guide on how to setup Tron for local development\nstate_persistence:\n  name: \"tron_state\"\n  table_name: \"tmp-tron-state\"\n  store_type: \"dynamodb\"\n  buffer_size: 1\n  dynamodb_region: us-west-1\n\neventbus_enabled: True\nssh_options:\n   agent: True\n\nnodes:\n  - hostname: localhost\n\n# Replace this with the path relative to your home dir to use\n# action_runner:\n#   runner_type: \"subprocess\"\n#   remote_status_path: \"pg/tron/status\"\n#   remote_exec_path: \"pg/tron/.tox/py310/bin\"\n\njobs:\n  testjob0:\n    enabled: true\n    node: localhost\n    schedule: \"cron * * * * *\"\n    run_limit: 5\n    actions:\n      zeroth:\n        command: env\n        trigger_downstreams:\n          minutely: \"{ymdhm}\"\n        cpus: 1\n        mem: 100\n\n  testjob1:\n    enabled: false\n    node: localhost\n    schedule: \"cron * * * * *\"\n    actions:\n      first:\n        command: \"sleep 5\"\n        cpus: 1\n        mem: 100\n      second:\n        command: \"echo 'hello world'\"\n        requires: [first]\n        triggered_by:\n          - \"MASTER.testjob0.zeroth.minutely.{ymdhm}\"\n        trigger_downstreams:\n          minutely: \"{ymdhm}\"\n        cpus: 1\n        mem: 100\n\n  testjob2:\n    enabled: false\n    node: localhost\n    schedule: \"cron * * * * *\"\n    actions:\n      first:\n        command: \"echo 'goodbye, world'\"\n        cpus: 1\n        mem: 100\n        triggered_by:\n          - \"MASTER.testjob1.second.minutely.{ymdhm}\"\n\n  retrier:\n    node: localhost\n    schedule: \"cron 0 0 1 1 *\"\n    actions:\n      failing:\n        command: exit 1\n        retries: 1\n        retries_delay: 5m\n"
  },
  {
    "path": "dev/config/_manifest.yaml",
    "content": "MASTER: config/MASTER.yaml\n"
  },
  {
    "path": "dev/logging.conf",
    "content": "[loggers]\nkeys=root, twisted, tron, tron.serialize.runstate.statemanager, tron.api.www.access, task_processing, tron.mesos.task_output, pymesos\n\n[handlers]\nkeys=stdoutHandler, accessHandler, nullHandler\n\n[formatters]\nkeys=defaultFormatter, accessFormatter\n\n[logger_root]\nlevel=WARN\nhandlers=stdoutHandler\n\n[logger_twisted]\nlevel=WARN\nhandlers=stdoutHandler\nqualname=twisted\npropagate=0\n\n[logger_tron]\nlevel=DEBUG\nhandlers=stdoutHandler\nqualname=tron\npropagate=0\n\n[logger_tron.api.www.access]\nlevel=DEBUG\nhandlers=accessHandler\nqualname=tron.api.www.access\npropagate=0\n\n[logger_tron.serialize.runstate.statemanager]\nlevel=DEBUG\nhandlers=stdoutHandler\nqualname=tron.serialize.runstate.statemanager\npropagate=0\n\n[logger_task_processing]\nlevel=INFO\nhandlers=stdoutHandler\nqualname=task_processing\npropagate=0\n\n[logger_pymesos]\nlevel=DEBUG\nhandlers=stdoutHandler\nqualname=pymesos\npropagate=0\n\n[logger_tron.mesos.task_output]\nlevel=INFO\nhandlers=nullHandler\nqualname=tron.mesos.task_output\npropagate=0\n\n[handler_stdoutHandler]\nclass=logging.StreamHandler\nlevel=DEBUG\nformatter=defaultFormatter\nargs=()\n\n[handler_nullHandler]\nclass=logging.NullHandler\nlevel=DEBUG\nargs=()\n\n[handler_accessHandler]\nclass=logging.StreamHandler\nlevel=DEBUG\nformatter=accessFormatter\nargs=()\n\n[formatter_defaultFormatter]\nformat=%(asctime)s %(name)s %(levelname)s %(message)s\n\n[formatter_accessFormatter]\nformat=%(message)s\n"
  },
  {
    "path": "docs/source/_static/nature.css",
    "content": "/*\n * nature.css_t\n * ~~~~~~~~~~~~\n *\n * Sphinx stylesheet -- nature theme.\n *\n * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.\n * :license: BSD, see LICENSE for details.\n *\n */\n\n@import url(\"basic.css\");\n\n/* -- page layout ----------------------------------------------------------- */\n\nbody {\n    font-family: Arial, sans-serif;\n    font-size: 100%;\n    background-color: #111;\n    color: #555;\n    margin: 0;\n    padding: 0;\n}\n\ndiv.documentwrapper {\n    float: left;\n    width: 100%;\n}\n\ndiv.bodywrapper {\n    margin: 0 0 0 230px;\n}\n\nhr {\n    border: 1px solid #B1B4B6;\n}\n\ndiv.document {\n    background-color: #eee;\n}\n\ndiv.body {\n    background-color: #ffffff;\n    color: #3E4349;\n    padding: 0 30px 30px 30px;\n    font-size: 0.9em;\n}\n\ndiv.footer {\n    color: #555;\n    width: 100%;\n    padding: 13px 0;\n    text-align: center;\n    font-size: 75%;\n}\n\ndiv.footer a {\n    color: #444;\n    text-decoration: underline;\n}\n\ndiv.related {\n    background-color: #C41200;\n    line-height: 32px;\n    color: #fff;\n    text-shadow: 0px 1px 0 #444;\n    font-size: 0.9em;\n}\n\ndiv.related a {\n    color: #F3F3CC;\n}\n\ndiv.sphinxsidebar {\n    font-size: 0.75em;\n    line-height: 1.5em;\n}\n\ndiv.sphinxsidebarwrapper{\n    padding: 20px 0;\n}\n\ndiv.sphinxsidebar h3,\ndiv.sphinxsidebar h4 {\n    font-family: Arial, sans-serif;\n    color: #222;\n    font-size: 1.2em;\n    font-weight: normal;\n    margin: 0;\n    padding: 5px 10px;\n    background-color: #ddd;\n    text-shadow: 1px 1px 0 white\n}\n\ndiv.sphinxsidebar h4{\n    font-size: 1.1em;\n}\n\ndiv.sphinxsidebar h3 a {\n    color: #444;\n}\n\n\ndiv.sphinxsidebar p {\n    color: #888;\n    padding: 5px 20px;\n}\n\ndiv.sphinxsidebar p.topless {\n}\n\ndiv.sphinxsidebar ul {\n    margin: 10px 20px;\n    padding: 0;\n    color: #000;\n}\n\ndiv.sphinxsidebar a {\n    color: #444;\n}\n\ndiv.sphinxsidebar input {\n    border: 1px solid #ccc;\n    font-family: sans-serif;\n    font-size: 1em;\n}\n\ndiv.sphinxsidebar input[type=text]{\n    margin-left: 20px;\n}\n\n/* -- body styles ----------------------------------------------------------- */\n\na {\n    color: #005B81;\n    text-decoration: none;\n}\n\na:hover {\n    color: #E32E00;\n    text-decoration: underline;\n}\n\ndiv.body h1,\ndiv.body h2,\ndiv.body h3,\ndiv.body h4,\ndiv.body h5,\ndiv.body h6 {\n    font-family: Arial, sans-serif;\n    background-color: #BED4EB;\n    font-weight: normal;\n    color: #212224;\n    margin: 30px 0px 10px 0px;\n    padding: 5px 0 5px 10px;\n    text-shadow: 0px 1px 0 white\n}\n\ndiv.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; }\ndiv.body h2 { font-size: 150%; background-color: #C8D5E3; }\ndiv.body h3 { font-size: 120%; background-color: #D8DEE3; }\ndiv.body h4 { font-size: 110%; background-color: #D8DEE3; }\ndiv.body h5 { font-size: 100%; background-color: #D8DEE3; }\ndiv.body h6 { font-size: 100%; background-color: #D8DEE3; }\n\na.headerlink {\n    color: #c60f0f;\n    font-size: 0.8em;\n    padding: 0 4px 0 4px;\n    text-decoration: none;\n}\n\na.headerlink:hover {\n    background-color: #c60f0f;\n    color: white;\n}\n\ndiv.body p, div.body dd, div.body li {\n    line-height: 1.5em;\n}\n\ndiv.admonition p.admonition-title + p {\n    display: inline;\n}\n\ndiv.highlight{\n    background-color: white;\n}\n\ndiv.note {\n    background-color: #eee;\n    border: 1px solid #ccc;\n}\n\ndiv.seealso {\n    background-color: #ffc;\n    border: 1px solid 
#ff6;\n}\n\ndiv.topic {\n    background-color: #eee;\n}\n\ndiv.warning {\n    background-color: #ffe4e4;\n    border: 1px solid #f66;\n}\n\np.admonition-title {\n    display: inline;\n}\n\np.admonition-title:after {\n    content: \":\";\n}\n\npre {\n    padding: 10px;\n    background-color: White;\n    color: #222;\n    line-height: 1.2em;\n    border: 1px solid #C6C9CB;\n    font-size: 1.1em;\n    margin: 1.5em 0 1.5em 0;\n    -webkit-box-shadow: 1px 1px 1px #d8d8d8;\n    -moz-box-shadow: 1px 1px 1px #d8d8d8;\n}\n\ntt {\n    background-color: #ecf0f3;\n    color: #222;\n    /* padding: 1px 2px; */\n    font-size: 1.1em;\n    font-family: monospace;\n}\n\n.viewcode-back {\n    font-family: Arial, sans-serif;\n}\n\ndiv.viewcode-block:target {\n    background-color: #f4debf;\n    border-top: 1px solid #ac9;\n    border-bottom: 1px solid #ac9;\n}\n"
  },
  {
    "path": "docs/source/command_context.rst",
    "content": "\n.. _built_in_cc:\n\nBuilt-In Command Context Variables\n==================================\n\nTron includes some built in command context variables that can be used in\ncommand configuration for actions.\n\nThese variables can be used in the command of an action, using Python's format syntax (``{}``).\n\nOnce rendered into the command, they will **not** change. This is especially important for datetime-based context variables. Once a run is constructed, the datetime-based variables are \"frozen\", and will not change, even if the job is retried, or rerun one week later.\n\nFor example::\n\n    # myservice.yaml\n    myjob:\n      node: localhost\n      actions:\n        myaction1:\n          command: \"Hello world! I'm {action} for job {name} running on {node}\"\n\nThe command would get rendered at job runtime to::\n\n    Hello world! I'm myaction1 for myservice.myjob running on localhost\n\n\n**shortdate**\n    Run date in ``YYYY-MM-DD`` format. Supports simple arithmetic of the\n    form ``{shortdate+6}`` which returns a date 6 days in the future,\n    ``{shortdate-2}`` which returns a date 2 days before the run date.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**ym, ymd, ymdh, ymdhm**\n    Same as ``shortdate`` but better granularity. Arithmetic works with most\n    granular unit: ``ymdh+1`` is  +1 hours, ``ymdhm+1`` is +1 minute.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**year**\n    Current year in ``YYYY`` format. Supports the same arithmetic operations\n    as `shortdate`. For example, ``{year-1}`` would return the year previous\n    to the run date.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**month**\n    Current month in `MM` format. Supports the same arithmetic operations\n    as `shortdate`. For example, ``{month+2}`` would return 2 months in the\n    future.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**day**\n    Current day in `DD` format. Supports the same arithmetic operations\n    as `shortdate`. For example, ``{day+1}`` would return the day after the\n    run date.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**hour**\n    Current hour in `HH` (0-23) format. Supports the same arithmetic operations\n    as `shortdate`. For example, ``{hour+1}`` would return the hour after the\n    run hour (mod 24).\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**unixtime**\n    Current timestamp. Supports addition and subtraction of seconds. For\n    example ``{unixtime+20}`` would return the timestamp 20 seconds after\n    the jobs runtime.\n\n**daynumber**\n    Current day number as an ordinal (datetime.toordinal()). Supports addition\n    and subtraction of days. For example ``{daynumber-3}`` would be 3 days\n    before the run date.\n    NOTE: this takes into account the job's configured timezone, if any.\n\n**name**\n    Name of the job (e.g. ``myservice.myjob``).\n\n**actionnname**\n    The name of the action (e.g. ``myaction1``).\n\n**node**\n    Hostname of the node the action is being run on (e.g. ``localhost``).\n\n**runid**\n    Run ID of the job run (e.g. ``sample_job.23``)\n\n**cleanup_job_status**\n    ``SUCCESS`` if all actions have succeeded when the cleanup action runs,\n    ``FAILURE`` otherwise. 
``UNKNOWN`` if used in an action other than the\n    cleanup action.\n\n**last_success**\n    The last successful run date (defaults to the current date if there was no\n    previous successful run). Supports date arithmetic using the form\n    ``{last_success#shortdate-1}``.\n\n**manual**\n    ``true`` if the job was run manually. ``false`` otherwise.\n    Manual job runs are those runs launched via the ``tronctl start`` command (as opposed to those launched by the scheduler).\n    This variable is useful for changing the behavior when jobs are run manually, such as adding more verbose logging::\n\n    command: \"myjob --verbose={manual}\"\n\n**namespace**\n    The namespace of the config where the job comes from. Often ``MASTER`` or ``servicename``.\n    Usually matches the name of the service where the code runs.\n    For example, if the job name is ``myservice.mycooljob.1.myaction``, ``{namespace}`` would be rendered as ``myservice``.\n\n\nBuilt-In Environment Variables\n==============================\n\nThe following environment variables are also available in the process environment.\n\nThese can be used like normal Linux environment variables using ``$``; for example, ``$TRON_JOB_NAMESPACE`` will be expanded at runtime and replaced by the appropriate string.\n\nNote: These are **different** from Tron Context Variables, which are referenced using Python-style format strings (``{myvariable}``), are \"rendered\" into the command only, and are not available as normal environment variables.\n\nIn all examples here, imagine running tronview like ``tronview myservice.myjob.42.myaction``. The example variables represent aspects of that particular action:\n\n**TRON_JOB_NAMESPACE**\n    This is the tron config namespace where the job lives. Example: ``myservice``.\n\n**TRON_JOB_NAME**\n    This variable is the top-level key in the tron configuration file, like ``myjob``.\n\n**TRON_RUN_NUM**\n    This is the job run number. Example: ``42``.\n\n**TRON_ACTION**\n    This is the action name of the particular job. Example: ``myaction``.\n"
  },
  {
    "path": "docs/source/conf.py",
    "content": "#\n# Tron documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov  7 18:05:54 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport os\nimport sys\nfrom unittest.mock import MagicMock\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\nimport tron  # noqa\n\n\nclass Mock(MagicMock):\n    @classmethod\n    def __getattr__(cls, name):\n        return MagicMock()\n\n\nMOCK_MODULES = [\"bsddb3\"]\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = \"1.0\"\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\"sphinx.ext.autodoc\", \"sphinx.ext.coverage\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Tron\"\ncopyright = \"2011, Yelp, Inc.\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = tron.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = tron.__version__\n\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = \"nature\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents.  
If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Trondoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n    # The paper size ('letterpaper' or 'a4paper').\n    #'papersize': 'letterpaper',\n    # The font size ('10pt', '11pt' or '12pt').\n    #'pointsize': '10pt',\n    # Additional stuff for the LaTeX preamble.\n    #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n    (\n        \"index\",\n        \"Tron.tex\",\n        \"Tron Documentation\",\n        \"Yelp, Inc.\",\n        \"manual\",\n    ),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (\n        \"man_tronview\",\n        \"tronview\",\n        \"tronview documentation\",\n        [\"Yelp, Inc.\"],\n        1,\n    ),\n    (\n        \"man_tronfig\",\n        \"tronfig\",\n        \"tronfig documentation\",\n        [\"Yelp, Inc.\"],\n        1,\n    ),\n    (\n        \"man_tronctl\",\n        \"tronctl\",\n        \"control Tron jobs\",\n        [\"Yelp, Inc.\"],\n        1,\n    ),\n    (\n        \"man_trond\",\n        \"trond\",\n        \"trond documentation\",\n        [\"Yelp, Inc.\"],\n        8,\n    ),\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (\n        \"index\",\n        \"Tron\",\n        \"Tron Documentation\",\n        \"Yelp, Inc.\",\n        \"Tron\",\n        \"One line description of project.\",\n        \"Miscellaneous\",\n    ),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n"
  },
  {
    "path": "docs/source/config.rst",
    "content": "Configuration\n=============\n\n.. _config_syntax:\n\nSyntax\n------\n\nThe Tron configuration file uses YAML syntax. The recommended configuration\nstyle requires only strings, decimal values, lists, and dictionaries: the\nsubset of YAML that can be losslessly transformed into JSON. (In fact, your\nconfiguration can be entirely JSON, since YAML is mostly a strict superset\nof JSON.)\n\nPast versions of Tron used additional YAML-specific features such as tags,\nanchors, and aliases. These features still work in version 0.3, but are now\ndeprecated.\n\nBasic Example\n-------------\n\n::\n\n    ssh_options:\n      agent: true\n\n    nodes:\n      - name: local\n        hostname: 'localhost'\n\n    jobs:\n      \"getting_node_info\":\n        node: local\n        schedule: \"cron */10 * * * *\"\n        actions:\n          \"uname\":\n            command: \"uname -a\"\n          \"cpu_info\":\n            command: \"cat /proc/cpuinfo\"\n            requires: [uname]\n\n.. _command_context_variables:\n\nCommand Context Variables\n-------------------------\n\n**command** attribute values may contain **command context variables** that are\ninserted at runtime. The **command context** is populated both by Tron (see\n:ref:`built_in_cc`) and by the config file (see :ref:`command_context`). For\nexample::\n\n    jobs:\n     \"command_context_demo\":\n       node: local\n       schedule: \"1st monday in june\"\n       actions:\n         \"print_run_id\":\n           # prints 'command_context_demo.1' on the first run,\n           # 'command_context_demo.2' on the second, etc.\n           command: \"echo {runid}\"\n\nSSH\n---\n\n**ssh_options** (optional)\n    Options for SSH connections to Tron nodes. When tron runs a job\n    on a node, it will add some jitter (random delay) to the run, which can be\n    configured with the options below.\n\n    **agent** (optional, default ``False``)\n        Set to ``True`` if :command:`trond` should use an SSH agent. This requires\n        that ``$SSH_AUTH_SOCK`` exists in the environment and points to the\n        correct socket.\n\n    **identities** (optional, default ``[]``)\n        List of paths to SSH identity files\n\n    **known_hosts_file** (optional, default ``None``)\n        The path to an ssh known hosts file\n\n    **connect_timeout** (optional, default ``30``)\n        Timeout in seconds when establishing an ssh connection\n\n    **idle_connection_timeout** (optional, default ``3600``)\n        Timeout in seconds that an ssh connection can remain idle after which\n        it is closed\n\n    **jitter_min_load** (optional, default ``4``)\n        Minimum `load` on a node before any jitter is introduced. See\n        `jitter_load_factor` for a description of how load is calculated\n\n    **jitter_max_delay** (optional, default ``20``)\n        Maximum number of seconds to add to a run\n\n    **jitter_load_factor** (optional, default ``1``)\n        Factor used to increment the count of running actions for determining\n        the upper bound of jitter to add (ex. 
A factor of 2 would increase the\n        upper bound by 2 seconds per running action)\n\nExample::\n\n    ssh_options:\n        agent:                    false\n        known_hosts_file:         /etc/ssh/known_hosts\n        identities:\n            - /home/batch/.ssh/id_dsa-nopasswd\n\n        connect_timeout:          30\n        idle_connection_timeout:  3600\n\n        jitter_min_load:          4\n        jitter_max_delay:         20\n        jitter_load_factor:       1\n\n.. _time_zone:\n\nTime Zone\n---------\n\n**time_zone** (optional)\n    Local time as observed by the system clock. If your system observes a\n    time zone with daylight saving time, then some of your jobs may run early\n    or late on the days bordering each mode. See :ref:`dst_notes` for more\n    information.\n\nExample::\n\n        time_zone: US/Pacific\n\n.. _command_context:\n\nCommand Context\n---------------\n\n**command_context**\n    Dictionary of custom :ref:`command context variables\n    <command_context_variables>`. It is an arbitrary set of key-value pairs.\n\nExample::\n\n        command_context:\n            PYTHON: /usr/bin/python\n            TMPDIR: /tmp\n\nSee a list of :ref:`built_in_cc`.\n\n\nOutput Stream Directory\n-----------------------\n**output_stream_dir**\n    A path to the directory used to store the stdout/stderr logs from jobs.\n    It defaults to the ``--working_dir`` option passed to :ref:`trond`.\n\nExample::\n\n    output_stream_dir: \"/home/tronuser/output/\"\n\n\n.. _config_state:\n\nState Persistence\n-----------------\n**state_persistence**\n    Configure how trond should persist its state to disk. By default a `shelve`\n    store is used and saved to `./tron_state` in the working directory.\n\n    **store_type**\n        Valid options are:\n            **shelve** - uses the `shelve` module and saves to a local file\n\n            **yaml** - uses `yaml` and saves to a local file (this is not recommended and is provided for backwards compatibility with previous versions of Tron).\n\n        You will need the appropriate Python module for the option you choose.\n\n    **name**\n        The name of this store. This will be the filename for a **shelve** or\n        **yaml** store.\n\n    **buffer_size**\n        The number of save calls to buffer before writing the state.  Defaults to 1,\n        which means no buffering.\n\n\nExample::\n\n    state_persistence:\n        store_type: shelve\n        name: tron_store\n\n\n.. _action_runners:\n\nAction Runners\n--------------\n\n**Note:** this is an experimental feature\n\n\n**action_runner**\n    Action runner configuration allows you to run Job actions through a script\n    which records its pid. This provides support for a max_runtime option\n    on jobs, and allows you to stop or kill the action from :command:`tronctl`.\n\n    **runner_type**\n        Valid options are:\n            **none**\n                Run actions without a wrapper. This is the default.\n\n            **subprocess**\n                Run actions with a script which records the pid and runs the\n                action command in a subprocess (on the remote node). This\n                requires that :command:`bin/action_runner.py` and\n                :command:`bin/action_status.py` are available on the remote\n                host.\n\n    **remote_status_path**\n        Path used to store status files. 
Defaults to `/tmp`.\n\n    **remote_exec_path**\n        Directory path which contains :command:`action_runner.py` and\n        :command:`action_status.py` scripts.\n\n\nExample::\n\n    action_runner:\n        runner_type:        \"subprocess\"\n        remote_status_path: \"/tmp/tron\"\n        remote_exec_path:   \"/usr/local/bin\"\n\n\nNodes\n-----\n\n**nodes**\n    List of nodes. Each node has the following options:\n\n    **hostname** (required)\n        The hostname or IP address of the node\n\n    **name** (optional, defaults to ``hostname``)\n        A name to refer to this node\n\n    **username** (optional, defaults to current user)\n        The name of the user to connect with\n\n    **port** (optional, defaults to 22)\n        The port number of the node\n\n\nExample::\n\n    nodes:\n        - name: node1\n          hostname: 'batch1'\n        - hostname: 'batch2'    # name is 'batch2'\n\nNode Pools\n----------\n\n**node_pools**\n    List of node pools, each with a ``name`` and ``nodes`` list. ``name``\n    defaults to the names of each node joined by underscores.\n\nExample::\n\n    node_pools:\n        - name: pool\n          nodes: [node1, batch1]\n        - nodes: [batch1, node1]    # name is 'batch1_node1'\n\nJobs and Actions\n----------------\n\n**jobs**\n    List of jobs for Tron to manage. See :doc:`jobs` for the options available\n    to jobs and their actions.\n\n.. _config_logging:\n\nLogging\n-------\n\nAs of v0.3.3, logging is no longer configured in the tron configuration file.\n\nTron uses Python's standard logging and by default uses a rotating log file\nhandler that rotates files each day. The default log file is\n``/var/log/tron/tron.log``.\n\nTo configure logging, pass -l <logging.conf> to trond. You can modify the\ndefault logging.conf by copying it from tron/logging.conf. See\nhttp://docs.python.org/howto/logging.html#configuring-logging\n\nInteresting logs\n~~~~~~~~~~~~~~~~\n\nMost Tron logs are named using Python's `__file__`, which uses the module's\nname.  There are a couple of special cases:\n\n**twisted**\n    Twisted sends its logs to the `twisted` log\n\n**tron.api.www.access**\n    API access logs are sent to this log at the INFO log level.  They follow\n    the standard Apache combined log format.\n"
  },
  {
    "path": "docs/source/developing.rst",
    "content": ".. _developing:\n\nContributing to Tron\n====================\n\nTron is an open source project and welcomes contributions from the community.\nThe source and issue tracker are hosted on github at\nhttp://github.com/yelp/Tron.\n\nSetting Up an Environment\n-------------------------\n\nTron works well with `virtualenv <http://www.virtualenv.org>`_, which can be\nsetup using `virtualenvwrapper\n<http://www.doughellmann.com/projects/virtualenvwrapper/>`_::\n\n    $ mkvirtualenv tron --distribute --no-site-packages\n    $ pip install -r dev/req_dev.txt\n\n``req_dev.txt`` contains a list of packages required for development,\nto run the tests, and `Sphinx <http://sphinx.pocoo.org/>`_ to build the documentation.\n\nCoding Standards\n----------------\n\nAll code should be `PEP8 <http://www.python.org/dev/peps/pep-0008/>`_ compliant,\nand should pass pyflakes without warnings. All new code should include full\ntest coverage, and bug fixes should include a test which reproduces the\nreported issue.\n\nThis documentation must also be kept up to date with any changes in functionality.\n\n\nRunning Tron in a Sandbox\n-------------------------\n\nThe source package includes a development logging.conf and a\nsample configuration file with a few test cases. To run a development instance\nof Tron create a working directory and start\n:command:`trond` using the following::\n\n    $ make dev\n\n\nRunning the Tests\n-----------------\nRun the tests using ``make test``.\n\nContributing\n------------\n\nThere should be a github issue created prior to all pull requests.  Pull requests\nshould be made to the ``Yelp:development`` branch, and should include additions to\n``CHANGES.txt`` which describe what has changed.\n"
  },
  {
    "path": "docs/source/generated/modules.rst",
    "content": "tron\n====\n\n.. toctree::\n   :maxdepth: 4\n\n   tron\n"
  },
  {
    "path": "docs/source/generated/tron.actioncommand.rst",
    "content": "tron.actioncommand module\n=========================\n\n.. automodule:: tron.actioncommand\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.adapter.rst",
    "content": "tron.api.adapter module\n=======================\n\n.. automodule:: tron.api.adapter\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.async_resource.rst",
    "content": "tron.api.async\\_resource module\n===============================\n\n.. automodule:: tron.api.async_resource\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.auth.rst",
    "content": "tron.api.auth module\n====================\n\n.. automodule:: tron.api.auth\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.controller.rst",
    "content": "tron.api.controller module\n==========================\n\n.. automodule:: tron.api.controller\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.requestargs.rst",
    "content": "tron.api.requestargs module\n===========================\n\n.. automodule:: tron.api.requestargs\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.resource.rst",
    "content": "tron.api.resource module\n========================\n\n.. automodule:: tron.api.resource\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.api.rst",
    "content": "tron.api package\n================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.api.adapter\n   tron.api.async_resource\n   tron.api.auth\n   tron.api.controller\n   tron.api.requestargs\n   tron.api.resource\n\nModule contents\n---------------\n\n.. automodule:: tron.api\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.command_context.rst",
    "content": "tron.command\\_context module\n============================\n\n.. automodule:: tron.command_context\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.authentication.rst",
    "content": "tron.commands.authentication module\n===================================\n\n.. automodule:: tron.commands.authentication\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.backfill.rst",
    "content": "tron.commands.backfill module\n=============================\n\n.. automodule:: tron.commands.backfill\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.client.rst",
    "content": "tron.commands.client module\n===========================\n\n.. automodule:: tron.commands.client\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.cmd_utils.rst",
    "content": "tron.commands.cmd\\_utils module\n===============================\n\n.. automodule:: tron.commands.cmd_utils\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.display.rst",
    "content": "tron.commands.display module\n============================\n\n.. automodule:: tron.commands.display\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.retry.rst",
    "content": "tron.commands.retry module\n==========================\n\n.. automodule:: tron.commands.retry\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.commands.rst",
    "content": "tron.commands package\n=====================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.commands.authentication\n   tron.commands.backfill\n   tron.commands.client\n   tron.commands.cmd_utils\n   tron.commands.display\n   tron.commands.retry\n\nModule contents\n---------------\n\n.. automodule:: tron.commands\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.config_parse.rst",
    "content": "tron.config.config\\_parse module\n================================\n\n.. automodule:: tron.config.config_parse\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.config_utils.rst",
    "content": "tron.config.config\\_utils module\n================================\n\n.. automodule:: tron.config.config_utils\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.manager.rst",
    "content": "tron.config.manager module\n==========================\n\n.. automodule:: tron.config.manager\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.rst",
    "content": "tron.config package\n===================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.config.config_parse\n   tron.config.config_utils\n   tron.config.manager\n   tron.config.schedule_parse\n   tron.config.schema\n   tron.config.static_config\n\nModule contents\n---------------\n\n.. automodule:: tron.config\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.schedule_parse.rst",
    "content": "tron.config.schedule\\_parse module\n==================================\n\n.. automodule:: tron.config.schedule_parse\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.schema.rst",
    "content": "tron.config.schema module\n=========================\n\n.. automodule:: tron.config.schema\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.config.static_config.rst",
    "content": "tron.config.static\\_config module\n=================================\n\n.. automodule:: tron.config.static_config\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.action.rst",
    "content": "tron.core.action module\n=======================\n\n.. automodule:: tron.core.action\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.actiongraph.rst",
    "content": "tron.core.actiongraph module\n============================\n\n.. automodule:: tron.core.actiongraph\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.actionrun.rst",
    "content": "tron.core.actionrun module\n==========================\n\n.. automodule:: tron.core.actionrun\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.job.rst",
    "content": "tron.core.job module\n====================\n\n.. automodule:: tron.core.job\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.job_collection.rst",
    "content": "tron.core.job\\_collection module\n================================\n\n.. automodule:: tron.core.job_collection\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.job_scheduler.rst",
    "content": "tron.core.job\\_scheduler module\n===============================\n\n.. automodule:: tron.core.job_scheduler\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.jobgraph.rst",
    "content": "tron.core.jobgraph module\n=========================\n\n.. automodule:: tron.core.jobgraph\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.jobrun.rst",
    "content": "tron.core.jobrun module\n=======================\n\n.. automodule:: tron.core.jobrun\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.recovery.rst",
    "content": "tron.core.recovery module\n=========================\n\n.. automodule:: tron.core.recovery\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.core.rst",
    "content": "tron.core package\n=================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.core.action\n   tron.core.actiongraph\n   tron.core.actionrun\n   tron.core.job\n   tron.core.job_collection\n   tron.core.job_scheduler\n   tron.core.jobgraph\n   tron.core.jobrun\n   tron.core.recovery\n\nModule contents\n---------------\n\n.. automodule:: tron.core\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.eventbus.rst",
    "content": "tron.eventbus module\n====================\n\n.. automodule:: tron.eventbus\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.kubernetes.rst",
    "content": "tron.kubernetes module\n======================\n\n.. automodule:: tron.kubernetes\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.manhole.rst",
    "content": "tron.manhole module\n===================\n\n.. automodule:: tron.manhole\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.mcp.rst",
    "content": "tron.mcp module\n===============\n\n.. automodule:: tron.mcp\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.mesos.rst",
    "content": "tron.mesos module\n=================\n\n.. automodule:: tron.mesos\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.metrics.rst",
    "content": "tron.metrics module\n===================\n\n.. automodule:: tron.metrics\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.node.rst",
    "content": "tron.node module\n================\n\n.. automodule:: tron.node\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.prom_metrics.rst",
    "content": "tron.prom\\_metrics module\n=========================\n\n.. automodule:: tron.prom_metrics\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.rst",
    "content": "tron package\n============\n\nSubpackages\n-----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.api\n   tron.commands\n   tron.config\n   tron.core\n   tron.serialize\n   tron.utils\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.actioncommand\n   tron.command_context\n   tron.eventbus\n   tron.kubernetes\n   tron.manhole\n   tron.mcp\n   tron.mesos\n   tron.metrics\n   tron.node\n   tron.prom_metrics\n   tron.scheduler\n   tron.ssh\n   tron.trondaemon\n   tron.yaml\n\nModule contents\n---------------\n\n.. automodule:: tron\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.scheduler.rst",
    "content": "tron.scheduler module\n=====================\n\n.. automodule:: tron.scheduler\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.filehandler.rst",
    "content": "tron.serialize.filehandler module\n=================================\n\n.. automodule:: tron.serialize.filehandler\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.rst",
    "content": "tron.serialize package\n======================\n\nSubpackages\n-----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.serialize.runstate\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.serialize.filehandler\n\nModule contents\n---------------\n\n.. automodule:: tron.serialize\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.runstate.dynamodb_state_store.rst",
    "content": "tron.serialize.runstate.dynamodb\\_state\\_store module\n=====================================================\n\n.. automodule:: tron.serialize.runstate.dynamodb_state_store\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.runstate.rst",
    "content": "tron.serialize.runstate package\n===============================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.serialize.runstate.dynamodb_state_store\n   tron.serialize.runstate.shelvestore\n   tron.serialize.runstate.statemanager\n   tron.serialize.runstate.yamlstore\n\nModule contents\n---------------\n\n.. automodule:: tron.serialize.runstate\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.runstate.shelvestore.rst",
    "content": "tron.serialize.runstate.shelvestore module\n==========================================\n\n.. automodule:: tron.serialize.runstate.shelvestore\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.runstate.statemanager.rst",
    "content": "tron.serialize.runstate.statemanager module\n===========================================\n\n.. automodule:: tron.serialize.runstate.statemanager\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.serialize.runstate.yamlstore.rst",
    "content": "tron.serialize.runstate.yamlstore module\n========================================\n\n.. automodule:: tron.serialize.runstate.yamlstore\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.ssh.rst",
    "content": "tron.ssh module\n===============\n\n.. automodule:: tron.ssh\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.trondaemon.rst",
    "content": "tron.trondaemon module\n======================\n\n.. automodule:: tron.trondaemon\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.collections.rst",
    "content": "tron.utils.collections module\n=============================\n\n.. automodule:: tron.utils.collections\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.crontab.rst",
    "content": "tron.utils.crontab module\n=========================\n\n.. automodule:: tron.utils.crontab\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.exitcode.rst",
    "content": "tron.utils.exitcode module\n==========================\n\n.. automodule:: tron.utils.exitcode\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.logreader.rst",
    "content": "tron.utils.logreader module\n===========================\n\n.. automodule:: tron.utils.logreader\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.observer.rst",
    "content": "tron.utils.observer module\n==========================\n\n.. automodule:: tron.utils.observer\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.persistable.rst",
    "content": "tron.utils.persistable module\n=============================\n\n.. automodule:: tron.utils.persistable\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.proxy.rst",
    "content": "tron.utils.proxy module\n=======================\n\n.. automodule:: tron.utils.proxy\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.queue.rst",
    "content": "tron.utils.queue module\n=======================\n\n.. automodule:: tron.utils.queue\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.rst",
    "content": "tron.utils package\n==================\n\nSubmodules\n----------\n\n.. toctree::\n   :maxdepth: 4\n\n   tron.utils.collections\n   tron.utils.crontab\n   tron.utils.exitcode\n   tron.utils.logreader\n   tron.utils.observer\n   tron.utils.persistable\n   tron.utils.proxy\n   tron.utils.queue\n   tron.utils.state\n   tron.utils.timeutils\n   tron.utils.trontimespec\n   tron.utils.twistedutils\n\nModule contents\n---------------\n\n.. automodule:: tron.utils\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.state.rst",
    "content": "tron.utils.state module\n=======================\n\n.. automodule:: tron.utils.state\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.timeutils.rst",
    "content": "tron.utils.timeutils module\n===========================\n\n.. automodule:: tron.utils.timeutils\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.trontimespec.rst",
    "content": "tron.utils.trontimespec module\n==============================\n\n.. automodule:: tron.utils.trontimespec\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.utils.twistedutils.rst",
    "content": "tron.utils.twistedutils module\n==============================\n\n.. automodule:: tron.utils.twistedutils\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/generated/tron.yaml.rst",
    "content": "tron.yaml module\n================\n\n.. automodule:: tron.yaml\n   :members:\n   :undoc-members:\n   :show-inheritance:\n"
  },
  {
    "path": "docs/source/index.rst",
    "content": "Tron\n====\n\nTron is a centralized system for managing periodic batch processes\nacross a cluster. If this is your first time using Tron, read :doc:`tutorial`\nand :doc:`overview` to get a better idea of what it is, how it works, and how\nto use it.\n\n.. note::\n\n    Please report bugs in the documentation at `our Github issue tracker\n    <http://www.github.com/yelp/Tron/issues>`_.\n\nTable of Contents\n-----------------\n\n.. toctree::\n    :maxdepth: 2\n\n    whats-new.rst\n    tutorial.rst\n    overview.rst\n    config.rst\n    jobs.rst\n    command_context.rst\n    tronweb.rst\n    tools.rst\n    developing.rst\n\nGenerated Docs\n~~~~~~~~~~~~~~\n\n.. toctree::\n   :maxdepth: 1\n\n   generated/modules\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`search`\n"
  },
  {
    "path": "docs/source/jobs.rst",
    "content": "Jobs\n====\n\nA job consists of a name, a node/node pool, a list of actions, a schedule, and\nan optional cleanup action. They are periodic events that do not interact with\nother jobs while running.\n\nIf all actions exit with status 0, the job has succeeded. If any action exists\nwith a nonzero status, the job has failed.\n\n\nRequired Fields\n---------------\n\nJobs are defined in the form of a dictionary, where the **name** is the key.\nThe Name of the job is used in :command:`tronview` and :command:`tronctl`. Here is an example::\n\n  jobs:\n    \"foo\":\n      \"schedule\": ...\n      \"command\": ...\n      \"actions\":\n        \"run_first\":\n          \"command\": ...\n\n**node**\n    Reference to the node or pool to run the job in. If a pool, the job is\n    run in a random node in the pool.\n\n**schedule**\n    When to run this job. Schedule fields can take multiple forms. See\n    :ref:`job_scheduling`.\n\n**actions**\n    List of :ref:`actions <job_actions>`.\n\nOptional Fields\n---------------\n\n**monitoring** (default **{}**)\n    Dictionary of key: value pairs to inform the monitoring framework on how to\n    alert teams for job failures.\n\n    If you're using PaaSTA, for any monitoring fields not specified for a job,\n    Tron will default to those set in `monitoring.yaml` in your soa-configs.\n\n    You can see more about this behavior in the `PaaSTA docs`_.\n\n    .. _PaaSTA docs: https://paasta.readthedocs.io/en/latest/yelpsoa_configs.html#monitoring-yaml\n\n    **team**\n      Team responsible for the job. Must already be defined in the monitoring\n      framework.\n\n    **page** (default **False**)\n      Boolean on whether or not an alert for this job is page-worthy.\n\n    **runbook**\n      Runbook associated with the job.\n\n    **tip** (default **None**)\n      A short 1-line version of the runbook.\n\n    **notification_email**\n      A comma-separated string of email destinations. Defaults to the \"team\"\n      default.\n\n    **slack_channels**\n      A list of Slack channels to send alerts to. Defaults to the team setting.\n      Set an empty list to specify no Slack notifications.\n\n    **ticket** (default **False**)\n      A Boolean value to enable ticket creation.\n\n    **project** (default **None**)\n      A string representing the JIRA project that the ticket should go under.\n      Defaults to the team value.\n\n    **priority** (default **None**)\n      A JIRA ticket priority to use when creating a ticket. This only makes\n      sense to use when in combination with the ticket parameter set to True.\n      This value should be a string value like '0', '1', '3.14', etc. If not\n      set, the default will be the default_priority setting for the sensu team\n      or the default priority used for the JIRA project.\n\n    **tags** (default **None**)\n      A list of arbitrary tags that can be used in handlers for different\n      metadata needs.\n\n    **component** (default **None**)\n      A list of components affected by the event. 
A good example here would be\n      to include the job that is being affected.\n\n    **description** (default **None**)\n      Human-readable text giving more context on any monitoring events.\n\n    **check_that_every_day_has_a_successful_run** (default **False**)\n      If **True**, the latest job run each day will be checked to see if it was\n      successful.\n\n      If **False**, only the latest overall run will be checked to see if it was\n      successful.\n\n    **page_for_expected_runtime** (default **False**)\n      If **True**, when either a job or an action exceeds its configured ``expected_runtime``, the generated alert will be considered \"critical\" and will page the user.\n\n      If **False**, then an alert will not page the user.\n\n
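    As a sketch (the team, channel, and runbook values below are hypothetical,\n    not defaults), a monitoring block might look like::\n\n        monitoring:\n            team: \"my_team\"\n            page: false\n            ticket: true\n            slack_channels:\n                - \"#my-team-alerts\"\n            runbook: \"http://example.com/my-runbook\"\n\n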
**queueing** (default **True**)\n    If a job run is still running when the next job run is to be scheduled,\n    add the next run to a queue if this is **True**. Otherwise, cancel\n    the job run. Note that if the scheduler used for this job is\n    not defined to queue overlapping runs, then this setting is ignored.\n\n**allow_overlap** (default **False**)\n    If **True**, new job runs will start even if the previous run is still running.\n    By default new job runs are either cancelled or queued (see **queueing**).\n\n**run_limit** (default **50**)\n    Number of runs which will be stored. Once a Job has more than run_limit\n    runs, the output and state for the oldest run are removed. Failed runs\n    will not be removed.\n\n**all_nodes** (default **False**)\n    If **True**, run this job on each node in the\n    node pool list. If a node appears more than once in the list, the job will\n    be run on that node once for each appearance.\n\n    If **False**, run this job on a random node\n    from the node pool list. If a node appears more than once in the list, the\n    job will be more likely to run on that node, proportionate to the number of\n    appearances.\n\n    If **node** is not a node pool, this option has no effect.\n\n**cleanup_action**\n    Action to run when either all actions have succeeded or the job has failed.\n    See :ref:`job_cleanup_actions`.\n\n**enabled** (default **True**)\n    If **False**, the job will not be scheduled to run.\n\n**max_runtime** (default **None**)\n    A time interval (ex: \"2 hours\") that limits the duration of each job run.\n    If the job run is still running after this duration, all of its actions\n    are sent SIGTERM.\n\n**time_zone** (default **None**)\n    Time zone used for calculating when a job should run. Defaults to\n    None, which means it will use the default time_zone set in the master\n    config.\n\n**expected_runtime** (default **24h**)\n    A time interval (ex: \"2 hours\") that specifies the maximum expected duration of each job run.\n    Single units like (20m, 1h, 2d) are accepted, but you can't use mixed units like (1h 20m).\n    Monitoring will alert if a job run is still running after this duration.\n    Use max_runtime instead if a hard limit is needed.\n\n\n.. _job_actions:\n\nActions\n-------\n\nActions consist primarily of a **command**. An action's command is\nexecuted as soon as its dependencies (specified by **requires**) are satisfied.\nSo if your job has 10 actions, 1 of which depends on the other 9, then Tron\nwill launch the first 9 actions in parallel and run the last one when all have\ncompleted successfully.\n\nIf any action exits with nonzero status, the job will continue to run any\nactions which do not depend on the failed action.\n\n\nRequired Fields\n^^^^^^^^^^^^^^^\n\nActions are defined as a dictionary, where the name of the action is the key.\nThe name is used in :command:`tronview` and :command:`tronctl`.\n\n**command**\n    Command to run. Commands are run using ``/bin/sh`` so bash\n    expressions will not work, and could cause the job to fail.\n\nOptional Fields\n^^^^^^^^^^^^^^^\n\n**requires**\n    List of action names that must complete successfully before this\n    action is run. Actions can only require actions in the same job.\n\n**node**\n    Node or node pool to run the action on if different from the rest of the\n    job.\n\n**retries**\n    An integer representing how many times Tron is allowed to automatically\n    retry the command. Tron will immediately re-run the command if it fails,\n    and the action will not enter the failed state until retries are exhausted.\n    Defaults to None (0 retries allowed).\n\n**retries_delay**\n    A timedelta to wait in between retries.\n\n**expected_runtime** (default **24h**)\n    A time interval (ex: \"2 hours\") that specifies the maximum expected duration\n    of each action run. Monitoring will alert if an action run is still running\n    after this duration.\n\n**trigger_downstreams** (bool or dict)\n    Upon successful completion of the action, Tron will emit a trigger for every\n    item in the dictionary. When set to ``true``, a default dict of\n    ``{shortdate: \"{shortdate}\"}`` is assumed. Emitted triggers will be in the form:\n    ``<namespace>.<job>.<action>.<dict key>.<rendered value>``. See\n    ``triggered_by`` and the example below for more information.\n\n**triggered_by** (list)\n    When the list is not empty, the action will not start until all required triggers\n    have been emitted by upstream actions. Unlike with the ``requires`` attribute,\n    the upstream actions don't have to belong to the same job. A ``triggered_by``\n    template may contain any pattern allowed in the ``command`` attribute.\n    See :ref:`shortdate` for an explanation of shortdate.\n\n    Example::\n\n        triggered_by:\n          - \"other_namespace.some_job.action1.shortdate.{shortdate-1}\"\n\n**trigger_timeout** (default **24h**)\n    How long the action will wait for dependencies listed in ``triggered_by`` before\n    failing. This time is not included in ``expected_runtime``. If an upstream job fails, no\n    trigger event will be emitted and downstream jobs will fail with a trigger\n    timeout. Re-running the upstream job will emit the trigger upon successful\n    completion, and any downstream job that is still waiting will proceed\n    normally. Timed-out downstream jobs will not be restarted; you need to\n    use ``tronctl publish`` to trigger them manually.\n\n
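For example, an upstream action can emit the default ``shortdate`` trigger via\n``trigger_downstreams``, and a downstream job in the same tron master can wait\nfor it via ``triggered_by``. This is only a sketch; the job, action, and\nnamespace names here are hypothetical::\n\n    jobs:\n      \"extract\":\n        node: local\n        schedule: \"cron 0 1 * * *\"\n        actions:\n          \"dump\":\n            command: \"dump_data --date {shortdate}\"\n            # emits MASTER.extract.dump.shortdate.<date> on success\n            trigger_downstreams: true\n\n      \"report\":\n        node: local\n        schedule: \"cron 0 6 * * *\"\n        actions:\n          \"build_report\":\n            command: \"build_report --date {shortdate}\"\n            triggered_by:\n              - \"MASTER.extract.dump.shortdate.{shortdate}\"\n\n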
Example Actions\n^^^^^^^^^^^^^^^\n\n::\n\n    jobs:\n      \"convert_logs\":\n        node: node1\n        schedule:\n          start_time: 04:00:00\n        actions:\n          \"verify_logs_present\":\n            command: \"ls /var/log/app/log_{shortdate-1}.txt\"\n          \"convert_logs\":\n            command: \"convert_logs /var/log/app/log_{shortdate-1}.txt /var/log/app_converted/log_{shortdate-1}.txt\"\n            requires: [verify_logs_present]\n\n.. _job_scheduling:\n\nScheduling\n----------\n\nTron supports several methods for configuring the schedule of a job. Schedulers\nsupport a jitter parameter that allows them to vary a run's start time by a\nrandom time delta.\n\n\nDaily\n^^^^^\n\nRun the job on specific days at a specific time. The time expression is\n``HH:MM:SS[ MTWRFSU]``.\n\nShort form::\n\n    schedule: \"daily 04:00:00\"\n\nShort form with days::\n\n    schedule: \"daily 04:00:00 MWF\"\n\nLong form::\n\n    schedule:\n        type:   \"daily\"\n        value:  \"07:00:00 MWF\"\n        jitter: \"10 min\"            # Optional\n\nCron\n^^^^\n\nSchedule a job using cron syntax.  Tron supports predefined schedules, ranges,\nand lists for each field. It supports the *L* in the day of month field only (which\nschedules the job on the last day of the month). Only one of the day fields\n(day of month and day of week) can have a value.\n\n\nShort form::\n\n    schedule: \"cron */5 * * 7,8 *\"  # Every 5 minutes in July and August\n\n::\n\n    schedule: \"cron 0 3-6 * * *\"    # Every hour between 3am and 6am\n\nLong form::\n\n    schedule:                       # long form\n        type: \"cron\"\n        value: \"30 4 L * *\"         # The last day of the month at 4:30am\n\n\nComplex\n^^^^^^^\n\nA more powerful version of the daily scheduler based on the one used by Google\nApp Engine's cron library. To use this scheduler, use a string in this format\nas the schedule::\n\n    (\"every\"|ordinal) (days) [\"of|in\" (monthspec)] ([\"at\"] HH:MM)\n\n**ordinal**\n    Comma-separated list of ``1st`` and so forth. Use ``every`` if you don't want\n    to limit by day of the month.\n\n**days**\n    Comma-separated list of days of the week (for example, ``mon``, ``tuesday``,\n    with both short and long forms being accepted); ``every day`` is equivalent\n    to ``every mon,tue,wed,thu,fri,sat,sun``\n\n**monthspec**\n    Comma-separated list of month names (for example, ``jan``, ``march``, ``sep``).\n    If omitted, implies every month. You can also say ``month`` to mean every\n    month, as in ``1,8th,15,22nd of month 09:00``.\n\n**HH:MM**\n    Time of day in 24-hour time.\n\nSome examples::\n\n    2nd,third mon,wed,thu of march 17:00\n    every monday at 09:00\n    1st monday of sep,oct,nov at 17:00\n    every day of oct at 00:00\n\nIn the config::\n\n    schedule: \"every monday at 09:00\"\n\n::\n\n    schedule:\n        type: \"groc daily\"\n        value: \"every day 11:22\"\n        jitter: \"5 min\"\n\n.. _dst_notes:\n\nNotes on Daylight Saving Time\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSome system clocks are configured to track local time and may observe daylight\nsaving time. For example, on November 6, 2011, 1 AM occurred twice.  Prior to\nversion 0.2.9, this would cause Tron to schedule a daily midnight job to be run\nan hour early on November 7, at 11 PM. 
For some jobs this doesn't matter, but\nfor jobs that depend on the availability of data for a day, it can cause a\nfailure.\n\nSimilarly, some jobs on March 14, 2011 were scheduled an hour late.\n\nTo avoid this problem, set the :ref:`time_zone` config variable. For example::\n\n    time_zone: US/Pacific\n\nIf a job is scheduled at a time that occurs twice, such as 1 AM on \"fall back\",\nit will be run on the *first* occurrence of that time.\n\nIf a job is scheduled at a time that does not exist, such as 2 AM on \"spring\nforward\", it will be run an hour later in the \"new\" time, in this case 3 AM. In\nthe \"old\" time this is 2 AM, so from the perspective of previous jobs, it runs\nat the correct time.\n\nIn general, Tron tries to schedule a job as soon as is correct, and no sooner.\nA job that is scheduled for 2:30 AM will not run at 3 AM on \"spring forward\"\nbecause that would be half an hour too soon from a pre-switch perspective (2\nAM).\n\n.. note::\n\n    If you experience unexpected scheduler behavior, `file an issue on Tron's\n    GitHub page <http://www.github.com/yelp/tron/issues/new>`_.\n\n.. _job_cleanup_actions:\n\nCleanup Actions\n---------------\n\nCleanup actions run after the job succeeds or fails. They are specified just\nlike regular actions except that there is only one per job and it has no name\nor requirements list.\n\nIf your job creates shared resources that should be destroyed after a run\nregardless of success or failure, such as intermediate files or Amazon Elastic\nMapReduce job flows, you can use cleanup actions to tear them down.\n\nThe command context variable ``cleanup_job_status`` is provided to cleanup\nactions and has a value of ``SUCCESS`` or ``FAILURE`` depending on the job's\nfinal state. For example::\n\n    -\n        # ...\n        cleanup_action:\n          command: \"python -m mrjob.tools.emr.job_flow_pool --terminate MY_POOL\"\n\n\nStates\n------\n\nThe following are the possible states for a Job and Job Run.\n\nJob States\n^^^^^^^^^^\n\n**ENABLED**\n    A run is scheduled and new runs will continue to be scheduled.\n\n**DISABLED**\n    No new runs will be scheduled, and scheduled runs will be cancelled.\n\n**RUNNING**\n    A job run is currently in progress.\n\nJob Run States\n^^^^^^^^^^^^^^\n\n**SCHE**\n    The run is scheduled for a specific time\n\n**RUNN**\n    The run is currently running\n\n**SUCC**\n    The run completed successfully\n\n**FAIL**\n    The run failed\n\n**WAITING**\n    The run has actions that are waiting for dependencies\n\n**QUE**\n    The run is queued behind one or more other runs and will start when those runs finish\n\n**CANC**\n    The run was scheduled, but later cancelled.\n\n**UNKWN**\n    The run is in an unknown state. This state could indicate a bug in Tron, or\n    an exceptional situation with the infrastructure that requires manual inspection.\n    Actions for this job may in fact still be running, but Tron cannot reach them.\n\n\nTroubleshooting\n^^^^^^^^^^^^^^^\n**My job doesn't start even though the triggers are emitted?**\n    Check that both jobs are in the same tron master. A \"tron master\" refers to a\n    cluster, like tron-norcal-devc or tron-nova-prod. Triggers don't work across\n    tron masters! You can emit the event manually using the command line or the API.\n\n**S3 consistency issues**\n    If your downstream job relies on an S3 list to process data you may see it triggered\n    before S3 had finished replicating. 
This was previously masked by log_done\n    continuously polling S3 to determine if the upstream job had finished. See STREAMINT-269 for\n    details.\n\n.. _shortdate:\n\n**What does shortdate in triggers mean?**\n    There are two concepts of shortdate here:\n\n    * shortdate in ``triggered_by``: this shortdate is technically the run_date,\n      indicating when the tron job runs.\n    * shortdate in ``command``: this shortdate is used by batch jobs to specify which S3\n      dir it is writing to or polling.\n"
  },
  {
    "path": "docs/source/man/tronctl.1",
    "content": ".TH \"TRONCTL\" \"1\" \"April 24, 2013\" \"0.6\" \"Tron\"\n.SH NAME\ntronctl \\- control Tron jobs and services\n.\n.nr rst2man-indent-level 0\n.\n.de1 rstReportMargin\n\\\\$1 \\\\n[an-margin]\nlevel \\\\n[rst2man-indent-level]\nlevel margin: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n-\n\\\\n[rst2man-indent0]\n\\\\n[rst2man-indent1]\n\\\\n[rst2man-indent2]\n..\n.de1 INDENT\n.\\\" .rstReportMargin pre:\n. RS \\\\$1\n. nr rst2man-indent\\\\n[rst2man-indent-level] \\\\n[an-margin]\n. nr rst2man-indent-level +1\n.\\\" .rstReportMargin post:\n..\n.de UNINDENT\n. RE\n.\\\" indent \\\\n[an-margin]\n.\\\" old: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.nr rst2man-indent-level -1\n.\\\" new: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.in \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]u\n..\n.\\\" Man page generated from reStructeredText.\n.\n.SH SYNOPSYS\n.sp\n\\fBtronctl [\\-\\-server <host:port>] [\\-\\-verbose] <command> <job_name | job_run_id | action_run_id | service_name>\\fP\n.SH DESCRIPTION\n.sp\n\\fBtronctl\\fP is the control interface for Tron. \\fBtronctl\\fP allows you to\nenable, disable, start, stop and cancel Tron Jobs and Services.\n.SH OPTIONS\n.INDENT 0.0\n.TP\n.B \\fB\\-\\-server=<config\\-file>\\fP\nConfig file containing the address of the server the tron instance is running on\n.TP\n.B \\fB\\-\\-verbose\\fP\nDisplays status messages along the way\n.TP\n.B \\fB\\-\\-run\\-date=<YYYY\\-MM\\-DD>\\fP\nFor starting a new job, specifies the run date that should be set. Defaults to today.\n.UNINDENT\n.SH JOB COMMANDS\n.INDENT 0.0\n.TP\n.B disable <job_name>\nDisables the job. Cancels all scheduled and queued runs. Doesn\\(aqt\nschedule any more.\n.TP\n.B enable <job_name>\nEnables the job and schedules a new run.\n.TP\n.B start <job_name>\nCreates a new run of the specified job and runs it immediately.\n.TP\n.B start <job_run_id>\nAttempt to start the given job run. A Job run only starts if no\nother instance is running. If the job has already started, it will attempt\nto start any actions in the SCH or QUE state.\n.TP\n.B start <action_run_id>\nAttempt to start the action run.\n.TP\n.B restart <job_run_id>\nCreates a new job run with the same run time as this job.\n.TP\n.B cancel <job_run_id | action_run_id>\nCancels the specified job run or action run.\n.TP\n.B success <job_run_id | action_run_id>\nMarks the specified job run or action run as succeeded.  This behaves the\nsame as the run actually completing.  Dependant actions are run and queued\nruns start.\n.TP\n.B skip <action_run_id>\nMarks the specified action run as skipped.  This allows dependant actions\nto run, but will not publish any downstream triggers.\n.TP\n.B skip-and-publish <action_run_id>\nMarks the specified action run as skipped.  This allows dependant actions\nto run and will publish downstream triggers.\n.TP\n.B fail <job_run_id | action_run_id>\nMarks the specified job run or action run as failed.  This behaves the same\nas the job actually failing.\n.TP\n.B stop <action_run_id>\nStop an action run\n.TP\n.B stop <service>\nStop (SIGTERM) a service\n.TP\n.B kill <service>\nForce stop (SIGKILL) a service\n.TP\n.B kill <action_run_id>\nForce stop (SIGKILL) an action run\n.TP\n.B move <job_name> <new_job_name>\nRename a job\n.TP\n.B publish <action_run_trigger>\nPulish a action run trigger. This command can be used to trigger an action run in waiting state.\ne.g. 
\\fBtronctl publish yelp-main.transaction_extract_report.run.shortdate.2019-10-31\\fP\n.TP\n.B discard <action_run_trigger>\nDiscard existing actionrun trigger.\ne.g. \\fBtronctl discard  yelp-main.transaction_extract_report.run.shortdate.2019-10-31\\fP\n.TP\n.B version\nPrint tron client and server versions\n\n.UNINDENT\n.SH SERVICE COMMANDS\n.INDENT 0.0\n.TP\n.B start <service name>\nStart the service.\n.TP\n.B stop <service name>\nStop the service.\n.UNINDENT\n.SH EXAMPLES\n.sp\n.nf\n.ft C\n$ tronctl start job0\nNew Job Run job0.2 created\n\n$ tronctl start job0.3\nJob Run job0.3 now in state RUNN\n\n$ tronctl cancel job0.4\nJob Run job0.4 now in state CANC\n\n$ tronctl fail job0.4\nJob Run job0.4 now in state FAIL\n\n$ tronctl restart job0.4\nJob Run job0.4 now in state RUNN\n\n$ tronctl success job0.5\nJob Run job0.5 now in state SUCC\n.ft P\n.fi\n.SH BUGS\n.sp\nPost bugs to \\fI\\%http://www.github.com/yelp/tron/issues\\fP.\n.SH SEE ALSO\n.sp\n\\fBtrond\\fP (8), \\fBtronfig\\fP (1), \\fBtronview\\fP (1),\n.SH AUTHOR\nYelp, Inc.\n.SH COPYRIGHT\n2011, Yelp, Inc.\n.\\\" Generated by docutils manpage writer.\n.\\\"\n.\n"
  },
  {
    "path": "docs/source/man/trond.8",
    "content": ".TH \"TROND\" \"8\" \"April 24, 2013\" \"0.6\" \"Tron\"\n.SH NAME\ntrond \\- trond documentation\n.\n.nr rst2man-indent-level 0\n.\n.de1 rstReportMargin\n\\\\$1 \\\\n[an-margin]\nlevel \\\\n[rst2man-indent-level]\nlevel margin: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n-\n\\\\n[rst2man-indent0]\n\\\\n[rst2man-indent1]\n\\\\n[rst2man-indent2]\n..\n.de1 INDENT\n.\\\" .rstReportMargin pre:\n. RS \\\\$1\n. nr rst2man-indent\\\\n[rst2man-indent-level] \\\\n[an-margin]\n. nr rst2man-indent-level +1\n.\\\" .rstReportMargin post:\n..\n.de UNINDENT\n. RE\n.\\\" indent \\\\n[an-margin]\n.\\\" old: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.nr rst2man-indent-level -1\n.\\\" new: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.in \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]u\n..\n.\\\" Man page generated from reStructeredText.\n.\n.SH SYNOPSYS\n.sp\n\\fBtrond [\\-\\-working\\-dir=<working dir>] [\\-\\-verbose] [\\-\\-debug]\\fP\n.SH DESCRIPTION\n.sp\n\\fBtrond\\fP is the tron daemon that manages all jobs and services.\n.SH OPTIONS\n.INDENT 0.0\n.TP\n.B \\fB\\-\\-version\\fP\nshow program\\(aqs version number and exit\n.TP\n.B \\fB\\-h, \\-\\-help\\fP\nshow this help message and exit\n.TP\n.B \\fB\\-\\-working\\-dir=WORKING_DIR\\fP\nDirectory where tron\\(aqs state and output is stored (default /var/lib/tron/)\n.TP\n.B \\fB\\-l LOG_CONF, \\-\\-log\\-conf=LOG_CONF\\fP\nLogging configuration file to setup python logger\n.TP\n.B \\fB\\-c CONFIG_FILE, \\-\\-config\\-file=CONFIG_FILE\\fP\nConfiguration file to load (default in working dir)\n.TP\n.B \\fB\\-v, \\-\\-verbose\\fP\nVerbose logging\n.TP\n.B \\fB\\-\\-debug\\fP\nDebug mode, extra error reporting, no daemonizing\n.TP\n.B \\fB\\-\\-nodaemon\\fP\n(DEPRECATED in 0.9.4) Indicates we should not fork and daemonize the process (default False)\n.TP\n.B \\fB\\-\\-lock\\-file=LOCKFILE\\fP\nWhere to store the lock file of the executing process (default /var/run/tron.lock)\n.TP\n.B \\fB\\-P LISTEN_PORT, \\-\\-port=LISTEN_PORT\\fP\nWhat port to listen on, defaults 8089\n.TP\n.B \\fB\\-H LISTEN_HOST, \\-\\-host=LISTEN_HOST\\fP\nWhat host to listen on defaults to localhost\n.UNINDENT\n.SH FILES\n.INDENT 0.0\n.TP\n.B Working directory\nThe directory where state and saved output of processes are stored.\n.TP\n.B Lock file\nEnsures only one daemon runs at a time.\n.TP\n.B Log File\ntrond error log, configured from logging.conf\n.UNINDENT\n.SH SIGNALS\n.INDENT 0.0\n.TP\n.B \\fISIGINT\\fP\nGraceful shutdown. Waits for running jobs to complete.\n.TP\n.B \\fISIGTERM\\fP\nDoes some cleanup before shutting down.\n.TP\n.B \\fISIGHUP\\fP\nReload the configuration file.\n.TP\n.B \\fISIGUSR1\\fP\nWill drop into an ipdb debugging prompt.\n.UNINDENT\n.SH LOGGING\n.sp\nTron uses Python\\(aqs standard logging and by default uses a rotating log file\nhandler that rotates files each day. Logs go to \\fB/var/log/tron/tron.log\\fP.\n.sp\nTo configure logging pass \\-l <logging.conf> to trond. You can modify the\ndefault logging.conf by coping it from tron/logging.conf. See\n\\fI\\%http://docs.python.org/howto/logging.html#configuring-logging\\fP\n.SH BUGS\n.sp\ntrond has issues around daylight savings time and may run jobs an hour early\nat the boundary.\n.sp\nPost further bugs to \\fI\\%http://www.github.com/yelp/tron/issues\\fP.\n.SH SEE ALSO\n.sp\n\\fBtronctl\\fP (1), \\fBtronfig\\fP (1), \\fBtronview\\fP (1),\n.SH AUTHOR\nYelp, Inc.\n.SH COPYRIGHT\n2011, Yelp, Inc.\n.\\\" Generated by docutils manpage writer.\n.\\\"\n.\n"
  },
  {
    "path": "docs/source/man/tronfig.1",
    "content": ".TH \"TRONFIG\" \"1\" \"April 24, 2013\" \"0.6\" \"Tron\"\n.SH NAME\ntronfig \\- tronfig documentation\n.\n.nr rst2man-indent-level 0\n.\n.de1 rstReportMargin\n\\\\$1 \\\\n[an-margin]\nlevel \\\\n[rst2man-indent-level]\nlevel margin: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n-\n\\\\n[rst2man-indent0]\n\\\\n[rst2man-indent1]\n\\\\n[rst2man-indent2]\n..\n.de1 INDENT\n.\\\" .rstReportMargin pre:\n. RS \\\\$1\n. nr rst2man-indent\\\\n[rst2man-indent-level] \\\\n[an-margin]\n. nr rst2man-indent-level +1\n.\\\" .rstReportMargin post:\n..\n.de UNINDENT\n. RE\n.\\\" indent \\\\n[an-margin]\n.\\\" old: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.nr rst2man-indent-level -1\n.\\\" new: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.in \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]u\n..\n.\\\" Man page generated from reStructeredText.\n.\n.SH SYNOPSYS\n.sp\n\\fBtronfig [\\-\\-server server_name ] [\\-\\-verbose | \\-v] [<namespace>] [\\-p] [\\-]\\fP\n.SH DESCRIPTION\n.sp\n\\fBtronfig\\fP allows live editing of the Tron configuration.  It retrieves\nthe configuration file for local editing, verifies the configuration,\nand sends it back to the tron server. The configuration is applied\nimmediately.\n.SH OPTIONS\n.INDENT 0.0\n.TP\n.B \\fB\\-\\-server <server_name>\\fP\nThe server the tron instance is running on\n.TP\n.B \\fB\\-\\-verbose\\fP\nDisplays status messages along the way\n.TP\n.B \\fB\\-\\-version\\fP\nDisplays version string\n.TP\n.B \\fB\\-p\\fP\nPrint the configuration\n.TP\n.B \\fBnamespace\\fP\nThe configuration namespace to edit. Defaults to MASTER\n.TP\n.B \\fB\\-\\fP\nRead new config from \\fBstdin\\fP.\n.UNINDENT\n.SH CONFIGURATION\n.sp\nBy default tron will run with a blank configuration file. The config file is\nsaved to \\fB<working_dir>/config/\\fP by default. See the full documentation at\n\\fI\\%http://tron.readthedocs.io/en/latest/config.html\\fP.\n.SH BUGS\n.sp\nPost bugs to \\fI\\%http://www.github.com/yelp/tron/issues\\fP.\n.SH SEE ALSO\n.sp\n\\fBtrond\\fP (8), \\fBtronctl\\fP (1), \\fBtronview\\fP (1),\n.SH AUTHOR\nYelp, Inc.\n.SH COPYRIGHT\n2011, Yelp, Inc.\n.\\\" Generated by docutils manpage writer.\n.\\\"\n.\n"
  },
  {
    "path": "docs/source/man/tronview.1",
    "content": ".TH \"TRONVIEW\" \"1\" \"April 24, 2013\" \"0.6\" \"Tron\"\n.SH NAME\ntronview \\- tronview documentation\n.\n.nr rst2man-indent-level 0\n.\n.de1 rstReportMargin\n\\\\$1 \\\\n[an-margin]\nlevel \\\\n[rst2man-indent-level]\nlevel margin: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n-\n\\\\n[rst2man-indent0]\n\\\\n[rst2man-indent1]\n\\\\n[rst2man-indent2]\n..\n.de1 INDENT\n.\\\" .rstReportMargin pre:\n. RS \\\\$1\n. nr rst2man-indent\\\\n[rst2man-indent-level] \\\\n[an-margin]\n. nr rst2man-indent-level +1\n.\\\" .rstReportMargin post:\n..\n.de UNINDENT\n. RE\n.\\\" indent \\\\n[an-margin]\n.\\\" old: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.nr rst2man-indent-level -1\n.\\\" new: \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]\n.in \\\\n[rst2man-indent\\\\n[rst2man-indent-level]]u\n..\n.\\\" Man page generated from reStructeredText.\n.\n.SH SYNOPSYS\n.sp\n\\fBtronview [\\-n <numshown>] [\\-\\-server <server_name>] [\\-\\-verbose] [<job_name> | <job_run_id> | <action_run_id>]\\fP\n.SH DESCRIPTION\n.sp\n\\fBtronview\\fP displays the status of tron scheduled jobs and services.\n.INDENT 0.0\n.TP\n.B tronview\nShow all configured jobs and services\n.TP\n.B tronview <job_name|service_name>\nShows details for a job or service. Ex:\n.sp\n.nf\n.ft C\n$ tronview my_job\n.ft P\n.fi\n.TP\n.B tronview <job_run_id|service_instance_id>\nShow details for specific run or instance. Ex:\n.sp\n.nf\n.ft C\n$ tronview my_job.0\n.ft P\n.fi\n.TP\n.B tronview <action_run_id>\nShow details for specific action run. Ex:\n.sp\n.nf\n.ft C\n$ tronview my_job.0.my_action\n.ft P\n.fi\n.UNINDENT\n.SH OPTIONS\n.INDENT 0.0\n.TP\n.B \\fB\\-\\-version\\fP\nshow program\\(aqs version number and exit\n.TP\n.B \\fB\\-h, \\-\\-help\\fP\nshow this help message and exit\n.TP\n.B \\fB\\-v, \\-\\-verbose\\fP\nVerbose logging\n.TP\n.B \\fB\\-n NUM_DISPLAYS, \\-\\-numshown=NUM_DISPLAYS\\fP\nThe maximum number of job runs or lines of output to display(0 for show\nall).  Does not affect the display of all jobs and the display of actions\nfor given job.\n.TP\n.B \\fB\\-\\-server=SERVER\\fP\nServer URL to connect to\n.TP\n.B \\fB\\-c, \\-\\-color\\fP\nDisplay in color\n.TP\n.B \\fB\\-\\-nocolor\\fP\nDisplay without color\n.TP\n.B \\fB\\-o, \\-\\-stdout\\fP\nSolely displays stdout\n.TP\n.B \\fB\\-e, \\-\\-stderr\\fP\nSolely displays stderr\n.TP\n.B \\fB\\-E\\fP\nlist all emitted triggers\n.TP\n.B \\fB\\-s, \\-\\-save\\fP\nSave server and color options to client config file (~/.tron)\n.UNINDENT\n.SH STATES\n.sp\nFor complete list of states with a diagram of valid transitions see\n\\fI\\%http://packages.python.org/tron/jobs.html#states\\fP and\n\\fI\\%http://packages.python.org/tron/services.html#states\\fP\n.SH BUGS\n.sp\nPost bugs to \\fI\\%http://www.github.com/yelp/tron/issues\\fP.\n.SH SEE ALSO\n.sp\n\\fBtrond\\fP (8), \\fBtronctl\\fP (1), \\fBtronfig\\fP (1),\n.SH AUTHOR\nYelp, Inc.\n.SH COPYRIGHT\n2011, Yelp, Inc.\n.\\\" Generated by docutils manpage writer.\n.\\\"\n.\n"
  },
  {
    "path": "docs/source/man_tronctl.rst",
    "content": ".. _tronctl:\n\ntronctl\n=======\n\nSynopsis\n--------\n\n``tronctl [--server <host:port>] [--verbose] <command> <job_name | job_run_id | action_run_id>``\n\nDescription\n-----------\n\n**tronctl** is the control interface for Tron. :command:`tronctl` allows you to\nenable, disable, start, stop and cancel Tron Jobs and Services.\n\nOptions\n-------\n\n``--server=<config-file>``\n    Config file containing the address of the server the tron instance is running on\n\n``--verbose``\n        Displays status messages along the way\n\n``--run-date=<YYYY-MM-DD>``\n        For starting a new job, specifies the run date that should be set. Defaults to today.\n\n``--start-date=<YYYY-MM-DD>``\n        For backfills, specifies the starting date of the first job of the backfill.\n        Note that many jobs operate on the previous day's data.\n\n``--end-date=<YYYY-MM-DD>``\n        For backfills, specifies the final date of the backfill. Defaults to today.\n        Note that many jobs operate on the previous day's data.\n\nJob Commands\n------------\n\ndisable <job_name>\n    Disables the job. Cancels all scheduled and queued runs. Doesn't\n    schedule any more.\n\nenable <job_name>\n    Enables the job and schedules a new run.\n\nstart <job_name>\n    Creates a new run of the specified job and runs it immediately.\n    Tron will use the latest version of the code and tron config available for the new run id.\n\nstart <job_run_id>\n    Attempt to start the given job run. A Job run only starts if no\n    other instance is running. If the job has already started, it will attempt\n    to start any actions in the SCH or QUE state.\n    Tron will use the latest version of the code and tron config available for the new run id.\n\nstart <action_run_id>\n    Attempt to start the action run.\n    Tron will use the latest version of the code and tron config available for the new run id.\n\nrestart <job_run_id>\n    Creates a new job run with the same run time as this job.\n    Tron will use the latest version of the code and tron config available for the new run id.\n\nretry <action_run_id>\n    Re-run an action within an existing job run.\n    Will use the exact same code version and tron config as the previous run.\n\nrerun <job_run_id>\n    Creates a new job run with the same run time as this job (same as restart).\n    Tron will use the latest version of the code and tron config available for the new run id.\n\nbackfill <job_id>\n    Creates a series of tronctl start commands for a sequence of dates.\n    --start-date must be provided for a backfill.\n\ncancel <job_run_id | action_run_id>\n    Cancels the specified job run or action run.\n\nsuccess <job_run_id | action_run_id>\n    Marks the specified job run or action run as succeeded.  This behaves the\n    same as the run actually completing.  Dependent actions are run and queued\n    runs start.\n\nskip <action_run_id>\n    Marks the specified action run as skipped.  This allows dependent actions\n    to run, but will not publish any downstream triggers.\n\nskip-and-publish <action_run_id>\n    Marks the specified action run as skipped.  This allows dependent actions\n    to run and will publish downstream triggers.\n\nfail <job_run_id | action_run_id>\n    Marks the specified job run or action run as failed.  
This behaves the same\n    as the job actually failing.\n\nstop <action_run_id>\n    Stop an action run\n\nkill <action_run_id>\n    Force stop (SIGKILL) an action run\n\n\nExamples\n--------\n\n::\n\n    $ tronctl start job0\n    New Job Run job0.2 created\n\n    $ tronctl start job0.3\n    Job Run job0.3 now in state RUNN\n\n    $ tronctl cancel job0.4\n    Job Run job0.4 now in state CANC\n\n    $ tronctl fail job0.4\n    Job Run job0.4 now in state FAIL\n\n    $ tronctl restart job0.4\n    Job Run job0.4 now in state RUNN\n\n    $ tronctl success job0.5\n    Job Run job0.5 now in state SUCC\n\n    $ tronctl retry MASTER.job.5.action1\n    Retrying ActionRun: MASTER.job.5.action1\n\nBugs\n----\n\nPost bugs to http://www.github.com/yelp/tron/issues.\n\nSee Also\n--------\n\n**trond** (8), **tronfig** (1), **tronview** (1),\n"
  },
  {
    "path": "docs/source/man_trond.rst",
    "content": ".. _trond:\n\ntrond\n=====\n\nSynopsis\n--------\n\n``trond [--working-dir=<working dir>] [--verbose] [--debug]``\n\nDescription\n-----------\n\n**trond** is the tron daemon that manages all jobs.\n\nOptions\n-------\n\n``--version``\n    show program's version number and exit\n\n``-h, --help``\n    show this help message and exit\n\n``--working-dir=WORKING_DIR``\n    Directory where tron's state and output is stored (default /var/lib/tron/)\n\n``-l LOG_CONF, --log-conf=LOG_CONF``\n    Logging configuration file to setup python logger\n\n``-c CONFIG_FILE, --config-file=CONFIG_FILE``\n    Configuration file to load (default in working dir)\n\n``-v, --verbose``\n    Verbose logging\n\n``--debug``\n    Debug mode, extra error reporting, no daemonizing\n\n``--nodaemon``\n    [DEPRECATED in 0.9.4] Indicates we should not fork and daemonize the process (default False)\n\n``--lock-file=LOCKFILE``\n    Where to store the lock file of the executing process (default /var/run/tron.lock)\n\n``-P LISTEN_PORT, --port=LISTEN_PORT``\n    What port to listen on, defaults 8089\n\n``-H LISTEN_HOST, --host=LISTEN_HOST``\n    What host to listen on defaults to localhost\n\nFiles\n-----\n\nWorking directory\n    The directory where state and saved output of processes are stored.\n\nLock file\n    Ensures only one daemon runs at a time.\n\nLog File\n    trond error log, configured from logging.conf\n\n\nSignals\n-------\n\n`SIGINT`\n    Graceful shutdown. Waits for running jobs to complete.\n\n`SIGTERM`\n    Does some cleanup before shutting down.\n\n`SIGHUP`\n    Reload the configuration file.\n\n`SIGUSR1`\n    Will drop into an ipdb debugging prompt.\n\nLogging\n-------\n\nTron uses Python's standard logging and by default uses a rotating log file\nhandler that rotates files each day. Logs go to ``/var/log/tron/tron.log``.\n\nTo configure logging pass -l <logging.conf> to trond. You can modify the\ndefault logging.conf by coping it from tron/logging.conf. See\nhttp://docs.python.org/howto/logging.html#configuring-logging\n\n\nBugs\n----\n\ntrond has issues around daylight savings time and may run jobs an hour early\nat the boundary.\n\nPost further bugs to http://www.github.com/yelp/tron/issues.\n\nSee Also\n--------\n\n**tronctl** (1), **tronfig** (1), **tronview** (1),\n"
  },
  {
    "path": "docs/source/man_tronfig.rst",
    "content": ".. _tronfig:\n\ntronfig\n=======\n\nSynopsis\n--------\n\n``tronfig [--server server_name ] [--verbose | -v] [<namespace>] [-p] [-]``\n\nDescription\n-----------\n\n**tronfig** allows live editing of the Tron configuration.  It retrieves\nthe configuration file for local editing, verifies the configuration,\nand sends it back to the tron server. The configuration is applied\nimmediately.\n\nOptions\n-------\n\n``--server <server_name>``\n    The server the tron instance is running on\n\n``--verbose``\n    Displays status messages along the way\n\n``--version``\n    Displays version string\n\n``-p``\n    Print the configuration\n\n``namespace``\n    The configuration namespace to edit. Defaults to MASTER\n\n``-``\n    Read new config from ``stdin``.\n\nConfiguration\n-------------\n\nBy default tron will run with a blank configuration file. The config file is\nsaved to ``<working_dir>/config/`` by default. See the full documentation at\nhttp://tron.readthedocs.io/en/latest/config.html.\n\n\nBugs\n----\n\nPost bugs to http://www.github.com/yelp/tron/issues.\n\nSee Also\n--------\n\n**trond** (8), **tronctl** (1), **tronview** (1),\n"
  },
  {
    "path": "docs/source/man_tronview.rst",
    "content": ".. _tronview:\n\ntronview\n========\n\nSynopsis\n--------\n\n``tronview [-n <numshown>] [--server <server_name>] [--verbose] [<job_name> | <job_run_id> | <action_run_id>]``\n\nDescription\n-----------\n\n**tronview** displays the status of tron scheduled jobs.\n\ntronview\n    Show all configured jobs\n\ntronview <job_name>\n    Shows details for a job. Ex::\n\n    $ tronview my_job\n\ntronview <job_run_id>\n    Show details for specific run or instance. Ex::\n\n    $ tronview my_job.0\n\ntronview <action_run_id>\n    Show details for specific action run. Ex::\n\n    $ tronview my_job.0.my_action\n\nOptions\n-------\n\n``--version``\n    show program's version number and exit\n\n``-h, --help``\n    show this help message and exit\n\n``-v, --verbose``\n    Verbose logging\n\n``-n NUM_DISPLAYS, --numshown=NUM_DISPLAYS``\n    The maximum number of job runs or lines of output to display(0 for show\n    all).  Does not affect the display of all jobs and the display of actions\n    for given job.\n\n``--server=SERVER``\n    Server URL to connect to\n\n``-c, --color``\n    Display in color\n\n``--nocolor``\n    Display without color\n\n``-o, --stdout``\n    Solely displays stdout\n\n``-e, --stderr``\n    Solely displays stderr\n\n``-s, --save``\n    Save server and color options to client config file (~/.tron)\n\n\nStates\n----------\nFor complete list of states with a diagram of valid transitions see\nhttp://packages.python.org/tron/jobs.html#states\n\n\nBugs\n----\n\nPost bugs to http://www.github.com/yelp/tron/issues.\n\nSee Also\n--------\n\n**trond** (8), **tronctl** (1), **tronfig** (1),\n"
  },
  {
    "path": "docs/source/overview.rst",
    "content": "Overview\n========\n\nBatch process scheduling on a single UNIX machines has\nhistorically been managed by :command:`cron` and its derivatives. But if you\nhave many batches, complex dependencies between batches, or many machines,\nmaintaining config files across them may be difficult. Tron solves this\nproblem by centralizing the configuration and scheduling of jobs to a single daemon.\n\nThe Tron system is split into four commands:\n\n:ref:`trond`\n    Daemon responsible for scheduling, running, and saving state. Provides an\n    HTTP interface to tools.\n\n:ref:`tronview`\n    View job state and output.\n\n:ref:`tronctl`\n    Start, stop, enable, disable, and otherwise control jobs.\n\n:ref:`tronfig`\n    Change Tron's configuration while the daemon is still running.\n\nThe config file uses YAML syntax, and is further described in :doc:`config`.\n\nNodes, Jobs and Actions\n-----------------------\n\nTron's orders consist of *jobs*. :doc:`Jobs <jobs>` contain\n:ref:`actions <job_actions>` which may depend on other actions in the same job\nand run on a schedule.\n\n:command:`trond` is given access (via public key SSH) to one or more *nodes* on\nwhich to run jobs.  For example, this configuration has two nodes,\neach of which is responsible for a single job::\n\n    nodes:\n        hostname: 'localhost'\n      - name: node1\n        hostname: 'batch1'\n      - name: node2\n        hostname: 'batch2'\n\n    jobs:\n      \"job0\":\n        node: node1\n        schedule: \"cron * * * * *\"\n        actions:\n          \"batch1action\":\n            command: \"sleep 3; echo asdfasdf\"\n      \"job1\":\n        node: node2\n        schedule: \"cron * * * * *\"\n        actions:\n          \"batch2action\":\n            command: \"cat big.txt; sleep 10\"\n\n\nHow the nodes are set up and assigned to jobs is entirely up to you. They may\nhave different operating systems, access to different databases, different\nprivileges for the Tron user, etc.\n\nSee also:\n\n* :doc:`jobs`\n* :doc:`config`\n\n.. _overview_pools:\n\nNode Pools\n----------\n\nNodes can be grouped into *pools*. To continue the previous example::\n\n    node_pools:\n        - name:pool\n          nodes: [node1, node2]\n\n    jobs:\n      # ...\n      \"job2\":\n        node: pool\n        schedule: \"cron * * * * *\"\n        actions:\n          \"pool_action\":\n            command: \"ls /; sleep 1\"\n        cleanup_action:\n          command: \"echo 'all done'\"\n\n``job2``'s action will be run on a random node from ``pool`` every 5 seconds.\nWhen ``pool_action`` is complete, ``cleanup_action`` will run on the same node.\n\nFor more information, see :doc:`jobs`.\n\nCaveats\n-------\n\nWhile Tron solves many scheduling-related problems, there are a few things to\nwatch out for.\n\n**Tron keeps an SSH connection open for the entire lifespan of a process.**\nThis means that to upgrade :command:`trond`, you have to either wait until no\njobs are running, or accept an inconsistent state. This limitation is being\nworked on, and should be improved in later releases.\n\n**Tron is under active development.** This means that some things will change.\nWhenever possible these changes will be backwards compatible, but in some\ncases there may be non-backwards compatible changes.\n\n**Tron does not support unicode.** Tron is built using `twisted <http://twistedmatrix.com/>`_\nwhich does not support unicode.\n"
  },
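  {
    "path": "docs/source/node_pools_example.yaml",
    "content": "# Illustrative sketch: a standalone config assembling the node pool example\n# described in overview.rst. The node names, hostnames, and job below are\n# placeholders; adjust them for your own cluster.\nnodes:\n  - name: node1\n    hostname: 'batch1'\n  - name: node2\n    hostname: 'batch2'\n\nnode_pools:\n  - name: pool\n    nodes: [node1, node2]\n\njobs:\n  \"job2\":\n    # runs on one node picked from the pool each minute\n    node: pool\n    schedule: \"cron * * * * *\"\n    actions:\n      \"pool_action\":\n        command: \"ls /; sleep 1\"\n    # cleanup_action runs on the same node once pool_action finishes\n    cleanup_action:\n      command: \"echo 'all done'\"\n"
  },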
  {
    "path": "docs/source/sample_config.yaml",
    "content": "# optional and settable from the command line\nworking_dir: './working'\n\n# optional\nssh_options:\n  agent: true # default False\n  identities: # default []\n    - \"/home/batch/.ssh/id_dsa-nopasswd\"\n\ncommand_context:\n  PYTHON: /usr/bin/python\n  TMPDIR: /tmp\n\n# required\nnodes:\n  - name: node1\n    hostname: 'batch1'\n    username: 'tronuser'\n  - name: node2\n    hostname: 'batch2'\n    username: 'tronuser'\n\nnode_pools:\n  - name: pool\n    nodes: [node1, node2]\n\njobs:\n  \"job0\":\n    node: pool\n    all_nodes: True\n    schedule:\n        start_time: 04:00:00\n    queueing: False\n    actions:\n      \"verify_logs_present\":\n        command: >\n            ls /var/log/app/log_{shortdate-1}.txt\n      \"convert_logs\":\n        command: >\n            convert_logs /var/log/app/log_{shortdate-1}.txt \\\n                /var/log/app_converted/log_{shortdate-1}.txt\n        requires: [verify_logs_present]\n    # this will run when the job succeeds or fails\n    cleanup_action:\n        command: \"rm /{TMPDIR}/random_temp_file\"\n\n  \"job1\":\n    node: node\n    schedule: \"every monday at 09:00\"\n    queueing: False\n    actions:\n      \"actionAlone\":\n        command: \"cat big.txt; sleep 10\"\n"
  },
  {
    "path": "docs/source/tools.rst",
    "content": "Man Pages\n=========\n\n.. toctree::\n    :maxdepth: 2\n\n    man_tronctl.rst\n    man_trond.rst\n    man_tronfig.rst\n    man_tronview.rst\n"
  },
  {
    "path": "docs/source/tron.yaml",
    "content": "ssh_options:\n  agent: true\n\nnodes:\n  - name: node0\n    hostname: 'localhost'\n\njobs:\n  \"uptime_job\":\n    node: node0\n    schedule: \"cron */10 * * * *\"\n    actions:\n      \"uptimer\":\n        command: \"uptime\"\n"
  },
  {
    "path": "docs/source/tronweb.rst",
    "content": ".. _tronweb:\n\ntronweb\n========\n\ntronweb is the web-based UI for tron.\n\nSee http://localhost:8089/web/\n"
  },
  {
    "path": "docs/source/tutorial.rst",
    "content": "Tutorial\n========\n\nTo install Tron you will need:\n\n* A copy of the most recent Tron release from either\n  `github <http://github.com/yelp/Tron>`_ or `pypi <http://pypi.python.org/pypi/tron>`_\n  (see :ref:`installing_tron`).\n* A server on which to run :command:`trond`.\n* One or more batch boxes which will run the Jobs.\n* An SSH key and a user that will allow the tron daemon to login to all of the\n  batch machines without a password prompt.\n\n.. _installing_tron:\n\nInstalling Tron\n---------------\n\nThe easiest way to install Tron is from PyPI::\n\n    $ sudo pip install tron\n\nYou can also get a copy of the current development release from\n`github <http://github.com/yelp/Tron>`_. See `setup.py` in the source package\nfor a full list of required packages.\n\nIf you are interested in working on Tron development see :ref:`developing`\nfor additional requirements and setting up a dev environment.\n\n\nRunning Tron\n-------------\n\nTron runs as a single daemon, :command:`trond`.\n\nOn your management node, run::\n\n    $ sudo -u <tron user> trond\n\nThe chosen user will need SSH access to all your worker nodes, as well as\npermission to write to the working directory, log file, and lock file\n(see ``trond --help`` for defaults).  You can change these directories using\ncommand line options. Also see :ref:`config_logging` on how to change the\ndefault logging settings.\n\n\nOnce :command:`trond` is running, you can view its status using :command:`tronview`\n(by default tronview will connect to localhost, use ``--server=<host>:<port> -s``\nto specify a different server, and have that setting saved in ``~/.tron``)::\n\n    $ tronview\n\n    Jobs:\n    No jobs\n\nConfiguring Tron\n----------------\n\nThere are a few options on how to configure tron, but the most straightforward\nis through tronfig::\n\n    $ tronfig\n\nThis will open your configured :envvar:`$EDITOR` with the current configuration\nfile. Edit your file to be something like this::\n\n    ssh_options:\n      agent: true\n\n    nodes:\n      - name: local\n        hostname: 'localhost'\n\n    jobs:\n      \"getting_node_info\":\n        node: local\n        schedule: \"cron */10 * * * *\"\n        actions:\n          \"uname\":\n            command: \"uname -a\"\n          \"cpu_info\":\n            command: \"cat /proc/cpuinfo\"\n            requires: [uname]\n\nAfter you exit your editor, the configuration will be validated and uploaded to `trond`.\n\nNow if you run :command:`tronview` again, you'll see ``getting_node_info`` as a\nconfigured job. Note that it is configured to run 10 minutes from now. This\nshould give you time to examine the job to ensure you really want to run it.\n\n::\n\n    Jobs:\n    Name              State      Scheduler            Last Success\n    getting_node_info ENABLED    INTERVAL:0:10:00     None\n\nYou can quickly disable a job by using :command:`tronctl`::\n\n    $ tronctl disable getting_node_info\n    Job getting_node_info is disabled\n\nThis will stop scheduled jobs and prevent anymore from being scheduled. You are\nnow in manual control. 
To manually execute a job immediately, do this::\n\n    $ tronctl start getting_node_info\n    New job getting_node_info.1 created\n\nYou can monitor this job run by using :command:`tronview`::\n\n    $ tronview getting_node_info.1\n    Job Run: getting_node_info.1\n    State: SUCC\n    Node: localhost\n\n    Action ID & Command  State  Start Time           End Time             Duration\n    .uname               SUCC   2011-02-28 16:57:48  2011-02-28 16:57:48  0:00:00\n    .cpu_info            SUCC   2011-02-28 16:57:48  2011-02-28 16:57:48  0:00:00\n\n    $ tronview getting_node_info.1.uname\n    Action Run: getting_node_info.1.uname\n    State: SUCC\n    Node: localhost\n\n    uname -a\n\n    Requirements:\n\n    Stdout:\n    Linux dev05 2.6.24-24-server #1 SMP Wed Apr 15 15:41:09 UTC 2009 x86_64 GNU/Linux\n    Stderr:\n\nTron also provides a simple, optional web UI that can be used to get tronview data in a browser. See :doc:`tronweb` for setup\ninstructions.\n\nThat's it for the basics. You might want to look at :doc:`overview` for a more\ncomprehensive description of how Tron works.\n"
  },
  {
    "path": "docs/source/whats-new.rst",
    "content": "What's New\n==========\n\nSee the `CHANGELOG <https://github.com/Yelp/Tron/releases/latest>`_.\n"
  },
  {
    "path": "itest.sh",
    "content": "#!/bin/bash\n\nset -euxo pipefail\n\nexport DEBIAN_FRONTEND=noninteractive\n\napt-get update\napt-get install -y software-properties-common gdebi-core curl\nadd-apt-repository -y ppa:deadsnakes/ppa\napt-get update\n\ngdebi --non-interactive /work/dist/*.deb\n\n# TODO: change default MASTER config to not require ssh agent\napt-get install -y ssh\nservice ssh start\neval $(ssh-agent)\n\ntrond --help\ntronfig --help\n\n\n/opt/venvs/tron/bin/python - <<EOF\nfrom yaml import CSafeLoader\nfrom yaml import CSafeDumper\nEOF\n\nexport TRON_WORKDIR=/nail/tron\nmkdir -p $TRON_WORKDIR\nexport TRON_START_TIME=$(date +%s)\n\ntrond --working-dir=$TRON_WORKDIR &\nTRON_PID=$!\n\nfor i in {1..5}; do\n    if curl localhost:8089/api/status 2>/dev/null; then\n        break\n    fi\n    if [ \"$i\" == \"5\" ]; then\n        echo \"Failed to start\"\n        kill -9 $TRON_PID\n        exit 1\n    fi\n    sleep 1\ndone\nkill -0 $TRON_PID\n\ncurl localhost:8089/api/status | grep -qi alive\n\ntronfig -p MASTER\ntronfig -n MASTER /work/testfiles/MASTER.yaml\ntronfig /work/testfiles/MASTER.yaml\ncat /work/testfiles/MASTER.yaml | tronfig -n MASTER -\n\nif test -L /opt/venvs/tron/lib/python3.10/encodings/punycode.py; then\n    echo \"Whoa, the tron package shouldn't have an encoding symlink!\"\n    echo \"Check out https://github.com/spotify/dh-virtualenv/issues/272\"\n    exit 1\nfi\n\nkill -SIGTERM $TRON_PID\nwait $TRON_PID || true\n"
  },
  {
    "path": "mypy.ini",
    "content": "[mypy]\npython_version = 3.10\n# TODO: we'd like to be as strict as we are internally, but we need to fully type Tron first\n# disallow_any_generics = true\ndisallow_incomplete_defs = True\n# disallow_untyped_calls = true\ndisallow_untyped_decorators = True\n# disallow_untyped_defs = true\n\nshow_column_numbers = True\nshow_error_codes = True\nshow_error_context = True\n\nwarn_incomplete_stub = True\nwarn_redundant_casts = True\nwarn_return_any = True\nwarn_unreachable = True\nwarn_unused_ignores = True\n\nexclude = .tox/\n\n[mypy-clusterman_metrics.*]\nignore_missing_imports = True\n\n[mypy-twisted.internet.*]\nignore_missing_imports = True\n"
  },
  {
    "path": "osx-bdb.sh",
    "content": "#!/bin/bash\n\nexport BERKELEYDB_DIR=$(brew --prefix berkeley-db)\nexport YES_I_HAVE_THE_RIGHT_TO_USE_THIS_BERKELEY_DB_VERSION=1\n"
  },
  {
    "path": "package.json",
    "content": "{\n    \"private\": true,\n    \"homepage\": \"./\",\n    \"dependencies\": {},\n    \"browserslist\": {\n        \"production\": [\n            \">0.2%\",\n            \"not dead\",\n            \"not op_mini all\"\n        ],\n        \"development\": [\n            \"last 1 chrome version\",\n            \"last 1 firefox version\",\n            \"last 1 safari version\"\n        ]\n    },\n    \"devDependencies\": {\n        \"eslint\": \"^6.6.0\",\n        \"eslint-config-airbnb\": \"^18.2.0\",\n        \"eslint-plugin-import\": \"^2.22.0\",\n        \"eslint-plugin-jsx-a11y\": \"^6.3.1\",\n        \"eslint-plugin-react\": \"^7.20.6\",\n        \"eslint-plugin-react-hooks\": \"^4.1.0\"\n    },\n    \"resolutions\": {\n        \"axe-core\": \"4.7.0\"\n    },\n    \"engines\": {\n        \"node-version-shim\": \"10.x\",\n        \"node\": \">=10\"\n    }\n}\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[tool.black]\nline-length = 120\ntarget_version = ['py310']\n"
  },
  {
    "path": "requirements-dev-minimal.txt",
    "content": "asynctest\nbotocore-stubs\ndebugpy\nflake8\nmoto\nmypy\npre-commit\npytest\npytest-asyncio\nrequirements-tools\ntypes-boto3\ntypes-cachetools\ntypes-psutil\ntypes-pytz\ntypes-PyYAML\ntypes-requests<2.31.0.7 # newer types-requests requires urllib3>=2\ntypes-setuptools\n"
  },
  {
    "path": "requirements-dev.txt",
    "content": "asynctest==0.12.0\nboto3-stubs==1.35.63\nbotocore-stubs==1.38.19\ncfgv==2.0.1\ndebugpy==1.8.1\ndistlib==0.3.6\nfilelock==3.4.1\nflake8==5.0.4\nidentify==2.5.5\niniconfig==1.1.1\nmccabe==0.7.0\nmoto==4.1.0\nmypy==1.9.0\nmypy-extensions==1.0.0\nnodeenv==1.8.0\npackaging==19.2\nplatformdirs==2.5.2\npluggy==0.13.0\npre-commit==2.20.0\npy==1.10.0\npycodestyle==2.9.0\npyflakes==2.5.0\npyparsing==2.4.2\npytest==7.0.1\npytest-asyncio==0.14.0\nrequirements-tools==2.1.0\nresponses==0.13.0\ntoml==0.10.2\ntomli==2.0.1\ntypes-awscrt==0.27.2\ntypes-boto3==1.0.2\ntypes-cachetools==5.5.0.20240820\ntypes-psutil==6.1.0.20241221\ntypes-pytz==2024.2.0.20240913\ntypes-PyYAML==6.0.12\ntypes-requests==2.31.0.5\ntypes-s3transfer==0.13.0\ntypes-setuptools==75.8.0.20250110\ntypes-urllib3==1.26.25.14\nvirtualenv==20.17.1\nxmltodict==0.12.0\n"
  },
  {
    "path": "requirements-docs.txt",
    "content": "Jinja2==3.1.2\nmarkupsafe==2.1.1\nmock==3.0.5\nPygments==2.13.0\nSphinx==6.1.3\nSphinx-PyPI-upload==0.2.1\n"
  },
  {
    "path": "requirements-minimal.txt",
    "content": "addict  # not sure why check-requirements is not picking this up from task_processing[mesos_executor]\nargcomplete\nboto3\nbsddb3\ncryptography\ndataclasses\necdsa>=0.13.3\nhttp-parser  # not sure why check-requirements is not picking this up from task_processing[mesos_executor]\nhumanize\nipdb\nipython\nJinja2>=3.1.2\nlockfile\nprometheus-client\npsutil\npy-bcrypt\npyasn1\npyformance\npymesos  # not sure why check-requirements is not picking this up from task_processing[mesos_executor]\npyopenssl # vault-tools dependency, but mypy is picking up some code (that we don't use) that imports pyopenssl in Twisted here we are. we could add Twisted[tls], but while hacky - this feels a little more explicit\npysensu-yelp\nPyStaticConfiguration\npytimeparse\npytz\nPyYAML>=5.1\nrequests\ntask_processing[mesos_executor,k8s]>=1.2.0\nTwisted>=19.7.0\nurllib3>=1.24.2\nWerkzeug>=0.15.3\n"
  },
  {
    "path": "requirements.txt",
    "content": "addict==2.2.1\nargcomplete==1.9.5\nasttokens==2.2.1\nattrs==19.3.0\nAutomat==20.2.0\nbackcall==0.1.0\nboto3==1.34.80\nbotocore==1.34.80\nbsddb3==6.2.7\ncachetools==4.2.1\ncertifi==2022.12.7\ncffi==1.15.0\ncharset-normalizer==2.0.12\nconstantly==15.1.0\ncryptography==41.0.5\ndataclasses==0.6\ndecorator==4.4.0\necdsa==0.13.3\nexecuting==1.2.0\ngoogle-auth==1.23.0\nhttp-parser==0.9.0\nhumanize==4.10.0\nhyperlink==19.0.0\nidna==2.8\nincremental==22.10.0\nipdb==0.13.2\nipython==8.10.0\nipython-genutils==0.2.0\njedi==0.16.0\nJinja2==3.1.2\njmespath==0.9.4\nkubernetes==26.1.0\nlockfile==0.12.2\nMarkupSafe==2.1.1\nmatplotlib-inline==0.1.3\noauthlib==3.1.0\nparso==0.7.0\npexpect==4.7.0\npickleshare==0.7.5\nprometheus-client==0.21.1\nprompt-toolkit==3.0.38\npsutil==5.6.6\nptyprocess==0.6.0\npure-eval==0.2.2\npy-bcrypt==0.4\npyasn1==0.4.7\npyasn1-modules==0.2.8\npycparser==2.19\npyformance==0.4\nPygments==2.13.0\npymesos==0.3.9\npyopenssl==24.2.1\npyrsistent==0.15.4\npysensu-yelp==1.0.3\nPyStaticConfiguration==0.11.1\npython-dateutil==2.8.1\npytimeparse==1.1.8\npytz==2019.3\nPyYAML==6.0.1\nrequests==2.27.1\nrequests-oauthlib==1.2.0\nrsa==4.9\ns3transfer==0.10.1\nsetuptools==65.5.1\nsix==1.15.0\nstack-data==0.6.2\ntask-processing==1.3.5\ntraitlets==5.0.0\nTwisted==22.10.0\ntyping-extensions==4.5.0\nurllib3==1.25.10\nwcwidth==0.1.7\nwebsocket-client==0.56.0\nWerkzeug==2.2.3\nzope.interface==7.2\n"
  },
  {
    "path": "setup.cfg",
    "content": "[build_docs]\nsource-dir = docs/\nbuild-dir  = docs/_build\nall_files  = 1\n\n[upload_docs]\nupload_dir = docs/_build/html\n"
  },
  {
    "path": "setup.py",
    "content": "try:\n    from setuptools import setup, find_packages\n\n    assert setup\nexcept ImportError:\n    from distutils.core import setup\n\nimport glob\nimport tron\n\nsetup(\n    name=\"tron\",\n    version=tron.__version__,\n    provides=[\"tron\"],\n    author=\"Yelp\",\n    author_email=\"yelplabs@yelp.com\",\n    url=\"http://github.com/Yelp/Tron\",\n    description=\"Job scheduling and monitoring system\",\n    classifiers=[\n        \"Programming Language :: Python\",\n        \"Programming Language :: Python :: 3.10\",\n        \"Operating System :: OS Independent\",\n        \"License :: OSI Approved :: Apache Software License\",\n        \"Topic :: System :: Monitoring\",\n        \"Topic :: System :: Systems Administration\",\n        \"Intended Audience :: Developers\",\n        \"Intended Audience :: System Administrators\",\n        \"Development Status :: 4 - Beta\",\n    ],\n    packages=find_packages(\n        exclude=[\"tests.*\", \"tests\", \"example-cluster\"],\n    )\n    + [\"tronweb\"],\n    scripts=glob.glob(\"bin/*\") + glob.glob(\"tron/bin/*.py\"),\n    include_package_data=True,\n    long_description=\"\"\"\nTron is a centralized system for managing periodic batch processes across a\ncluster. If you find cron or fcron to be insufficient for managing complex work\nflows across multiple computers, Tron might be for you.\n\nFor more information, look at the\n`tutorial <http://tron.readthedocs.io/en/latest/tutorial.html>`_ or the\n`full documentation <http://tron.readthedocs.io/en/latest/index.html>`_.\n\"\"\",\n)\n"
  },
  {
    "path": "testfiles/MASTER.yaml",
    "content": "eventbus_enabled: true\n\nstate_persistence:\n  name: \"/nail/tron/tron_state\"\n  store_type: \"shelve\"\n  buffer_size: 10\n\nssh_options:\n  agent: False\n  identities:\n    - /work/example-cluster/insecure_key\n\naction_runner:\n  runner_type: \"subprocess\"\n  remote_status_path: \"/tmp/tron\"\n  remote_exec_path: \"/work/tron/bin/\"\n\nnodes:\n  - hostname: localhost\n    username: root\n\ntime_zone: US/Eastern\n\njobs:\n  one:\n    node: localhost\n    schedule: \"cron */5 * * * *\"\n    actions:\n      one:\n        command: exit 1\n        retries: 3\n        retries_delay: 1m\n        trigger_downstreams: {ymdhm: \"{ymdhm}\"}\n  two:\n    node: localhost\n    schedule: \"cron */5 * * * *\"\n    actions:\n      two:\n        command: sleep 10 && date\n        triggered_by: [\"MASTER.one.one.ymdhm.{ymdhm}\"]\n        trigger_timeout: 30s\n"
  },
  {
    "path": "testifycompat/__init__.py",
    "content": "from unittest import TestCase  # noqa: F401\n\nfrom testifycompat.assertions import *  # noqa: F401, F403\nfrom testifycompat.fixtures import *  # noqa: F401, F403\n\n\nversion = \"0.1.2\"\n\n\ndef run():\n    raise AssertionError(\n        \"Oops, you tried to use testifycompat.run(). This function doesn't \"\n        \"do anything, it only exists as backwards compatibility with testify. \"\n        \"You should remove it from your code.\",\n    )\n"
  },
  {
    "path": "testifycompat/assertions.py",
    "content": "\"\"\"Compatiblity functions for py.test to migrate code from testify.\n\nThis is not a complete list, but should hopefully cover most of the common\nassertions.\n\nThese assertions should **not** be used in new code, and are only for migrating\nold tests.\n\"\"\"\nimport pytest\n\n\ndef assert_equal(left, right, *args):\n    assert left == right\n\n\nassert_sets_equal = assert_dicts_equal = assert_datetimes_equal = assert_equal\nassert_equals = assert_equal\n\n\ndef assert_true(val):\n    assert val\n\n\ndef assert_false(val):\n    assert not val\n\n\ndef assert_raises_and_contains(exc, text, func, *args, **kwargs):\n    with pytest.raises(exc) as excinfo:\n        func(*args, **kwargs)\n\n    text = text if isinstance(text, list) else [text]\n    for item in text:\n        assert item in str(excinfo.exconly())\n\n\ndef assert_raises(exc, func=None, *args, **kwargs):\n    if func is None:\n        return pytest.raises(exc)\n\n    with pytest.raises(exc):\n        func(*args, **kwargs)\n\n\ndef assert_in(item, container):\n    assert item in container\n\n\ndef assert_not_in(item, container):\n    assert item not in container\n\n\ndef assert_is(left, right):\n    assert left is right\n\n\ndef assert_is_not(left, right):\n    assert left is not right\n\n\ndef assert_not_equal(left, right):\n    assert left != right\n\n\ndef assert_lt(left, right):\n    assert left < right\n\n\ndef assert_lte(left, right):\n    assert left <= right\n\n\ndef assert_gt(left, right):\n    assert left > right\n\n\ndef assert_gte(left, right):\n    assert left >= right\n\n\ndef assert_in_range(val, start, end):\n    assert start < val < end\n\n\ndef assert_between(val, start, end):\n    assert start <= val <= end\n\n\ndef assert_all_in(left, right):\n    \"\"\"Assert that everything in `left` is also in `right`\n    Note: This is different than `assert_subset()` because python sets use\n    `__hash__()` for comparision whereas `in` uses `__eq__()`.\n    \"\"\"\n    for item in left:\n        assert item in right\n\n\ndef assert_starts_with(val, prefix):\n    assert val.startswith(prefix)\n\n\ndef assert_not_reached():\n    assert False\n\n\ndef assert_empty(iterable):\n    assert len(list(iterable)) == 0\n\n\ndef assert_not_empty(iterable):\n    assert len(list(iterable)) > 0\n\n\ndef assert_length(sequence, expected):\n    assert len(list(sequence)) == expected\n\n\ndef assert_sorted_equal(left, right):\n    assert sorted(left) == sorted(right)\n\n\ndef assert_isinstance(object_, type_):\n    assert isinstance(object_, type_)\n"
  },
  {
    "path": "testifycompat/bin/__init__.py",
    "content": ""
  },
  {
    "path": "testifycompat/bin/migrate.py",
    "content": "#!/usr/bin/env python\n\"\"\"\n\n.. warning::\n\n    This script is still very experimental. Use at your own risk. It will\n    be replaced over time with lib2to3 fixers.\n\n\nUsage:\n\n    ``python -m testifycompat.bin.migrate <filenames>``\n\nExample:\n\n    ``find tests -name *.py | xargs python migrate.py``\n\n\n\"\"\"\nimport functools\nimport re\nimport sys\n\n\ndef replace(pattern, repl):\n    return functools.partial(re.sub, pattern, repl)\n\n\nreplaces = [\n    # Replace imports\n    replace(r\"^from testify import \", \"from testifycompat import \"),\n    replace(r\"^from testify.assertions import \", \"from testifycompat import \"),\n    replace(r\"^import testify as T\", \"import testifycompat as T\"),\n    # Replace test classes\n    replace(\n        r\"^class (?:Test)?(\\w+)(?:Test|TestCase)\\((?:T\\.)?TestCase\\):$\",\n        \"class Test\\\\1(object):\",\n    ),\n    replace(\n        r\"^class (?:Test)?(\\w+)(?:Test|TestCase)(\\(\\w+TestCase\\)):$\",\n        \"class Test\\\\1\\\\2:\",\n    ),\n    # Replace some old assertions\n    replace(r\"self.assert_\\((.*)\\)\", \"assert \\\\1\"),\n]\n\n\ndef run_replacement(contents):\n    for line in contents:\n        for replacement in replaces:\n            line = replacement(line)\n        yield line\n\n\ndef strip_if_main_run(contents):\n    if len(contents) < 2:\n        return contents\n    if \"run()\" in contents[-1] and \"if __name__ == \" in contents[-2]:\n        return contents[:-2]\n    return contents\n\n\ndef run_migration_on_file(filename):\n    with open(filename) as fh:\n        lines = fh.read().split(\"\\n\")\n\n    lines = list(run_replacement(lines))\n    lines = strip_if_main_run(lines)\n\n    with open(filename, \"w\") as fh:\n        fh.write(\"\\n\".join(lines))\n\n\nif __name__ == \"__main__\":\n    for filename in sys.argv[1:]:\n        run_migration_on_file(filename)\n"
  },
  {
    "path": "testifycompat/fixtures.py",
    "content": "\"\"\"\nCompatibility fixtures for migrating code from testify to py.test\n\n.. note::\n\n    ``class_`` fixtures must be applied to @classmethods. py.test will not run\n    a class_* fixture that is not attached to a class-level method, so your\n    tests will probably fail.\n\"\"\"\nimport pytest\n\n\ndef setup(func):\n    return pytest.fixture(autouse=True)(func)\n\n\ndef setup_teardown(func):\n    return pytest.yield_fixture(autouse=True)(func)\n\n\ndef teardown(func):\n    def teardown_(*args, **kwargs):\n        yield\n        func(*args, **kwargs)\n\n    return pytest.yield_fixture(autouse=True)(teardown_)\n\n\ndef class_setup(func):\n    return pytest.fixture(autouse=True, scope=\"class\")(func)\n\n\ndef class_setup_teardown(func):\n    return pytest.yield_fixture(autouse=True, scope=\"class\")(func)\n\n\ndef class_teardown(func):\n    def teardown_(*args, **kwargs):\n        yield\n        func(*args, **kwargs)\n\n    return pytest.yield_fixture(autouse=True, scope=\"class\")(teardown_)\n\n\ndef suite(name, reason=None):\n    \"\"\"Translate a :func:`testify.suite` decorator into the appropriate\n    :mod:`pytest.mark` call. For the disabled suite this results in a\n    skipped test. For other suites it will return a  `pytest.mark.<name>`\n    decorator.\n    \"\"\"\n    if name == \"disabled\":\n        return pytest.mark.skipif(True, reason=reason)\n\n    return getattr(pytest.mark, name)\n"
  },
  {
    "path": "tests/__init__.py",
    "content": "from twisted.python import log\n\nobserver = log.PythonLoggingObserver()\nobserver.start()\n"
  },
  {
    "path": "tests/actioncommand_test.py",
    "content": "import shlex\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_not_equal\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.testingutils import autospec_method\nfrom tron import actioncommand\nfrom tron.actioncommand import ActionCommand\nfrom tron.config import schema\nfrom tron.serialize import filehandler\n\n\nclass TestActionCommand(TestCase):\n    @setup\n    def setup_command(self):\n        self.serializer = mock.create_autospec(filehandler.FileHandleManager)\n        self.serializer.open.return_value = filehandler.NullFileHandle\n        self.ac = ActionCommand(\"action.1.do\", \"do\", self.serializer)\n\n    def test_init(self):\n        assert_equal(self.ac.state, ActionCommand.PENDING)\n\n    def test_init_no_serializer(self):\n        ac = ActionCommand(\"action.1.do\", \"do\")\n        ac.write_stdout(\"something\")\n        ac.write_stderr(\"else\")\n        assert_equal(ac.stdout, filehandler.NullFileHandle)\n        ac.done()\n\n    def test_started(self):\n        assert self.ac.started()\n        assert self.ac.start_time is not None\n        assert_equal(self.ac.state, ActionCommand.RUNNING)\n\n    def test_started_already_started(self):\n        self.ac.started()\n        assert not self.ac.started()\n\n    def test_exited(self):\n        self.ac.started()\n        assert self.ac.exited(123)\n        assert_equal(self.ac.exit_status, 123)\n        assert self.ac.end_time is not None\n\n    def test_exited_from_pending(self):\n        assert self.ac.exited(123)\n        assert_equal(self.ac.state, ActionCommand.FAILSTART)\n\n    def test_exited_bad_state(self):\n        self.ac.started()\n        self.ac.exited(123)\n        assert not self.ac.exited(1)\n\n    def test_write_stderr_no_fh(self):\n        message = \"this is the message\"\n        # Test without a stderr\n        self.ac.write_stderr(message)\n\n    def test_write_stderr(self):\n        message = \"this is the message\"\n        serializer = mock.create_autospec(filehandler.FileHandleManager)\n        fh = serializer.open.return_value = mock.create_autospec(\n            filehandler.FileHandleWrapper,\n        )\n        ac = ActionCommand(\"action.1.do\", \"do\", serializer)\n\n        ac.write_stderr(message)\n        fh.write.assert_called_with(message)\n\n    def test_done(self):\n        self.ac.started()\n        self.ac.exited(123)\n        assert self.ac.done()\n\n    def test_done_bad_state(self):\n        assert not self.ac.done()\n\n    def test_handle_errback(self):\n        message = \"something went wrong\"\n        self.ac.handle_errback(message)\n        assert_equal(self.ac.state, ActionCommand.FAILSTART)\n        assert self.ac.end_time\n\n    def test_is_unknown(self):\n        assert self.ac.is_unknown\n\n    def test_is_failed(self):\n        assert not self.ac.is_failed\n\n    def test_is_failed_true(self):\n        self.ac.exit_status = 255\n        assert self.ac.is_failed\n\n    def test_is_complete(self):\n        assert not self.ac.is_complete\n\n    def test_is_complete_true(self):\n        self.ac.machine.state = self.ac.COMPLETE\n        assert self.ac.is_complete, self.ac.machine.state\n\n    def test_is_done(self):\n        self.ac.machine.state = self.ac.FAILSTART\n        assert self.ac.is_done, self.ac.machine.state\n        self.ac.machine.state = self.ac.COMPLETE\n        assert self.ac.is_done, self.ac.machine.state\n\n\nclass 
TestCreateActionCommandFactoryFromConfig(TestCase):\n    def test_create_default_action_command_no_config(self):\n        config = ()\n        factory = actioncommand.create_action_runner_factory_from_config(\n            config,\n        )\n        assert_equal(type(factory), actioncommand.NoActionRunnerFactory)\n\n    def test_create_default_action_command(self):\n        config = schema.ConfigActionRunner(\n            schema.ActionRunnerTypes.none.value,\n            None,\n            None,\n        )\n        factory = actioncommand.create_action_runner_factory_from_config(\n            config,\n        )\n        assert type(factory) is actioncommand.NoActionRunnerFactory\n\n    def test_create_action_command_with_simple_runner(self):\n        status_path, exec_path = \"/tmp/what\", \"/remote/bin\"\n        config = schema.ConfigActionRunner(\n            schema.ActionRunnerTypes.subprocess.value,\n            status_path,\n            exec_path,\n        )\n        factory = actioncommand.create_action_runner_factory_from_config(\n            config,\n        )\n        assert_equal(factory.status_path, status_path)\n        assert_equal(factory.exec_path, exec_path)\n\n\nclass TestSubprocessActionRunnerFactory(TestCase):\n    @setup\n    def setup_factory(self):\n        self.status_path = \"status_path\"\n        self.exec_path = \"exec_path\"\n        self.factory = actioncommand.SubprocessActionRunnerFactory(\n            self.status_path,\n            self.exec_path,\n        )\n\n    def test_from_config(self):\n        config = mock.Mock()\n        runner_factory = actioncommand.SubprocessActionRunnerFactory.from_config(\n            config,\n        )\n        assert_equal(runner_factory.status_path, config.remote_status_path)\n        assert_equal(runner_factory.exec_path, config.remote_exec_path)\n\n    def test_create(self):\n        serializer = mock.create_autospec(actioncommand.StringBufferStore)\n        id, command = \"id\", \"do a thing\"\n        autospec_method(self.factory.build_command)\n        action_command = self.factory.create(id, command, serializer)\n        assert_equal(action_command.id, id)\n        assert_equal(\n            action_command.command,\n            self.factory.build_command.return_value,\n        )\n        assert_equal(action_command.stdout, serializer.open.return_value)\n        assert_equal(action_command.stderr, serializer.open.return_value)\n\n    def test_build_command_complex_quoting(self):\n        id = \"id\"\n        command = '/bin/foo -c \"foo\" --foo \"bar\"'\n        exec_name = \"action_runner.py\"\n        actual = self.factory.build_command(id, command, exec_name)\n        assert_equal(\n            shlex.split(actual),\n            [\n                f\"{self.exec_path}/{exec_name}\",\n                f\"{self.status_path}/{id}\",\n                command,\n                id,\n            ],\n        )\n\n    def test_build_stop_action_command(self):\n        id, command = \"id\", \"do a thing\"\n        autospec_method(self.factory.build_command)\n        action_command = self.factory.build_stop_action_command(id, command)\n        assert_equal(\n            action_command.id,\n            f\"{id}.{self.factory.build_command.return_value}\",\n        )\n        assert_equal(\n            action_command.command,\n            self.factory.build_command.return_value,\n        )\n\n    def test__eq__true(self):\n        first = actioncommand.SubprocessActionRunnerFactory(\"a\", \"b\")\n        second = 
actioncommand.SubprocessActionRunnerFactory(\"a\", \"b\")\n        assert_equal(first, second)\n\n    def test__eq__false(self):\n        first = actioncommand.SubprocessActionRunnerFactory(\"a\", \"b\")\n        second = actioncommand.SubprocessActionRunnerFactory(\"a\", \"c\")\n        assert_not_equal(first, second)\n        assert_not_equal(first, None)\n        assert_not_equal(first, actioncommand.NoActionRunnerFactory)\n"
  },
  {
    "path": "tests/api/__init__.py",
    "content": ""
  },
  {
    "path": "tests/api/adapter_test.py",
    "content": "import shutil\nimport tempfile\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests import mocks\nfrom tests.assertions import assert_length\nfrom tron import node\nfrom tron import scheduler\nfrom tron.api import adapter\nfrom tron.api.adapter import ActionRunAdapter\nfrom tron.api.adapter import JobRunAdapter\nfrom tron.api.adapter import ReprAdapter\nfrom tron.api.adapter import RunAdapter\nfrom tron.core import actiongraph\nfrom tron.core import actionrun\nfrom tron.core import job\n\n\nclass MockAdapter(ReprAdapter):\n\n    field_names = [\"one\", \"two\"]\n    translated_field_names = [\"three\", \"four\"]\n\n    def get_three(self):\n        return 3\n\n    def get_four(self):\n        return 4\n\n\nclass TestReprAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.original = mock.Mock(one=1, two=2)\n        self.adapter = MockAdapter(self.original)\n\n    def test__init__(self):\n        assert_equal(self.adapter._obj, self.original)\n        assert_equal(self.adapter.fields, MockAdapter.field_names)\n\n    def test_get_translation_mapping(self):\n        expected = {\n            \"three\": self.adapter.get_three,\n            \"four\": self.adapter.get_four,\n        }\n        assert_equal(self.adapter.translators, expected)\n\n    def test_get_repr(self):\n        expected = dict(one=1, two=2, three=3, four=4)\n        assert_equal(self.adapter.get_repr(), expected)\n\n\nclass SampleClassStub:\n    def __init__(self):\n        self.true_flag = True\n        self.false_flag = False\n\n    @adapter.toggle_flag(\"true_flag\")\n    def expects_true(self):\n        return \"This is true\"\n\n    @adapter.toggle_flag(\"false_flag\")\n    def expects_false(self):\n        return \"This is false\"\n\n\nclass TestToggleFlag(TestCase):\n    @setup\n    def setup_stub(self):\n        self.stub = SampleClassStub()\n\n    def test_toggle_flag_true(self):\n        assert_equal(self.stub.expects_true(), \"This is true\")\n\n    def test_toggle_flag_false(self):\n        assert not self.stub.expects_false()\n\n\nclass TestRunAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.original = mock.Mock()\n        self.adapter = RunAdapter(self.original)\n\n    def test_get_state(self):\n        assert_equal(self.adapter.get_state(), self.original.state)\n\n    @mock.patch(\"tron.api.adapter.NodeAdapter\", autospec=True)\n    def test_get_node(self, mock_node_adapter):\n        assert_equal(\n            self.adapter.get_node(),\n            mock_node_adapter.return_value.get_repr.return_value,\n        )\n        mock_node_adapter.assert_called_with(self.original.node)\n\n    def test_get_duration(self):\n        self.original.start_time = None\n        assert_equal(self.adapter.get_duration(), \"\")\n\n\nclass TestActionRunAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.temp_dir = tempfile.mkdtemp()\n        self.action_run = mock.MagicMock()\n        self.job_run = mock.MagicMock()\n        self.adapter = ActionRunAdapter(self.action_run, self.job_run, 4)\n\n    @teardown\n    def teardown_adapter(self):\n        shutil.rmtree(self.temp_dir)\n\n    def test__init__(self):\n        assert_equal(self.adapter.max_lines, 4)\n        assert_equal(self.adapter.job_run, self.job_run)\n        assert_equal(self.adapter._obj, self.action_run)\n\n    def 
test_get_repr(self):\n        result = self.adapter.get_repr()\n        assert_equal(result[\"command\"], self.action_run.rendered_command)\n\n\nclass TestActionRunGraphAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.ar1 = mock.MagicMock(action_name=\"a1\")\n        self.ar2 = mock.MagicMock(action_name=\"a2\")\n        self.a1 = mock.MagicMock()\n        self.a2 = mock.MagicMock()\n        self.a1.name = \"a1\"\n        self.a2.name = \"a2\"\n        self.action_runs = mock.create_autospec(\n            actionrun.ActionRunCollection,\n            action_graph=actiongraph.ActionGraph(\n                {\n                    \"a1\": self.a1,\n                    \"a2\": self.a2,\n                },\n                {\"a1\": set(), \"a2\": {\"a1\"}},\n                {\"a1\": set(), \"a2\": set()},\n            ),\n        )\n        self.adapter = adapter.ActionRunGraphAdapter(self.action_runs)\n        self.action_runs.__iter__.return_value = [self.ar1, self.ar2]\n\n    def test_get_repr(self):\n        result = self.adapter.get_repr()\n        assert len(result) == 2\n        assert self.ar1.id == result[0][\"id\"]\n        assert [\"a1\"] == result[1][\"dependencies\"]\n        assert self.ar1.rendered_command == result[0][\"command\"]\n        assert self.ar1.command_config.command == result[0][\"raw_command\"]\n\n\nclass TestJobRunAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        action_runs = mock.MagicMock()\n        action_runs.__iter__.return_value = iter([mock.Mock(), mock.Mock()])\n        self.job_run = mock.Mock(\n            action_runs=action_runs,\n            action_graph=mocks.MockActionGraph(),\n        )\n        self.adapter = JobRunAdapter(self.job_run, include_action_runs=True)\n\n    def test__init__(self):\n        assert self.adapter.include_action_runs\n\n    def test_get_runs(self):\n        with mock.patch(\"tron.api.adapter.ActionRunAdapter\", autospec=True):\n            assert_length(self.adapter.get_runs(), 2)\n\n    def test_get_runs_without_action_runs(self):\n        self.adapter.include_action_runs = False\n        assert_equal(self.adapter.get_runs(), None)\n\n\nclass TestNodeAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.node = mock.create_autospec(node.Node)\n        self.adapter = adapter.NodeAdapter(self.node)\n\n    def test_repr(self):\n        result = self.adapter.get_repr()\n        assert_equal(result[\"hostname\"], self.node.hostname)\n        assert_equal(result[\"username\"], self.node.username)\n\n\nclass TestNodePoolAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.pool = mock.create_autospec(node.NodePool)\n        self.adapter = adapter.NodePoolAdapter(self.pool)\n\n    @mock.patch(\"tron.api.adapter.adapt_many\", autospec=True)\n    def test_repr(self, mock_many):\n        result = self.adapter.get_repr()\n        assert_equal(result[\"name\"], self.pool.get_name.return_value)\n        mock_many.assert_called_with(\n            adapter.NodeAdapter,\n            self.pool.get_nodes.return_value,\n        )\n\n\nclass TestJobIndexAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.job = mock.create_autospec(job.Job)\n        self.adapter = adapter.JobIndexAdapter(self.job)\n\n    def test_repr(self):\n        result = self.adapter.get_repr()\n        self.job.get_runs.assert_called_with()\n        runs = self.job.get_runs.return_value\n        runs.get_newest.assert_called_with()\n        expected = {\n            
\"name\": self.job.get_name.return_value,\n            \"actions\": [],\n        }\n        assert_equal(result, expected)\n\n    def test_get_actions(self):\n        action_run = mock.Mock()\n        job_run = self.job.get_runs.return_value.get_newest.return_value\n        job_run.action_runs.__iter__.return_value = [action_run]\n        result = self.adapter.get_actions()\n        expected = {\n            \"name\": action_run.action_name,\n            \"command\": action_run.command_config.command,\n        }\n        assert_equal(result, [expected])\n\n    def test_get_actions_no_runs(self):\n        self.job.get_runs.return_value.get_newest.return_value = None\n        result = self.adapter.get_actions()\n        assert_equal(result, [])\n\n\nclass TestSchedulerAdapter(TestCase):\n    @setup\n    def setup_adapter(self):\n        self.scheduler = mock.create_autospec(scheduler.GeneralScheduler)\n        self.adapter = adapter.SchedulerAdapter(self.scheduler)\n\n    @mock.patch(\"tron.api.adapter.scheduler.get_jitter_str\", autospec=True)\n    def test_repr(self, mock_get_jitter):\n        result = self.adapter.get_repr()\n        expected = {\n            \"type\": self.scheduler.get_name.return_value,\n            \"value\": self.scheduler.get_value.return_value,\n            \"jitter\": mock_get_jitter.return_value,\n        }\n        assert_equal(result, expected)\n        mock_get_jitter.assert_called_with(self.scheduler.get_jitter())\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/api/auth_test.py",
    "content": "from unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport pytest\nfrom twisted.web.server import Request\n\nfrom tron.api.auth import AuthorizationFilter\nfrom tron.api.auth import AuthorizationOutcome\n\n\n@pytest.fixture\ndef mock_auth_filter():\n    with patch(\"tron.api.auth.requests\"):\n        yield AuthorizationFilter(\"http://localhost:31337/whatever\", True)\n\n\ndef mock_request(path: str, token: str, method: str):\n    res = MagicMock(spec=Request, path=path.encode(), method=method.encode())\n    res.getHeader.return_value = token\n    return res\n\n\ndef test_is_request_authorized(mock_auth_filter):\n    mock_auth_filter.session.post.return_value.json.return_value = {\n        \"result\": {\"allowed\": True, \"reason\": \"User allowed\"}\n    }\n    assert mock_auth_filter.is_request_authorized(\n        mock_request(\"/api/jobs/foobar.run.2\", \"aaa.bbb.ccc\", \"get\")\n    ) == AuthorizationOutcome(True, \"User allowed\")\n    mock_auth_filter.session.post.assert_called_once_with(\n        url=\"http://localhost:31337/whatever\",\n        json={\n            \"input\": {\n                \"path\": \"/api/jobs/foobar.run.2\",\n                \"backend\": \"tron\",\n                \"token\": \"aaa.bbb.ccc\",\n                \"method\": \"get\",\n                \"service\": \"foobar\",\n            }\n        },\n        timeout=2,\n    )\n\n\ndef test_is_request_authorized_fail(mock_auth_filter):\n    mock_auth_filter.session.post.side_effect = Exception\n    assert mock_auth_filter.is_request_authorized(\n        mock_request(\"/allowed\", \"eee.ddd.fff\", \"get\")\n    ) == AuthorizationOutcome(False, \"Auth backend error\")\n\n\ndef test_is_request_authorized_malformed(mock_auth_filter):\n    mock_auth_filter.session.post.return_value.json.return_value = {\"foo\": \"bar\"}\n    assert mock_auth_filter.is_request_authorized(\n        mock_request(\"/allowed\", \"eee.ddd.fff\", \"post\")\n    ) == AuthorizationOutcome(False, \"Malformed auth response\")\n\n\ndef test_is_request_authorized_no_enforce(mock_auth_filter):\n    mock_auth_filter.session.post.return_value.json.return_value = {\n        \"result\": {\"allowed\": False, \"reason\": \"Missing token\"}\n    }\n    with patch.object(mock_auth_filter, \"enforce\", False):\n        assert mock_auth_filter.is_request_authorized(mock_request(\"/foobar\", \"\", \"post\")) == AuthorizationOutcome(\n            True, \"Auth dry-run\"\n        )\n\n\ndef test_is_request_authorized_disabled(mock_auth_filter):\n    mock_auth_filter.session.post.return_value.json.return_value = {\n        \"result\": {\"allowed\": False, \"reason\": \"Missing token\"}\n    }\n    with patch.object(mock_auth_filter, \"endpoint\", None):\n        assert mock_auth_filter.is_request_authorized(mock_request(\"/buzz\", \"\", \"post\")) == AuthorizationOutcome(\n            True, \"Auth not enabled\"\n        )\n\n\n@pytest.mark.parametrize(\n    \"path,expected\",\n    (\n        (\"/api/jobs/someservice.instance/110/run\", \"someservice\"),\n        (\"/api/jobs/someweirdservice/110/run\", \"someweirdservice\"),\n        (\"/api/jobs/\", None),\n        (\"/api\", None),\n    ),\n)\ndef test_extract_service_from_path(path, expected):\n    assert AuthorizationFilter._extract_service_from_path(path) == expected\n"
  },
  {
    "path": "tests/api/controller_test.py",
    "content": "from unittest import mock\n\nimport pytest\n\nfrom tron import mcp\nfrom tron.api import controller\nfrom tron.api.controller import ConfigController\nfrom tron.api.controller import EventsController\nfrom tron.api.controller import InvalidCommandForActionState\nfrom tron.api.controller import JobCollectionController\nfrom tron.api.controller import UnknownCommandError\nfrom tron.config import ConfigError\nfrom tron.config import manager\nfrom tron.core import actionrun\nfrom tron.core import jobrun\nfrom tron.core.job_collection import JobCollection\nfrom tron.core.job_scheduler import JobScheduler\n\n\nclass TestJobCollectionController:\n    @pytest.fixture(autouse=True)\n    def setup_controller(self):\n        self.collection = mock.create_autospec(\n            JobCollection,\n            enable=mock.Mock(),\n            disable=mock.Mock(),\n        )\n        self.controller = JobCollectionController(self.collection)\n\n    def test_handle_command_unknown(self):\n        with pytest.raises(UnknownCommandError):\n            self.controller.handle_command(\"enableall\")\n            self.controller.handle_command(\"disableall\")\n\n    def test_handle_command_move_non_existing_job(self):\n        self.collection.get_names.return_value = []\n        result = self.controller.handle_command(\n            \"move\",\n            old_name=\"old.test\",\n            new_name=\"new.test\",\n        )\n        assert \"doesn't exist\" in result\n\n    def test_handle_command_move_to_existing_job(self):\n        self.collection.get_names.return_value = [\"old.test\", \"new.test\"]\n        result = self.controller.handle_command(\n            \"move\",\n            old_name=\"old.test\",\n            new_name=\"new.test\",\n        )\n        assert \"exists already\" in result\n\n    def test_handle_command_move(self):\n        self.collection.get_names.return_value = [\"old.test\"]\n        result = self.controller.handle_command(\n            \"move\",\n            old_name=\"old.test\",\n            new_name=\"new.test\",\n        )\n        assert \"Error\" not in result\n\n\nclass TestActionRunController:\n    @pytest.fixture(autouse=True)\n    def setup_controller(self):\n        self.action_run = mock.create_autospec(\n            actionrun.ActionRun,\n            cancel=mock.Mock(),\n        )\n        self.job_run = mock.create_autospec(jobrun.JobRun)\n        self.job_run.is_scheduled = False\n        self.controller = controller.ActionRunController(\n            self.action_run,\n            self.job_run,\n        )\n        self.job_run.action_runs.cleanup_action_run = None\n\n    def test_handle_command_start_failed(self):\n        self.job_run.is_scheduled = True\n        result = self.controller.handle_command(\"start\")\n        assert not self.action_run.start.mock_calls\n        assert \"cannot be started\" in result\n\n    def test_handle_command_recover_failed(self):\n        self.action_run.is_unknown = False\n        result = self.controller.handle_command(\"recover\")\n        assert not self.action_run.recover.mock_calls\n        assert \"cannot be recovered\" in result\n\n    def test_handle_command_mapped_command(self):\n        result = self.controller.handle_command(\"cancel\")\n        self.action_run.cancel.assert_called_with()\n        assert \"now in state\" in result\n\n    def test_handle_command_mapped_command_failed(self):\n        self.action_run.cancel.return_value = False\n        with pytest.raises(InvalidCommandForActionState):\n        
    self.controller.handle_command(\"cancel\")\n\n    def test_handle_termination_not_implemented(self):\n        self.action_run.stop.side_effect = NotImplementedError\n        result = self.controller.handle_termination(\"stop\")\n        assert \"Failed to stop\" in result\n\n    def test_handle_termination_success_without_extra_msg(self):\n        self.action_run.kill.return_value = None\n        result = self.controller.handle_termination(\"kill\")\n        assert \"Attempting to kill\" in result\n\n    def test_handle_termination_success_with_extra_msg(self):\n        self.action_run.kill.return_value = \"Warning Message\"\n        result = self.controller.handle_termination(\"kill\")\n        assert \"Attempting to kill\" in result\n        assert \"Warning Message\" in result\n\n    def test_handle_retry_default(self):\n        self.controller.handle_command(\"retry\")\n        self.action_run.retry.assert_called_once_with(original_command=True)\n\n    def test_handle_retry_new_command(self):\n        self.controller.handle_command(\"retry\", use_latest_command=True)\n        self.action_run.retry.assert_called_once_with(original_command=False)\n\n\nclass TestJobRunController:\n    @pytest.fixture(autouse=True)\n    def setup_controller(self):\n        self.job_run = mock.create_autospec(\n            jobrun.JobRun,\n            run_time=mock.Mock(),\n            cancel=mock.Mock(),\n        )\n        self.job_scheduler = mock.create_autospec(JobScheduler)\n        self.controller = controller.JobRunController(\n            self.job_run,\n            self.job_scheduler,\n        )\n\n    def test_handle_command_restart(self):\n        self.controller.handle_command(\"restart\")\n        self.job_scheduler.manual_start.assert_called_with(\n            self.job_run.run_time,\n        )\n\n    def test_handle_mapped_command(self):\n        result = self.controller.handle_command(\"start\")\n        self.job_run.start.assert_called_with()\n        assert \"now in state\" in result\n\n    def test_handle_mapped_command_failure(self):\n        self.job_run.cancel.return_value = False\n        result = self.controller.handle_command(\"cancel\")\n        self.job_run.cancel.assert_called_with()\n        assert \"Failed to cancel\" in result\n\n\nclass TestJobController:\n    @pytest.fixture(autouse=True)\n    def setup_controller(self):\n        self.job_scheduler = mock.create_autospec(JobScheduler)\n        self.controller = controller.JobController(self.job_scheduler)\n\n    def test_handle_command_enable(self):\n        self.controller.handle_command(\"enable\")\n        self.job_scheduler.enable.assert_called_with()\n\n    def test_handle_command_disable(self):\n        self.controller.handle_command(\"disable\")\n        self.job_scheduler.disable.assert_called_with()\n\n    def test_handle_command_start(self):\n        run_time = mock.Mock()\n        self.controller.handle_command(\"start\", run_time)\n        self.job_scheduler.manual_start.assert_called_with(run_time=run_time)\n\n\nclass TestConfigController:\n    @pytest.fixture(autouse=True)\n    def setup_controller(self):\n        self.mcp = mock.create_autospec(mcp.MasterControlProgram)\n        self.manager = mock.create_autospec(manager.ConfigManager)\n        self.mcp.get_config_manager.return_value = self.manager\n        self.controller = ConfigController(self.mcp)\n\n    def test_get_config_content_new(self):\n        self.manager.__contains__.return_value = False\n        content = 
self.controller._get_config_content(\"name\")\n        assert content == self.controller.DEFAULT_NAMED_CONFIG\n        assert not self.manager.read_raw_config.call_count\n\n    def test_get_config_content_old(self):\n        self.manager.__contains__.return_value = True\n        name = \"the_name\"\n        content = self.controller._get_config_content(name)\n        assert content == self.manager.read_raw_config.return_value\n        self.manager.read_raw_config.assert_called_with(name)\n\n    def test_read_config(self):\n        self.manager.__contains__.return_value = True\n        name = \"MASTER\"\n        resp = self.controller.read_config(name)\n        self.manager.read_raw_config.assert_called_with(name)\n        self.manager.get_hash.assert_called_with(name)\n        assert resp[\"config\"] == self.manager.read_raw_config.return_value\n        assert resp[\"hash\"] == self.manager.get_hash.return_value\n\n    def test_update_config(self):\n        name, content, config_hash = \"foo_namespace\", mock.Mock(), mock.Mock()\n        self.manager.get_hash.return_value = config_hash\n        assert not self.controller.update_config(name, content, config_hash)\n        self.manager.get_hash.assert_called_with(name)\n        self.manager.write_config.assert_called_with(name, content)\n        self.mcp.reconfigure.assert_called_with(name)\n\n    def test_update_config_failure(self):\n        name, content, old_content, config_hash = (\n            \"foo_namespace\",\n            mock.Mock(),\n            mock.Mock(),\n            mock.Mock(),\n        )\n        self.manager.get_hash.return_value = config_hash\n        self.manager.write_config.side_effect = [ConfigError(\"It broke\"), None]\n        self.controller.read_config = mock.Mock(return_value={\"config\": old_content})\n        error = self.controller.update_config(\n            name,\n            content,\n            config_hash,\n        )\n        assert error == \"It broke\"\n        assert self.manager.write_config.call_args_list == [\n            mock.call(name, content),\n            mock.call(name, old_content),\n        ]\n        assert self.mcp.reconfigure.call_count == 1\n        self.mcp.reconfigure.assert_called_with(name)\n\n    def test_update_config_hash_mismatch(self):\n        name, content, config_hash = \"foo_namespace\", mock.Mock(), mock.Mock()\n        error = self.controller.update_config(name, content, config_hash)\n        assert error == \"Configuration has changed. 
Please try again.\"\n\n    def test_delete_config(self):\n        name, content, config_hash = \"foo_namespace\", \"\", mock.Mock()\n        self.manager.get_hash.return_value = config_hash\n        assert not self.controller.delete_config(name, content, config_hash)\n        self.manager.delete_config.assert_called_with(name)\n        self.mcp.reconfigure.assert_called_with(name)\n        self.manager.get_hash.assert_called_with(name)\n\n    def test_delete_config_failure(self):\n        name, content, config_hash = \"foo_namespace\", \"\", mock.Mock()\n        self.manager.get_hash.return_value = config_hash\n        self.manager.delete_config.side_effect = Exception(\"some error\")\n        error = self.controller.delete_config(name, content, config_hash)\n        assert error\n        self.manager.delete_config.assert_called_with(name)\n        assert not self.mcp.reconfigure.call_count\n\n    def test_delete_config_hash_mismatch(self):\n        name, content, config_hash = \"foo_namespace\", \"\", mock.Mock()\n        error = self.controller.delete_config(name, content, config_hash)\n        assert error == \"Configuration has changed. Please try again.\"\n\n    def test_delete_config_content_not_empty(self):\n        name, content, config_hash = \"foo_namespace\", \"\", mock.Mock()\n        error = self.controller.delete_config(name, content, config_hash)\n        assert error\n\n    def test_get_namespaces(self):\n        result = self.controller.get_namespaces()\n        self.manager.get_namespaces.assert_called_with()\n        assert result == self.manager.get_namespaces.return_value\n\n\nclass TestEventsController:\n    @pytest.fixture(autouse=True)\n    def setup(self):\n        with mock.patch(\"tron.api.controller.EventBus\", autospec=True) as eb:\n            eb.instance = mock.Mock()\n            self.eventbus = eb\n            self.controller = EventsController()\n            yield\n\n    def test_info(self):\n        self.eventbus.instance = None\n        assert self.controller.info() == dict(error=\"EventBus disabled\")\n\n        self.eventbus.instance = mock.Mock()\n        assert self.controller.info() == dict(response=self.eventbus.instance.event_log)\n\n    def test_publish(self):\n        event = mock.Mock()\n        self.eventbus.instance = None\n        self.eventbus.has_event.return_value = True\n        self.eventbus.publish.return_value = False\n\n        assert self.controller.info() == dict(error=\"EventBus disabled\")\n        assert len(self.eventbus.publish.mock_calls) == 0\n\n        self.eventbus.instance = mock.Mock()\n        assert self.controller.publish(event) == dict(response=f\"event {event} already published\")\n        assert len(self.eventbus.publish.mock_calls) == 0\n\n        self.eventbus.has_event.return_value = False\n        assert self.controller.publish(event) == dict(error=f\"could not publish {event}\")\n        assert len(self.eventbus.publish.mock_calls) == 1\n\n        self.eventbus.publish.return_value = True\n        assert self.controller.publish(event) == dict(response=\"OK\")\n        assert len(self.eventbus.publish.mock_calls) == 2\n\n    def test_discard(self):\n        event = mock.Mock()\n        self.eventbus.instance = None\n        self.eventbus.discard.return_value = False\n\n        assert self.controller.info() == dict(error=\"EventBus disabled\")\n        assert len(self.eventbus.discard.mock_calls) == 0\n\n        self.eventbus.instance = mock.Mock()\n        assert self.controller.discard(event) == 
dict(error=f\"could not discard {event}\")\n        assert len(self.eventbus.discard.mock_calls) == 1\n\n        self.eventbus.discard.return_value = True\n        assert self.controller.discard(event) == dict(response=\"OK\")\n        assert len(self.eventbus.discard.mock_calls) == 2\n"
  },
  {
    "path": "tests/api/requestargs_test.py",
    "content": "import datetime\nfrom unittest.mock import MagicMock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.api.requestargs import get_bool\nfrom tron.api.requestargs import get_datetime\nfrom tron.api.requestargs import get_integer\nfrom tron.api.requestargs import get_string\n\n\nclass TestRequestArgs(TestCase):\n    @setup\n    def setup_args(self):\n        self.args = {\n            b\"number\": [b\"123\"],\n            b\"string\": [b\"astring\"],\n            b\"boolean\": [b\"1\"],\n            b\"datetime\": [b\"2012-03-14 15:09:26\"],\n        }\n        self.datetime = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        self.request = MagicMock(args=self.args)\n\n    def _add_arg(self, name, value):\n        name = name.encode()\n        value = value.encode()\n        if name not in self.args:\n            self.args[name] = []\n        self.args[name].append(value)\n\n    def test_get_integer_valid_int(self):\n        self._add_arg(\"number\", \"5\")\n        assert_equal(get_integer(self.request, \"number\"), 123)\n\n    def test_get_integer_invalid_int(self):\n        self._add_arg(\"nan\", \"beez\")\n        assert not get_integer(self.request, \"nan\")\n\n    def test_get_integer_missing(self):\n        assert not get_integer(self.request, \"missing\")\n\n    def test_get_string(self):\n        self._add_arg(\"string\", \"bogus\")\n        assert_equal(get_string(self.request, \"string\"), \"astring\")\n\n    def test_get_string_missing(self):\n        assert not get_string(self.request, \"missing\")\n\n    def test_get_bool(self):\n        assert get_bool(self.request, \"boolean\")\n\n    def test_get_bool_false(self):\n        self._add_arg(\"false\", \"0\")\n        assert not get_bool(self.request, \"false\")\n\n    def test_get_bool_missing(self):\n        assert not get_bool(self.request, \"missing\")\n\n    def test_get_datetime_valid(self):\n        assert_equal(get_datetime(self.request, \"datetime\"), self.datetime)\n\n    def test_get_datetime_invalid(self):\n        self._add_arg(\"nope\", \"2012-333-4\")\n        assert not get_datetime(self.request, \"nope\")\n\n    def test_get_datetime_missing(self):\n        assert not get_datetime(self.request, \"missing\")\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/api/resource_test.py",
    "content": "\"\"\"\nTest cases for the web services interface to tron\n\"\"\"\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport staticconf.testing\nimport twisted.web.http\nimport twisted.web.resource\nimport twisted.web.server\nfrom twisted.web import http\n\nfrom tests.assertions import assert_call\nfrom tests.testingutils import autospec_method\nfrom tron import __version__\nfrom tron import mcp\nfrom tron import node\nfrom tron.api import controller\nfrom tron.core import job\nfrom tron.core import jobrun\nfrom tron.core.job_collection import JobCollection\nfrom tron.core.job_scheduler import JobScheduler\n\nwith mock.patch(\n    \"tron.api.async_resource.AsyncResource.bounded\",\n    lambda fn: fn,\n    autospec=None,\n):\n    with mock.patch(\n        \"tron.api.async_resource.AsyncResource.exclusive\",\n        lambda fn: fn,\n        autospec=None,\n    ):\n        from tron.api import resource as www\n\nREQUEST = twisted.web.server.Request(mock.Mock(), None)\nREQUEST.childLink = lambda val: \"/jobs/%s\" % val\n\n\ndef build_request(**kwargs):\n    args = {k.encode(): [v.encode()] for k, v in kwargs.items()}\n    return mock.create_autospec(twisted.web.server.Request, args=args)\n\n\n@pytest.fixture\ndef mock_request():\n    return build_request()\n\n\n@pytest.fixture\ndef mock_respond():\n    with mock.patch(\n        \"tron.api.resource.respond\",\n        autospec=True,\n    ) as mock_respond:\n        mock_respond.side_effect = lambda request, response, code=None: response\n        yield mock_respond\n\n\n@pytest.mark.usefixtures(\"mock_respond\")\nclass WWWTestCase:\n    \"\"\"Patch www.response to not json encode.\"\"\"\n\n    pass\n\n\n@pytest.mark.parametrize(\n    \"response,code,expected_code\",\n    [\n        (\"a_string\", None, 200),\n        ([{\"a\": \"list\"}], None, 200),\n        ({\"a\": \"dict\"}, None, 200),\n        ({\"a\": \"dict\"}, 501, 501),\n        ({\"error\": \"something went wrong\"}, None, 500),\n    ],\n)\ndef test_respond(response, code, expected_code):\n    request = build_request()\n    www.respond(request, response, code=code)\n    request.setResponseCode.assert_called_once_with(expected_code)\n\n\nclass TestHandleCommand:\n    @pytest.fixture\n    def mock_respond(self, mock_respond):\n        # in this test case, we don't want a side effect\n        mock_respond.side_effect = None\n        return mock_respond\n\n    def test_handle_command_unknown(self, mock_respond):\n        command = \"the command\"\n        request = build_request(command=command)\n        mock_controller, obj = mock.Mock(), mock.Mock()\n        error = controller.UnknownCommandError()\n        mock_controller.handle_command.side_effect = error\n        www.handle_command(request, mock_controller, obj)\n        mock_controller.handle_command.assert_called_with(command)\n        mock_respond.assert_called_with(\n            request=request,\n            response={\"error\": f\"Unknown command '{command}' for '{obj}'\"},\n            code=http.NOT_IMPLEMENTED,\n        )\n\n    def test_handle_command(self, mock_respond):\n        command = \"the command\"\n        request = build_request(command=command)\n        mock_controller, obj = mock.Mock(), mock.Mock()\n        www.handle_command(request, mock_controller, obj)\n        mock_controller.handle_command.assert_called_with(command)\n        mock_respond.assert_called_with(\n            request=request,\n            response={\"result\": 
mock_controller.handle_command.return_value},\n        )\n\n    def test_handle_command_error(self, mock_respond):\n        command = \"the command\"\n        request = build_request(command=command)\n        mock_controller, obj = mock.Mock(), mock.Mock()\n        error = Exception(\"uncaught exception\")\n        mock_controller.handle_command.side_effect = error\n        www.handle_command(request, mock_controller, obj)\n        mock_controller.handle_command.assert_called_with(command)\n        mock_respond.assert_called_with(request=request, response={\"error\": mock.ANY})\n\n\nclass TestActionRunResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.job_run = mock.MagicMock()\n        self.action_run = mock.MagicMock(output_path=[\"one\"])\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True,), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\",\n            autospec=True,\n        ):\n            self.resource = www.ActionRunResource(self.action_run, self.job_run)\n\n    def test_render_GET(self, mock_respond):\n        request = build_request()\n        mock_config = {\"logging.max_lines_to_display\": 1000}\n        mock_configuration = staticconf.testing.MockConfiguration(mock_config, namespace=\"tron\")\n        with mock_configuration:\n            response = self.resource.render_GET(request)\n        assert response[\"id\"] == self.resource.action_run.id\n\n\nclass TestJobrunResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.job_run = mock.MagicMock()\n        self.job_scheduler = mock.Mock()\n        self.resource = www.JobRunResource(self.job_run, self.job_scheduler)\n\n    def test_render_GET(self, mock_request):\n        response = self.resource.render_GET(mock_request)\n        assert response[\"id\"] == self.job_run.id\n\n\nclass TestApiRootResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.mcp = mock.create_autospec(mcp.MasterControlProgram)\n        self.resource = www.ApiRootResource(self.mcp)\n\n    def test__init__(self):\n        expected_children = [\n            b\"jobs\",\n            b\"config\",\n            b\"metrics\",\n            b\"status\",\n            b\"events\",\n            b\"prom-metrics\",\n            b\"\",\n        ]\n        assert set(expected_children) == set(self.resource.children)\n\n    def test_render_GET(self):\n        expected_keys = [\n            \"jobs\",\n            \"namespaces\",\n        ]\n        response = self.resource.render_GET(build_request())\n        assert set(response.keys()) == set(expected_keys)\n        self.mcp.get_job_collection().get_jobs.assert_called_with()\n\n\nclass TestRootResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.web_path = \"/bogus/path\"\n        self.mcp = mock.create_autospec(mcp.MasterControlProgram)\n        self.resource = www.RootResource(self.mcp, self.web_path)\n\n    def test_render_GET(self):\n        request = build_request()\n        response = self.resource.render_GET(request)\n        assert response == 1\n        assert request.redirect.call_count == 1\n        request.finish.assert_called_with()\n\n    def test_get_children(self):\n        assert set(self.resource.children) == {b\"api\", b\"web\", b\"\"}\n\n\nclass TestActionRunHistoryResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def 
setup_resource(self):\n        self.action_runs = [mock.MagicMock(), mock.MagicMock()]\n        self.resource = www.ActionRunHistoryResource(self.action_runs)\n\n    def test_render_GET(self, request):\n        response = self.resource.render_GET(request)\n        assert len(response) == len(self.action_runs)\n\n\nclass TestJobCollectionResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        job_collection = mock.create_autospec(JobCollection)\n        job_collection.get_by_name = lambda name: name if name == \"testname\" else None\n        self.resource = www.JobCollectionResource(job_collection)\n\n    def test_render_GET(self):\n        self.resource.get_data = MagicMock()\n        result = self.resource.render_GET(REQUEST)\n        assert_call(self.resource.get_data, 0, False, False, True, True)\n        assert \"jobs\" in result\n\n    def test_getChild(self):\n        child = self.resource.getChild(b\"testname\", mock.Mock())\n        assert isinstance(child, www.JobResource)\n\n    def test_getChild_missing_job(self):\n        child = self.resource.getChild(b\"bar\", mock.Mock())\n        assert isinstance(child, www.ErrorResource)\n\n\nclass TestJobResource(WWWTestCase):\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.job_scheduler = mock.create_autospec(JobScheduler)\n        self.job_runs = mock.create_autospec(jobrun.JobRunCollection)\n        self.job = mock.create_autospec(\n            job.Job,\n            runs=self.job_runs,\n            all_nodes=False,\n            allow_overlap=True,\n            queueing=True,\n            action_graph=mock.MagicMock(),\n            scheduler=mock.Mock(),\n            node_pool=mock.create_autospec(node.NodePool),\n            max_runtime=mock.Mock(),\n            expected_runtime=mock.MagicMock(),\n        )\n        self.job.get_name.return_value = \"foo\"\n        self.job_scheduler.get_job.return_value = self.job\n        self.job_scheduler.get_job_runs.return_value = self.job_runs\n        self.resource = www.JobResource(self.job_scheduler)\n\n    def test_render_GET(self, mock_request):\n        result = self.resource.render_GET(mock_request)\n        assert result[\"name\"] == self.job_scheduler.get_job().get_name()\n\n    def test_get_run_from_identifier_HEAD(self):\n        job_run = self.resource.get_run_from_identifier(\"HEAD\")\n        self.job_scheduler.get_job_runs.assert_called_with()\n        assert job_run == self.job_runs.get_newest.return_value\n\n    def test_get_run_from_identifier_number(self):\n        job_run = self.resource.get_run_from_identifier(\"3\")\n        self.job_scheduler.get_job_runs.assert_called_with()\n        assert job_run == self.job_runs.get_run_by_num.return_value\n        self.job_runs.get_run_by_num.assert_called_with(3)\n\n    def test_get_run_from_identifier_negative_index(self):\n        job_run = self.resource.get_run_from_identifier(\"-2\")\n        assert job_run == self.job_runs.get_run_by_index.return_value\n        self.job_runs.get_run_by_index.assert_called_with(-2)\n\n    def test_getChild(self):\n        autospec_method(self.resource.get_run_from_identifier)\n        identifier = b\"identifier\"\n        resource = self.resource.getChild(identifier, None)\n        assert resource.job_run == self.resource.get_run_from_identifier.return_value\n\n    def test_getChild_action_run_history(self):\n        autospec_method(\n            self.resource.get_run_from_identifier,\n            return_value=None,\n      
  )\n        action_name = \"action_name\"\n        action_runs = [mock.Mock(), mock.Mock()]\n        self.job.action_graph.names.return_value = [action_name]\n        self.job.runs.get_action_runs.return_value = action_runs\n        resource = self.resource.getChild(action_name, None)\n        assert resource.__class__ == www.ActionRunHistoryResource\n        assert resource.action_runs == action_runs\n\n\nclass TestConfigResource:\n    @pytest.fixture(autouse=True)\n    def setup_resource(self):\n        self.mcp = mock.create_autospec(mcp.MasterControlProgram)\n        self.resource = www.ConfigResource(self.mcp)\n        self.controller = self.resource.controller = mock.create_autospec(\n            controller.ConfigController,\n        )\n\n    def test_render_GET(self, mock_respond):\n        name = \"the_name\"\n        request = build_request(name=name)\n        self.resource.render_GET(request)\n        self.controller.read_config.assert_called_with(name)\n        mock_respond.assert_called_with(\n            request=request,\n            response=self.resource.controller.read_config.return_value,\n        )\n\n    def test_render_POST_update(self, mock_respond):\n        name, config, hash = \"the_name\", \"config\", \"hash\"\n        request = build_request(name=name, config=config, hash=hash)\n        self.resource.render_POST(request)\n        self.resource.controller.update_config.assert_called_with(name, config, hash)\n        expected_response = {\n            \"status\": \"Active\",\n            \"error\": self.resource.controller.update_config.return_value,\n        }\n        mock_respond.assert_called_with(request=request, response=expected_response)\n\n    def test_render_POST_delete(self, mock_respond):\n        name, config, hash = \"the_name\", \"\", \"\"\n        request = build_request(name=name, config=config, hash=hash)\n        self.resource.render_POST(request)\n        self.resource.controller.delete_config.assert_called_with(name, config, hash)\n        expected_response = {\n            \"status\": \"Active\",\n            \"error\": self.resource.controller.delete_config.return_value,\n        }\n        mock_respond.assert_called_with(request=request, response=expected_response)\n\n\nclass TestStatusResource:\n    def test_render_GET(self, request, mock_respond):\n        self.mcp = mock.create_autospec(mcp.MasterControlProgram)\n        self.mcp.boot_time = 999\n        resource = www.StatusResource(self.mcp)\n        resource.render_GET(request)\n        expected_response = {\n            \"status\": \"I'm alive.\",\n            \"version\": __version__,\n            \"boot_time\": self.mcp.boot_time,\n        }\n        mock_respond.assert_called_with(\n            request=request,\n            response=expected_response,\n        )\n\n\nclass TestMetricsResource:\n    @mock.patch(\"tron.api.resource.view_all_metrics\", autospec=True)\n    def test_render_GET(self, mock_view_metrics, request, mock_respond):\n        resource = www.MetricsResource()\n        resource.render_GET(request)\n        mock_respond.assert_called_with(\n            request=request,\n            response=mock_view_metrics.return_value,\n        )\n\n\nclass TestTronSite:\n    @mock.patch(\"tron.api.resource.meter\", autospec=True)\n    def test_log_request(self, mock_meter):\n        site = www.TronSite.create(\n            mock.create_autospec(mcp.MasterControlProgram),\n            \"webpath\",\n        )\n        request = mock.Mock(code=500)\n        site.log(request)\n      
  assert mock_meter.call_count == 1\n"
  },
  {
    "path": "tests/assertions.py",
    "content": "\"\"\"\n Assertions for testify.\n\"\"\"\nfrom testifycompat import assert_in\nfrom testifycompat import assert_not_reached\n\n\ndef assert_raises(expected_exception_class, callable_obj, *args, **kwargs):\n    \"\"\"Returns the exception if the callable raises expected_exception_class\"\"\"\n    try:\n        callable_obj(*args, **kwargs)\n    except expected_exception_class as e:\n        # we got the expected exception\n        return e\n    assert_not_reached(\n        \"No exception was raised (expected %s)\" % expected_exception_class,\n    )\n\n\ndef assert_length(sequence, expected, msg=None):\n    \"\"\"Assert that a sequence or iterable has an expected length.\"\"\"\n    msg = msg or \"%(sequence)s has length %(length)s expected %(expected)s\"\n    length = len(list(sequence))\n    assert length == expected, msg % locals()\n\n\ndef assert_call(mock, call_idx, *args, **kwargs):\n    \"\"\"Assert that a function was called on mock with the correct args.\"\"\"\n    actual = mock.mock_calls[call_idx] if mock.mock_calls else None\n    msg = f\"Call {call_idx} expected {(args, kwargs)}, was {actual}\"\n    assert actual == (args, kwargs), msg\n\n\ndef assert_mock_calls(expected, mock_calls):\n    \"\"\"Assert that all expected calls are in the list of mock_calls.\"\"\"\n    for expected_call in expected:\n        assert_in(expected_call, mock_calls)\n"
  },
  {
    "path": "tests/bin/__init__.py",
    "content": ""
  },
  {
    "path": "tests/bin/action_runner_test.py",
    "content": "import tempfile\nfrom unittest import mock\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import setup\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tron.bin import action_runner\n\n\nclass TestStatusFile(TestCase):\n    @setup\n    def setup_status_file(self):\n        self.filename = tempfile.NamedTemporaryFile().name\n        self.status_file = action_runner.StatusFile(self.filename)\n\n    def test_get_content(self):\n        command, proc, run_id = \"do this\", mock.Mock(), \"Job.test.1\"\n        with (\n            mock.patch(\"tron.bin.action_runner.time.time\", autospec=True) as faketime,\n            mock.patch(\"tron.bin.action_runner.os.getpid\", autospec=True) as fakepid,\n        ):\n            faketime.return_value = 0\n            fakepid.return_value = 2\n            content = self.status_file.get_content(\n                command=command,\n                proc=proc,\n                run_id=run_id,\n            )\n            expected = dict(\n                run_id=run_id,\n                command=command,\n                pid=proc.pid,\n                return_code=proc.returncode,\n                runner_pid=2,\n                timestamp=0,\n            )\n        assert_equal(content, expected)\n\n\nclass TestRegister(TestCase):\n    mock_isdir = mock_status_file = None\n    mock_makedirs = None\n\n    @setup_teardown\n    def patch_sys(self):\n        with (\n            mock.patch(\"tron.bin.action_runner.os.path.isdir\", autospec=True) as self.mock_isdir,\n            mock.patch(\"tron.bin.action_runner.os.makedirs\", autospec=True) as self.mock_makedirs,\n            mock.patch(\"tron.bin.action_runner.os.access\", autospec=True) as self.mock_access,\n            mock.patch(\"tron.bin.action_runner.StatusFile\", autospec=True) as self.mock_status_file,\n        ):\n            self.output_path = \"/bogus/path/does/not/exist\"\n            self.command = \"command\"\n            self.run_id = \"Job.test.1\"\n            self.proc = mock.Mock()\n            self.proc.wait.return_value = 0\n            yield\n\n    def test_validate_output_dir_does_not_exist(self):\n        self.mock_isdir.return_value = False\n        self.mock_access.return_value = True\n        action_runner.validate_output_dir(self.output_path)\n        self.mock_makedirs.assert_called_with(self.output_path)\n\n    def test_validate_output_dir_does_not_exist_create_fails(self):\n        self.mock_isdir.return_value = False\n        self.mock_access.return_value = True\n        self.mock_makedirs.side_effect = OSError\n        with pytest.raises(OSError):\n            action_runner.validate_output_dir(self.output_path)\n\n    def test_validate_output_dir_exists_not_writable(self):\n        self.mock_isdir.return_value = True\n        self.mock_access.return_value = False\n        with pytest.raises(OSError):\n            action_runner.validate_output_dir(self.output_path)\n\n    def test_run_proc(self):\n        self.mock_isdir.return_value = True\n        self.mock_access.return_value = True\n        action_runner.run_proc(\n            self.output_path,\n            self.command,\n            self.run_id,\n            self.proc,\n        )\n        self.mock_status_file.assert_called_with(\n            self.output_path + \"/\" + action_runner.STATUS_FILE,\n        )\n        self.mock_status_file.return_value.wrap.assert_called_with(\n            command=self.command,\n            run_id=self.run_id,\n           
 proc=self.proc,\n        )\n        self.proc.wait.assert_called_with()\n\n\nclass TestBuildEnvironment:\n    def test_build_environment(self):\n        with mock.patch(\n            \"tron.bin.action_runner.os.environ\",\n            dict(PATH=\"/usr/bin/nowhere\"),\n            autospec=None,\n        ):\n            env = action_runner.build_environment(\"MASTER.foo.10.bar\")\n\n        assert env == dict(\n            PATH=\"/usr/bin/nowhere\",\n            TRON_JOB_NAMESPACE=\"MASTER\",\n            TRON_JOB_NAME=\"foo\",\n            TRON_RUN_NUM=\"10\",\n            TRON_ACTION=\"bar\",\n        )\n\n    def test_build_environment_invalid_run_id(self):\n        with mock.patch(\n            \"tron.bin.action_runner.os.environ\",\n            dict(PATH=\"/usr/bin/nowhere\"),\n            autospec=None,\n        ):\n            env = action_runner.build_environment(\"asdf\")\n\n        assert env == dict(\n            PATH=\"/usr/bin/nowhere\",\n            TRON_JOB_NAMESPACE=\"UNKNOWN\",\n            TRON_JOB_NAME=\"UNKNOWN\",\n            TRON_RUN_NUM=\"UNKNOWN\",\n            TRON_ACTION=\"UNKNOWN\",\n        )\n\n    def test_build_environment_too_long_run_id(self):\n        with mock.patch(\n            \"tron.bin.action_runner.os.environ\",\n            dict(PATH=\"/usr/bin/nowhere\"),\n            autospec=None,\n        ):\n            env = action_runner.build_environment(\"MASTER.foo.10.bar.baz\")\n\n        assert env == dict(\n            PATH=\"/usr/bin/nowhere\",\n            TRON_JOB_NAMESPACE=\"MASTER\",\n            TRON_JOB_NAME=\"foo\",\n            TRON_RUN_NUM=\"10\",\n            TRON_ACTION=\"bar.baz\",\n        )\n\n\nclass TestBuildLabels:\n    def test_build_labels(self):\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\")\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n        }\n\n    def test_build_labels_with_merging(self):\n        current_labels = {\"LABEL1\": \"value_1\"}\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\", current_labels)\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n            \"LABEL1\": \"value_1\",\n        }\n\n    def test_build_labels_with_merging_on_unknown(self):\n        current_labels = {\"LABEL1\": \"value_1\"}\n        labels = action_runner.build_labels(\"asdf\", current_labels)\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"UNKNOWN\",\n            \"LABEL1\": \"value_1\",\n        }\n\n    def test_build_labels_invalid_run_id(self):\n        labels = action_runner.build_labels(\"asdf\")\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"UNKNOWN\",\n        }\n\n    def test_build_labels_too_long_run_id(self):\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar.baz\")\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n        }\n\n    def test_build_labels_with_attempt_number_zero(self):\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\", attempt_number=0)\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n            \"tron.yelp.com/attempt_number\": \"0\",\n        }\n\n    def test_build_labels_with_attempt_number_retry(self):\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\", attempt_number=2)\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n            \"tron.yelp.com/attempt_number\": \"2\",\n        }\n\n    def 
test_build_labels_with_attempt_number_and_original_labels(self):\n        current_labels = {\"LABEL1\": \"value_1\"}\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\", current_labels, attempt_number=1)\n\n        assert labels == {\n            \"tron.yelp.com/run_num\": \"10\",\n            \"tron.yelp.com/attempt_number\": \"1\",\n            \"LABEL1\": \"value_1\",\n        }\n\n    def test_build_labels_without_attempt_number_omits_label(self):\n        labels = action_runner.build_labels(\"MASTER.foo.10.bar\")\n\n        assert \"tron.yelp.com/attempt_number\" not in labels\n"
  },
  {
    "path": "tests/bin/action_status_test.py",
    "content": "import signal\nimport tempfile\nfrom unittest import mock\n\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tron import yaml\nfrom tron.bin import action_status\n\n\nclass TestActionStatus(TestCase):\n    @setup_teardown\n    def setup_status_file(self):\n        self.status_file = tempfile.NamedTemporaryFile(mode=\"r+\")\n        self.status_content = {\n            \"pid\": 1234,\n            \"return_code\": None,\n            \"run_id\": \"MASTER.foo.bar.1234\",\n        }\n        self.status_file.write(yaml.safe_dump(self.status_content))\n        self.status_file.flush()\n        self.status_file.seek(0)\n        yield\n        self.status_file.close()\n\n    @mock.patch(\"tron.bin.action_status.os.killpg\", autospec=True)\n    @mock.patch(\n        \"tron.bin.action_status.os.getpgid\",\n        autospec=True,\n        return_value=42,\n    )\n    def test_send_signal(self, mock_getpgid, mock_kill):\n        action_status.send_signal(signal.SIGKILL, self.status_file)\n        mock_getpgid.assert_called_with(self.status_content[\"pid\"])\n        mock_kill.assert_called_with(42, signal.SIGKILL)\n\n    def test_get_field_retrieves_last_entry(self):\n        self.status_file.seek(0, 2)\n        additional_status_content = {\n            \"pid\": 1234,\n            \"return_code\": 0,\n            \"run_id\": \"MASTER.foo.bar.1234\",\n            \"command\": \"echo \" + \"really_long\" * 100,\n        }\n        self.status_file.write(\n            yaml.safe_dump(additional_status_content, explicit_start=True),\n        )\n        self.status_file.flush()\n        self.status_file.seek(0)\n        assert action_status.get_field(\"return_code\", self.status_file) == 0\n\n    def test_get_field_none(self):\n        assert action_status.get_field(\"return_code\", self.status_file) is None\n"
  },
  {
    "path": "tests/bin/check_tron_jobs_test.py",
    "content": "import time\nfrom unittest import mock\nfrom unittest.mock import patch\nfrom unittest.mock import PropertyMock\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import TestCase\nfrom tron.bin import check_tron_jobs\nfrom tron.bin.check_tron_jobs import State\n\n\n@pytest.fixture(autouse=True)\ndef mock_run_interval():\n    with patch.object(check_tron_jobs, \"_run_interval\", 300):\n        yield\n\n\nclass TestCheckJobs(TestCase):\n    @patch(\"tron.bin.check_tron_jobs.check_job_result\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.Client\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.cmd_utils\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.parse_cli\", autospec=True)\n    def test_check_job_result_exception(\n        self,\n        mock_args,\n        mock_cmd_utils,\n        mock_client,\n        mock_check_job_result,\n    ):\n        type(mock_args.return_value).job = PropertyMock(return_value=None)\n        mock_client.return_value.jobs.return_value = [\n            {\n                \"name\": \"job1\",\n            },\n            {\n                \"name\": \"job2\",\n            },\n            {\n                \"name\": \"job3\",\n            },\n        ]\n        mock_check_job_result.side_effect = [\n            KeyError(\"foo\"),\n            None,\n            TypeError,\n        ]\n        error_code = check_tron_jobs.main()\n        assert_equal(error_code, 1)\n        assert_equal(mock_check_job_result.call_count, 3)\n\n    # These tests test job run succeeded scenarios\n    def test_job_succeeded(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.SUCCEEDED)\n\n    def test_job_running_and_action_succeeded(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n        
                time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"runs\": [\n                        {\n                            \"id\": \"MASTER.test.2.action2\",\n                            \"state\": \"running\",\n                        },\n                        {\n                            \"id\": \"MASTER.test.1.action1\",\n                            \"state\": \"succeeded\",\n                        },\n                    ],  # noqa: E122\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.SUCCEEDED)\n\n    def test_get_relevant_action_picks_the_first_one_succeeded(self):\n        action_runs = [\n            {\n                \"id\": \"MASTER.test.action1\",\n                \"action_name\": \"action1\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 1200),\n                ),\n                \"duration\": \"0:18:01.475067\",\n            },\n            {\n                \"id\": \"MASTER.test.action2\",\n                \"action_name\": \"action2\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:08:02.005783\",\n            },\n            {\n                \"id\": \"MASTER.test.action1\",\n                \"action_name\": \"action1\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time()),\n                ),\n                \"duration\": \"0:00:01.006305\",\n            },\n        ]\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.SUCCEEDED,\n            actions_expected_runtime={\n                \"action1\": 86400.0,\n                \"action2\": 86400.0,\n                \"action3\": 86400.0,\n            },\n        )\n        assert_equal(actual[\"id\"], \"MASTER.test.action1\")\n\n    # These tests test job run failed scenarios\n    def test_job_failed(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    
\"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"failed\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.FAILED)\n\n    def test_most_recent_end_time_job_failed(self):\n        job_runs = {\n            \"status\": \"scheduled\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"failed\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 500),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.FAILED)\n\n    def test_rerun_job_failed(self):\n        job_runs = {\n            \"status\": \"scheduled\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.4\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n           
     },\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"failed\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 100),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 500),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"failed\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.3\")\n        assert_equal(state, State.FAILED)\n\n    def test_job_running_but_action_failed_already(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"runs\": [\n                        {\n                            \"id\": \"MASTER.test.2.action2\",\n                            \"state\": \"running\",\n                        },\n                        {\n                            \"id\": \"MASTER.test.1.action1\",\n                            \"state\": \"failed\",\n                        },\n                    ],  # noqa: E122\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = 
check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.FAILED)\n\n    def test_get_relevant_action_picks_the_one_that_failed(self):\n        action_runs = [\n            {\n                \"node\": {\n                    \"username\": \"batch\",\n                    \"hostname\": \"localhost\",\n                    \"name\": \"localhost\",\n                    \"port\": 22,\n                },\n                \"raw_command\": \"/bin/false\",\n                \"requirements\": [],\n                \"run_num\": \"582\",\n                \"exit_status\": 1,\n                \"stdout\": None,\n                \"start_time\": \"2018-02-05 17:40:00\",\n                \"id\": \"MASTER.kwatest.582.action1\",\n                \"action_name\": \"action1\",\n                \"state\": \"failed\",\n                \"command\": \"/bin/false\",\n                \"end_time\": \"2018-02-05 17:40:00\",\n                \"stderr\": None,\n                \"duration\": \"0:00:00.065018\",\n                \"job_name\": \"MASTER.kwatest\",\n            },\n            {\n                \"node\": {\n                    \"username\": \"batch\",\n                    \"hostname\": \"localhost\",\n                    \"name\": \"localhost\",\n                    \"port\": 22,\n                },\n                \"raw_command\": \"/bin/true\",\n                \"requirements\": [],\n                \"run_num\": \"582\",\n                \"exit_status\": 0,\n                \"stdout\": None,\n                \"start_time\": \"2018-02-05 17:40:00\",\n                \"id\": \"MASTER.kwatest.582.action2\",\n                \"action_name\": \"action2\",\n                \"state\": \"succeeded\",\n                \"command\": \"/bin/true\",\n                \"end_time\": \"2018-02-05 17:40:00\",\n                \"stderr\": None,\n                \"duration\": \"0:00:00.046243\",\n                \"job_name\": \"MASTER.kwatest\",\n            },\n        ]\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.FAILED,\n            actions_expected_runtime={},\n        )\n        assert_equal(actual[\"state\"], \"failed\")\n\n    # These tests test job/action stuck scenarios\n    def test_job_next_run_starting_no_overlap_is_stuck(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"queued\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                    \"end_time\": None,\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.STUCK)\n\n    def test_job_next_run_starting_overlap_allowed_not_stuck(self):\n        job_runs = {\n            \"status\": 
\"running\",\n            \"next_run\": None,\n            \"allow_overlap\": True,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"queued\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.SUCCEEDED)\n\n    def test_job_next_run_running_no_queueing_not_stuck(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"allow_overlap\": False,\n            \"queueing\": False,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"cancelled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.SUCCEEDED)\n\n    def test_job_next_run_starting_no_queueing_not_stuck(self):\n        job_runs = {\n            \"status\": \"starting\",\n            \"next_run\": None,\n            \"allow_overlap\": False,\n            \"queueing\": False,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": 
\"cancelled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"starting\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.SUCCEEDED)\n\n    def test_job_running_job_exceeds_expected_runtime(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"expected_runtime\": 480.0,\n            \"allow_overlap\": True,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.100\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                    \"start_time\": None,\n                    \"duration\": \"\",\n                },\n                {\n                    \"id\": \"MASTER.test.99\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"duration\": \"0:10:01.883601\",\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.99\")\n        assert_equal(state, State.STUCK)\n\n    def test_job_starting_job_exceeds_expected_runtime(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"expected_runtime\": 480.0,\n            \"allow_overlap\": True,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.100\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                    \"start_time\": None,\n                    \"duration\": \"\",\n                },\n                {\n             
       \"id\": \"MASTER.test.99\",\n                    \"state\": \"starting\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"duration\": \"0:10:01.883601\",\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.99\")\n        assert_equal(state, State.STUCK)\n\n    def test_job_waiting_job_exceeds_expected_runtime_already_started(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"expected_runtime\": 480.0,\n            \"allow_overlap\": True,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.100\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                    \"start_time\": None,\n                    \"duration\": \"\",\n                },\n                {\n                    \"id\": \"MASTER.test.99\",\n                    \"state\": \"waiting\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"duration\": \"0:10:01.883601\",\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.99\")\n        assert_equal(state, State.STUCK)\n\n    def test_job_running_action_exceeds_expected_runtime(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"actions_expected_runtime\": {\n                \"action1\": 720.0,\n                \"action2\": 480.0,\n            },\n            \"runs\": [\n                dict(\n                    id=\"MASTER.test.3\",\n                    state=\"scheduled\",\n                    run_time=time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    end_time=None,\n                    duration=\"\",\n                ),\n                dict(\n                    id=\"MASTER.test.2\",\n                    state=\"running\",\n                    run_time=time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    end_time=None,\n                    duration=\"0:10:01.883601\",\n                    runs=[\n                        dict(\n                            id=\"MASTER.test.2.action2\",\n                            state=\"running\",\n                            
action_name=\"action2\",\n                            start_time=time.strftime(\n                                \"%Y-%m-%d %H:%M:%S\",\n                                time.localtime(time.time() - 600),\n                            ),\n                            duration=\"0:10:01.883601\",\n                        ),\n                        dict(\n                            id=\"MASTER.test.2.action1\",\n                            state=\"running\",\n                            action_name=\"action1\",\n                            start_time=time.strftime(\n                                \"%Y-%m-%d %H:%M:%S\",\n                                time.localtime(time.time() - 600),\n                            ),\n                            duration=\"0:10:01.885401\",\n                        ),\n                    ],\n                ),\n                dict(\n                    id=\"MASTER.test.1\",\n                    state=\"succeeded\",\n                    run_time=time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    end_time=time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                    duration=\"0:15:00.453601\",\n                ),\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.STUCK)\n\n    def test_job_running_action_exceeds_expected_runtime_and_other_action_failed(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"actions_expected_runtime\": {\n                \"action1\": 720.0,\n                \"action2\": 480.0,\n            },\n            \"runs\": [\n                dict(\n                    id=\"MASTER.test.1\",\n                    state=\"running\",\n                    run_time=time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    end_time=None,\n                    duration=\"0:10:01.883601\",\n                    runs=[\n                        dict(\n                            id=\"MASTER.test.1.action2\",\n                            state=\"failed\",\n                            action_name=\"action2\",\n                            start_time=time.strftime(\n                                \"%Y-%m-%d %H:%M:%S\",\n                                time.localtime(time.time() - 600),\n                            ),\n                            duration=\"0:10:01.883601\",\n                        ),\n                        dict(\n                            id=\"MASTER.test.1.action1\",\n                            state=\"running\",\n                            action_name=\"action1\",\n                            start_time=time.strftime(\n                                \"%Y-%m-%d %H:%M:%S\",\n                                time.localtime(time.time() - 600),\n                            ),\n                            duration=\"0:10:01.885401\",\n                        ),\n                    ],\n                ),\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.FAILED)\n\n    
def test_job_stuck_when_runtime_not_sorted(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time()),\n                    ),\n                    \"end_time\": None,\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.STUCK)\n\n    def test_get_relevant_action_pick_the_one_stuck(self):\n        action_runs = [\n            {\n                \"id\": \"MASTER.test.1.action3\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 1200),\n                ),\n                \"duration\": \"0:18:01.475067\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action2\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 1100),\n                ),\n                \"duration\": \"0:18:40.005783\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action1\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 1000),\n                ),\n                \"duration\": \"0:00:01.006305\",\n            },\n        ]\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.STUCK,\n            actions_expected_runtime={\n                \"action1\": 86400.0,\n                \"action2\": 86400.0,\n                \"action3\": 86400.0,\n            },\n        )\n        assert_equal(actual[\"id\"], \"MASTER.test.1.action2\")\n\n    def test_get_relevant_action_pick_the_one_exceeds_expected_runtime(self):\n        action_runs = [\n            {\n                \"id\": \"MASTER.test.1.action3\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006305\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action2\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006383\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action1\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                   
 \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006331\",\n            },\n        ]\n        actions_expected_runtime = {\n            \"action3\": 480.0,\n            \"action2\": 720.0,\n            \"action1\": 900.0,\n        }\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.STUCK,\n            actions_expected_runtime=actions_expected_runtime,\n        )\n        assert_equal(actual[\"id\"], \"MASTER.test.1.action3\")\n\n    def test_get_relevant_action_pick_the_one_starting(self):\n        action_runs = [\n            {\n                \"id\": \"MASTER.test.1.action3\",\n                \"state\": \"starting\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006305\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action2\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006383\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action1\",\n                \"state\": \"succeeded\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"0:10:00.006331\",\n            },\n        ]\n        actions_expected_runtime = {\n            \"action3\": 480.0,\n            \"action2\": 720.0,\n            \"action1\": 900.0,\n        }\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.STUCK,\n            actions_expected_runtime=actions_expected_runtime,\n        )\n        assert_equal(actual[\"id\"], \"MASTER.test.1.action3\")\n\n    def test_get_relevant_action_pick_the_one_exceeds_expected_runtime_with_long_duration(\n        self,\n    ):\n        action_runs = [\n            {\n                \"id\": \"MASTER.test.1.action3\",\n                \"action_name\": \"action3\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"1 day, 0:10:00.006305\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action2\",\n                \"action_name\": \"action2\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"2 days, 0:10:00.006383\",\n            },\n            {\n                \"id\": \"MASTER.test.1.action1\",\n                \"action_name\": \"action1\",\n                \"state\": \"running\",\n                \"start_time\": time.strftime(\n                    \"%Y-%m-%d %H:%M:%S\",\n                    time.localtime(time.time() - 600),\n                ),\n                \"duration\": \"1 day, 0:10:00.006331\",\n            },\n        ]\n        actions_expected_runtime = {\n            
\"action3\": 100000.0,\n            \"action2\": 100000.0,\n            \"action1\": 100000.0,\n        }\n        actual = check_tron_jobs.get_relevant_action(\n            action_runs=action_runs,\n            last_state=State.STUCK,\n            actions_expected_runtime=actions_expected_runtime,\n        )\n        assert_equal(actual[\"id\"], \"MASTER.test.1.action2\")\n\n    def test_no_job_scheduled_or_queuing(self):\n        \"\"\"If the past 2 runs succeeded but no future job is scheuled,\n        we should consider the job to have suceeded.\n        \"\"\"\n        job_runs = {\n            \"status\": \"succeeded\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 300),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 900),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                },\n            ],\n            \"monitoring\": {},\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.SUCCEEDED)\n\n    # These tests test job without succeeded/failed run scenarios\n    def test_job_no_runs_to_check(self):\n        job_runs = {\n            \"status\": \"scheduled\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 1200),\n                    ),\n                    \"end_time\": None,\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.1\")\n        assert_equal(state, State.NO_RUNS_TO_CHECK)\n\n    def test_job_has_no_runs_at_all(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run, None)\n        assert_equal(state, State.NO_RUN_YET)\n\n    # These tests test job/action unknown scenarios\n    def test_job_unknown(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    
),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"unknown\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1200),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.UNKNOWN)\n\n    def test_job_running_but_action_unknown_already(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"runs\": [\n                        {\n                            \"id\": \"MASTER.test.2.action2\",\n                            \"state\": \"running\",\n                        },\n                        {\n                            \"id\": \"MASTER.test.1.action1\",\n                            \"state\": \"unknown\",\n                        },\n                    ],  # noqa: E122\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.UNKNOWN)\n\n    def test_job_waiting_but_action_unknown_already(self):\n        job_runs = {\n            \"status\": \"waiting\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        
time.localtime(time.time() + 600),\n                    ),\n                    \"end_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"waiting\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"end_time\": None,\n                    \"runs\": [\n                        {\n                            \"id\": \"MASTER.test.2.action2\",\n                            \"state\": \"waiting\",\n                        },\n                        {\n                            \"id\": \"MASTER.test.1.action1\",\n                            \"state\": \"unknown\",\n                        },\n                    ],  # noqa: E122\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                    \"end_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1700),\n                    ),\n                },\n            ],\n        }\n        run, state = check_tron_jobs.get_relevant_run_and_state(job_runs)\n        assert_equal(run[\"id\"], \"MASTER.test.2\")\n        assert_equal(state, State.UNKNOWN)\n\n    # These tests test guess realert feature\n    def test_guess_realert_every(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": time.strftime(\n                \"%Y-%m-%d %H:%M:%S\",\n                time.localtime(time.time() + 600),\n            ),\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"start_time\": None,\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() + 600),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"failed\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                },\n            ],  # noqa: E122\n        }\n        realert_every = check_tron_jobs.guess_realert_every(job_runs)\n        assert_equal(realert_every, 4)\n\n    def test_guess_realert_every_no_action_run_starts(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": time.strftime(\n                \"%Y-%m-%d %H:%M:%S\",\n                time.localtime(time.time() + 600),\n            ),\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"start_time\": None,\n                },\n                
{\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"failed\",\n                    \"start_time\": None,\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 200),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                    \"run_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                },\n            ],  # noqa: E122\n        }\n        realert_every = check_tron_jobs.guess_realert_every(job_runs)\n        assert_equal(realert_every, 2)\n\n    def test_guess_realert_every_queue_job(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": None,\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"queued\",\n                    \"start_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"running\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 600),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 1800),\n                    ),\n                },\n            ],\n        }\n        realert_every = check_tron_jobs.guess_realert_every(job_runs)\n        assert_equal(realert_every, -1)\n\n    def test_guess_realert_every_frequent_run(self):\n        job_runs = {\n            \"status\": \"running\",\n            \"next_run\": time.strftime(\n                \"%Y-%m-%d %H:%M:%S\",\n                time.localtime(time.time() + 10),\n            ),\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.3\",\n                    \"state\": \"scheduled\",\n                    \"start_time\": None,\n                },\n                {\n                    \"id\": \"MASTER.test.2\",\n                    \"state\": \"failed\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 10),\n                    ),\n                },\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"succeeded\",\n                    \"start_time\": time.strftime(\n                        \"%Y-%m-%d %H:%M:%S\",\n                        time.localtime(time.time() - 20),\n                    ),\n                },\n            ],  # noqa: E122\n        }\n        realert_every = check_tron_jobs.guess_realert_every(job_runs)\n        assert_equal(realert_every, 1)\n\n    def test_guess_realert_every_first_time_job(self):\n        job_runs = {\n            \"status\": \"enabled\",\n            \"next_run\": time.strftime(\n                \"%Y-%m-%d 
%H:%M:%S\",\n                time.localtime(time.time() + 600),\n            ),\n            \"runs\": [\n                {\n                    \"id\": \"MASTER.test.1\",\n                    \"state\": \"scheduled\",\n                    \"start_time\": None,\n                },\n            ],\n        }\n        realert_every = check_tron_jobs.guess_realert_every(job_runs)\n        assert_equal(realert_every, -1)\n\n\nclass TestCheckPreciousJobs:\n    @pytest.fixture(autouse=True)\n    def setup_job(self):\n        self.job_name = \"fake_job\"\n        self.monitoring = {\n            \"team\": \"fake_team\",\n            \"notification_email\": \"fake_email\",\n            check_tron_jobs.PRECIOUS_JOB_ATTR: True,\n        }\n        self.runs = [\n            {\n                \"id\": f\"{self.job_name}.15\",\n                \"job_name\": self.job_name,\n                \"run_num\": 15,\n                \"run_time\": \"2018-10-13 12:00:00\",\n                \"start_time\": \"2018-10-13 12:00:00\",\n                \"end_time\": \"2018-10-13 12:30:00\",\n                \"state\": \"succeeded\",\n                \"exit_status\": 0,\n            },\n            {\n                \"id\": f\"{self.job_name}.14\",\n                \"job_name\": self.job_name,\n                \"run_num\": 14,\n                \"run_time\": \"2018-10-12 12:00:00\",\n                \"start_time\": \"2018-10-12 12:00:00\",\n                \"end_time\": \"2018-10-12 12:30:00\",\n                \"state\": \"failed\",\n                \"exit_status\": 1,\n            },\n            {\n                \"id\": f\"{self.job_name}.13\",\n                \"job_name\": self.job_name,\n                \"run_num\": 13,\n                \"run_time\": \"2018-10-11 13:00:00\",\n                \"start_time\": \"2018-10-11 13:00:00\",\n                \"end_time\": \"2018-10-11 13:30:00\",\n                \"state\": \"failed\",\n                \"exit_status\": 1,\n            },\n            {\n                \"id\": f\"{self.job_name}.12\",\n                \"job_name\": self.job_name,\n                \"run_num\": 12,\n                \"run_time\": \"2018-10-11 12:00:00\",\n                \"start_time\": \"2018-10-11 12:00:00\",\n                \"end_time\": \"2018-10-11 12:30:00\",\n                \"state\": \"succeeded\",\n                \"exit_status\": 0,\n            },\n            {\n                \"id\": f\"{self.job_name}.11\",\n                \"job_name\": self.job_name,\n                \"run_num\": 11,\n                \"run_time\": \"2018-10-10 13:00:00\",\n                \"start_time\": \"2018-10-10 13:00:00\",\n                \"end_time\": \"2018-10-10 13:30:00\",\n                \"state\": \"succeeded\",\n                \"exit_status\": 0,\n            },\n            {\n                \"id\": f\"{self.job_name}.10\",\n                \"job_name\": self.job_name,\n                \"run_num\": 10,\n                \"run_time\": \"2018-10-10 12:00:00\",\n                \"start_time\": \"2018-10-10 12:00:00\",\n                \"end_time\": \"2018-10-10 12:30:00\",\n                \"state\": \"failed\",\n                \"exit_status\": 1,\n            },\n        ]\n        self.job = {\n            \"name\": \"fake_job\",\n            \"status\": \"enabled\",\n            \"monitoring\": self.monitoring,\n            \"runs\": self.runs,\n        }\n\n    @patch(\"time.time\", mock.Mock(return_value=1539460800.0), autospec=None)\n    def test_sort_runs_by_interval_day(self):\n       
 run_buckets = check_tron_jobs.sort_runs_by_interval(self.job, \"day\")\n\n        assert set(run_buckets.keys()) == {\n            \"2018.10.10\",\n            \"2018.10.11\",\n            \"2018.10.12\",\n            \"2018.10.13\",\n        }\n        assert len(run_buckets[\"2018.10.10\"]) == 2\n        assert len(run_buckets[\"2018.10.11\"]) == 2\n        assert len(run_buckets[\"2018.10.12\"]) == 1\n        assert len(run_buckets[\"2018.10.13\"]) == 1\n\n    @patch(\"time.time\", mock.Mock(return_value=1539633600.0), autospec=None)\n    def test_sort_runs_by_interval_day_empty_buckets(self):\n        self.job[\"runs\"] = [\n            {\n                \"id\": f\"{self.job_name}.16\",\n                \"job_name\": self.job_name,\n                \"run_num\": 16,\n                \"run_time\": \"2018-10-15 12:00:00\",\n                \"start_time\": \"2018-10-15 12:00:00\",\n                \"end_time\": \"2018-10-15 12:30:00\",\n                \"state\": \"succeeded\",\n                \"exit_status\": 0,\n            }\n        ] + self.job[\"runs\"]\n\n        run_buckets = check_tron_jobs.sort_runs_by_interval(self.job, \"day\")\n\n        assert \"2018.10.14\" in run_buckets\n        assert run_buckets[\"2018.10.14\"] == []\n\n    @patch(\"time.time\", mock.Mock(return_value=1539633600.0), autospec=None)\n    def test_sort_runs_by_interval_day_old_empty_buckets(self):\n        # If the newest run is a backfill for an older date, shouldn't be\n        # included for buckets\n        self.job[\"runs\"] = [\n            {\n                \"id\": f\"{self.job_name}.16\",\n                \"job_name\": self.job_name,\n                \"run_num\": 16,\n                \"run_time\": \"2018-10-01 12:00:00\",\n                \"start_time\": \"2018-10-01 12:00:00\",\n                \"end_time\": \"2018-10-01 12:30:00\",\n                \"state\": \"succeeded\",\n                \"exit_status\": 0,\n            }\n        ] + self.job[\"runs\"]\n\n        run_buckets = check_tron_jobs.sort_runs_by_interval(self.job, \"day\")\n        # Current time is patched to 2018-10-15, so we should include\n        # NUM_PRECIOUS intervals, starting 2018-10-09\n        # 2018-10-01 should not be included, even though there is a run for it,\n        # because it is too old\n        assert \"2018.10.01\" not in run_buckets\n        assert run_buckets[\"2018.10.14\"] == []\n        assert len(run_buckets) == 7\n\n    @patch(\n        \"tron.bin.check_tron_jobs.guess_realert_every\",\n        mock.Mock(return_value=1),\n        autospec=None,\n    )\n    @patch(\"tron.bin.check_tron_jobs.Client\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.compute_check_result_for_job_runs\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.get_object_type_from_identifier\", autospec=True)\n    def test_compute_check_result_for_job_not_precious(\n        self,\n        mock_get_obj_type,\n        mock_check_job_runs,\n        mock_client,\n    ):\n        client = mock_client(\"fake_server\")\n        client.cluster_name = \"fake_cluster\"\n        del self.job[\"monitoring\"][check_tron_jobs.PRECIOUS_JOB_ATTR]\n        client.job = mock.Mock(return_value=self.job)\n        mock_check_job_runs.return_value = {\n            \"output\": \"fake_output\",\n            \"status\": \"fake_status\",\n        }\n\n        results = check_tron_jobs.compute_check_result_for_job(\n            client,\n            self.job,\n            url_index={},\n        )\n\n        # make sure all job runs for a job are 
included by not incl count arg\n        assert client.job.call_args_list == [\n            mock.call(\n                mock_get_obj_type.return_value.url,\n                include_action_runs=True,\n            ),\n        ]\n        assert len(results) == 1\n        assert results[0][\"name\"] == \"check_tron_job.fake_job\"\n        assert mock_check_job_runs.call_count == 1\n\n    @patch(\n        \"tron.bin.check_tron_jobs.guess_realert_every\",\n        mock.Mock(return_value=1),\n        autospec=None,\n    )\n    @patch(\"tron.bin.check_tron_jobs.Client\", autospec=True)\n    def test_compute_check_result_for_job_disabled(self, mock_client):\n        client = mock_client(\"fake_server\")\n        client.cluster_name = \"fake_cluster\"\n        self.job[\"status\"] = \"disabled\"\n\n        results = check_tron_jobs.compute_check_result_for_job(\n            client,\n            self.job,\n            url_index={},\n        )\n\n        assert len(results) == 1\n        assert results[0][\"status\"] == 0\n        assert results[0][\"output\"] == \"OK: fake_job is disabled and won't be checked.\"\n\n    @patch(\n        \"tron.bin.check_tron_jobs.guess_realert_every\",\n        mock.Mock(return_value=1),\n        autospec=None,\n    )\n    @patch(\"time.time\", mock.Mock(return_value=1539460800.0), autospec=None)\n    @patch(\"tron.bin.check_tron_jobs.Client\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.compute_check_result_for_job_runs\", autospec=True)\n    @patch(\"tron.bin.check_tron_jobs.get_object_type_from_identifier\", autospec=True)\n    def test_compute_check_result_for_job_enabled(\n        self,\n        mock_get_obj_type,\n        mock_check_job_runs,\n        mock_client,\n    ):\n        client = mock_client(\"fake_server\")\n        client.cluster_name = \"fake_cluster\"\n        self.job[\"monitoring\"][\"check_every\"] = 500\n        self.job[\"monitoring\"][\"hide_stderr\"] = True\n        client.job = mock.Mock(return_value=self.job)\n        mock_check_job_runs.return_value = {\n            \"output\": \"fake_output\",\n            \"status\": \"fake_status\",\n        }\n\n        results = check_tron_jobs.compute_check_result_for_job(\n            client,\n            self.job,\n            url_index={},\n        )\n\n        # Test that hide_stderr is passed to check_job_runs\n        assert mock_check_job_runs.call_args_list[0][1][\"hide_stderr\"] is True\n\n        # make sure all job runs for a job are included by not incl count arg\n        assert client.job.call_args_list == [\n            mock.call(\n                mock_get_obj_type.return_value.url,\n                include_action_runs=True,\n            ),\n        ]\n        assert len(results) == 4\n        assert {res[\"name\"] for res in results} == {\n            \"check_tron_job.fake_job-2018.10.10\",\n            \"check_tron_job.fake_job-2018.10.11\",\n            \"check_tron_job.fake_job-2018.10.12\",\n            \"check_tron_job.fake_job-2018.10.13\",\n        }\n        for res in results:\n            assert res[\"check_every\"] == \"300s\"\n            assert check_tron_jobs.PRECIOUS_JOB_ATTR not in res\n"
  },
  {
    "path": "tests/bin/get_tron_metrics_test.py",
    "content": "import subprocess\nfrom unittest import mock\n\nimport pytest\n\nfrom tron.bin import get_tron_metrics\n\n\ndef test_send_data_metric():\n    process = mock.Mock()\n    process.communicate = mock.Mock(return_value=(b\"fake_output\", b\"fake_error\"))\n    cmd_str = \"meteorite data -v fake_name fake_metric_type fake_value \" \"-d fake_dim_key:fake_dim_value\"\n\n    with mock.patch(\n        \"subprocess.Popen\",\n        mock.Mock(return_value=process),\n        autospec=None,\n    ) as mock_popen:\n        get_tron_metrics.send_data_metric(\n            name=\"fake_name\",\n            metric_type=\"fake_metric_type\",\n            value=\"fake_value\",\n            dimensions={\"fake_dim_key\": \"fake_dim_value\"},\n            dry_run=False,\n        )\n\n        assert mock_popen.call_count == 1\n        assert mock_popen.call_args == mock.call(\n            cmd_str.split(),\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n        )\n\n\ndef test_send_data_metric_dry_run():\n    with mock.patch(\"subprocess.Popen\", autospec=True) as mock_popen:\n        get_tron_metrics.send_data_metric(\n            name=\"fake_name\",\n            metric_type=\"fake_metric_type\",\n            value=\"fake_value\",\n            dimensions=\"fake_dimensions\",\n            dry_run=True,\n        )\n\n        assert mock_popen.call_count == 0\n\n\n@mock.patch(\"tron.bin.get_tron_metrics.send_data_metric\", autospec=True)\ndef test_send_counter(mock_send_data_metric):\n    kwargs = dict(count=\"fake_count\")\n\n    get_tron_metrics.send_counter(\"fake_name\", **kwargs)\n\n    assert mock_send_data_metric.call_count == 1\n    assert mock_send_data_metric.call_args == mock.call(\n        name=\"fake_name\",\n        metric_type=\"counter\",\n        value=\"fake_count\",\n        dimensions={},\n        dry_run=False,\n    )\n\n\n@mock.patch(\"tron.bin.get_tron_metrics.send_data_metric\", autospec=True)\ndef test_send_gauge(mock_send_data_metric):\n    kwargs = dict(value=\"fake_value\")\n\n    get_tron_metrics.send_gauge(\"fake_name\", **kwargs)\n\n    assert mock_send_data_metric.call_count == 1\n    assert mock_send_data_metric.call_args == mock.call(\n        name=\"fake_name\",\n        metric_type=\"gauge\",\n        value=\"fake_value\",\n        dimensions={},\n        dry_run=False,\n    )\n\n\n@mock.patch(\"tron.bin.get_tron_metrics.send_counter\", autospec=True)\ndef test_send_meter(mock_send_counter):\n    get_tron_metrics.send_meter(\"fake_name\")\n\n    assert mock_send_counter.call_count == 1\n    assert mock_send_counter.call_args == mock.call(\"fake_name\")\n\n\n@mock.patch(\"tron.bin.get_tron_metrics.send_gauge\", autospec=True)\ndef test_send_histogram(mock_send_gauge):\n    kwargs = dict(\n        p50=\"fake_p50\",\n        p75=\"fake_p75\",\n        p95=\"fake_p95\",\n        p99=\"fake_p99\",\n    )\n    p50_kwargs = dict(\n        **kwargs,\n        value=\"fake_p50\",\n    )\n\n    get_tron_metrics.send_histogram(\"fake_name\", **kwargs)\n\n    assert mock_send_gauge.call_count == len(kwargs)\n    assert mock_send_gauge.call_args_list[0] == mock.call(\n        \"fake_name.p50\",\n        **p50_kwargs,\n    )\n\n\n@mock.patch(\"tron.bin.get_tron_metrics.send_meter\", autospec=True)\n@mock.patch(\"tron.bin.get_tron_metrics.send_histogram\", autospec=True)\ndef test_send_timer(mock_send_meter, mock_send_histogram):\n    get_tron_metrics.send_timer(\"fake_name\")\n\n    assert mock_send_meter.call_count == 1\n    assert 
mock_send_meter.call_args == mock.call(\"fake_name\")\n    assert mock_send_histogram.call_count == 1\n    assert mock_send_histogram.call_args == mock.call(\"fake_name\")\n\n\n@pytest.mark.parametrize(\"cluster\", [\"fake_cluster\", None])\ndef test_send_metrics(cluster):\n    mock_send_counter = mock.Mock()\n    metrics = dict(counter=[dict(name=\"fake_name\")])\n\n    with mock.patch(\n        \"tron.bin.get_tron_metrics._METRIC_SENDERS\",\n        dict(counter=mock_send_counter),\n        autospec=None,\n    ):\n        get_tron_metrics.send_metrics(metrics, cluster=cluster, dry_run=True)\n\n    assert mock_send_counter.call_count == 1\n    if cluster:\n        assert mock_send_counter.call_args == mock.call(\n            \"fake_name\",\n            dry_run=True,\n            dimensions={\"tron_cluster\": \"fake_cluster\"},\n        )\n    else:\n        assert mock_send_counter.call_args == mock.call(\"fake_name\", dry_run=True)\n"
  },
  {
    "path": "tests/bin/recover_batch_test.py",
    "content": "import tempfile\nfrom queue import Queue\nfrom unittest import mock\n\nimport pytest\n\nfrom tron.bin import recover_batch\nfrom tron.bin.action_runner import StatusFile\n\n\n@pytest.fixture\ndef mock_file():\n    f = tempfile.NamedTemporaryFile()\n    yield f.name\n    f.close()\n\n\n@mock.patch.object(recover_batch, \"reactor\")\n@mock.patch(\"tron.bin.recover_batch.get_exit_code\", autospec=True)\n@pytest.mark.parametrize(\n    \"exit_code,error_msg,should_stop\",\n    [\n        (1, \"failed\", True),\n        (None, None, False),\n    ],\n)\ndef test_notify(mock_get_exit_code, mock_reactor, exit_code, error_msg, should_stop):\n    mock_get_exit_code.return_value = exit_code, error_msg\n    queue = Queue()\n    path = mock.Mock()\n    recover_batch.notify(queue, \"some_ignored\", path, \"mask\")\n    if should_stop:\n        assert mock_reactor.stop.call_count == 1\n        assert queue.get_nowait() == (exit_code, error_msg)\n    else:\n        assert mock_reactor.stop.call_count == 0\n        assert queue.empty()\n\n\n@mock.patch(\"tron.bin.recover_batch.psutil.pid_exists\", autospec=True)\n@mock.patch(\"tron.bin.recover_batch.read_last_yaml_entries\", autospec=True)\n@pytest.mark.parametrize(\n    \"line,exit_code,is_running,error_msg\",\n    [\n        (\n            {\"return_code\": 0, \"runner_pid\": 12345},\n            0,\n            False,\n            None,\n        ),  # action runner finishes successfully\n        (  # action runner is killed\n            {\"return_code\": -9, \"runner_pid\": 12345},\n            9,\n            False,\n            \"Action run killed by signal SIGKILL\",\n        ),\n        (  # No return code but action_runner pid is not running\n            {\"runner_pid\": 12345},\n            1,\n            False,\n            \"Action runner pid 12345 no longer running. Assuming an exit of 1.\",\n        ),\n        (\n            {\"runner_pid\": 12345},\n            None,\n            True,\n            None,\n        ),  # No return code but action_runner pid is running\n        (\n            {},\n            None,\n            Exception,\n            None,\n        ),  # No return code or PID from the file\n    ],\n)\ndef test_get_exit_code(\n    mock_read_last_yaml_entries,\n    mock_pid_running,\n    line,\n    exit_code,\n    is_running,\n    error_msg,\n):\n    fake_path = \"/file/path\"\n    mock_read_last_yaml_entries.return_value = line\n    mock_pid_running.side_effect = [is_running]\n\n    actual_exit_code, actual_error_msg = recover_batch.get_exit_code(fake_path)\n    assert actual_exit_code == exit_code\n    assert actual_error_msg == error_msg\n\n\ndef test_read_last_yaml_roundtrip(mock_file):\n    \"\"\"Check that read_last_yaml_entries returns the same thing that the action\n    runner wrote.\"\"\"\n    status = StatusFile(mock_file)\n    expected_content = [\n        {\"return_code\": None, \"pid\": 10345, \"command\": \"foo\"},\n        {\"return_code\": 1, \"pid\": 10345, \"command\": \"foo\"},\n    ]\n    with mock.patch.object(status, \"get_content\", side_effect=expected_content):\n        with status.wrap(command=\"echo hello\", run_id=\"job.1.action\", proc=mock.Mock()):\n            # In the context manager, we've written the first value of get_content.\n            first = recover_batch.read_last_yaml_entries(mock_file)\n            assert first == expected_content[0]\n\n    # After, we write another status entry. 
We should return the latest.\n    second = recover_batch.read_last_yaml_entries(mock_file)\n    assert second == expected_content[1]\n\n\n@mock.patch.object(recover_batch, \"reactor\")\n@mock.patch(\"tron.bin.recover_batch.Queue\", autospec=True)\n@mock.patch(\"tron.bin.recover_batch.get_exit_code\", autospec=True, return_value=(None, None))\n@mock.patch(\"tron.bin.recover_batch.StatusFileWatcher\", autospec=True)\n@pytest.mark.parametrize(\"existing_code,watcher_code\", [(None, 1), (123, None)])\ndef test_run(\n    mock_watcher,\n    mock_get_exit_code,\n    mock_queue,\n    mock_reactor,\n    existing_code,\n    watcher_code,\n):\n    mock_get_exit_code.return_value = (existing_code, \"\")\n    mock_queue.return_value.get.return_value = (watcher_code, \"\")\n    mock_path = mock.Mock()\n    if existing_code is not None:\n        expected = existing_code\n    else:\n        expected = watcher_code\n\n    with pytest.raises(SystemExit) as e:\n        recover_batch.run(mock_path)\n        assert e.code == expected\n\n    assert mock_get_exit_code.call_args_list == [mock.call(mock_path)]\n    if existing_code is not None:\n        assert mock_watcher.call_count == 0\n    else:\n        assert mock_watcher.call_count == 1\n"
  },
  {
    "path": "tests/command_context_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron import command_context\nfrom tron import node\nfrom tron import scheduler\nfrom tron.core import actionrun\nfrom tron.core import job\nfrom tron.core import jobrun\nfrom tron.core.jobrun import JobRunCollection\n\n\nclass TestEmptyContext(TestCase):\n    @setup\n    def build_context(self):\n        self.context = command_context.CommandContext(None)\n\n    def test__getitem__(self):\n        assert_raises(KeyError, self.context.__getitem__, \"foo\")\n\n    def test_get(self):\n        assert not self.context.get(\"foo\")\n\n\nclass TestBuildFilledContext(TestCase):\n    def test_build_filled_context_no_objects(self):\n        output = command_context.build_filled_context()\n        assert not output.base\n        assert not output.next\n\n    def test_build_filled_context_single(self):\n        output = command_context.build_filled_context(\n            command_context.JobContext,\n        )\n        assert isinstance(output.base, command_context.JobContext)\n        assert not output.next\n\n    def test_build_filled_context_chain(self):\n        objs = [command_context.JobContext, command_context.JobRunContext]\n        output = command_context.build_filled_context(*objs)\n        assert isinstance(output.base, objs[1])\n        assert isinstance(output.next.base, objs[0])\n        assert not output.next.next\n\n\nclass SimpleContextTestCaseBase(TestCase):\n    __test__ = False\n\n    def test_hit(self):\n        assert_equal(self.context[\"foo\"], \"bar\")\n\n    def test_miss(self):\n        assert_raises(KeyError, self.context.__getitem__, \"your_mom\")\n\n    def test_get_hit(self):\n        assert_equal(self.context.get(\"foo\"), \"bar\")\n\n    def test_get_miss(self):\n        assert not self.context.get(\"unknown\")\n\n\nclass SimpleDictContextTestCase(SimpleContextTestCaseBase):\n    @setup\n    def build_context(self):\n        self.context = command_context.CommandContext(dict(foo=\"bar\"))\n\n\nclass SimpleObjectContextTestCase(SimpleContextTestCaseBase):\n    @setup\n    def build_context(self):\n        class Obj:\n            foo = \"bar\"\n\n        self.context = command_context.CommandContext(Obj)\n\n\nclass ChainedDictContextTestCase(SimpleContextTestCaseBase):\n    @setup\n    def build_context(self):\n        self.next_context = command_context.CommandContext(\n            dict(foo=\"bar\", next_foo=\"next_bar\"),\n        )\n        self.context = command_context.CommandContext(\n            dict(),\n            self.next_context,\n        )\n\n    def test_chain_get(self):\n        assert_equal(self.context[\"next_foo\"], \"next_bar\")\n\n\nclass ChainedDictOverrideContextTestCase(SimpleContextTestCaseBase):\n    @setup\n    def build_context(self):\n        self.next_context = command_context.CommandContext(\n            dict(foo=\"your mom\", next_foo=\"next_bar\"),\n        )\n        self.context = command_context.CommandContext(\n            dict(foo=\"bar\"),\n            self.next_context,\n        )\n\n    def test_chain_get(self):\n        assert_equal(self.context[\"next_foo\"], \"next_bar\")\n\n\nclass ChainedObjectOverrideContextTestCase(SimpleContextTestCaseBase):\n    @setup\n    def build_context(self):\n        class MyObject(TestCase):\n            pass\n\n        obj = MyObject()\n        obj.foo = 
\"bar\"\n\n        self.next_context = command_context.CommandContext(\n            dict(foo=\"your mom\", next_foo=\"next_bar\"),\n        )\n        self.context = command_context.CommandContext(obj, self.next_context)\n\n    def test_chain_get(self):\n        assert_equal(self.context[\"next_foo\"], \"next_bar\")\n\n\nclass TestJobContext(TestCase):\n    @setup\n    def setup_job(self):\n        self.last_success = mock.Mock(run_time=datetime.datetime(2012, 3, 14))\n        mock_scheduler = mock.create_autospec(scheduler.GeneralScheduler)\n        run_collection = mock.create_autospec(\n            JobRunCollection,\n            last_success=self.last_success,\n        )\n        self.job = job.Job(\n            \"MASTER.jobname\",\n            mock_scheduler,\n            run_collection=run_collection,\n        )\n        self.context = command_context.JobContext(self.job)\n\n    def test_name(self):\n        assert_equal(self.context.name, self.job.name)\n\n    def test__getitem__last_success(self):\n        item = self.context[\"last_success#day-1\"]\n        expected_date = self.last_success.run_time - datetime.timedelta(days=1)\n        assert_equal(item, str(expected_date.day))\n\n        item = self.context[\"last_success#shortdate\"]\n        assert_equal(item, \"2012-03-14\")\n\n    def test__getitem__last_success_bad_date_spec(self):\n        name = \"last_success#beers-3\"\n        assert_raises(KeyError, lambda: self.context[name])\n\n    def test__getitem__last_success_bad_date_name(self):\n        name = \"first_success#shortdate-1\"\n        assert_raises(KeyError, lambda: self.context[name])\n\n    def test__getitem__last_success_no_date_spec(self):\n        name = \"last_success\"\n        assert_raises(KeyError, lambda: self.context[name])\n\n    def test__getitem__missing(self):\n        assert_raises(KeyError, lambda: self.context[\"bogus\"])\n\n    def test_namespace(self):\n        assert self.context.namespace == \"MASTER\"\n\n\nclass TestJobRunContext(TestCase):\n    @setup\n    def setup_context(self):\n        self.jobrun = mock.create_autospec(jobrun.JobRun, run_time=\"sometime\", manual=True)\n        self.context = command_context.JobRunContext(self.jobrun)\n\n    def test_cleanup_job_status(self):\n        self.jobrun.action_runs.is_failed = False\n        self.jobrun.action_runs.is_complete_without_cleanup = True\n        assert_equal(self.context.cleanup_job_status, \"SUCCESS\")\n\n    def test_cleanup_job_status_failure(self):\n        self.jobrun.action_runs.is_failed = True\n        assert_equal(self.context.cleanup_job_status, \"FAILURE\")\n\n    def test_runid(self):\n        assert_equal(self.context.runid, self.jobrun.id)\n\n    def test_manual_run(self):\n        assert self.context.manual == \"true\"\n\n    @mock.patch(\"tron.command_context.timeutils.DateArithmetic\", autospec=True)\n    def test__getitem__(self, mock_date_math):\n        name = \"date_name\"\n        time_value = self.context[name]\n        mock_date_math.parse.assert_called_with(name, self.jobrun.run_time)\n        assert_equal(time_value, mock_date_math.parse.return_value)\n\n\nclass TestActionRunContext(TestCase):\n    @setup\n    def build_context(self):\n        mock_node = mock.create_autospec(node.Node, hostname=\"something\")\n        self.action_run = mock.create_autospec(\n            actionrun.ActionRun,\n            action_name=\"something\",\n            node=mock_node,\n        )\n        self.context = command_context.ActionRunContext(self.action_run)\n\n    def 
test_actionname(self):\n        assert_equal(self.context.actionname, self.action_run.action_name)\n\n    def test_node_hostname(self):\n        assert_equal(self.context.node, self.action_run.node.hostname)\n\n\nclass TestFiller(TestCase):\n    @setup\n    def setup_filler(self):\n        self.filler = command_context.Filler()\n\n    def test_filler_with_job__getitem__(self):\n        context = command_context.JobContext(self.filler)\n        todays_date = datetime.date.today().strftime(\"%Y-%m-%d\")\n        assert_equal(context[\"last_success#shortdate\"], todays_date)\n\n    def test_filler_with_job_run__getitem__(self):\n        context = command_context.JobRunContext(self.filler)\n        todays_date = datetime.date.today().strftime(\"%Y-%m-%d\")\n        assert_equal(context[\"shortdate\"], todays_date)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/commands/__init__.py",
    "content": ""
  },
  {
    "path": "tests/commands/backfill_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nimport pytest\n\nfrom tron.commands import backfill\nfrom tron.commands import client\n\nTEST_DATETIME_1 = datetime.datetime.strptime(\"2004-07-01\", \"%Y-%m-%d\")\nTEST_DATETIME_2 = datetime.datetime.strptime(\"2004-07-02\", \"%Y-%m-%d\")\nTEST_DATETIME_3 = datetime.datetime.strptime(\"2004-07-03\", \"%Y-%m-%d\")\n\n\n@pytest.fixture(autouse=True)\ndef mock_sleep():\n    async def empty_coro(*args, **kwargs):\n        return None\n\n    with mock.patch(\"asyncio.sleep\", empty_coro, autospec=None):\n        yield\n\n\n@pytest.fixture(autouse=True)\ndef mock_client():\n    with mock.patch.object(client, \"Client\", autospec=True) as m:\n        yield m\n\n\n@pytest.fixture(autouse=True)\ndef mock_urlopen():  # prevent any requests from being made\n    with mock.patch(\"urllib.request.urlopen\", autospec=True) as m:\n        yield m\n\n\n@pytest.fixture\ndef mock_client_request():\n    with mock.patch.object(client, \"request\", autospec=True) as m:\n        m.return_value = mock.Mock(error=False, content={})  # response\n        yield m\n\n\n@pytest.fixture\ndef fake_backfill_run(mock_client):\n    tron_client = mock_client.return_value\n    tron_client.url_base = \"http://localhost\"\n    yield backfill.BackfillRun(\n        tron_client,\n        client.TronObjectIdentifier(\"JOB\", \"/a_job\"),\n        TEST_DATETIME_1,\n    )\n\n\n@pytest.mark.parametrize(\n    \"is_error,result,expected\",\n    [\n        (True, \"an_error_msg\", None),  # tron api failed\n        (False, \"weird_resp_msg\", None),  # bad response, can't get job run name\n        (False, \"Created JobRun:real_job_run_name\", \"real_job_run_name\"),  # ok\n    ],\n)\ndef test_backfill_run_create(mock_client_request, fake_backfill_run, event_loop, is_error, result, expected):\n    mock_client_request.return_value.error = is_error\n    mock_client_request.return_value.content[\"result\"] = result\n    assert expected == event_loop.run_until_complete(fake_backfill_run.create())\n\n\n@pytest.mark.parametrize(\n    \"obj_type,expected\",\n    [\n        (client.RequestError(\"\"), None),\n        ([client.TronObjectIdentifier(\"JOB_RUN\", \"/a_run\")], client.TronObjectIdentifier(\"JOB_RUN\", \"/a_run\")),\n    ],\n)\n@mock.patch.object(client, \"get_object_type_from_identifier\", autospec=True)\ndef test_backfill_run_get_run_id(mock_get_obj_type, fake_backfill_run, event_loop, obj_type, expected):\n    mock_get_obj_type.side_effect = obj_type\n    assert expected == event_loop.run_until_complete(fake_backfill_run.get_run_id())\n    assert expected == fake_backfill_run.run_id\n\n\n@pytest.mark.parametrize(\n    \"job_run_resp,expected\",\n    [\n        (client.RequestError, \"unknown\"),  # polling failed\n        ([{}], \"unknown\"),  # default to unknown\n        ([{\"state\": \"failed\"}], \"failed\"),  # ok\n    ],\n)\ndef test_backfill_run_sync_state(fake_backfill_run, event_loop, job_run_resp, expected):\n    fake_backfill_run.run_id = client.TronObjectIdentifier(\"JOB_RUN\", \"/a_run\")\n    fake_backfill_run.tron_client.job_runs.side_effect = job_run_resp\n    assert expected == event_loop.run_until_complete(fake_backfill_run.sync_state())\n\n\ndef test_backfill_run_watch_until_completion(fake_backfill_run, event_loop):\n    async def change_run_state():\n        fake_backfill_run.run_state = \"cancelled\"\n\n    fake_backfill_run.sync_state = change_run_state\n    assert \"cancelled\" == 
event_loop.run_until_complete(fake_backfill_run.watch_until_completion())\n\n\n@pytest.mark.parametrize(\n    \"run_id,response,expected\",\n    [\n        (None, mock.Mock(error=False), False),  # no run_id\n        (client.TronObjectIdentifier(\"JOB_RUN\", \"/a_run\"), mock.Mock(error=True), False),  # api error\n        (client.TronObjectIdentifier(\"JOB_RUN\", \"/a_run\"), mock.Mock(error=False), True),  # ok\n    ],\n)\ndef test_backfill_run_cancel(\n    mock_client_request,\n    fake_backfill_run,\n    event_loop,\n    run_id,\n    response,\n    expected,\n):\n    fake_backfill_run.run_id = run_id\n    mock_client_request.return_value = response\n    assert expected == event_loop.run_until_complete(fake_backfill_run.cancel())\n\n\n@mock.patch(\"tron.commands.backfill.get_auth_token\", lambda: \"\")\n@mock.patch.object(client, \"get_object_type_from_identifier\", autospec=True)\ndef test_run_backfill_for_date_range_job_dne(mock_get_obj_type, event_loop):\n    mock_get_obj_type.side_effect = ValueError\n    with pytest.raises(ValueError):\n        event_loop.run_until_complete(\n            backfill.run_backfill_for_date_range(\"a_server\", \"a_job\", []),\n        )\n\n\n@mock.patch(\"tron.commands.backfill.get_auth_token\", lambda: \"\")\n@mock.patch.object(client, \"get_object_type_from_identifier\", autospec=True)\ndef test_run_backfill_for_date_range_not_a_job(mock_get_obj_type, event_loop):\n    mock_get_obj_type.return_value = client.TronObjectIdentifier(\"JOB_RUN\", \"a_url\")\n    with pytest.raises(ValueError):\n        event_loop.run_until_complete(\n            backfill.run_backfill_for_date_range(\"a_server\", \"a_job\", []),\n        )\n\n\n@pytest.mark.parametrize(\n    \"ignore_errors,expected\",\n    [\n        (True, {\"succeeded\", \"failed\", \"unknown\"}),\n        (False, {\"succeeded\", \"failed\", \"not started\"}),\n    ],\n)\n@mock.patch(\"tron.commands.backfill.get_auth_token\", lambda: \"\")\n@mock.patch.object(client, \"get_object_type_from_identifier\", autospec=True)\ndef test_run_backfill_for_date_range_normal(mock_get_obj_type, event_loop, ignore_errors, expected):\n    run_states = (state for state in [\"succeeded\", \"failed\", \"unknown\"])\n\n    async def fake_run_until_completion(self):\n        self.run_state = next(run_states)\n\n    backfill.BackfillRun.run_until_completion = fake_run_until_completion\n    dates = [TEST_DATETIME_1, TEST_DATETIME_2, TEST_DATETIME_3]\n    mock_get_obj_type.return_value = client.TronObjectIdentifier(\"JOB\", \"a_url\")\n\n    backfill_runs = event_loop.run_until_complete(\n        backfill.run_backfill_for_date_range(\n            \"a_server\",\n            \"a_job\",\n            dates,\n            max_parallel=2,\n            ignore_errors=ignore_errors,\n        )\n    )\n\n    assert {br.run_state for br in backfill_runs} == expected\n"
  },
  {
    "path": "tests/commands/client_test.py",
    "content": "from unittest import mock\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_raises\nfrom tests.testingutils import autospec_method\nfrom tron.commands import client\nfrom tron.commands.client import get_object_type_from_identifier\nfrom tron.commands.client import Response\nfrom tron.commands.client import TronObjectType\n\n\ndef build_file_mock(content):\n    return mock.Mock(\n        read=mock.Mock(return_value=content),\n        headers=mock.Mock(get_content_charset=mock.Mock(return_value=\"utf-8\")),\n    )\n\n\nclass TestRequest(TestCase):\n    @setup\n    def setup_options(self):\n        self.url = \"http://localhost:8089/jobs/\"\n\n    @setup_teardown\n    def patch_urllib(self):\n        patcher = mock.patch(\n            \"tron.commands.client.urllib.request.urlopen\",\n            autospec=True,\n        )\n        with patcher as self.mock_urlopen:\n            yield\n\n    def test_build_url_request_no_data(self):\n        request = client.build_url_request(self.url, None)\n        assert request.has_header(\"User-agent\")\n        assert_equal(request.get_method(), \"GET\")\n        assert_equal(request.get_full_url(), self.url)\n\n    def test_build_url_request_with_data(self):\n        data = {\"param\": \"is_set\", \"other\": 1}\n        request = client.build_url_request(self.url, data)\n        assert request.has_header(\"User-agent\")\n        assert_equal(request.get_method(), \"POST\")\n        assert_equal(request.get_full_url(), self.url)\n        assert_in(\"param=is_set\", request.data.decode())\n        assert_in(\"other=1\", request.data.decode())\n\n    @mock.patch(\"tron.commands.client.log\", autospec=True)\n    def test_load_response_content_success(self, _):\n        content = b\"not:valid:json\"\n        http_response = build_file_mock(content)\n        response = client.load_response_content(http_response)\n        assert_equal(response.error, client.DECODE_ERROR)\n        assert_equal(response.content, content.decode(\"utf-8\"))\n\n    @mock.patch(\"tron.commands.client.log\", autospec=True)\n    def test_request_http_error(self, _):\n        self.mock_urlopen.side_effect = HTTPError(\n            self.url,\n            500,\n            \"broke\",\n            mock.Mock(get_content_charset=mock.Mock(return_value=\"utf-8\")),\n            build_file_mock(b\"oops\"),\n        )\n        response = client.request(self.url)\n        expected = client.Response(500, \"broke\", \"oops\")\n        assert_equal(response, expected)\n\n    @mock.patch(\"tron.commands.client.log\", autospec=True)\n    def test_request_url_error(self, _):\n        self.mock_urlopen.side_effect = URLError(\"broke\")\n        response = client.request(self.url)\n        expected = client.Response(client.URL_ERROR, \"broke\", None)\n        assert_equal(response, expected)\n\n    def test_request_success(self):\n        self.mock_urlopen.return_value = build_file_mock(b'{\"ok\": \"ok\"}')\n        response = client.request(self.url)\n        expected = client.Response(None, None, {\"ok\": \"ok\"})\n        assert_equal(response, expected)\n\n\nclass TestClientRequest(TestCase):\n    @setup\n    def setup_client(self):\n        self.url = \"http://localhost:8089/\"\n        self.client = 
client.Client(self.url)\n\n    @setup_teardown\n    def patch_request(self):\n        with mock.patch(\n            \"tron.commands.client.request\",\n            autospec=True,\n        ) as self.mock_request:\n            yield\n\n    def test_request_error(self):\n        error_response = Response(\n            error=\"404\",\n            msg=\"Not Found\",\n            content=\"big kahuna error\",\n        )\n        client.request = mock.Mock(return_value=error_response)\n        exception = assert_raises(\n            client.RequestError,\n            self.client.request,\n            \"/jobs\",\n        )\n\n        assert str(exception) == error_response.content\n\n    def test_request_success(self):\n        ok_response = {\"ok\": \"ok\"}\n        client.request.return_value = client.Response(None, None, ok_response)\n        response = self.client.request(\"/jobs\")\n        assert_equal(response, ok_response)\n\n\nclass TestClient(TestCase):\n    @setup\n    def setup_client(self):\n        self.url = \"http://localhost:8089/\"\n        self.client = client.Client(self.url)\n        autospec_method(self.client.request)\n\n    def test_config_post(self):\n        name, data, hash = \"name\", \"stuff\", \"hash\"\n        self.client.config(name, config_data=data, config_hash=hash)\n        expected_data = {\n            \"config\": data,\n            \"name\": name,\n            \"hash\": hash,\n            \"check\": 0,\n        }\n        self.client.request.assert_called_with(\"/api/config\", expected_data)\n\n    def test_config_get_default(self):\n        self.client.config(\"config_name\")\n        self.client.request.assert_called_with(\n            \"/api/config?name=config_name\",\n        )\n\n    def test_http_get(self):\n        self.client.http_get(\"/api/jobs\", {\"include\": 1})\n        self.client.request.assert_called_with(\"/api/jobs?include=1\")\n\n    def test_action_runs(self):\n        self.client.action_runs(\"/api/jobs/name/0/act\", num_lines=40)\n        self.client.request.assert_called_with(\n            \"/api/jobs/name/0/act?include_stderr=1&include_stdout=1&num_lines=40\",\n        )\n\n    def test_job_runs(self):\n        self.client.job_runs(\"/api/jobs/name/0\")\n        self.client.request.assert_called_with(\n            \"/api/jobs/name/0?include_action_graph=0&include_action_runs=1\",\n        )\n\n    def test_job(self):\n        self.client.job(\"/api/jobs/name\", count=20)\n        self.client.request.assert_called_with(\n            \"/api/jobs/name?include_action_runs=0&num_runs=20\",\n        )\n\n    def test_jobs(self):\n        self.client.jobs()\n        self.client.request.assert_called_with(\n            \"/api/jobs?include_action_graph=1&include_action_runs=0&include_job_runs=0&include_node_pool=1\",\n        )\n\n\nclass TestUserAttribution(TestCase):\n    def test_default_user_agent(self):\n        url = \"http://localhost:8089/\"\n        with mock.patch(\n            \"tron.commands.client.os.environ\",\n            autospec=True,\n        ) as mock_environ:\n            mock_environ.get.return_value = \"testuser\"\n            default_client = client.Client(url, user_attribution=False)\n            # we do not add user attribution by default\n            assert \"(testuser)\" not in default_client.headers[\"User-Agent\"]\n\n    def test_attributed_user_agent(self):\n        url = \"http://localhost:8089/\"\n        with mock.patch(\n            \"tron.commands.client.os.environ\",\n            autospec=True,\n        ) as 
mock_environ:\n            mock_environ.get.return_value = \"testuser\"\n            default_client = client.Client(url, user_attribution=True)\n            # user attribution is added when it is explicitly enabled\n            assert \"(testuser)\" in default_client.headers[\"User-Agent\"]\n\n\nclass TestGetUrl(TestCase):\n    def test_get_job_url_for_action_run(self):\n        url = client.get_job_url(\"MASTER.name.1.act\")\n        assert_equal(url, \"/api/jobs/MASTER.name/1/act\")\n\n    def test_get_job_url_for_job(self):\n        url = client.get_job_url(\"MASTER.name\")\n        assert_equal(url, \"/api/jobs/MASTER.name\")\n\n\nclass TestGetContentFromIdentifier(TestCase):\n    @setup\n    def setup_client(self):\n        self.options = mock.Mock()\n        self.index = {\n            \"namespaces\": [\"OTHER\", \"MASTER\"],\n            \"jobs\": {\n                \"MASTER.namea\": \"\",\n                \"MASTER.nameb\": \"\",\n                \"OTHER.nameg\": \"\",\n            },\n        }\n\n    def test_get_url_from_identifier_job_no_namespace(self):\n        identifier = get_object_type_from_identifier(self.index, \"namea\")\n        assert_equal(identifier.url, \"/api/jobs/MASTER.namea\")\n        assert_equal(identifier.type, TronObjectType.job)\n\n    def test_get_url_from_identifier_job(self):\n        identifier = get_object_type_from_identifier(\n            self.index,\n            \"MASTER.namea\",\n        )\n        assert_equal(identifier.url, \"/api/jobs/MASTER.namea\")\n        assert_equal(identifier.type, TronObjectType.job)\n\n    def test_get_url_from_identifier_job_run(self):\n        identifier = get_object_type_from_identifier(\n            self.index,\n            \"MASTER.nameb.7\",\n        )\n        assert_equal(identifier.url, \"/api/jobs/MASTER.nameb/7\")\n        assert_equal(identifier.type, TronObjectType.job_run)\n\n    def test_get_url_from_identifier_action_run(self):\n        identifier = get_object_type_from_identifier(\n            self.index,\n            \"MASTER.nameb.7.run\",\n        )\n        assert_equal(identifier.url, \"/api/jobs/MASTER.nameb/7/run\")\n        assert_equal(identifier.type, TronObjectType.action_run)\n\n    def test_get_url_from_identifier_job_no_namespace_not_master(self):\n        identifier = get_object_type_from_identifier(self.index, \"nameg\")\n        assert_equal(identifier.url, \"/api/jobs/OTHER.nameg\")\n        assert_equal(identifier.type, TronObjectType.job)\n\n    def test_get_url_from_identifier_no_match(self):\n        exc = assert_raises(\n            ValueError,\n            get_object_type_from_identifier,\n            self.index,\n            \"MASTER.namec\",\n        )\n        assert_in(\"namec\", str(exc))\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/commands/cmd_utils_test.py",
    "content": "import argparse\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tron.commands import cmd_utils\n\n\nclass TestGetConfig(TestCase):\n    @setup_teardown\n    def patch_environment(self):\n        with mock.patch(\"tron.commands.cmd_utils.opener\", autospec=True) as self.mock_opener, mock.patch(\n            \"tron.commands.cmd_utils.yaml\", autospec=True\n        ) as self.mock_yaml:\n            yield\n\n    def test_read_config_missing(self):\n        self.mock_opener.side_effect = IOError\n        assert_equal(cmd_utils.read_config(), {})\n\n    def test_read_config(self):\n        assert_equal(cmd_utils.read_config(), self.mock_yaml.load.return_value)\n\n    @mock.patch(\"tron.commands.cmd_utils.os.access\", autospec=True)\n    def test_get_client_config(self, mock_access):\n        mock_access.return_value = False\n        config = cmd_utils.get_client_config()\n        assert_equal(mock_access.call_count, 2)\n        assert_equal(config, {})\n\n    def test_filter_jobs_actions_runs_with_nothing(self):\n        inputs = [\n            \"M.foo\",\n            \"M.foo.1\",\n            \"M.foo.1.action1\",\n            \"M.foo.2.action1\",\n            \"M.bar\",\n            \"M.bar.1.action\",\n        ]\n        prefix = \"\"\n        expected = [\"M.foo\", \"M.bar\"]\n        assert_equal(\n            cmd_utils.filter_jobs_actions_runs(\n                prefix,\n                inputs,\n            ),\n            expected,\n        )\n\n    def test_filter_jobs_actions_runs_with_almost_a_job(self):\n        inputs = [\n            \"M.foo\",\n            \"M.foo.1\",\n            \"M.foo.1.action1\",\n            \"M.foo.2.action1\",\n            \"M.bar.1.action\",\n        ]\n        prefix = \"M.f\"\n        expected = [\"M.foo\"]\n        assert_equal(\n            cmd_utils.filter_jobs_actions_runs(\n                prefix,\n                inputs,\n            ),\n            expected,\n        )\n\n    def test_filter_jobs_actions_runs_with_a_job_run(self):\n        inputs = [\n            \"M.foo\",\n            \"M.foo.1\",\n            \"M.foo.1.action1\",\n            \"M.foo.2\",\n            \"M.foo.2.action1\",\n            \"M.bar.1.action\",\n        ]\n        prefix = \"M.foo.\"\n        expected = [\"M.foo.1\", \"M.foo.2\"]\n        assert_equal(\n            cmd_utils.filter_jobs_actions_runs(\n                prefix,\n                inputs,\n            ),\n            expected,\n        )\n\n    def test_filter_jobs_actions_runs_with_a_job_run_and_id(self):\n        inputs = [\n            \"M.foo\",\n            \"M.foo.1\",\n            \"M.foo.1.action1\",\n            \"M.foo.2.action1\",\n            \"M.bar.1.action\",\n        ]\n        prefix = \"M.foo.1\"\n        expected = [\"M.foo.1\", \"M.foo.1.action1\"]\n        assert_equal(\n            cmd_utils.filter_jobs_actions_runs(\n                prefix,\n                inputs,\n            ),\n            expected,\n        )\n\n\nclass TestBuildOptionParser(TestCase):\n    def test_build_option_parser(self):\n        \"\"\"Assert that we don't set default options so that we can load\n        the defaults from the config.\n        \"\"\"\n        usage = \"Something\"\n        epilog = \"Something\"\n        argparse.ArgumentParser = mock.Mock()\n        parser = cmd_utils.build_option_parser(\n            usage=usage,\n            
epilog=epilog,\n        )\n        argparse.ArgumentParser.assert_called_with(\n            usage=usage,\n            formatter_class=argparse.RawDescriptionHelpFormatter,\n            epilog=epilog,\n        )\n        assert parser.add_argument.call_count == 5\n\n        args = [call[1] for call in parser.add_argument.mock_calls]\n        expected = [\n            (\"--version\",),\n            (\"-v\", \"--verbose\"),\n            (\"--server\",),\n            (\"--cluster_name\",),\n            (\"-s\", \"--save\"),\n        ]\n        assert args == expected\n\n        defaults = [call[2].get(\"default\") for call in parser.add_argument.mock_calls]\n        assert defaults == [None, None, None, None, None]\n\n\nclass TestSuggestions(TestCase):\n    def test_suggest_possibilities_none(self):\n        expected = \"\"\n        actual = cmd_utils.suggest_possibilities(word=\"FOO\", possibilities=[])\n        assert_equal(actual, expected)\n\n    def test_suggest_possibilities_many(self):\n        expected = \"FOOO, FOOBAR\"\n        actual = cmd_utils.suggest_possibilities(\n            word=\"FOO\",\n            possibilities=[\"FOOO\", \"FOOBAR\"],\n        )\n        assert_in(expected, actual)\n\n    def test_suggest_possibilities_one(self):\n        expected = \"FOOBAR?\"\n        actual = cmd_utils.suggest_possibilities(\n            word=\"FOO\",\n            possibilities=[\"FOOBAR\", \"BAZ\"],\n        )\n        assert_in(expected, actual)\n"
  },
  {
    "path": "tests/commands/display_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import setup\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tron.commands import display\nfrom tron.commands.display import DisplayActionRuns\nfrom tron.commands.display import DisplayJobRuns\nfrom tron.commands.display import DisplayJobs\nfrom tron.core import actionrun\nfrom tron.core import job\n\n\nclass TestDisplayJobRuns(TestCase):\n    @setup\n    def setup_data(self):\n        self.data = [\n            dict(\n                id=\"something.23\",\n                state=\"FAIL\",\n                node=mock.MagicMock(),\n                run_num=23,\n                run_time=\"2012-01-20 23:11:23\",\n                start_time=\"2012-01-20 23:11:23\",\n                end_time=\"2012-02-21 23:10:10\",\n                duration=\"2 days\",\n                manual=False,\n            ),\n            dict(\n                id=\"something.55\",\n                state=\"QUE\",\n                node=mock.MagicMock(),\n                run_num=55,\n                run_time=\"2012-01-20 23:11:23\",\n                start_time=\"2012-01-20 23:11:23\",\n                end_time=\"\",\n                duration=\"\",\n                manual=False,\n            ),\n        ]\n\n    def test_format(self):\n        out = DisplayJobRuns().format(self.data)\n        lines = out.split(\"\\n\")\n        assert len(lines) == 7\n\n\nclass TestDisplayJobs(TestCase):\n    @setup\n    def setup_data(self):\n        self.data = [\n            dict(\n                name=\"important_things\",\n                status=\"running\",\n                scheduler=mock.MagicMock(),\n                last_success=None,\n            ),\n            dict(\n                name=\"other_thing\",\n                status=\"enabled\",\n                scheduler=mock.MagicMock(),\n                last_success=\"2012-01-23 10:23:23\",\n            ),\n        ]\n\n    def do_format(self):\n        out = DisplayJobs().format(self.data)\n        lines = out.split(\"\\n\")\n        return lines\n\n    def test_format(self):\n        lines = self.do_format()\n        assert len(lines) == 5\n\n\nclass TestDisplayActions(TestCase):\n    @setup\n    def setup_data(self):\n        self.data = {\n            \"id\": \"something.23\",\n            \"state\": \"UNKWN\",\n            \"node\": {\n                \"hostname\": \"something\",\n                \"username\": \"a\",\n            },\n            \"run_time\": \"sometime\",\n            \"start_time\": \"sometime\",\n            \"end_time\": \"sometime\",\n            \"manual\": False,\n            \"runs\": [\n                dict(\n                    id=\"something.23.run_other_thing\",\n                    state=\"unknown\",\n                    start_time=\"2012-01-23 10:10:10.123456\",\n                    end_time=\"\",\n                    duration=\"\",\n                    run_time=\"sometime\",\n                ),\n                dict(\n                    id=\"something.1.run_foo\",\n                    state=\"failed\",\n                    start_time=\"2012-01-23 10:10:10.123456\",\n                    end_time=\"2012-01-23 10:40:10.123456\",\n                    duration=\"1234.123456\",\n                    run_time=\"sometime\",\n                ),\n                dict(\n                    id=\"something.23.run_other_thing\",\n                    state=\"queued\",\n                    start_time=\"2012-01-23 10:10:10.123456\",\n                    
end_time=\"\",\n                    duration=\"\",\n                    run_time=\"sometime\",\n                ),\n                dict(\n                    id=\"something.42.weird_run\",\n                    state=\"unknown\",\n                    start_time=None,\n                    end_time=None,\n                    duration=\"\",\n                    run_time=None,\n                ),\n                dict(\n                    id=\"something.43.skipped\",\n                    state=\"skipped\",\n                    start_time=\"2019-07-15 18:12:05\",\n                    end_time=\"2019-07-16 01:31:50\",\n                    duration=\"7:19:44.506211\",\n                ),\n                dict(\n                    id=\"something.43.running\",\n                    state=\"running\",\n                    start_time=\"2019-07-15 18:12:05\",\n                    end_time=None,\n                    duration=\"7:19:44.506211\",\n                ),\n            ],\n        }\n        self.details = {\n            \"id\": \"something.1.foo\",\n            \"state\": \"FAIL\",\n            \"node\": \"localhost\",\n            \"stdout\": [\"Blah\", \"blah\", \"blah\"],\n            \"stderr\": [\"Crash\", \"and\", \"burn\"],\n            \"command\": \"/bin/bash ./runme.sh now\",\n            \"raw_command\": \"bash runme.sh now\",\n            \"requirements\": [\".run_first_job\"],\n        }\n\n    def format_lines(self):\n        out = DisplayActionRuns().format(self.data)\n        return out.split(\"\\n\")\n\n    def test_format(self):\n        lines = self.format_lines()\n        assert len(lines) == 16, \"\\n\".join(lines)\n\n\nclass TestAddColorForState(TestCase):\n    @setup_teardown\n    def enable_color(self):\n        with display.Color.enable():\n            yield\n\n    def test_add_red(self):\n        text = display.add_color_for_state(actionrun.ActionRun.FAILED)\n        assert text.startswith(display.Color.colors[\"red\"]), text\n\n    def test_add_green(self):\n        text = display.add_color_for_state(actionrun.ActionRun.RUNNING)\n        assert text.startswith(display.Color.colors[\"green\"]), text\n\n    def test_add_blue(self):\n        text = display.add_color_for_state(job.Job.STATUS_DISABLED)\n        assert text.startswith(display.Color.colors[\"blue\"]), text\n\n\nclass TestDisplayNode(TestCase):\n\n    node_source = {\n        \"name\": \"name\",\n        \"hostname\": \"hostname\",\n        \"username\": \"username\",\n    }\n\n    def test_display_node(self):\n        result = display.display_node(self.node_source)\n        assert result == \"username@hostname\"\n\n    def test_display_node_pool(self):\n        source = {\"name\": \"name\", \"nodes\": [self.node_source]}\n        result = display.display_node_pool(source)\n        assert result == \"name (1 node(s))\"\n"
  },
  {
    "path": "tests/commands/retry_test.py",
    "content": "import random\nfrom unittest import mock\n\nimport pytest\n\nfrom tron.commands import client\nfrom tron.commands import retry\n\n\nasync def _empty_coro(*args, **kwargs):\n    return None\n\n\n@pytest.fixture(autouse=True)\ndef mock_sleep():\n    with mock.patch(\"asyncio.sleep\", _empty_coro, autospec=None):\n        yield\n\n\n@pytest.fixture(autouse=True)\ndef mock_client():\n    with mock.patch.object(client, \"Client\", autospec=True) as m:\n        m.return_value.url_base = \"http://localhost\"\n        yield m\n\n\n@pytest.fixture(autouse=True)\ndef mock_urlopen():  # prevent any requests from being made\n    with mock.patch(\"urllib.request.urlopen\", autospec=True) as m:\n        yield m\n\n\n@pytest.fixture\ndef mock_client_request():\n    with mock.patch.object(client, \"request\", autospec=True) as m:\n        m.return_value = mock.Mock(error=False, content={})  # response\n        yield m\n\n\n@mock.patch.object(\n    client,\n    \"get_object_type_from_identifier\",\n    return_value=client.TronObjectIdentifier(\"JOB_RUN\", \"/a_job_run\"),\n    autospec=True,\n)\ndef test_retry_action_init_not_an_action(mock_get_obj_type, mock_client):\n    tron_client = mock_client.return_value\n    with pytest.raises(ValueError):\n        retry.RetryAction(tron_client, \"a_fake_action_run\")\n\n\n@pytest.fixture\ndef fake_retry_action(mock_client):\n    tron_client = mock_client.return_value\n    tron_client.action_runs.return_value = dict(\n        action_name=\"a_fake_action\",\n        requirements=[\"required_action_0\", \"required_action_1\"],\n        triggered_by=\"a_fake_trigger_0 (done), a_fake_trigger_1\",\n    )\n    tron_client.job_runs.return_value = dict(\n        job_name=\"a_fake_job\",\n        run_num=1234,\n        runs=[\n            dict(action_name=\"required_action_0\", state=\"succeeded\"),\n            dict(action_name=\"non_required_action\", state=\"succeeded\"),\n            dict(action_name=\"required_action_1\", state=\"failed\"),\n            dict(action_name=\"upstream_action_0\", trigger_downstreams=\"a_fake_trigger_0\"),\n            dict(action_name=\"upstream_action_1\", trigger_downstreams=\"a_fake_trigger_1\"),\n            tron_client.action_runs.return_value,\n        ],\n    )\n\n    with mock.patch.object(\n        client,\n        \"get_object_type_from_identifier\",\n        side_effect=[\n            client.TronObjectIdentifier(\"ACTION_RUN\", \"/a_fake_job/0/a_fake_action\"),\n            client.TronObjectIdentifier(\"JOB_RUN\", \"/a_fake_job/0\"),\n        ],\n        autospec=True,\n    ):\n        yield retry.RetryAction(tron_client, \"a_fake_job.0.a_fake_action\", use_latest_command=True)\n\n\ndef test_retry_action_init_ok(fake_retry_action):\n    assert fake_retry_action.retry_params == dict(command=\"retry\", use_latest_command=1)\n    assert fake_retry_action.full_action_name == \"a_fake_job.0.a_fake_action\"\n    fake_retry_action.tron_client.action_runs.assert_called_once_with(\n        \"/a_fake_job/0/a_fake_action\",\n        num_lines=0,\n    )\n    assert fake_retry_action.action_name == \"a_fake_action\"\n    assert fake_retry_action.action_run_id.url == \"/a_fake_job/0/a_fake_action\"\n    fake_retry_action.tron_client.job_runs.assert_called_once_with(\"/a_fake_job/0\")\n    assert fake_retry_action.job_run_name == \"a_fake_job.0\"\n    assert fake_retry_action.job_run_id.url == \"/a_fake_job/0\"\n    assert fake_retry_action._required_action_indices == {\"required_action_0\": 0, \"required_action_1\": 
2}\n\n\ndef test_check_trigger_statuses(fake_retry_action, event_loop):\n    expected = dict(a_fake_trigger_0=True, a_fake_trigger_1=False)\n    assert expected == event_loop.run_until_complete(fake_retry_action.check_trigger_statuses())\n    assert fake_retry_action.tron_client.action_runs.call_args_list[1] == mock.call(  # 0th call is in init\n        \"/a_fake_job/0/a_fake_action\",\n        num_lines=0,\n    )\n\n\ndef test_check_required_actions_statuses(fake_retry_action, event_loop):\n    expected = dict(required_action_0=True, required_action_1=False)\n    assert expected == event_loop.run_until_complete(fake_retry_action.check_required_actions_statuses())\n    assert fake_retry_action.tron_client.job_runs.call_args_list[1] == mock.call(\"/a_fake_job/0\")  # 0th call is in init\n\n\n@pytest.mark.parametrize(\n    \"expected,triggered_by,required_action_1_state\",\n    [\n        (False, \"a_fake_trigger_0 (done), a_fake_trigger_1\", \"skipped\"),  # unpublished triggers\n        (False, \"a_fake_trigger_0 (done), a_fake_trigger_1 (done)\", \"failed\"),  # required not succeeded\n        (True, \"a_fake_trigger_0 (done), a_fake_trigger_1 (done)\", \"succeeded\"),  # all done\n    ],\n)\ndef test_can_retry(fake_retry_action, event_loop, expected, triggered_by, required_action_1_state):\n    fake_retry_action.tron_client.action_runs.return_value[\"triggered_by\"] = triggered_by\n    fake_retry_action.tron_client.job_runs.return_value[\"runs\"][2][\"state\"] = required_action_1_state\n    assert expected == event_loop.run_until_complete(fake_retry_action.can_retry())\n\n\ndef test_wait_for_deps_timeout(fake_retry_action, event_loop):\n    assert not event_loop.run_until_complete(fake_retry_action.wait_for_deps(deps_timeout_s=3, poll_interval_s=1))\n    assert fake_retry_action._elapsed.seconds == 3\n    assert fake_retry_action.tron_client.action_runs.call_count == 5  # 1 in init, 4 in this test\n\n\ndef test_wait_for_deps_all_deps_done(fake_retry_action, event_loop):\n    fake_retry_action.tron_client.job_runs.return_value[\"runs\"][2][\"state\"] = \"skipped\"\n    fake_retry_action.tron_client.action_runs.return_value = None\n    triggered_by_results = [\n        \"a_fake_trigger_0 (done), a_fake_trigger_1\",\n        \"a_fake_trigger_0 (done), a_fake_trigger_1\",\n        \"a_fake_trigger_0 (done), a_fake_trigger_1 (done)\",\n    ]\n    fake_retry_action.tron_client.action_runs.side_effect = [\n        dict(\n            action_name=\"a_fake_action\",\n            requirements=[\"required_action_0\", \"required_action_1\"],\n            triggered_by=r,\n        )\n        for r in triggered_by_results\n    ]\n\n    assert event_loop.run_until_complete(fake_retry_action.wait_for_deps(deps_timeout_s=3, poll_interval_s=1))\n    # 3rd triggered_by result returned on check at 2nd second\n    assert fake_retry_action._elapsed.seconds == 2\n    assert fake_retry_action.tron_client.action_runs.call_count == 4  # 1 in init, 3 in this test\n\n\n@pytest.mark.parametrize(\"expected,error\", [(False, True), (True, False)])\ndef test_issue_retry(fake_retry_action, mock_client_request, event_loop, expected, error):\n    mock_client_request.return_value.error = error\n    assert expected == event_loop.run_until_complete(fake_retry_action.issue_retry())\n    assert expected == fake_retry_action.succeeded\n\n\ndef test_wait_for_retry_deps_not_done(fake_retry_action, mock_client_request, event_loop):\n    assert not event_loop.run_until_complete(\n        
fake_retry_action.wait_and_retry(deps_timeout_s=10, poll_interval_s=1, jitter=True),\n    )\n    assert fake_retry_action._elapsed.seconds == 10  # timeout\n    mock_client_request.assert_not_called()  # retry not attempted\n\n\ndef test_wait_for_retry_deps_done(fake_retry_action, mock_client_request, event_loop):\n    fake_retry_action.tron_client.job_runs.return_value[\"runs\"][2][\"state\"] = \"skipped\"\n    fake_retry_action.tron_client.action_runs.return_value[\n        \"triggered_by\"\n    ] = \"a_fake_trigger_0 (done), a_fake_trigger_1 (done)\"\n    mock_client_request.return_value.error = False\n    random.seed(1)  # init delay is 1s\n\n    assert event_loop.run_until_complete(\n        fake_retry_action.wait_and_retry(deps_timeout_s=10, poll_interval_s=5, jitter=True),\n    )\n    assert fake_retry_action._elapsed.seconds == 1  # init delay only\n    mock_client_request.assert_called_once_with(\n        \"http://localhost/a_fake_job/0/a_fake_action\",\n        data=dict(command=\"retry\", use_latest_command=1),\n        user_attribution=True,\n    )\n\n\n@mock.patch.object(retry, \"RetryAction\", autospec=True)\ndef test_retry_actions(mock_retry_action, mock_client, event_loop):\n    mock_wait_and_retry = mock_retry_action.return_value.wait_and_retry\n    mock_wait_and_retry.return_value = _empty_coro()\n\n    r_actions = retry.retry_actions(\n        \"http://localhost\",\n        [\"a_job.0.an_action_0\", \"another_job.1.an_action_1\"],\n        use_latest_command=True,\n        deps_timeout_s=4,\n    )\n\n    assert r_actions == [mock_retry_action.return_value] * 2\n    assert mock_retry_action.call_args_list == [\n        mock.call(mock_client.return_value, \"a_job.0.an_action_0\", use_latest_command=True),\n        mock.call(mock_client.return_value, \"another_job.1.an_action_1\", use_latest_command=True),\n    ]\n    assert mock_wait_and_retry.call_args_list == [\n        mock.call(deps_timeout_s=4, jitter=False),\n        mock.call(deps_timeout_s=4),\n    ]\n"
  },
  {
    "path": "tests/config/__init__.py",
    "content": ""
  },
  {
    "path": "tests/config/config_parse_test.py",
    "content": "import datetime\nimport os\nimport shutil\nimport tempfile\nfrom unittest import mock\n\nimport pytest\nimport pytz\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_raises\nfrom tron.config import config_parse\nfrom tron.config import config_utils\nfrom tron.config import ConfigError\nfrom tron.config import schedule_parse\nfrom tron.config import schema\nfrom tron.config.config_parse import build_format_string_validator\nfrom tron.config.config_parse import CLEANUP_ACTION_NAME\nfrom tron.config.config_parse import valid_cleanup_action_name\nfrom tron.config.config_parse import valid_config\nfrom tron.config.config_parse import valid_job\nfrom tron.config.config_parse import valid_node_pool\nfrom tron.config.config_parse import valid_output_stream_dir\nfrom tron.config.config_parse import validate_fragment\nfrom tron.config.config_utils import NullConfigContext\nfrom tron.config.schedule_parse import ConfigDailyScheduler\nfrom tron.config.schema import ConfigNodeAffinity\nfrom tron.config.schema import MASTER_NAMESPACE\n\nBASE_CONFIG = dict(\n    ssh_options=dict(agent=False, identities=[\"tests/test_id_rsa\"]),\n    time_zone=\"EST\",\n    output_stream_dir=\"/tmp\",\n    nodes=[\n        dict(name=\"node0\", hostname=\"node0\"),\n        dict(name=\"node1\", hostname=\"node1\"),\n    ],\n    node_pools=[dict(name=\"NodePool\", nodes=[\"node0\", \"node1\"])],\n)\n\n\ndef make_ssh_options():\n    return schema.ConfigSSHOptions(\n        agent=False,\n        identities=(\"tests/test_id_rsa\",),\n        known_hosts_file=None,\n        connect_timeout=30,\n        idle_connection_timeout=3600,\n        jitter_min_load=4,\n        jitter_max_delay=20,\n        jitter_load_factor=1,\n    )\n\n\ndef make_mock_schedule():\n    return ConfigDailyScheduler(\n        days=set(),\n        hour=0,\n        minute=0,\n        second=0,\n        original=\"00:00:00 \",\n        jitter=None,\n    )\n\n\ndef make_command_context():\n    return {\n        \"python\": \"/usr/bin/python\",\n        \"batch_dir\": \"/tron/batch/test/foo\",\n    }\n\n\ndef make_nodes():\n    return {\n        \"node0\": schema.ConfigNode(\n            name=\"node0\",\n            username=\"foo\",\n            hostname=\"node0\",\n            port=22,\n        ),\n        \"node1\": schema.ConfigNode(\n            name=\"node1\",\n            username=\"foo\",\n            hostname=\"node1\",\n            port=22,\n        ),\n    }\n\n\ndef make_node_pools():\n    return {\n        \"NodePool\": schema.ConfigNodePool(\n            nodes=(\"node0\", \"node1\"),\n            name=\"NodePool\",\n        ),\n    }\n\n\ndef make_mesos_options():\n    return schema.ConfigMesos(\n        master_address=None,\n        master_port=5050,\n        secret_file=None,\n        role=\"*\",\n        principal=\"tron\",\n        enabled=False,\n        default_volumes=(),\n        dockercfg_location=None,\n        offer_timeout=300,\n    )\n\n\ndef make_k8s_options():\n    return schema.ConfigKubernetes(enabled=False, non_retryable_exit_codes=(), default_volumes=())\n\n\ndef make_action(**kwargs):\n    kwargs.setdefault(\"name\", \"action\"),\n    kwargs.setdefault(\"command\", \"command\")\n    kwargs.setdefault(\"executor\", schema.ExecutorTypes.ssh.value)\n    kwargs.setdefault(\"requires\", ())\n    
kwargs.setdefault(\"expected_runtime\", datetime.timedelta(1))\n    return schema.ConfigAction(**kwargs)\n\n\ndef make_cleanup_action(**kwargs):\n    kwargs.setdefault(\"name\", \"cleanup\"),\n    kwargs.setdefault(\"command\", \"command\")\n    kwargs.setdefault(\"executor\", schema.ExecutorTypes.ssh.value)\n    kwargs.setdefault(\"expected_runtime\", datetime.timedelta(1))\n    return schema.ConfigCleanupAction(**kwargs)\n\n\ndef make_job(**kwargs):\n    kwargs.setdefault(\"namespace\", \"MASTER\")\n    kwargs.setdefault(\"name\", f\"{kwargs['namespace']}.job_name\")\n    kwargs.setdefault(\"node\", \"node0\")\n    kwargs.setdefault(\"enabled\", True)\n    kwargs.setdefault(\"monitoring\", {})\n    kwargs.setdefault(\n        \"schedule\",\n        schedule_parse.ConfigDailyScheduler(\n            days=set(),\n            hour=16,\n            minute=30,\n            second=0,\n            original=\"16:30:00 \",\n            jitter=None,\n        ),\n    )\n    kwargs.setdefault(\"actions\", {\"action\": make_action()})\n    kwargs.setdefault(\"queueing\", True)\n    kwargs.setdefault(\"run_limit\", 50)\n    kwargs.setdefault(\"all_nodes\", False)\n    kwargs.setdefault(\"cleanup_action\", make_cleanup_action())\n    kwargs.setdefault(\"max_runtime\")\n    kwargs.setdefault(\"allow_overlap\", False)\n    kwargs.setdefault(\"time_zone\", None)\n    kwargs.setdefault(\"expected_runtime\", datetime.timedelta(0, 3600))\n    kwargs.setdefault(\"use_k8s\", False)\n    return schema.ConfigJob(**kwargs)\n\n\ndef make_master_jobs():\n    return {\n        \"MASTER.test_job0\": make_job(\n            name=\"MASTER.test_job0\",\n            schedule=make_mock_schedule(),\n            expected_runtime=datetime.timedelta(1),\n        ),\n        \"MASTER.test_job1\": make_job(\n            name=\"MASTER.test_job1\",\n            schedule=schedule_parse.ConfigDailyScheduler(\n                days={1, 3, 5},\n                hour=0,\n                minute=30,\n                second=0,\n                original=\"00:30:00 MWF\",\n                jitter=None,\n            ),\n            actions={\n                \"action\": make_action(\n                    requires=(\"action1\",),\n                    expected_runtime=datetime.timedelta(0, 7200),\n                ),\n                \"action1\": make_action(\n                    name=\"action1\",\n                    expected_runtime=datetime.timedelta(0, 7200),\n                ),\n            },\n            time_zone=pytz.timezone(\"Pacific/Auckland\"),\n            expected_runtime=datetime.timedelta(1),\n            cleanup_action=None,\n            allow_overlap=True,\n        ),\n        \"MASTER.test_job2\": make_job(\n            name=\"MASTER.test_job2\",\n            node=\"node1\",\n            actions={\n                \"action2_0\": make_action(\n                    name=\"action2_0\",\n                    command=\"test_command2.0\",\n                ),\n            },\n            time_zone=pytz.timezone(\"Pacific/Auckland\"),\n            expected_runtime=datetime.timedelta(1),\n            cleanup_action=None,\n        ),\n        \"MASTER.test_job_actions_dict\": make_job(\n            name=\"MASTER.test_job_actions_dict\",\n            node=\"node1\",\n            schedule=make_mock_schedule(),\n            actions={\n                \"action\": make_action(),\n                \"action1\": make_action(name=\"action1\"),\n                \"action2\": make_action(\n                    name=\"action2\",\n                    
requires=(\"action\", \"action1\"),\n                    node=\"node0\",\n                ),\n            },\n            cleanup_action=None,\n            expected_runtime=datetime.timedelta(1),\n        ),\n        \"MASTER.test_job4\": make_job(\n            name=\"MASTER.test_job4\",\n            node=\"NodePool\",\n            schedule=schedule_parse.ConfigDailyScheduler(\n                original=\"00:00:00 \",\n                hour=0,\n                minute=0,\n                second=0,\n                days=set(),\n                jitter=None,\n            ),\n            all_nodes=True,\n            enabled=False,\n            cleanup_action=None,\n            expected_runtime=datetime.timedelta(1),\n        ),\n        \"MASTER.test_job_mesos\": make_job(\n            name=\"MASTER.test_job_mesos\",\n            node=\"NodePool\",\n            schedule=schedule_parse.ConfigDailyScheduler(\n                original=\"00:00:00 \",\n                hour=0,\n                minute=0,\n                second=0,\n                days=set(),\n                jitter=None,\n            ),\n            actions={\n                \"action_mesos\": make_action(\n                    name=\"action_mesos\",\n                    command=\"test_command_mesos\",\n                    executor=schema.ExecutorTypes.mesos.value,\n                    cpus=0.1,\n                    mem=100,\n                    disk=600,\n                    docker_image=\"container:latest\",\n                ),\n            },\n            cleanup_action=None,\n            expected_runtime=datetime.timedelta(1),\n        ),\n        \"MASTER.test_job_k8s\": make_job(\n            name=\"MASTER.test_job_k8s\",\n            node=\"NodePool\",\n            schedule=schedule_parse.ConfigDailyScheduler(\n                original=\"00:00:00 \",\n                hour=0,\n                minute=0,\n                second=0,\n                days=set(),\n                jitter=None,\n            ),\n            actions={\n                \"action_k8s\": make_action(\n                    name=\"action_k8s\",\n                    command=\"test_command_k8s\",\n                    executor=schema.ExecutorTypes.kubernetes.value,\n                    cpus=0.1,\n                    mem=100,\n                    disk=600,\n                    cap_add=[\"KILL\"],\n                    cap_drop=[\"CHOWN\", \"KILL\"],\n                    docker_image=\"container:latest\",\n                    secret_env=dict(\n                        TEST_SECRET=schema.ConfigSecretSource(secret_name=\"tron-secret-test-secret--1\", key=\"secret_1\")\n                    ),\n                    secret_volumes=(\n                        schema.ConfigSecretVolume(\n                            secret_volume_name=\"abc\",\n                            secret_name=\"secret1\",\n                            container_path=\"/b/c\",\n                            default_mode=\"0644\",\n                            items=(schema.ConfigSecretVolumeItem(key=\"secret1\", path=\"abcd\", mode=\"777\"),),\n                        ),\n                    ),\n                    projected_sa_volumes=(\n                        schema.ConfigProjectedSAVolume(\n                            container_path=\"/var/secrets/whatever\",\n                            audience=\"foo.bar\",\n                            expiration_seconds=1800,\n                        ),\n                    ),\n                    node_selectors={\"yelp.com/pool\": \"default\"},\n                    
node_affinities=(\n                        ConfigNodeAffinity(\n                            key=\"instance_type\",\n                            operator=\"In\",\n                            value=(\"a1.1xlarge\",),\n                        ),\n                    ),\n                ),\n            },\n            cleanup_action=None,\n            expected_runtime=datetime.timedelta(1),\n        ),\n    }\n\n\ndef make_tron_config(\n    action_runner=None,\n    output_stream_dir=\"/tmp\",\n    command_context=None,\n    ssh_options=None,\n    time_zone=pytz.timezone(\"EST\"),\n    state_persistence=config_parse.DEFAULT_STATE_PERSISTENCE,\n    nodes=None,\n    node_pools=None,\n    jobs=None,\n    mesos_options=None,\n    k8s_options=None,\n    read_json=False,\n):\n    return schema.TronConfig(\n        action_runner=action_runner or {},\n        output_stream_dir=output_stream_dir,\n        command_context=command_context or dict(batch_dir=\"/tron/batch/test/foo\", python=\"/usr/bin/python\"),\n        ssh_options=ssh_options or make_ssh_options(),\n        time_zone=time_zone,\n        state_persistence=state_persistence,\n        nodes=nodes or make_nodes(),\n        node_pools=node_pools or make_node_pools(),\n        jobs=jobs or make_master_jobs(),\n        mesos_options=mesos_options or make_mesos_options(),\n        k8s_options=k8s_options or make_k8s_options(),\n        read_json=read_json,\n    )\n\n\ndef make_named_tron_config(jobs=None):\n    return schema.NamedTronConfig(jobs=jobs or make_master_jobs())\n\n\nclass ConfigTestCase(TestCase):\n    JOBS_CONFIG = dict(\n        jobs=[\n            dict(\n                name=\"test_job0\",\n                node=\"node0\",\n                schedule=\"daily 00:00:00\",\n                actions=[dict(name=\"action\", command=\"command\")],\n                cleanup_action=dict(command=\"command\"),\n            ),\n            dict(\n                name=\"test_job1\",\n                node=\"node0\",\n                schedule=\"daily 00:30:00 MWF\",\n                allow_overlap=True,\n                time_zone=\"Pacific/Auckland\",\n                actions=[\n                    dict(\n                        name=\"action\",\n                        command=\"command\",\n                        requires=[\"action1\"],\n                        expected_runtime=\"2h\",\n                    ),\n                    dict(\n                        name=\"action1\",\n                        command=\"command\",\n                        expected_runtime=\"2h\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_job2\",\n                node=\"node1\",\n                schedule=\"daily 16:30:00\",\n                expected_runtime=\"1d\",\n                time_zone=\"Pacific/Auckland\",\n                actions=[dict(name=\"action2_0\", command=\"test_command2.0\")],\n            ),\n            dict(\n                name=\"test_job_actions_dict\",\n                node=\"node1\",\n                schedule=\"daily 00:00:00 \",\n                actions=dict(\n                    action=dict(command=\"command\"),\n                    action1=dict(command=\"command\"),\n                    action2=dict(\n                        node=\"node0\",\n                        command=\"command\",\n                        requires=[\"action\", \"action1\"],\n                    ),\n                ),\n            ),\n            dict(\n                name=\"test_job4\",\n                
node=\"NodePool\",\n                all_nodes=True,\n                schedule=\"daily\",\n                enabled=False,\n                actions=[dict(name=\"action\", command=\"command\")],\n            ),\n            dict(\n                name=\"test_job_mesos\",\n                node=\"NodePool\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_mesos\",\n                        executor=\"mesos\",\n                        command=\"test_command_mesos\",\n                        cpus=0.1,\n                        mem=100,\n                        disk=600,\n                        docker_image=\"container:latest\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_job_k8s\",\n                node=\"NodePool\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_k8s\",\n                        executor=\"kubernetes\",\n                        command=\"test_command_k8s\",\n                        cpus=0.1,\n                        mem=100,\n                        disk=600,\n                        docker_image=\"container:latest\",\n                        secret_env=dict(TEST_SECRET=dict(secret_name=\"tron-secret-test-secret--1\", key=\"secret_1\")),\n                        secret_volumes=[\n                            dict(\n                                secret_volume_name=\"abc\",\n                                secret_name=\"secret1\",\n                                container_path=\"/b/c\",\n                                default_mode=\"0644\",\n                                items=[\n                                    dict(key=\"secret1\", path=\"abcd\", mode=\"777\"),\n                                ],\n                            ),\n                        ],\n                        projected_sa_volumes=[\n                            dict(\n                                container_path=\"/var/secrets/whatever\",\n                                audience=\"foo.bar\",\n                                expiration_seconds=1800,\n                            ),\n                        ],\n                        cap_add=[\"KILL\"],\n                        cap_drop=[\"CHOWN\", \"KILL\"],\n                        node_selectors={\"yelp.com/pool\": \"default\"},\n                        node_affinities=[{\"key\": \"instance_type\", \"operator\": \"In\", \"value\": [\"a1.1xlarge\"]}],\n                    ),\n                ],\n            ),\n        ],\n    )\n\n    config = dict(\n        command_context=dict(\n            batch_dir=\"/tron/batch/test/foo\",\n            python=\"/usr/bin/python\",\n        ),\n        **BASE_CONFIG,\n        **JOBS_CONFIG,\n    )\n\n    @mock.patch.dict(\"tron.config.config_parse.ValidateNode.defaults\")\n    def test_attributes(self):\n        config_parse.ValidateNode.defaults[\"username\"] = \"foo\"\n        expected = make_tron_config()\n\n        test_config = valid_config(self.config)\n\n        assert test_config.command_context == expected.command_context\n        assert test_config.ssh_options == expected.ssh_options\n        assert test_config.mesos_options == expected.mesos_options\n        assert test_config.time_zone == expected.time_zone\n        assert test_config.nodes == expected.nodes\n        assert test_config.node_pools == expected.node_pools\n        assert test_config.k8s_options == 
expected.k8s_options\n        assert test_config.read_json == expected.read_json\n        for key in [\"0\", \"1\", \"2\", \"_actions_dict\", \"4\", \"_mesos\"]:\n            job_name = f\"MASTER.test_job{key}\"\n            assert job_name in test_config.jobs, f\"{job_name} in test_config.jobs\"\n            assert job_name in expected.jobs, f\"{job_name} in test_config.jobs\"\n            assert test_config.jobs[job_name] == expected.jobs[job_name]\n        assert test_config == expected\n\n    def test_empty_node_test(self):\n        valid_config(dict(nodes=None))\n\n\nclass TestNamedConfig(TestCase):\n    config = ConfigTestCase.JOBS_CONFIG\n\n    def test_attributes(self):\n        expected = make_named_tron_config(\n            jobs={\n                \"test_job\": make_job(\n                    name=\"test_job\",\n                    namespace=\"test_namespace\",\n                    schedule=make_mock_schedule(),\n                    expected_runtime=datetime.timedelta(1),\n                ),\n            },\n        )\n        test_config = validate_fragment(\n            \"test_namespace\",\n            dict(\n                jobs=[\n                    dict(\n                        name=\"test_job\",\n                        namespace=\"test_namespace\",\n                        node=\"node0\",\n                        schedule=\"daily 00:00:00 \",\n                        actions=[dict(name=\"action\", command=\"command\")],\n                        cleanup_action=dict(command=\"command\"),\n                    ),\n                ],\n            ),\n        )\n        assert_equal(test_config, expected)\n\n    def test_attributes_with_master_context(self):\n        expected = make_named_tron_config(\n            jobs={\n                \"test_namespace.test_job\": make_job(\n                    name=\"test_namespace.test_job\",\n                    namespace=\"test_namespace\",\n                    schedule=make_mock_schedule(),\n                    expected_runtime=datetime.timedelta(1),\n                ),\n            },\n        )\n        master_config = dict(\n            nodes=[\n                dict(\n                    name=\"node0\",\n                    hostname=\"node0\",\n                ),\n            ],\n            node_pools=[\n                dict(\n                    name=\"nodepool0\",\n                    nodes=[\"node0\"],\n                ),\n            ],\n        )\n        test_config = validate_fragment(\n            \"test_namespace\",\n            dict(\n                jobs=[\n                    dict(\n                        name=\"test_job\",\n                        namespace=\"test_namespace\",\n                        node=\"node0\",\n                        schedule=\"daily 00:00:00\",\n                        actions=[dict(name=\"action\", command=\"command\")],\n                        cleanup_action=dict(command=\"command\"),\n                    ),\n                ],\n            ),\n            master_config=master_config,\n        )\n        assert_equal(test_config, expected)\n\n    def test_invalid_job_node_with_master_context(self):\n        master_config = dict(\n            nodes=[\n                dict(\n                    name=\"node0\",\n                    hostname=\"node0\",\n                ),\n            ],\n        )\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job\",\n                    namespace=\"test_namespace\",\n                    
node=\"node1\",\n                    schedule=\"daily 00:30:00 \",\n                    actions=[dict(name=\"action\", command=\"command\")],\n                    cleanup_action=dict(command=\"command\"),\n                ),\n            ],\n        )\n        expected_message = \"Unknown node name node1 at test_namespace.NamedConfigFragment.jobs.Job.test_job.node\"\n        exception = assert_raises(\n            ConfigError,\n            validate_fragment,\n            \"test_namespace\",\n            test_config,\n            master_config,\n        )\n        assert_in(expected_message, str(exception))\n\n    def test_invalid_action_node_with_master_context(self):\n        master_config = dict(\n            nodes=[\n                dict(\n                    name=\"node0\",\n                    hostname=\"node0\",\n                ),\n            ],\n            node_pools=[\n                dict(\n                    name=\"nodepool0\",\n                    nodes=[\"node0\"],\n                ),\n            ],\n        )\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job\",\n                    namespace=\"test_namespace\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00 \",\n                    actions=[dict(name=\"action\", node=\"nodepool1\", command=\"command\")],\n                    cleanup_action=dict(command=\"command\"),\n                ),\n            ],\n        )\n        expected_message = \"Unknown node name nodepool1 at test_namespace.NamedConfigFragment.jobs.Job.test_job.actions.Action.action.node\"\n\n        exception = assert_raises(\n            ConfigError,\n            validate_fragment,\n            \"test_namespace\",\n            test_config,\n            master_config,\n        )\n        assert_in(expected_message, str(exception))\n\n\nclass TestJobConfig(TestCase):\n    def test_no_actions(self):\n        test_config = dict(\n            jobs=[\n                dict(name=\"test_job0\", node=\"node0\", schedule=\"daily 00:30:00 \"),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_message = \"Job test_job0 is missing options: actions\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_message, str(exception))\n\n    def test_empty_actions(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00 \",\n                    actions=None,\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_message = \"Value at config.jobs.Job.test_job0.actions\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_message, str(exception))\n\n    def test_dupe_names(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(name=\"action\", command=\"cmd\"),\n                        dict(name=\"action\", command=\"cmd\"),\n                    ],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected = \"Duplicate name action at 
config.jobs.Job.test_job0.actions\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected, str(exception))\n\n    def test_bad_requires(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[dict(name=\"action\", command=\"cmd\")],\n                ),\n                dict(\n                    name=\"test_job1\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(\n                            name=\"action1\",\n                            command=\"cmd\",\n                            requires=[\"action\"],\n                        ),\n                    ],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_message = \"jobs.MASTER.test_job1.action1 has a dependency \" '\"action\" that is not in the same job!'\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_message, str(exception))\n\n    def test_circular_dependency(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(\n                            name=\"action1\",\n                            command=\"cmd\",\n                            requires=[\"action2\"],\n                        ),\n                        dict(\n                            name=\"action2\",\n                            command=\"cmd\",\n                            requires=[\"action1\"],\n                        ),\n                    ],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expect = \"Circular dependency in job.MASTER.test_job0: action1 -> action2\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expect, str(exception))\n\n    def test_circular_dependency_multiaction(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(\n                            name=\"action1\",\n                            command=\"cmd\",\n                            requires=[\"action2\"],\n                        ),\n                        dict(\n                            name=\"action2\",\n                            command=\"cmd\",\n                            requires=[\"action3\"],\n                        ),\n                        dict(\n                            name=\"action3\",\n                            command=\"cmd\",\n                            requires=[\"action4\"],\n                        ),\n                        dict(\n                            name=\"action4\",\n                            command=\"cmd\",\n                            requires=[\"action5\"],\n                        ),\n                        dict(\n                            name=\"action5\",\n        
                    command=\"cmd\",\n                            requires=[\"action3\"],\n                        ),\n                    ],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expect = \"Circular dependency in job.MASTER.test_job0: action3 -> action4 -> action5\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expect, str(exception))\n\n    def test_config_cleanup_name_collision(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(name=CLEANUP_ACTION_NAME, command=\"cmd\"),\n                    ],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n        expected_message = \"config.jobs.Job.test_job0.actions.Action.cleanup.name\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_message, str(exception))\n\n    def test_config_cleanup_action_name(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(name=\"action\", command=\"cmd\"),\n                    ],\n                    cleanup_action=dict(name=\"gerald\", command=\"cmd\"),\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_msg = \"Cleanup actions cannot have custom names\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_msg, str(exception))\n\n    def test_config_cleanup_requires(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[\n                        dict(name=\"action\", command=\"cmd\"),\n                    ],\n                    cleanup_action=dict(command=\"cmd\", requires=[\"action\"]),\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_msg = \"Unknown keys in CleanupAction : requires\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_equal(expected_msg, str(exception))\n\n    def test_validate_job_no_actions(self):\n        job_config = dict(\n            name=\"job_name\",\n            node=\"localhost\",\n            schedule=\"daily 00:30:00\",\n            actions=[],\n        )\n        config_context = config_utils.ConfigContext(\n            \"config\",\n            [\"localhost\"],\n            None,\n            None,\n        )\n        expected_msg = \"Required non-empty list at config.Job.job_name.actions\"\n        exception = assert_raises(\n            ConfigError,\n            valid_job,\n            job_config,\n            config_context,\n        )\n        assert_in(expected_msg, str(exception))\n\n\nclass TestValidSecretSource(TestCase):\n    def test_missing_secret_name(self):\n        secret_env = dict(key=\"no_secret_name\")\n\n        
with pytest.raises(ConfigError) as missing_exc:\n            config_parse.valid_secret_source(secret_env, NullConfigContext)\n\n        assert \"missing options: secret\" in str(missing_exc.value)\n\n    def test_validate_job_extra_secret_env(self):\n        secret_env = dict(\n            secret_name=\"tron-secret-k8s-name-no--secret--name\",\n            key=\"no_secret_name\",\n            extra_key=\"unknown\",\n        )\n        with pytest.raises(ConfigError) as missing_exc:\n            config_parse.valid_secret_source(secret_env, NullConfigContext)\n\n        assert \"Unknown keys in SecretSource : extra_key\" in str(missing_exc.value)\n\n    def test_valid_job_secret_env_success(self):\n        secret_env = dict(\n            secret_name=\"tron-secret-k8s-name-no--secret--name\",\n            key=\"no_secret_name\",\n        )\n\n        expected_env = schema.ConfigSecretSource(**secret_env)\n\n        built_env = config_parse.valid_secret_source(secret_env, NullConfigContext)\n        assert built_env == expected_env\n\n\nclass TestNodeConfig(TestCase):\n    def test_validate_node_pool(self):\n        config_node_pool = valid_node_pool(\n            dict(name=\"theName\", nodes=[\"node1\", \"node2\"]),\n        )\n        assert_equal(config_node_pool.name, \"theName\")\n        assert_equal(len(config_node_pool.nodes), 2)\n\n    def test_overlap_node_and_node_pools(self):\n        tron_config = dict(\n            nodes=[\n                dict(name=\"sameName\", hostname=\"localhost\"),\n            ],\n            node_pools=[\n                dict(name=\"sameName\", nodes=[\"sameNode\"]),\n            ],\n        )\n        expected_msg = \"Node and NodePool names must be unique sameName\"\n        exception = assert_raises(ConfigError, valid_config, tron_config)\n        assert_in(expected_msg, str(exception))\n\n    def test_invalid_node_name(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"unknown_node\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[dict(name=\"action\", command=\"cmd\")],\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_msg = \"Unknown node name unknown_node at config.jobs.Job.test_job0.node\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_equal(expected_msg, str(exception))\n\n    def test_invalid_nested_node_pools(self):\n        test_config = dict(\n            nodes=[\n                dict(name=\"node0\", hostname=\"node0\"),\n                dict(name=\"node1\", hostname=\"node1\"),\n            ],\n            node_pools=[\n                dict(name=\"pool0\", nodes=[\"node1\"]),\n                dict(name=\"pool1\", nodes=[\"node0\", \"pool0\"]),\n            ],\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"pool1\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[dict(name=\"action\", command=\"cmd\")],\n                ),\n            ],\n        )\n\n        expected_msg = \"NodePool pool1 contains other NodePools: pool0\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_msg, str(exception))\n\n    def test_invalid_node_pool_config(self):\n        test_config = dict(\n           
 nodes=[\n                dict(name=\"node0\", hostname=\"node0\"),\n                dict(name=\"node1\", hostname=\"node1\"),\n            ],\n            node_pools=[\n                dict(name=\"pool0\", hostname=[\"node1\"]),\n                dict(name=\"pool1\", nodes=[\"node0\", \"pool0\"]),\n            ],\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"pool1\",\n                    schedule=\"daily 00:30:00\",\n                    actions=[dict(name=\"action\", command=\"cmd\")],\n                ),\n            ],\n        )\n\n        expected_msg = \"NodePool pool0 is missing options\"\n        exception = assert_raises(\n            ConfigError,\n            valid_config,\n            test_config,\n        )\n        assert_in(expected_msg, str(exception))\n\n    def test_invalid_named_update(self):\n        test_config = dict(bozray=None)\n        expected_message = \"Unknown keys in NamedConfigFragment : bozray\"\n        exception = assert_raises(\n            ConfigError,\n            validate_fragment,\n            \"foo\",\n            test_config,\n        )\n        assert_in(expected_message, str(exception))\n\n\nclass TestValidateJobs(TestCase):\n    def test_valid_jobs_success(self):\n        test_config = dict(\n            jobs=[\n                dict(\n                    name=\"test_job0\",\n                    node=\"node0\",\n                    schedule=\"daily\",\n                    expected_runtime=\"20m\",\n                    actions=[\n                        dict(\n                            name=\"action\",\n                            command=\"command\",\n                            expected_runtime=\"20m\",\n                        ),\n                        dict(\n                            name=\"action_mesos\",\n                            command=\"command\",\n                            executor=\"mesos\",\n                            cpus=4,\n                            mem=300,\n                            disk=600,\n                            constraints=[\n                                dict(\n                                    attribute=\"pool\",\n                                    operator=\"LIKE\",\n                                    value=\"default\",\n                                ),\n                            ],\n                            docker_image=\"my_container:latest\",\n                            docker_parameters=[\n                                dict(key=\"label\", value=\"labelA\"),\n                                dict(key=\"label\", value=\"labelB\"),\n                            ],\n                            env=dict(USER=\"batch\"),\n                            extra_volumes=[\n                                dict(\n                                    container_path=\"/tmp\",\n                                    host_path=\"/home/tmp\",\n                                    mode=\"RO\",\n                                ),\n                            ],\n                        ),\n                        dict(\n                            name=\"test_trigger_attrs\",\n                            command=\"foo\",\n                            triggered_by=[\"foo.bar\"],\n                            trigger_downstreams=True,\n                        ),\n                    ],\n                    cleanup_action=dict(command=\"command\"),\n                ),\n            ],\n            **BASE_CONFIG,\n        )\n\n        expected_jobs = {\n   
         \"MASTER.test_job0\": make_job(\n                name=\"MASTER.test_job0\",\n                schedule=make_mock_schedule(),\n                actions={\n                    \"action\": make_action(\n                        expected_runtime=datetime.timedelta(0, 1200),\n                    ),\n                    \"action_mesos\": make_action(\n                        name=\"action_mesos\",\n                        executor=schema.ExecutorTypes.mesos.value,\n                        cpus=4.0,\n                        mem=300.0,\n                        disk=600.0,\n                        constraints=(\n                            schema.ConfigConstraint(\n                                attribute=\"pool\",\n                                operator=\"LIKE\",\n                                value=\"default\",\n                            ),\n                        ),\n                        docker_image=\"my_container:latest\",\n                        docker_parameters=(\n                            schema.ConfigParameter(\n                                key=\"label\",\n                                value=\"labelA\",\n                            ),\n                            schema.ConfigParameter(\n                                key=\"label\",\n                                value=\"labelB\",\n                            ),\n                        ),\n                        env={\"USER\": \"batch\"},\n                        extra_volumes=(\n                            schema.ConfigVolume(\n                                container_path=\"/tmp\",\n                                host_path=\"/home/tmp\",\n                                mode=schema.VolumeModes.RO.value,\n                            ),\n                        ),\n                        expected_runtime=datetime.timedelta(hours=24),\n                    ),\n                    \"test_trigger_attrs\": make_action(\n                        name=\"test_trigger_attrs\",\n                        command=\"foo\",\n                        triggered_by=(\"foo.bar\",),\n                        trigger_downstreams=True,\n                    ),\n                },\n                expected_runtime=datetime.timedelta(0, 1200),\n            ),\n        }\n\n        context = config_utils.ConfigContext(\n            \"config\",\n            [\"node0\"],\n            None,\n            MASTER_NAMESPACE,\n        )\n        config_parse.validate_jobs(test_config, context)\n        assert expected_jobs == test_config[\"jobs\"]\n\n\nclass TestValidCleanupActionName(TestCase):\n    def test_valid_cleanup_action_name_pass(self):\n        name = valid_cleanup_action_name(CLEANUP_ACTION_NAME, None)\n        assert_equal(CLEANUP_ACTION_NAME, name)\n\n    def test_valid_cleanup_action_name_fail(self):\n        assert_raises(\n            ConfigError,\n            valid_cleanup_action_name,\n            \"other\",\n            NullConfigContext,\n        )\n\n\nclass TestValidOutputStreamDir(TestCase):\n    @setup\n    def setup_dir(self):\n        self.dir = tempfile.mkdtemp()\n\n    @teardown\n    def teardown_dir(self):\n        shutil.rmtree(self.dir)\n\n    def test_valid_dir(self):\n        path = valid_output_stream_dir(self.dir, NullConfigContext)\n        assert_equal(self.dir, path)\n\n    def test_missing_dir(self):\n        exception = assert_raises(\n            ConfigError,\n            valid_output_stream_dir,\n            \"bogus-dir\",\n            NullConfigContext,\n        )\n        assert_in(\"is not a 
directory\", str(exception))\n\n    # TODO: docker tests run as root so everything is writeable\n    # def test_no_ro_dir(self):\n    #     os.chmod(self.dir, stat.S_IRUSR)\n    #     exception = assert_raises(\n    #         ConfigError,\n    #         valid_output_stream_dir, self.dir, NullConfigContext,\n    #     )\n    #     assert_in(\"is not writable\", str(exception))\n\n    def test_missing_with_partial_context(self):\n        dir = \"/bogus/path/does/not/exist\"\n        context = config_utils.PartialConfigContext(\"path\", \"MASTER\")\n        path = config_parse.valid_output_stream_dir(dir, context)\n        assert_equal(path, dir)\n\n\nclass TestBuildFormatStringValidator(TestCase):\n    @setup\n    def setup_keys(self):\n        self.context = dict.fromkeys([\"one\", \"seven\", \"stars\"])\n        self.validator = build_format_string_validator(self.context)\n\n    def test_validator_passes(self):\n        template = \"The {one} thing I {seven} is {stars}\"\n        assert self.validator(template, NullConfigContext)\n\n    def test_validator_unknown_variable_error(self):\n        template = \"The {one} thing I {seven} is {unknown}\"\n        exception = assert_raises(\n            ConfigError,\n            self.validator,\n            template,\n            NullConfigContext,\n        )\n        assert_in(\"Unknown context variable\", str(exception))\n\n    def test_validator_passes_with_context(self):\n        template = \"The {one} thing I {seven} is {mars}\"\n        context = config_utils.ConfigContext(\n            None,\n            None,\n            {\"mars\": \"ok\"},\n            None,\n        )\n        assert self.validator(template, context) == template\n\n    def test_validator_valid_string_without_no_percent_escape(self):\n        template = \"The {one} {seven} thing is {mars} --year %Y\"\n        context = config_utils.ConfigContext(\n            path=None,\n            nodes=None,\n            command_context={\"mars\": \"ok\"},\n            namespace=None,\n        )\n        assert self.validator(template, context)\n\n\nclass TestValidateConfigMapping(TestCase):\n    config = dict(**BASE_CONFIG, command_context=dict(some_var=\"The string\"))\n\n    def test_validate_config_mapping_missing_master(self):\n        config_mapping = {\"other\": mock.Mock()}\n        seq = config_parse.validate_config_mapping(config_mapping)\n        exception = assert_raises(ConfigError, list, seq)\n        assert_in(\"requires a MASTER namespace\", str(exception))\n\n    def test_validate_config_mapping(self):\n        master_config = self.config\n        other_config = TestNamedConfig.config\n        config_mapping = {\n            \"other\": other_config,\n            MASTER_NAMESPACE: master_config,\n        }\n        result = list(config_parse.validate_config_mapping(config_mapping))\n        assert_equal(len(result), 2)\n        assert_equal(result[0][0], MASTER_NAMESPACE)\n        assert_equal(result[1][0], \"other\")\n\n\nclass TestConfigContainer(TestCase):\n    config = BASE_CONFIG\n\n    @setup\n    def setup_container(self):\n        other_config = TestNamedConfig.config\n        self.config_mapping = {\n            MASTER_NAMESPACE: valid_config(self.config),\n            \"other\": validate_fragment(\"other\", other_config),\n        }\n        self.container = config_parse.ConfigContainer(self.config_mapping)\n\n    def test_create(self):\n        config_mapping = {\n            MASTER_NAMESPACE: self.config,\n            \"other\": TestNamedConfig.config,\n     
   }\n\n        container = config_parse.ConfigContainer.create(config_mapping)\n        assert_equal(set(container.configs.keys()), {\"MASTER\", \"other\"})\n\n    def test_create_missing_master(self):\n        config_mapping = {\"other\": mock.Mock()}\n        assert_raises(\n            ConfigError,\n            config_parse.ConfigContainer.create,\n            config_mapping,\n        )\n\n    def test_get_job_names(self):\n        job_names = self.container.get_job_names()\n        expected = [\n            \"test_job1\",\n            \"test_job0\",\n            \"test_job_actions_dict\",\n            \"test_job2\",\n            \"test_job4\",\n            \"test_job_mesos\",\n            \"test_job_k8s\",\n        ]\n        assert_equal(set(job_names), set(expected))\n\n    def test_get_jobs(self):\n        expected = [\n            \"test_job1\",\n            \"test_job0\",\n            \"test_job_actions_dict\",\n            \"test_job2\",\n            \"test_job4\",\n            \"test_job_mesos\",\n            \"test_job_k8s\",\n        ]\n        assert_equal(set(expected), set(self.container.get_jobs().keys()))\n\n    def test_get_node_names(self):\n        node_names = self.container.get_node_names()\n        expected = {\"node0\", \"node1\", \"NodePool\"}\n        assert_equal(node_names, expected)\n\n\nclass TestValidateSSHOptions(TestCase):\n    @setup\n    def setup_context(self):\n        self.context = config_utils.NullConfigContext\n        self.config = {\"agent\": True, \"identities\": []}\n\n    @mock.patch.dict(\"tron.config.config_parse.os.environ\")\n    def test_post_validation_failed(self):\n        if \"SSH_AUTH_SOCK\" in os.environ:\n            del os.environ[\"SSH_AUTH_SOCK\"]\n        assert_raises(\n            ConfigError,\n            config_parse.valid_ssh_options.validate,\n            self.config,\n            self.context,\n        )\n\n    @mock.patch.dict(\"tron.config.config_parse.os.environ\")\n    def test_post_validation_success(self):\n        os.environ[\"SSH_AUTH_SOCK\"] = \"something\"\n        config = config_parse.valid_ssh_options.validate(\n            self.config,\n            self.context,\n        )\n        assert_equal(config.agent, True)\n\n\nclass TestValidateIdentityFile(TestCase):\n    @setup\n    def setup_context(self):\n        self.context = config_utils.NullConfigContext\n        self.private_file = tempfile.NamedTemporaryFile()\n\n    def test_valid_identity_file_missing_private_key(self):\n        exception = assert_raises(\n            ConfigError,\n            config_parse.valid_identity_file,\n            \"/file/not/exist\",\n            self.context,\n        )\n        assert_in(\"Private key file\", str(exception))\n\n    def test_valid_identity_files_missing_public_key(self):\n        filename = self.private_file.name\n        exception = assert_raises(\n            ConfigError,\n            config_parse.valid_identity_file,\n            filename,\n            self.context,\n        )\n        assert_in(\"Public key file\", str(exception))\n\n    def test_valid_identity_files_valid(self):\n        filename = self.private_file.name\n        fh_private = open(filename + \".pub\", \"w\")\n        try:\n            config = config_parse.valid_identity_file(filename, self.context)\n        finally:\n            fh_private.close()\n            os.unlink(fh_private.name)\n        assert_equal(config, filename)\n\n    def test_valid_identity_files_missing_with_partial_context(self):\n        path = 
\"/bogus/file/does/not/exist\"\n        context = config_utils.PartialConfigContext(\"path\", \"MASTER\")\n        file_path = config_parse.valid_identity_file(path, context)\n        assert_equal(path, file_path)\n\n\nclass TestValidKnownHostsFile(TestCase):\n    @setup\n    def setup_context(self):\n        self.context = config_utils.NullConfigContext\n        self.known_hosts_file = tempfile.NamedTemporaryFile()\n\n    def test_valid_known_hosts_file_exists(self):\n        filename = config_parse.valid_known_hosts_file(\n            self.known_hosts_file.name,\n            self.context,\n        )\n        assert_equal(filename, self.known_hosts_file.name)\n\n    def test_valid_known_hosts_file_missing(self):\n        exception = assert_raises(\n            ConfigError,\n            config_parse.valid_known_hosts_file,\n            \"/bogus/path\",\n            self.context,\n        )\n        assert_in(\"Known hosts file /bogus/path\", str(exception))\n\n    def test_valid_known_hosts_file_missing_partial_context(self):\n        context = config_utils.PartialConfigContext\n        expected = \"/bogus/does/not/exist\"\n        filename = config_parse.valid_known_hosts_file(\n            expected,\n            context,\n        )\n        assert_equal(filename, expected)\n\n\nclass TestValidateVolume(TestCase):\n    @setup\n    def setup_context(self):\n        self.context = config_utils.NullConfigContext\n\n    def test_missing_container_path(self):\n        config = {\n            \"container_path_typo\": \"/nail/srv\",\n            \"host_path\": \"/tmp\",\n            \"mode\": \"RO\",\n        }\n        assert_raises(\n            ConfigError,\n            config_parse.valid_volume.validate,\n            config,\n            self.context,\n        )\n\n    def test_missing_host_path(self):\n        config = {\n            \"container_path\": \"/nail/srv\",\n            \"hostPath\": \"/tmp\",\n            \"mode\": \"RO\",\n        }\n        assert_raises(\n            ConfigError,\n            config_parse.valid_volume.validate,\n            config,\n            self.context,\n        )\n\n    def test_invalid_mode(self):\n        config = {\n            \"container_path\": \"/nail/srv\",\n            \"host_path\": \"/tmp\",\n            \"mode\": \"RA\",\n        }\n        assert_raises(\n            ConfigError,\n            config_parse.valid_volume.validate,\n            config,\n            self.context,\n        )\n\n    def test_valid(self):\n        config = {\n            \"container_path\": \"/nail/srv\",\n            \"host_path\": \"/tmp\",\n            \"mode\": schema.VolumeModes.RO.value,\n        }\n        assert_equal(\n            schema.ConfigVolume(**config),\n            config_parse.valid_volume.validate(config, self.context),\n        )\n\n    def test_mesos_default_volumes(self):\n        mesos_options = {\"master_address\": \"mesos_master\"}\n        mesos_options[\"default_volumes\"] = [\n            {\n                \"container_path\": \"/nail/srv\",\n                \"host_path\": \"/tmp\",\n                \"mode\": \"RO\",\n            },\n            {\n                \"container_path\": \"/nail/srv\",\n                \"host_path\": \"/tmp\",\n                \"mode\": \"invalid\",\n            },\n        ]\n\n        with pytest.raises(ConfigError):\n            config_parse.valid_mesos_options.validate(mesos_options, self.context)\n\n        # After we fix the error, expect error to go away.\n        
mesos_options[\"default_volumes\"][1][\"mode\"] = \"RW\"\n        assert config_parse.valid_mesos_options.validate(\n            mesos_options,\n            self.context,\n        )\n\n    def test_k8s_default_volumes(self):\n        k8s_options = {\"kubeconfig_path\": \"some_path\"}\n        k8s_options[\"default_volumes\"] = [\n            {\n                \"container_path\": \"/nail/srv\",\n                \"host_path\": \"/tmp\",\n                \"mode\": \"RO\",\n            },\n            {\n                \"container_path\": \"/nail/srv\",\n                \"host_path\": \"/tmp\",\n                \"mode\": \"invalid\",\n            },\n        ]\n\n        with pytest.raises(ConfigError):\n            config_parse.valid_kubernetes_options.validate(k8s_options, self.context)\n\n        # After we fix the error, expect error to go away.\n        k8s_options[\"default_volumes\"][1][\"mode\"] = \"RW\"\n        assert config_parse.valid_kubernetes_options.validate(\n            k8s_options,\n            self.context,\n        )\n\n\nclass TestValidPermissionMode:\n    @pytest.mark.parametrize(\n        (\"permission\", \"normalized\"),\n        [(\"777\", \"777\"), (\"0\", \"0\"), (\"0000\", \"0000\"), (\"0123\", \"0123\"), (0, \"0\"), (7777, \"7777\")],\n    )\n    def test_valid_permissions(self, permission, normalized):\n        result = config_parse.valid_permission_mode(permission, NullConfigContext)\n        assert result == normalized\n\n    @pytest.mark.parametrize(\"permission\", [\"778\", \"é\", -1, \"\", {}, [], ()])\n    def test_invalid_permissions(self, permission):\n        with pytest.raises(ConfigError):\n            config_parse.valid_permission_mode(permission, NullConfigContext)\n\n\nclass TestValidSecretVolumeItem:\n    @pytest.mark.parametrize(\n        \"config\",\n        [\n            {\"path\": \"abc\"},\n            {\n                \"key\": \"abc\",\n            },\n            {\n                \"key\": \"abc\",\n                \"path\": \"abc\",\n                \"extra_key\": None,\n            },\n            {\"key\": \"abc\", \"path\": \"abc\", \"mode\": \"a\"},\n        ],\n    )\n    def test_invalid(self, config):\n        with pytest.raises(ConfigError):\n            config_parse.valid_secret_volume_item(config, NullConfigContext)\n\n    @pytest.mark.parametrize(\n        \"config\",\n        [{\"key\": \"abc\", \"path\": \"abc\"}, {\"key\": \"abc\", \"path\": \"abc\", \"mode\": \"777\"}],\n    )\n    def test_valid_job_secret_volume_success(self, config):\n        config_parse.valid_secret_volume_item(config, NullConfigContext)\n\n    @pytest.mark.parametrize(\n        \"item_config, default_mode, expected\",\n        [\n            (\n                # Item inherits volume default_mode\n                {\"key\": \"s1\", \"path\": \"p1\"},\n                \"0755\",\n                \"0755\",\n            ),\n            (\n                # Item explicit mode overrides volume default_mode\n                {\"key\": \"s2\", \"path\": \"p2\", \"mode\": \"0600\"},\n                \"0755\",\n                \"0600\",\n            ),\n            (\n                # Item inherits class default for volume default_mode\n                {\"key\": \"s3\", \"path\": \"p3\"},\n                None,\n                \"0644\",\n            ),\n        ],\n    )\n    def test_item_mode_propagation_and_override(self, item_config, default_mode, expected):\n        input_config = {\n            \"secret_volume_name\": \"test_vol\",\n            
\"secret_name\": item_config[\"key\"],\n            \"container_path\": \"/secrets\",\n            \"items\": [item_config],\n        }\n        if default_mode is not None:\n            input_config[\"default_mode\"] = default_mode\n\n        context = config_utils.NullConfigContext\n        validated_volume_obj = config_parse.valid_secret_volume(input_config, context)\n\n        assert validated_volume_obj.items is not None\n        assert len(validated_volume_obj.items) == 1\n\n        actual_item_mode = validated_volume_obj.items[0].mode\n        assert actual_item_mode == expected\n\n    def test_volume_when_items_key_is_omitted(self):\n        input_config = {\n            \"secret_volume_name\": \"vol_no_items\",\n            \"secret_name\": \"s_no_items\",\n            \"container_path\": \"/secrets\",\n            \"default_mode\": \"0400\",\n        }\n        context = config_utils.NullConfigContext\n        validated_volume_obj = config_parse.valid_secret_volume(input_config, context)\n\n        assert validated_volume_obj.default_mode == \"0400\"\n        assert validated_volume_obj.items is None\n\n    def test_volume_when_items_is_empty(self):\n        input_config = {\n            \"secret_volume_name\": \"vol_empty_items\",\n            \"secret_name\": \"s_empty_items\",\n            \"container_path\": \"/secrets\",\n            \"default_mode\": \"0700\",\n            \"items\": [],\n        }\n        context = config_utils.NullConfigContext\n        validated_volume_obj = config_parse.valid_secret_volume(input_config, context)\n\n        assert validated_volume_obj.default_mode == \"0700\"\n        assert validated_volume_obj.items is not None\n        assert isinstance(validated_volume_obj.items, tuple)\n        assert len(validated_volume_obj.items) == 0\n\n\nclass TestValidSecretVolume:\n    @pytest.mark.parametrize(\n        \"config\",\n        [\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=\"/b/c\",\n                default_mode=\"0644\",\n                items=[\n                    dict(key=\"secret1\", path=\"abcd\", mode=\"7778\"),\n                ],\n            ),\n            dict(\n                secret_volume_name=\"abc\",\n                container_path=\"/b/c\",\n                default_mode=\"0644\",\n                items=[\n                    dict(key=\"secret1\", path=\"abcd\", mode=\"7777\"),\n                ],\n            ),\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=123,\n            ),\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=\"/b/c\",\n                items=[dict(key=\"secret1\", path=\"abcd\", mode=\"7777\"), dict(key=\"secret1\", path=\"abcde\", mode=\"7777\")],\n            ),\n        ],\n    )\n    def test_invalid(self, config):\n        with pytest.raises(ConfigError):\n            config_parse.valid_secret_volume(config, NullConfigContext)\n\n    def test_wrong_item_key(self):\n        config = dict(\n            secret_volume_name=\"abc\",\n            secret_name=\"secret1\",\n            container_path=\"/b/c\",\n            items=[\n                dict(key=\"secret2\", path=\"abc\"),\n            ],\n        )\n        with pytest.raises(ConfigError):\n            config_parse.valid_secret_volume(config, NullConfigContext)\n\n    
@pytest.mark.parametrize(\n        \"config\",\n        [\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=\"/b/c\",\n                default_mode=\"0644\",\n                items=[\n                    dict(key=\"secret1\", path=\"abc\"),\n                ],\n            ),\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=\"/b/c\",\n                items=[],\n            ),\n            dict(\n                secret_volume_name=\"abc\",\n                secret_name=\"secret1\",\n                container_path=\"/b/c\",\n            ),\n        ],\n    )\n    def test_valid(self, config):\n        config_parse.valid_secret_volume(config, NullConfigContext)\n\n\nclass TestValidMasterAddress:\n    @pytest.fixture\n    def context(self):\n        return config_utils.NullConfigContext\n\n    @pytest.mark.parametrize(\n        \"url\",\n        [\n            \"http://blah.com\",\n            \"http://blah.com/\",\n            \"blah.com\",\n            \"blah.com/\",\n        ],\n    )\n    def test_valid(self, url, context):\n        normalized = \"http://blah.com\"\n        result = config_parse.valid_master_address(url, context)\n        assert result == normalized\n\n    @pytest.mark.parametrize(\n        \"url\",\n        [\n            \"https://blah.com\",\n            \"http://blah.com/something\",\n            \"blah.com/other\",\n            \"http://\",\n            \"blah.com?a=1\",\n        ],\n    )\n    def test_invalid(self, url, context):\n        with pytest.raises(ConfigError):\n            config_parse.valid_master_address(url, context)\n\n\nclass TestValidKubeconfigPaths:\n    @setup\n    def setup_context(self):\n        self.context = config_utils.NullConfigContext\n\n    @pytest.mark.parametrize(\n        \"kubeconfig_path,watcher_kubeconfig_paths\",\n        [(\"/some/kubeconfig.conf\", []), (\"/another/kube/config\", [\"a_watcher_kubeconfig\"])],\n    )\n    def test_valid(self, kubeconfig_path, watcher_kubeconfig_paths):\n        k8s_options = {\n            \"enabled\": True,\n            \"kubeconfig_path\": kubeconfig_path,\n            \"watcher_kubeconfig_paths\": watcher_kubeconfig_paths,\n        }\n        assert config_parse.valid_kubernetes_options.validate(k8s_options, self.context)\n\n    @pytest.mark.parametrize(\n        \"kubeconfig_path,watcher_kubeconfig_paths\",\n        [\n            ([\"/a/kubeconfig/in/a/list\"], [\"/a/valid/kubeconfig\"]),\n            (None, []),\n            (\"/some/kubeconfig.conf\", \"/not/a/list/kubeconfig\"),\n        ],\n    )\n    def test_invalid(self, kubeconfig_path, watcher_kubeconfig_paths):\n        k8s_options = {\n            \"enabled\": True,\n            \"kubeconfig_path\": kubeconfig_path,\n            \"watcher_kubeconfig_paths\": watcher_kubeconfig_paths,\n        }\n        with pytest.raises(ConfigError):\n            config_parse.valid_kubernetes_options.validate(k8s_options, self.context)\n\n    def test_nonretry(self):\n        k8s_options = {\n            \"enabled\": True,\n            \"kubeconfig_path\": \"/some/valid/path\",\n            \"watcher_kubeconfig_paths\": [],\n            \"non_retryable_exit_codes\": 1,\n        }\n        with pytest.raises(ConfigError):\n            config_parse.valid_kubernetes_options.validate(k8s_options, self.context)\n\n        k8s_options[\"non_retryable_exit_codes\"] = [-12, 1]\n\n 
       assert config_parse.valid_kubernetes_options.validate(k8s_options, self.context)\n\n\nclass TestValidateStatePersistenceDefaults(TestCase):\n    def test_post_validation_sees_defaults_for_omitted_keys(self):\n        input_config = {\n            \"store_type\": \"dynamodb\",\n            \"name\": \"test_state\",\n            \"table_name\": \"test_table\",\n            \"dynamodb_region\": \"us-west-2\",\n            \"buffer_size\": 5,\n            # max_transact_write_items\n        }\n\n        original_post_validation = config_parse.ValidateStatePersistence.post_validation\n        post_validation_args = {}\n\n        def mock_post_validation_side_effect(self_validator, output_dict, config_context):\n            post_validation_args[\"max_transact_write_items\"] = output_dict.get(\"max_transact_write_items\")\n            post_validation_args[\"buffer_size\"] = output_dict.get(\"buffer_size\")\n\n            return original_post_validation(self_validator, output_dict, config_context)\n\n        with mock.patch.object(\n            config_parse.ValidateStatePersistence,\n            \"post_validation\",\n            side_effect=mock_post_validation_side_effect,\n            autospec=True,\n        ) as mock_method:\n            validator = config_parse.ValidateStatePersistence()\n            context = config_utils.NullConfigContext\n            validated_config = validator(input_config, context)\n            mock_method.assert_called_once()\n\n        assert post_validation_args.get(\"max_transact_write_items\") == 8\n        assert post_validation_args.get(\"buffer_size\") == 5\n\n        assert validated_config.store_type == \"dynamodb\"\n        assert validated_config.name == \"test_state\"\n        assert validated_config.table_name == \"test_table\"\n        assert validated_config.dynamodb_region == \"us-west-2\"\n        assert validated_config.buffer_size == 5\n        assert validated_config.max_transact_write_items == 8\n\n    def test_post_validation_sees_provided_values(self):\n        input_config = {\n            \"store_type\": \"dynamodb\",\n            \"name\": \"test_state\",\n            \"table_name\": \"test_table\",\n            \"dynamodb_region\": \"us-west-2\",\n            \"buffer_size\": 5,\n            \"max_transact_write_items\": 25,\n        }\n\n        original_post_validation = config_parse.ValidateStatePersistence.post_validation\n        post_validation_args = {}\n\n        def mock_post_validation_side_effect(self_validator, output_dict, config_context):\n            post_validation_args[\"max_transact_write_items\"] = output_dict.get(\"max_transact_write_items\")\n            return original_post_validation(self_validator, output_dict, config_context)\n\n        with mock.patch.object(\n            config_parse.ValidateStatePersistence,\n            \"post_validation\",\n            side_effect=mock_post_validation_side_effect,\n            autospec=True,\n        ) as mock_method:\n            validator = config_parse.ValidateStatePersistence()\n            context = config_utils.NullConfigContext\n            validated_config = validator(input_config, context)\n            mock_method.assert_called_once()\n\n        assert post_validation_args.get(\"max_transact_write_items\") == 25\n\n        assert validated_config.max_transact_write_items == 25\n        assert validated_config.store_type == \"dynamodb\"\n        assert validated_config.name == \"test_state\"\n        assert validated_config.table_name == \"test_table\"\n        
assert validated_config.dynamodb_region == \"us-west-2\"\n        assert validated_config.buffer_size == 5\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/config/config_utils_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_raises\nfrom tron.config import config_utils\nfrom tron.config import ConfigError\nfrom tron.config import schema\nfrom tron.config.config_utils import build_list_of_type_validator\nfrom tron.config.config_utils import ConfigContext\nfrom tron.config.config_utils import valid_identifier\n\n\nclass TestUniqueNameDict(TestCase):\n    @setup\n    def setup_dict(self):\n        self.msg = \"The key %s was there.\"\n        self.dict = config_utils.UniqueNameDict(self.msg)\n\n    def test_set_item_no_conflict(self):\n        self.dict[\"a\"] = \"something\"\n        assert_in(\"a\", self.dict)\n\n    def test_set_item_conflict(self):\n        self.dict[\"a\"] = \"something\"\n        assert_raises(ConfigError, self.dict.__setitem__, \"a\", \"next_thing\")\n\n\nclass TestValidatorIdentifier(TestCase):\n    def test_valid_identifier_too_long(self):\n        assert_raises(ConfigError, valid_identifier, \"a\" * 256, mock.Mock())\n\n    def test_valid_identifier(self):\n        name = \"avalidname\"\n        assert_equal(name, valid_identifier(name, mock.Mock()))\n\n    def test_valid_identifier_invalid_character(self):\n        for name in [\"invalid space\", \"*name\", \"1numberstarted\", 123, \"\"]:\n            assert_raises(ConfigError, valid_identifier, name, mock.Mock())\n\n\nclass TestBuildListOfTypeValidator(TestCase):\n    @setup\n    def setup_validator(self):\n        self.item_validator = mock.Mock()\n        self.validator = build_list_of_type_validator(self.item_validator)\n\n    def test_validator_passes(self):\n        items, context = [\"one\", \"two\"], mock.create_autospec(ConfigContext)\n        self.validator(items, context)\n        expected = [mock.call(item, context) for item in items]\n        assert_equal(self.item_validator.mock_calls, expected)\n\n    def test_validator_fails(self):\n        self.item_validator.side_effect = ConfigError\n        items, context = [\"one\", \"two\"], mock.create_autospec(ConfigContext)\n        assert_raises(ConfigError, self.validator, items, context)\n\n\nclass TestBuildEnumValidator(TestCase):\n    @setup\n    def setup_enum_validator(self):\n        self.enum = dict(a=1, b=2)\n        self.validator = config_utils.build_enum_validator(self.enum)\n        self.context = config_utils.NullConfigContext\n\n    def test_validate(self):\n        assert_equal(self.validator(\"a\", self.context), \"a\")\n        assert_equal(self.validator(\"b\", self.context), \"b\")\n\n    def test_invalid(self):\n        exception = assert_raises(\n            ConfigError,\n            self.validator,\n            \"c\",\n            self.context,\n        )\n        assert_in(\n            \"Value at  is not in %s: \" % str(set(self.enum)),\n            str(exception),\n        )\n\n\nclass TestValidTime(TestCase):\n    @setup\n    def setup_config(self):\n        self.context = config_utils.NullConfigContext\n\n    def test_valid_time(self):\n        time_spec = config_utils.valid_time(\"14:32\", self.context)\n        assert_equal(time_spec.hour, 14)\n        assert_equal(time_spec.minute, 32)\n        assert_equal(time_spec.second, 0)\n\n    def test_valid_time_with_seconds(self):\n        time_spec = config_utils.valid_time(\"14:32:12\", self.context)\n        
assert_equal(time_spec.hour, 14)\n        assert_equal(time_spec.minute, 32)\n        assert_equal(time_spec.second, 12)\n\n    def test_valid_time_invalid(self):\n        assert_raises(\n            ConfigError,\n            config_utils.valid_time,\n            \"14:32:12:34\",\n            self.context,\n        )\n        assert_raises(ConfigError, config_utils.valid_time, None, self.context)\n\n\nclass TestValidTimeDelta(TestCase):\n    @setup\n    def setup_config(self):\n        self.context = config_utils.NullConfigContext\n\n    def test_valid_time_delta_invalid(self):\n        exception = assert_raises(\n            ConfigError,\n            config_utils.valid_time_delta,\n            \"no time\",\n            self.context,\n        )\n        assert_in(\"not a valid time delta: no time\", str(exception))\n\n    def test_valid_time_delta_valid_seconds(self):\n        for jitter in [\" 82s \", \"82 s\", \"82 sec\", \"82seconds  \"]:\n            delta = datetime.timedelta(seconds=82)\n            assert_equal(\n                delta,\n                config_utils.valid_time_delta(\n                    jitter,\n                    self.context,\n                ),\n            )\n\n    def test_valid_time_delta_valid_minutes(self):\n        for jitter in [\"10m\", \"10 m\", \"10   min\", \"  10minutes\"]:\n            delta = datetime.timedelta(seconds=600)\n            assert_equal(\n                delta,\n                config_utils.valid_time_delta(\n                    jitter,\n                    self.context,\n                ),\n            )\n\n    def test_valid_time_delta_invalid_unit(self):\n        for jitter in [\"1 year\", \"3 mo\", \"3 months\"]:\n            assert_raises(\n                ConfigError,\n                config_utils.valid_time_delta,\n                jitter,\n                self.context,\n            )\n\n\nclass TestConfigContext(TestCase):\n    def test_build_config_context(self):\n        path, nodes, namespace = \"path\", {1, 2, 3}, \"namespace\"\n        command_context = mock.MagicMock()\n        parent_context = config_utils.ConfigContext(\n            path,\n            nodes,\n            command_context,\n            namespace,\n        )\n\n        child = parent_context.build_child_context(\"child\")\n        assert_equal(child.path, \"%s.child\" % path)\n        assert_equal(child.nodes, nodes)\n        assert_equal(child.namespace, namespace)\n        assert_equal(child.command_context, command_context)\n        assert not child.partial\n\n\nStubConfigObject = schema.config_object_factory(\n    \"StubConfigObject\",\n    [\"req1\", \"req2\"],\n    [\"opt1\", \"opt2\"],\n)\n\n\nclass StubValidator(config_utils.Validator):\n    config_class = StubConfigObject\n\n\nclass TestValidator(TestCase):\n    @setup\n    def setup_validator(self):\n        self.validator = StubValidator()\n\n    def test_validate_with_none(self):\n        expected_msg = \"A StubObject is required\"\n        exception = assert_raises(\n            ConfigError,\n            self.validator.validate,\n            None,\n            config_utils.NullConfigContext,\n        )\n        assert_in(expected_msg, str(exception))\n\n    def test_validate_optional_with_none(self):\n        self.validator.optional = True\n        config = self.validator.validate(None, config_utils.NullConfigContext)\n        assert_equal(config, None)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/config/manager_test.py",
    "content": "import os\nimport shutil\nimport tempfile\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_raises\nfrom tests.testingutils import autospec_method\nfrom tron import yaml\nfrom tron.config import ConfigError\nfrom tron.config import manager\nfrom tron.config import schema\n\n\nclass TestFromString(TestCase):\n    def test_from_string_valid(self):\n        content = \"{'one': 'thing', 'another': 'thing'}\\n\"\n        actual = manager.from_string(content)\n        expected = {\"one\": \"thing\", \"another\": \"thing\"}\n        assert_equal(actual, expected)\n\n    def test_from_string_invalid(self):\n        content = \"{} asdf\"\n        assert_raises(ConfigError, manager.from_string, content)\n\n\nclass TestReadWrite(TestCase):\n    @setup\n    def setup_tempfile(self):\n        self.filename = tempfile.NamedTemporaryFile().name\n\n    @teardown\n    def teardown_tempfile(self):\n        os.unlink(self.filename)\n\n    def test_read_write(self):\n        content = {\"one\": \"stars\", \"two\": \"beers\"}\n        manager.write(self.filename, content)\n        actual = manager.read(self.filename)\n        assert_equal(content, actual)\n\n    def test_read_raw_write_raw(self):\n        content = \"Some string\"\n        manager.write_raw(self.filename, content)\n        actual = manager.read_raw(self.filename)\n        assert_equal(content, actual)\n\n\nclass TestManifestFile(TestCase):\n    @setup\n    def setup_manifest(self):\n        self.temp_dir = tempfile.mkdtemp()\n        self.manifest = manager.ManifestFile(self.temp_dir)\n        self.manifest.create()\n\n    @teardown\n    def teardown_dir(self):\n        shutil.rmtree(self.temp_dir)\n\n    @mock.patch(\"tron.config.manager.os.path\", autospec=True)\n    @mock.patch(\"tron.config.manager.write\", autospec=True)\n    def test_create_exists(self, mock_write, mock_os):\n        mock_os.isfile.return_value = True\n        self.manifest.create()\n        assert not mock_write.call_count\n\n    def test_create(self):\n        assert_equal(manager.read(self.manifest.filename), {})\n\n    def test_add(self):\n        self.manifest.add(\"zing\", \"zing.yaml\")\n        expected = {\"zing\": \"zing.yaml\"}\n        assert_equal(manager.read(self.manifest.filename), expected)\n\n    def test_delete(self):\n        current = {\n            \"one\": \"a.yaml\",\n            \"two\": \"b.yaml\",\n        }\n        manager.write(self.manifest.filename, current)\n        self.manifest.delete(\"one\")\n        expected = {\"two\": \"b.yaml\"}\n        assert_equal(manager.read(self.manifest.filename), expected)\n\n    def test_get_file_mapping(self):\n        file_mapping = {\n            \"one\": \"a.yaml\",\n            \"two\": \"b.yaml\",\n        }\n        manager.write(self.manifest.filename, file_mapping)\n        assert_equal(self.manifest.get_file_mapping(), file_mapping)\n\n\nclass TestConfigManager(TestCase):\n\n    content = {\"one\": \"stars\", \"two\": \"other\"}\n    raw_content = \"{'one': 'stars', 'two': 'other'}\\n\"\n\n    @setup\n    def setup_config_manager(self):\n        self.temp_dir = tempfile.mkdtemp()\n        self.manager = manager.ConfigManager(self.temp_dir)\n        self.manifest = mock.create_autospec(manager.ManifestFile)\n        self.manager.manifest = self.manifest\n\n    @teardown\n    def 
teardown_dir(self):\n        shutil.rmtree(self.temp_dir)\n\n    def test_build_file_path(self):\n        path = self.manager.build_file_path(\"what\")\n        assert_equal(path, os.path.join(self.temp_dir, \"what.yaml\"))\n\n    def test_build_file_path_with_invalid_chars(self):\n        path = self.manager.build_file_path(\"/etc/passwd\")\n        assert_equal(path, os.path.join(self.temp_dir, \"_etc_passwd.yaml\"))\n        path = self.manager.build_file_path(\"../../etc/passwd\")\n        assert_equal(\n            path,\n            os.path.join(\n                self.temp_dir,\n                \"______etc_passwd.yaml\",\n            ),\n        )\n\n    def test_read_raw_config(self):\n        name = \"name\"\n        path = os.path.join(self.temp_dir, name)\n        manager.write(path, self.content)\n        self.manifest.get_file_name.return_value = path\n        config = self.manager.read_raw_config(name)\n        assert_equal(config, yaml.dump(self.content))\n\n    def test_write_config(self):\n        name = \"filename\"\n        path = self.manager.build_file_path(name)\n        self.manifest.get_file_name.return_value = path\n        autospec_method(self.manager.validate_with_fragment)\n        self.manager.write_config(name, self.raw_content)\n        assert_equal(manager.read(path), self.content)\n        self.manifest.get_file_name.assert_called_with(name)\n        assert not self.manifest.add.call_count\n        self.manager.validate_with_fragment.assert_called_with(\n            name,\n            self.content,\n            should_validate_missing_dependency=False,\n        )\n\n    def test_write_config_new_name(self):\n        name = \"filename2\"\n        path = self.manager.build_file_path(name)\n        self.manifest.get_file_name.return_value = None\n        autospec_method(self.manager.validate_with_fragment)\n        self.manager.write_config(name, self.raw_content)\n        assert_equal(manager.read(path), self.content)\n        self.manifest.get_file_name.assert_called_with(name)\n        self.manifest.add.assert_called_with(name, path)\n\n    @mock.patch(\"os.remove\", autospec=True)\n    def test_delete_config(self, mock_remove):\n        name = \"namespace\"\n        path = \"namespace.yaml\"\n        self.manifest.get_file_name.return_value = path\n        self.manager.delete_config(name)\n        self.manifest.delete.assert_called_with(name)\n        mock_remove.assert_called_with(path)\n\n    @mock.patch(\"os.remove\", autospec=True)\n    def test_delete_missing_namespace(self, mock_remove):\n        name = \"namespace\"\n        self.manifest.get_file_name.return_value = None\n        self.manager.delete_config(name)\n        assert_equal(mock_remove.call_count, 0)\n\n    @mock.patch(\n        \"tron.config.manager.JobGraph\",\n        autospec=True,\n    )\n    @mock.patch(\n        \"tron.config.manager.config_parse.ConfigContainer\",\n        autospec=True,\n    )\n    def test_validate_with_fragment(self, mock_config_container, mock_job_graph):\n        name = \"the_name\"\n        name_mapping = {\"something\": \"content\", name: \"old_content\"}\n        autospec_method(self.manager.get_config_name_mapping)\n        self.manager.get_config_name_mapping.return_value = name_mapping\n        self.manager.validate_with_fragment(name, self.content)\n        expected_mapping = dict(name_mapping)\n        expected_mapping[name] = self.content\n        mock_config_container.create.assert_called_with(expected_mapping)\n        
mock_job_graph.assert_called_once_with(\n            mock_config_container.create.return_value,\n            should_validate_missing_dependency=True,\n        )\n\n    @mock.patch(\"tron.config.manager.read\", autospec=True)\n    @mock.patch(\n        \"tron.config.manager.config_parse.ConfigContainer\",\n        autospec=True,\n    )\n    def test_load(self, mock_config_container, mock_read):\n        content_items = self.content.items()\n        self.manifest.get_file_mapping.return_value = dict(content_items)\n        container = self.manager.load()\n        self.manifest.get_file_mapping.assert_called_with()\n        assert_equal(container, mock_config_container.create.return_value)\n\n        expected = {name: mock_read.return_value for name, _ in content_items}\n        mock_config_container.create.assert_called_with(expected)\n\n    def test_get_hash_default(self):\n        self.manifest.__contains__.return_value = False\n        hash_digest = self.manager.get_hash(\"name\")\n        assert_equal(hash_digest, self.manager.DEFAULT_HASH)\n\n    def test_get_hash(self):\n        content = \"OkOkOk\"\n        autospec_method(self.manager.read_raw_config, return_value=content)\n        self.manifest.__contains__.return_value = True\n        hash_digest = self.manager.get_hash(\"name\")\n        assert_equal(hash_digest, manager.hash_digest(content))\n\n\nclass TestCreateNewConfig(TestCase):\n    @mock.patch(\"tron.config.manager.os.makedirs\", autospec=True)\n    @mock.patch(\"tron.config.manager.ManifestFile\", autospec=True)\n    @mock.patch(\"tron.config.manager.write_raw\", autospec=True)\n    def test_create_new_config(self, mock_write, mock_manifest, mock_makedirs):\n        path, master_content = \"/bogus/path/\", mock.Mock()\n        filename = \"/bogus/path/MASTER.yaml\"\n        manifest = mock_manifest.return_value\n        manifest.get_file_name.return_value = None\n\n        manager.create_new_config(path, master_content)\n        mock_makedirs.assert_called_with(path)\n        mock_write.assert_called_with(filename, master_content)\n        manifest.create.assert_called_with()\n        manifest.add.assert_called_with(schema.MASTER_NAMESPACE, filename)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/config/schedule_parse_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import TestCase\nfrom tron.config import config_utils\nfrom tron.config import ConfigError\nfrom tron.config import schedule_parse\n\n\nclass TestPadSequence(TestCase):\n    def test_pad_sequence_short(self):\n        expected = [0, 1, 2, 3, None, None]\n        assert_equal(schedule_parse.pad_sequence(range(4), 6), expected)\n\n    def test_pad_sequence_long(self):\n        expected = [0, 1, 2, 3]\n        assert_equal(schedule_parse.pad_sequence(range(6), 4), expected)\n\n    def test_pad_sequence_exact(self):\n        expected = [0, 1, 2, 3]\n        assert_equal(schedule_parse.pad_sequence(range(4), 4), expected)\n\n    def test_pad_sequence_empty(self):\n        expected = [\"a\", \"a\"]\n        assert_equal(schedule_parse.pad_sequence([], 2, \"a\"), expected)\n\n    def test_pad_negative_size(self):\n        assert_equal(schedule_parse.pad_sequence([], -2, \"a\"), [])\n\n\nclass TestScheduleConfigFromString(TestCase):\n    @mock.patch(\n        \"tron.config.schedule_parse.parse_groc_expression\",\n        autospec=True,\n    )\n    def test_groc_config(self, mock_parse_groc):\n        schedule = \"every Mon,Wed at 12:00\"\n        context = config_utils.NullConfigContext\n        config = schedule_parse.schedule_config_from_string(schedule, context)\n        assert_equal(config, mock_parse_groc.return_value)\n        generic_config = schedule_parse.ConfigGenericSchedule(\n            \"groc daily\",\n            schedule,\n            None,\n        )\n        mock_parse_groc.assert_called_with(generic_config, context)\n\n\nclass TestValidScheduler(TestCase):\n    @mock.patch(\"tron.config.schedule_parse.schedulers\", autospec=True)\n    def assert_validation(self, schedule, expected, mock_schedulers):\n        context = config_utils.NullConfigContext\n        config = schedule_parse.valid_schedule(schedule, context)\n        mock_schedulers.__getitem__.assert_called_with(\"cron\")\n        func = mock_schedulers.__getitem__.return_value\n        assert_equal(config, func.return_value)\n        func.assert_called_with(expected, context)\n\n    def test_cron_from_dict(self):\n        schedule = {\"type\": \"cron\", \"value\": \"* * * * *\"}\n        config = schedule_parse.ConfigGenericSchedule(\n            \"cron\",\n            schedule[\"value\"],\n            datetime.timedelta(),\n        )\n        self.assert_validation(schedule, config)\n\n    def test_cron_from_dict_with_jitter(self):\n        schedule = {\"type\": \"cron\", \"value\": \"* * * * *\", \"jitter\": \"5 min\"}\n        config = schedule_parse.ConfigGenericSchedule(\n            \"cron\",\n            schedule[\"value\"],\n            datetime.timedelta(minutes=5),\n        )\n        self.assert_validation(schedule, config)\n\n\nclass TestValidCronScheduler(TestCase):\n    _suites = [\"integration\"]\n\n    def validate(self, line):\n        config = schedule_parse.ConfigGenericSchedule(\"cron\", line, None)\n        context = config_utils.NullConfigContext\n        return schedule_parse.valid_cron_scheduler(config, context)\n\n    def test_valid_config(self):\n        config = self.validate(\"5 0 L * *\")\n        assert_equal(config.minutes, [5])\n        assert_equal(config.months, None)\n        assert_equal(config.monthdays, [\"LAST\"])\n\n    def test_invalid_config(self):\n        assert_raises(ConfigError, 
self.validate, \"* * *\")\n\n\nclass TestValidDailyScheduler(TestCase):\n    def validate(self, config):\n        context = config_utils.NullConfigContext\n        config = schedule_parse.ConfigGenericSchedule(\"daily\", config, None)\n        return schedule_parse.valid_daily_scheduler(config, context)\n\n    def assert_parse(self, config, expected):\n        config = self.validate(config)\n        expected = schedule_parse.ConfigDailyScheduler(*expected, jitter=None)\n        assert_equal(config, expected)\n\n    def test_valid_daily_scheduler_start_time(self):\n        expected = (\"14:32 \", 14, 32, 0, set())\n        self.assert_parse(\"14:32\", expected)\n\n    def test_valid_daily_scheduler_just_days(self):\n        expected = (\"00:00:00 MWS\", 0, 0, 0, {1, 3, 6})\n        self.assert_parse(\"00:00:00 MWS\", expected)\n\n    def test_valid_daily_scheduler_time_and_day(self):\n        expected = (\"17:02:44 SU\", 17, 2, 44, {0, 6})\n        self.assert_parse(\"17:02:44 SU\", expected)\n\n    def test_valid_daily_scheduler_invalid_start_time(self):\n        assert_raises(ConfigError, self.validate, \"5 MWF\")\n        assert_raises(ConfigError, self.validate, \"05:30:45:45 MWF\")\n        assert_raises(ConfigError, self.validate, \"25:30:45 MWF\")\n\n    def test_valid_daily_scheduler_invalid_days(self):\n        assert_raises(ConfigError, self.validate, \"SUG\")\n        assert_raises(ConfigError, self.validate, \"3\")\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/core/__init__.py",
    "content": ""
  },
  {
    "path": "tests/core/action_test.py",
    "content": "import pytest\n\nfrom tron.config.schema import ConfigAction\nfrom tron.config.schema import ConfigConstraint\nfrom tron.config.schema import ConfigFieldSelectorSource\nfrom tron.config.schema import ConfigNodeAffinity\nfrom tron.config.schema import ConfigParameter\nfrom tron.config.schema import ConfigProjectedSAVolume\nfrom tron.config.schema import ConfigSecretSource\nfrom tron.config.schema import ConfigSecretVolume\nfrom tron.config.schema import ConfigSecretVolumeItem\nfrom tron.config.schema import ConfigTopologySpreadConstraints\nfrom tron.config.schema import ConfigVolume\nfrom tron.core.action import Action\nfrom tron.core.action import ActionCommandConfig\n\n\nclass TestAction:\n    @pytest.mark.parametrize(\"disk\", [600.0, None])\n    def test_from_config_full(self, disk):\n        config = ConfigAction(\n            name=\"ted\",\n            command=\"do something\",\n            node=\"first\",\n            executor=\"ssh\",\n            cpus=1,\n            mem=100,\n            disk=disk,  # default: 1024.0\n            constraints=[\n                ConfigConstraint(\n                    attribute=\"pool\",\n                    operator=\"LIKE\",\n                    value=\"default\",\n                ),\n            ],\n            docker_image=\"fake-docker.com:400/image\",\n            docker_parameters=[\n                ConfigParameter(\n                    key=\"test\",\n                    value=123,\n                ),\n            ],\n            env={\"TESTING\": \"true\"},\n            secret_env={\"TEST_SECRET\": ConfigSecretSource(secret_name=\"tron-secret-svc-sec--A\", key=\"sec_A\")},\n            secret_volumes=[\n                ConfigSecretVolume(\n                    secret_volume_name=\"secretvolumename\",\n                    secret_name=\"secret\",\n                    container_path=\"/b\",\n                    default_mode=\"0644\",\n                    items=[ConfigSecretVolumeItem(key=\"key\", path=\"path\", mode=\"0755\")],\n                ),\n            ],\n            extra_volumes=[\n                ConfigVolume(\n                    host_path=\"/tmp\",\n                    container_path=\"/nail/tmp\",\n                    mode=\"RO\",\n                ),\n            ],\n            trigger_downstreams=True,\n            triggered_by=[\"foo.bar\"],\n        )\n        new_action = Action.from_config(config)\n        assert new_action.name == config.name\n        assert new_action.node_pool is None\n        assert new_action.executor == config.executor\n        assert new_action.trigger_downstreams is True\n        assert new_action.triggered_by == [\"foo.bar\"]\n\n        command_config = new_action.command_config\n        assert command_config.command == config.command\n        assert command_config.cpus == config.cpus\n        assert command_config.mem == config.mem\n        assert command_config.disk == (600.0 if disk else 1024.0)\n        assert command_config.constraints == {(\"pool\", \"LIKE\", \"default\")}\n        assert command_config.docker_image == config.docker_image\n        assert command_config.docker_parameters == {(\"test\", 123)}\n        assert command_config.env == config.env\n        assert command_config.secret_env == config.secret_env\n        # cant do direct tuple equality, since this is not hashable\n        assert command_config.secret_volumes == config.secret_volumes\n        assert command_config.extra_volumes == {(\"/nail/tmp\", \"/tmp\", \"RO\")}\n\n    def 
test_from_config_none_values(self):\n        config = ConfigAction(\n            name=\"ted\",\n            command=\"do something\",\n            node=\"first\",\n            executor=\"ssh\",\n        )\n        new_action = Action.from_config(config)\n        assert new_action.name == config.name\n        assert new_action.executor == config.executor\n        command_config = new_action.command_config\n        assert command_config.command == config.command\n        assert command_config.constraints == set()\n        assert command_config.docker_image is None\n        assert command_config.docker_parameters == set()\n        assert command_config.env == {}\n        assert command_config.secret_env == {}\n        assert command_config.secret_volumes == []\n        assert command_config.extra_volumes == set()\n\n    @pytest.fixture\n    def action_command_config_json(self):\n        raw_json = \"\"\"\n        {\n            \"command\": \"echo 'Hello, World!'\",\n            \"cpus\": 1.0,\n            \"mem\": 512.0,\n            \"disk\": 1024.0,\n            \"cap_add\": [\"NET_ADMIN\"],\n            \"cap_drop\": [\"MKNOD\"],\n            \"constraints\": [\n                {\n                    \"attribute\": \"pool\",\n                    \"operator\": \"LIKE\",\n                    \"value\": \"default\"\n                }\n            ],\n            \"docker_image\": \"fake-docker.com:400/image\",\n            \"docker_parameters\": [\n                {\n                    \"key\": \"test\",\n                    \"value\": 123\n                }\n            ],\n            \"env\": {\"TESTING\": \"true\"},\n            \"secret_env\": {\n                \"TEST_SECRET\": {\n                    \"secret_name\": \"tron-secret-svc-sec--A\",\n                    \"key\": \"sec_A\"\n                }\n            },\n            \"secret_volumes\": [\n                {\n                    \"secret_volume_name\": \"secretvolumename\",\n                    \"secret_name\": \"secret\",\n                    \"container_path\": \"/b\",\n                    \"default_mode\": \"0644\",\n                    \"items\": [\n                        {\n                            \"key\": \"key\",\n                            \"path\": \"path\",\n                            \"mode\": \"0755\"\n                        }\n                    ]\n                }\n            ],\n            \"projected_sa_volumes\": [\n                {\n                    \"container_path\": \"/var/run/secrets/whatever\",\n                    \"audience\": \"for.bar.com\",\n                    \"expiration_seconds\": 3600\n                }\n            ],\n            \"extra_volumes\": [\n                {\n                    \"container_path\": \"/tmp\",\n                    \"host_path\": \"/home/tmp\",\n                    \"mode\": \"RO\"\n                }\n            ],\n            \"node_affinities\": [\n                {\n                    \"key\": \"topology.kubernetes.io/zone\",\n                    \"operator\": \"In\",\n                    \"value\": [\"us-west-1a\", \"us-west-1c\"]\n                }\n            ],\n            \"topology_spread_constraints\": [\n                {\n                    \"topology_key\": \"zone\",\n                    \"max_skew\": 1,\n                    \"when_unsatisfiable\": \"DoNotSchedule\",\n                    \"label_selector\": {\n                        \"match_labels\": {\n                            \"app\": \"myapp\"\n                        }\n 
                   }\n                }\n            ],\n            \"labels\": {\"app\": \"myapp\"},\n            \"idempotent\": true,\n            \"annotations\": {\"annotation_key\": \"annotation_value\"},\n            \"service_account_name\": \"default\",\n            \"ports\": [8080, 9090],\n            \"field_selector_env\": {\n                \"key\": {\n                    \"field_path\": \"value\"\n                }\n            },\n            \"node_selectors\": {\"key\": \"node-A\"}\n        }\n        \"\"\"\n        return raw_json\n\n    def test_action_command_config_from_json(self, action_command_config_json):\n        result = ActionCommandConfig.from_json(action_command_config_json)\n\n        expected = {\n            \"command\": \"echo 'Hello, World!'\",\n            \"cpus\": 1.0,\n            \"mem\": 512.0,\n            \"disk\": 1024.0,\n            \"cap_add\": [\"NET_ADMIN\"],\n            \"cap_drop\": [\"MKNOD\"],\n            \"constraints\": [ConfigConstraint(attribute=\"pool\", operator=\"LIKE\", value=\"default\")],\n            \"docker_image\": \"fake-docker.com:400/image\",\n            \"docker_parameters\": [ConfigParameter(key=\"test\", value=123)],\n            \"env\": {\"TESTING\": \"true\"},\n            \"secret_env\": {\"TEST_SECRET\": ConfigSecretSource(secret_name=\"tron-secret-svc-sec--A\", key=\"sec_A\")},\n            \"secret_volumes\": [\n                ConfigSecretVolume(\n                    secret_volume_name=\"secretvolumename\",\n                    secret_name=\"secret\",\n                    container_path=\"/b\",\n                    default_mode=\"0644\",\n                    items=[{\"key\": \"key\", \"path\": \"path\", \"mode\": \"0755\"}],\n                )\n            ],\n            \"projected_sa_volumes\": [\n                ConfigProjectedSAVolume(\n                    container_path=\"/var/run/secrets/whatever\",\n                    audience=\"for.bar.com\",\n                    expiration_seconds=3600,\n                )\n            ],\n            \"extra_volumes\": [ConfigVolume(container_path=\"/tmp\", host_path=\"/home/tmp\", mode=\"RO\")],\n            \"node_affinities\": [\n                ConfigNodeAffinity(key=\"topology.kubernetes.io/zone\", operator=\"In\", value=[\"us-west-1a\", \"us-west-1c\"])\n            ],\n            \"topology_spread_constraints\": [\n                ConfigTopologySpreadConstraints(\n                    topology_key=\"zone\",\n                    max_skew=1,\n                    when_unsatisfiable=\"DoNotSchedule\",\n                    label_selector={\"match_labels\": {\"app\": \"myapp\"}},\n                )\n            ],\n            \"labels\": {\"app\": \"myapp\"},\n            \"annotations\": {\"annotation_key\": \"annotation_value\"},\n            \"service_account_name\": \"default\",\n            \"ports\": [8080, 9090],\n            \"idempotent\": True,\n            \"node_selectors\": {\"key\": \"node-A\"},\n            \"field_selector_env\": {\"key\": ConfigFieldSelectorSource(field_path=\"value\")},\n        }\n\n        assert result == expected\n"
  },
  {
    "path": "tests/core/actiongraph_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.core import actiongraph\n\n\nclass TestActionGraph(TestCase):\n    @setup\n    def setup_graph(self):\n        self.action_names = [\n            \"base_one\",\n            \"base_two\",\n            \"dep_one\",\n            \"dep_one_one\",\n            \"dep_multi\",\n        ]\n        self.action_map = {}\n        for name in self.action_names:\n            self.action_map[name] = mock.MagicMock()\n            self.action_map[name].name = name\n\n        self.required_actions = {\n            \"base_one\": set(),\n            \"base_two\": set(),\n            \"dep_multi\": {\"dep_one_one\", \"base_two\"},\n            \"dep_one_one\": {\"dep_one\"},\n            \"dep_one\": {\"base_one\"},\n        }\n        self.required_triggers = {\n            \"base_one\": {\"MASTER.otherjob.first\"},\n            \"base_two\": set(),\n            \"dep_multi\": set(),\n            \"dep_one_one\": set(),\n            \"dep_one\": set(),\n        }\n\n        self.action_graph = actiongraph.ActionGraph(self.action_map, self.required_actions, self.required_triggers)\n\n    def test_get_dependencies(self):\n        assert self.action_graph.get_dependencies(\"not_in_job\") == []\n        assert self.action_graph.get_dependencies(\"base_one\") == []\n        assert self.action_graph.get_dependencies(\"base_one\", include_triggers=True)[0].name == \"MASTER.otherjob.first\"\n        assert sorted(d.name for d in self.action_graph.get_dependencies(\"dep_multi\")) == sorted(\n            [\n                \"dep_one_one\",\n                \"base_two\",\n            ]\n        )\n\n    def test_names(self):\n        assert sorted(self.action_graph.names()) == sorted(self.action_names)\n        assert sorted(self.action_graph.names(include_triggers=True)) == sorted(\n            self.action_names + [\"MASTER.otherjob.first\"],\n        )\n\n    def test__getitem__(self):\n        assert_equal(\n            self.action_graph[\"base_one\"],\n            self.action_map[\"base_one\"],\n        )\n\n    def test__getitem__miss(self):\n        assert_raises(KeyError, lambda: self.action_graph[\"unknown\"])\n\n    def test__eq__(self):\n        other_graph = mock.MagicMock(\n            action_map=self.action_map,\n            required_actions=self.required_actions,\n            required_triggers=self.required_triggers,\n        )\n        assert_equal(self.action_graph, other_graph)\n\n        other_graph.required_actions = None\n        assert not self.action_graph == other_graph\n\n    def test__ne__(self):\n        other_graph = mock.MagicMock()\n        assert self.action_graph != other_graph\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/core/actionrun_test.py",
    "content": "import datetime\nimport shutil\nimport tempfile\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport pytest\n\nfrom tests.assertions import assert_length\nfrom tests.testingutils import autospec_method\nfrom tron import actioncommand\nfrom tron import node\nfrom tron.actioncommand import SubprocessActionRunnerFactory\nfrom tron.config.schema import ConfigConstraint\nfrom tron.config.schema import ConfigParameter\nfrom tron.config.schema import ConfigVolume\nfrom tron.config.schema import ExecutorTypes\nfrom tron.core import actiongraph\nfrom tron.core import jobrun\nfrom tron.core.action import ActionCommandConfig\nfrom tron.core.actionrun import ActionCommand\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.actionrun import ActionRunAttempt\nfrom tron.core.actionrun import ActionRunCollection\nfrom tron.core.actionrun import ActionRunFactory\nfrom tron.core.actionrun import eager_all\nfrom tron.core.actionrun import INITIAL_RECOVER_DELAY\nfrom tron.core.actionrun import KubernetesActionRun\nfrom tron.core.actionrun import MAX_RECOVER_TRIES\nfrom tron.core.actionrun import MesosActionRun\nfrom tron.core.actionrun import min_filter\nfrom tron.core.actionrun import SSHActionRun\nfrom tron.serialize import filehandler\n\n\n@pytest.fixture\ndef output_path():\n    output_path = filehandler.OutputPath(tempfile.mkdtemp())\n    yield output_path\n    shutil.rmtree(output_path.base, ignore_errors=True)\n\n\n@pytest.fixture\ndef mock_current_time():\n    with mock.patch(\n        \"tron.core.actionrun.timeutils.current_time\",\n        autospec=True,\n    ) as mock_current_time:\n        yield mock_current_time\n\n\nclass TestMinFilter:\n    def test_min_filter(self):\n        seq = [None, 2, None, 7, None, 9, 10, 12, 1]\n        assert min_filter(seq) == 1\n\n\nclass TestEagerAll:\n    def test_all_true(self):\n        assert eager_all(range(1, 5))\n\n    def test_all_false(self):\n        assert not eager_all(0 for _ in range(7))\n\n    def test_full_iteration(self):\n        seq = iter([1, 0, 3, 0, 5])\n        assert not eager_all(seq)\n        with pytest.raises(StopIteration):\n            next(seq)\n\n\nclass TestActionRunFactory:\n    @pytest.fixture(autouse=True)\n    def setup_action_runs(self):\n        self.run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        a1 = MagicMock()\n        a1.name = \"act1\"\n        a1.command_config = ActionCommandConfig(command=\"do action1\")\n        a2 = MagicMock()\n        a2.name = \"act2\"\n        actions = [a1, a2]\n        self.action_graph = actiongraph.ActionGraph(\n            {a.name: a for a in actions},\n            {\"act1\": set(), \"act2\": set()},\n            {\"act1\": set(), \"act2\": set()},\n        )\n\n        mock_node = mock.create_autospec(node.Node)\n        self.job_run = jobrun.JobRun(\n            \"jobname\",\n            7,\n            self.run_time,\n            mock_node,\n            action_graph=self.action_graph,\n        )\n\n        self.action_runner = mock.create_autospec(\n            actioncommand.SubprocessActionRunnerFactory,\n        )\n\n    @pytest.fixture\n    def state_data(self):\n        command_config = self.action_graph.action_map[\"act1\"].command_config.state_data\n        # State data with command config and retries.\n        yield {\n            \"job_run_id\": \"job_run_id\",\n            \"action_name\": \"act1\",\n            \"state\": \"succeeded\",\n            \"run_time\": \"the_run_time\",\n            \"start_time\": None,\n     
       \"end_time\": None,\n            \"attempts\": [dict(command_config=command_config, start_time=\"start\")],\n            \"node_name\": \"anode\",\n        }\n\n    def test_build_action_run_collection(self):\n        collection = ActionRunFactory.build_action_run_collection(\n            self.job_run,\n            self.action_runner,\n        )\n        assert collection.action_graph == self.action_graph\n        assert \"act1\" in collection.run_map\n        assert \"act2\" in collection.run_map\n        assert len(collection.run_map) == 2\n        assert collection.run_map[\"act1\"].action_name == \"act1\"\n\n    def test_action_run_collection_from_state(self, state_data):\n        state_data = [state_data]\n        cleanup_command_config = dict(command=\"do action1\")\n        cleanup_action_state_data = {\n            \"job_run_id\": \"job_run_id\",\n            \"action_name\": \"cleanup\",\n            \"state\": \"succeeded\",\n            \"run_time\": self.run_time,\n            \"start_time\": None,\n            \"end_time\": None,\n            \"attempts\": [\n                dict(\n                    command_config=cleanup_command_config,\n                    rendered_command=\"do action1\",\n                    start_time=\"start\",\n                    end_time=\"end\",\n                    exit_status=0,\n                ),\n            ],\n            \"node_name\": \"anode\",\n            \"action_runner\": {\n                \"status_path\": \"/tmp/foo\",\n                \"exec_path\": \"/bin/foo\",\n            },\n        }\n        collection = ActionRunFactory.action_run_collection_from_state(\n            self.job_run,\n            state_data,\n            cleanup_action_state_data,\n        )\n\n        assert collection.action_graph == self.action_graph\n        assert_length(collection.run_map, 2)\n        assert collection.run_map[\"act1\"].action_name == \"act1\"\n        assert collection.run_map[\"cleanup\"].action_name == \"cleanup\"\n\n    def test_build_run_for_action(self):\n        expected_command = \"doit\"\n        action = MagicMock(\n            node_pool=None,\n            is_cleanup=False,\n            command_config=ActionCommandConfig(command=expected_command),\n        )\n        action.name = \"theaction\"\n        action_run = ActionRunFactory.build_run_for_action(\n            self.job_run,\n            action,\n            self.action_runner,\n        )\n\n        assert action_run.job_run_id == self.job_run.id\n        assert action_run.node == self.job_run.node\n        assert action_run.action_name == action.name\n        assert not action_run.is_cleanup\n        assert action_run.command == expected_command\n\n    def test_build_run_for_action_with_node(self):\n        expected_command = \"doit\"\n        action = MagicMock(\n            node_pool=None,\n            is_cleanup=True,\n            command_config=ActionCommandConfig(command=expected_command),\n        )\n        action.node_pool = mock.create_autospec(node.NodePool)\n        action_run = ActionRunFactory.build_run_for_action(\n            self.job_run,\n            action,\n            self.action_runner,\n        )\n\n        assert action_run.job_run_id == self.job_run.id\n        assert action_run.node == action.node_pool.next()\n        assert action_run.is_cleanup\n        assert action_run.action_name == action.name\n        assert action_run.command == expected_command\n\n    def test_build_run_for_ssh_action(self):\n        action = MagicMock(\n            
name=\"theaction\",\n            command=\"doit\",\n            executor=ExecutorTypes.ssh.value,\n        )\n        action_run = ActionRunFactory.build_run_for_action(\n            self.job_run,\n            action,\n            self.action_runner,\n        )\n        assert action_run.__class__ == SSHActionRun\n\n    def test_build_run_for_mesos_action(self):\n        command_config = MagicMock(\n            cpus=10,\n            mem=500,\n            disk=600,\n            constraints=[[\"pool\", \"LIKE\", \"default\"]],\n            docker_image=\"fake-docker.com:400/image\",\n            docker_parameters=[\n                {\n                    \"key\": \"test\",\n                    \"value\": 123,\n                }\n            ],\n            env={\"TESTING\": \"true\"},\n            extra_volumes=[\n                {\n                    \"path\": \"/tmp\",\n                }\n            ],\n        )\n        action = MagicMock(\n            name=\"theaction\",\n            command=\"doit\",\n            executor=ExecutorTypes.mesos.value,\n            command_config=command_config,\n        )\n        action_run = ActionRunFactory.build_run_for_action(\n            self.job_run,\n            action,\n            self.action_runner,\n        )\n        assert action_run.__class__ == MesosActionRun\n        assert action_run.command_config.cpus == command_config.cpus\n        assert action_run.command_config.mem == command_config.mem\n        assert action_run.command_config.disk == command_config.disk\n        assert action_run.command_config.constraints == command_config.constraints\n        assert action_run.command_config.docker_image == command_config.docker_image\n        assert action_run.command_config.docker_parameters == command_config.docker_parameters\n        assert action_run.command_config.env == command_config.env\n        assert action_run.command_config.extra_volumes == command_config.extra_volumes\n\n    def test_action_run_from_state_ssh(self, state_data):\n        action_run = ActionRunFactory.action_run_from_state(\n            self.job_run,\n            state_data,\n        )\n\n        assert action_run.job_run_id == state_data[\"job_run_id\"]\n        assert not action_run.is_cleanup\n        assert action_run.__class__ == SSHActionRun\n\n    def test_action_run_from_state_mesos(self, state_data):\n        state_data[\"executor\"] = ExecutorTypes.mesos.value\n        action_run = ActionRunFactory.action_run_from_state(\n            self.job_run,\n            state_data,\n        )\n\n        assert action_run.job_run_id == state_data[\"job_run_id\"]\n        action_name = state_data[\"action_name\"]\n        assert action_run.command_config == self.action_graph.action_map[action_name].command_config\n\n        assert not action_run.is_cleanup\n        assert action_run.__class__ == MesosActionRun\n\n    def test_action_run_from_state_kubernetes(self, state_data):\n        state_data[\"executor\"] = ExecutorTypes.kubernetes.value\n        action_run = ActionRunFactory.action_run_from_state(\n            self.job_run,\n            state_data,\n        )\n\n        assert action_run.job_run_id == state_data[\"job_run_id\"]\n        action_name = state_data[\"action_name\"]\n        assert action_run.command_config == self.action_graph.action_map[action_name].command_config\n\n        assert not action_run.is_cleanup\n        assert action_run.__class__ == KubernetesActionRun\n\n    def test_action_run_from_state_spark(self, state_data):\n        
state_data[\"executor\"] = ExecutorTypes.spark.value\n        action_run = ActionRunFactory.action_run_from_state(\n            self.job_run,\n            state_data,\n        )\n\n        assert action_run.job_run_id == state_data[\"job_run_id\"]\n        action_name = state_data[\"action_name\"]\n        assert action_run.command_config == self.action_graph.action_map[action_name].command_config\n\n        assert not action_run.is_cleanup\n        assert action_run.__class__ == KubernetesActionRun\n\n\nclass TestActionRun:\n    @pytest.fixture(autouse=True)\n    def setup_action_run(self, output_path):\n        self.action_runner = actioncommand.NoActionRunnerFactory()\n        self.command = \"do command {actionname}\"\n        self.rendered_command = \"do command action_name\"\n        self.action_run = ActionRun(\n            job_run_id=\"ns.id.0\",\n            name=\"action_name\",\n            node=mock.create_autospec(node.Node),\n            command_config=ActionCommandConfig(command=self.command),\n            output_path=output_path,\n            action_runner=self.action_runner,\n        )\n        # These should be implemented in subclasses, we don't care here\n        self.action_run.submit_command = mock.Mock()\n        self.action_run.stop = mock.Mock()\n        self.action_run.kill = mock.Mock()\n\n    def test_init_state(self):\n        assert self.action_run.state == ActionRun.SCHEDULED\n\n    def test_ready_state(self):\n        self.action_run.ready()\n        assert self.action_run.state == ActionRun.WAITING\n\n    def test_start(self):\n        self.action_run.machine.transition(\"ready\")\n        assert self.action_run.start()\n        assert self.action_run.submit_command.call_count == 1\n        assert self.action_run.is_starting\n        assert self.action_run.start_time\n\n    def test_start_bad_state(self):\n        self.action_run.fail()\n        assert not self.action_run.start()\n\n    @mock.patch(\"tron.core.actionrun.log\", autospec=True)\n    def test_start_invalid_command(self, _log):\n        self.action_run.original_command = \"{notfound}\"\n        self.action_run.machine.transition(\"ready\")\n        assert not self.action_run.start()\n        assert self.action_run.is_failed\n        assert self.action_run.exit_status == -1\n\n    def test_success(self):\n        assert self.action_run.ready()\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n\n        assert self.action_run.is_running\n        assert self.action_run.success()\n        assert not self.action_run.is_running\n        assert self.action_run.is_done\n        assert self.action_run.end_time\n        assert self.action_run.exit_status == 0\n\n    def test_success_emits_not(self):\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        self.action_run.trigger_downstreams = None\n        self.action_run.emit_triggers = mock.Mock()\n        assert self.action_run.success()\n        assert self.action_run.emit_triggers.call_count == 0\n\n    def test_success_emits_not_invalid_transition(self):\n        self.action_run.trigger_downstreams = True\n        self.action_run.machine.check = mock.Mock(return_value=False)\n        self.action_run.emit_triggers = mock.Mock()\n\n        assert not self.action_run.success()\n        assert self.action_run.emit_triggers.call_count == 0\n\n    def test_success_emits_on_true(self):\n        
self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        self.action_run.trigger_downstreams = True\n        self.action_run.emit_triggers = mock.Mock()\n        assert self.action_run.success()\n        assert self.action_run.emit_triggers.call_count == 1\n\n    def test_success_emits_on_dict(self):\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        self.action_run.trigger_downstreams = dict(foo=\"bar\")\n        self.action_run.emit_triggers = mock.Mock()\n        assert self.action_run.success()\n        assert self.action_run.emit_triggers.call_count == 1\n\n    @mock.patch(\"tron.core.actionrun.EventBus\", autospec=True)\n    def test_emit_triggers(self, eventbus):\n        self.action_run.context = {\"shortdate\": \"foo\"}\n\n        self.action_run.trigger_downstreams = True\n        self.action_run.emit_triggers()\n\n        self.action_run.trigger_downstreams = dict(foo=\"bar\")\n        self.action_run.emit_triggers()\n\n        assert eventbus.publish.mock_calls == [\n            mock.call(\"ns.id.action_name.shortdate.foo\"),\n            mock.call(\"ns.id.action_name.foo.bar\"),\n        ]\n\n    def test_failure(self):\n        self.action_run._exit_unsuccessful(1)\n        assert not self.action_run.is_running\n        assert self.action_run.is_done\n        assert self.action_run.end_time\n        assert self.action_run.exit_status == 1\n\n    def test_failure_bad_state(self):\n        self.action_run.fail(444)\n        assert not self.action_run.fail(123)\n        assert self.action_run.exit_status == 444\n\n    def test_skip(self):\n        assert not self.action_run.is_running\n        self.action_run.ready()\n        assert self.action_run.start()\n\n        assert self.action_run.fail(-1)\n        assert self.action_run.skip()\n        assert self.action_run.is_skipped\n\n    def test_skip_bad_state(self):\n        assert not self.action_run.skip()\n\n    def test_render_command(self):\n        self.action_run.context = {\"stars\": \"bright\"}\n        bare_command = \"{stars}\"\n        assert self.action_run.render_command(bare_command) == \"bright\"\n\n    def test_command_not_yet_rendered(self):\n        assert self.action_run.command == self.action_run.command_config.command\n\n    def test_command_already_rendered(self):\n        last_attempt = self.action_run.create_attempt()\n        assert self.action_run.command == last_attempt.rendered_command\n\n    @mock.patch(\"tron.core.actionrun.log\", autospec=True)\n    def test_command_failed_render(self, _log):\n        bare_command = \"{this_is_missing}\"\n        assert self.action_run.render_command(bare_command) == ActionRun.FAILED_RENDER\n\n    def test_is_complete(self):\n        self.action_run.machine.state = ActionRun.SUCCEEDED\n        assert self.action_run.is_complete\n        self.action_run.machine.state = ActionRun.SKIPPED\n        assert self.action_run.is_complete\n        self.action_run.machine.state = ActionRun.RUNNING\n        assert not self.action_run.is_complete\n\n    def test_is_broken(self):\n        self.action_run.machine.state = ActionRun.UNKNOWN\n        assert self.action_run.is_broken\n        self.action_run.machine.state = ActionRun.FAILED\n        assert self.action_run.is_broken\n        self.action_run.machine.state = ActionRun.WAITING\n        assert not self.action_run.is_broken\n\n    def test__getattr__(self):\n        assert not 
self.action_run.is_succeeded\n        assert not self.action_run.is_failed\n        assert not self.action_run.is_queued\n        assert self.action_run.is_scheduled\n        assert self.action_run.cancel()\n        assert self.action_run.is_cancelled\n\n    def test__getattr__missing_attribute(self):\n        with pytest.raises(AttributeError):\n            self.action_run.__getattr__(\"is_not_a_real_state\")\n\n    def test_auto_retry(self, mock_current_time):\n        # One timestamp for start and end of each attempt, plus final end time\n        mock_current_time.side_effect = [1, 2, 3, 4, 5, 6, 7]\n        self.action_run.retries_remaining = 2\n        self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n\n        assert self.action_run._exit_unsuccessful(-1)\n        assert self.action_run.is_starting\n        assert self.action_run.retries_remaining == 1\n\n        assert self.action_run._exit_unsuccessful(-1)\n        assert self.action_run.retries_remaining == 0\n        assert not self.action_run.is_failed\n\n        assert self.action_run._exit_unsuccessful(-2)\n        assert self.action_run.retries_remaining == 0\n        assert self.action_run.is_failed\n\n        assert self.action_run.exit_statuses == [-1, -1, -2]\n        assert len(self.action_run.attempts) == 3\n        for i, attempt in enumerate(self.action_run.attempts):\n            assert attempt.start_time == i * 2 + 1\n            assert attempt.end_time == (i + 1) * 2\n\n    def test_auto_retry_command_config_change(self, mock_current_time):\n        self.action_run.retries_remaining = 1\n        self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n\n        # If the command_config gets reconfigured later, auto retry\n        # still uses the original command by default.\n        self.action_run.command_config = ActionCommandConfig(command=\"new\")\n\n        assert self.action_run._exit_unsuccessful(-1)\n        assert self.action_run._exit_unsuccessful(-1)\n        assert len(self.action_run.attempts) == 2\n\n        for i, attempt in enumerate(self.action_run.attempts):\n            assert attempt.rendered_command == self.rendered_command\n\n    def test_no_auto_retry_on_fail_not_running(self):\n        self.action_run.retries_remaining = 2\n\n        self.action_run.fail()\n        assert self.action_run.retries_remaining == -1\n        assert self.action_run.is_failed\n\n        assert self.action_run.exit_statuses == []\n        assert self.action_run.exit_status is None\n\n    def test_no_auto_retry_on_fail_running(self):\n        self.action_run.retries_remaining = 2\n        self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n\n        self.action_run.fail()\n        assert self.action_run.retries_remaining == -1\n        assert self.action_run.is_failed\n\n        assert self.action_run.exit_statuses == [None]\n        assert self.action_run.exit_status is None\n\n    def test_auto_retry_already_done(self):\n        # If someone transitions the action before it\n        # is done to success/fail, the action\n        # should not automatically retry when the command\n        # completes.\n        self.action_run.retries_remaining = 2\n        self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n\n        # Action gets manually transitioned to success with tronctl\n        
self.action_run.machine.transition(\"success\")\n        assert self.action_run.is_succeeded\n\n        # Command later fails\n        # Does not start a command\n        assert not self.action_run._exit_unsuccessful(-1)\n        # Still succeeded, not starting\n        assert self.action_run.is_succeeded\n\n    def test_manual_retry(self, mock_current_time):\n        mock_current_time.side_effect = [1, 2, 3, 4]\n        self.action_run.retries_remaining = None\n        failed_attempt = self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n        self.action_run.fail(-1)\n        assert failed_attempt.end_time == 2\n        assert failed_attempt.exit_status == -1\n\n        self.action_run.retry()\n        assert self.action_run.is_starting\n        assert self.action_run.exit_statuses == [-1]\n        assert self.action_run.retries_remaining == 0\n        # Last attempt should be unchanged\n        assert failed_attempt.end_time == 2\n        assert failed_attempt.exit_status == -1\n\n    def test_manual_retry_use_new_command(self, mock_current_time):\n        mock_current_time.side_effect = [1, 2, 3, 4]\n        self.action_run.retries_remaining = None\n        self.action_run.create_attempt()\n        self.action_run.machine.transition(\"start\")\n        self.action_run.fail(-1)\n\n        # Change the command config\n        self.action_run.command_config = ActionCommandConfig(command=\"new\")\n        self.action_run.retry(original_command=False)\n        assert self.action_run.is_starting\n        assert self.action_run.last_attempt.rendered_command == \"new\"\n\n    @mock.patch(\"twisted.internet.reactor.callLater\", autospec=True)\n    def test_retries_delay(self, callLater):\n        self.action_run.retries_delay = datetime.timedelta()\n        self.action_run.retries_remaining = 2\n        self.action_run.machine.transition(\"start\")\n        callLater.return_value = \"delayed call\"\n        assert self.action_run._exit_unsuccessful(-1)\n        assert self.action_run.in_delay == \"delayed call\"\n\n\nclass TestActionRunFactoryTriggerTimeout:\n    def test_trigger_timeout_default(self):\n        today = datetime.datetime.today()\n        day = datetime.timedelta(days=1)\n        tomorrow = today + day\n        action_run = ActionRunFactory.build_run_for_action(\n            mock.Mock(run_time=today),\n            mock.Mock(trigger_timeout=None),\n            mock.Mock(),\n        )\n        assert action_run.trigger_timeout_timestamp == tomorrow.timestamp()\n\n    def test_trigger_timeout_custom(self):\n        today = datetime.datetime.today()\n        hour = datetime.timedelta(hours=1)\n        target = today + hour\n        action_run = ActionRunFactory.build_run_for_action(\n            mock.Mock(run_time=today),\n            mock.Mock(trigger_timeout=hour),\n            mock.Mock(),\n        )\n        assert action_run.trigger_timeout_timestamp == target.timestamp()\n\n\nclass TestActionRunTriggerTimeout:\n    @pytest.fixture(autouse=True)\n    def setup_teardown(self):\n        self.command = mock.Mock()\n        self.rendered_command = \"do command action_name\"\n        self.action_run = ActionRun(\n            job_run_id=\"ns.id.0\",\n            name=\"action_name\",\n            command_config=ActionCommandConfig(command=self.command),\n            triggered_by=[\"hello\"],\n            node=mock.Mock(),\n            output_path=mock.Mock(),\n            action_runner=mock.Mock(),\n            
trigger_timeout_timestamp=mock.Mock(),\n        )\n        self.action_run.submit_command = mock.Mock()\n        self.action_run.stop = mock.Mock()\n        self.action_run.kill = mock.Mock()\n\n    def test_cleanup_clears_trigger_timeout(self):\n        self.action_run.clear_trigger_timeout = MagicMock()\n        self.action_run.cleanup()\n        self.action_run.clear_trigger_timeout.assert_called_with()\n\n    def test_clear_trigger_timeout(self):\n        timeout_call = MagicMock()\n        self.action_run.trigger_timeout_call = timeout_call\n        self.action_run.clear_trigger_timeout()\n        assert self.action_run.trigger_timeout_call is None\n        timeout_call.cancel.assert_called_with()\n\n    @mock.patch(\"tron.core.actionrun.EventBus\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_setup_subscriptions_no_triggers(self, reactor, eventbus):\n        self.action_run.triggered_by = []\n        self.action_run.setup_subscriptions()\n        assert not reactor.callLater.called\n        assert not eventbus.subscribe.called\n\n    @mock.patch(\"tron.core.actionrun.EventBus\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_setup_subscriptions_no_remaining(self, reactor, eventbus):\n        self.action_run.triggered_by = [\"hello\"]\n        self.action_run.trigger_timeout_timestamp = None\n        eventbus.has_event.return_value = True\n        self.action_run.setup_subscriptions()\n        assert not reactor.callLater.called\n        assert not eventbus.subscribe.called\n        assert eventbus.has_event.call_args_list == [mock.call(\"hello\")]\n\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_setup_subscriptions_timeout_in_future(self, reactor, mock_current_time):\n        now = datetime.datetime.now()\n        mock_current_time.return_value = now\n        self.action_run.trigger_timeout_timestamp = now.timestamp() + 10\n        self.action_run.setup_subscriptions()\n        reactor.callLater.assert_called_once_with(\n            10.0,\n            self.action_run.trigger_timeout_reached,\n        )\n\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_setup_subscriptions_timeout_in_past(self, reactor, mock_current_time):\n        now = datetime.datetime.now()\n        mock_current_time.return_value = now\n        self.action_run.trigger_timeout_timestamp = now.timestamp() - 10\n        self.action_run.setup_subscriptions()\n        reactor.callLater.assert_called_once_with(\n            1,\n            self.action_run.trigger_timeout_reached,\n        )\n\n    @mock.patch(\"tron.core.actionrun.EventBus\", autospec=True)\n    def test_trigger_timeout_reached_no_remaining_notifies(self, eventbus):\n        self.action_run.notify = MagicMock()\n        self.action_run.triggered_by = [\"hello\"]\n        eventbus.has_event.return_value = True\n        self.action_run.trigger_timeout_reached()\n        assert self.action_run.notify.called\n\n    @mock.patch(\"tron.core.actionrun.EventBus\", autospec=True)\n    def test_trigger_timeout_reached_with_remaining_fails(self, eventbus):\n        self.action_run.fail = MagicMock()\n        self.action_run.triggered_by = [\"hello\"]\n        eventbus.has_event.return_value = False\n        self.action_run.trigger_timeout_reached()\n        assert self.action_run.fail.called\n\n    def test_done_clears_trigger_timeout_call(self):\n        self.action_run.machine.check = 
mock.Mock(return_value=True)\n        self.action_run.transition_and_notify = MagicMock()\n        self.action_run.triggered_by = []\n        self.action_run.clear_trigger_timeout = MagicMock()\n        self.action_run._done(ActionRun.SUCCEEDED)\n        assert self.action_run.clear_trigger_timeout.called\n\n    def test_trigger_notify_clears_trigger_timeout(self):\n        self.action_run.notify = MagicMock()\n        self.action_run.triggered_by = []\n        self.action_run.clear_trigger_timeout = MagicMock()\n        self.action_run.trigger_notify()\n        assert self.action_run.clear_trigger_timeout.called\n\n\nclass TestSSHActionRun:\n    @pytest.fixture(autouse=True)\n    def setup_action_run(self, output_path):\n        self.action_runner = mock.create_autospec(\n            actioncommand.NoActionRunnerFactory,\n        )\n        self.command = \"do command {actionname}\"\n        self.action_run = SSHActionRun(\n            job_run_id=\"job_name.5\",\n            name=\"action_name\",\n            command_config=ActionCommandConfig(command=self.command),\n            node=mock.create_autospec(node.Node),\n            output_path=output_path,\n            action_runner=self.action_runner,\n        )\n\n    def test_start_node_error(self):\n        def raise_error(c):\n            raise node.Error(\"The error\")\n\n        self.action_run.node = mock.MagicMock()\n        self.action_run.node.submit_command.side_effect = raise_error\n        self.action_run.machine.transition(\"ready\")\n        assert not self.action_run.start()\n        assert self.action_run.exit_status == -2\n        assert self.action_run.is_failed\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    def test_build_action_command(self, mock_filehandler):\n        self.action_run.watch = mock.MagicMock()\n        attempt = self.action_run.create_attempt()\n        serializer = mock_filehandler.OutputStreamSerializer.return_value\n        action_command = self.action_run.build_action_command(attempt)\n        assert action_command == self.action_run.action_command\n        assert action_command == self.action_runner.create.return_value\n        self.action_runner.create.assert_called_with(\n            self.action_run.id,\n            attempt.rendered_command,\n            serializer,\n        )\n        mock_filehandler.OutputStreamSerializer.assert_called_with(\n            self.action_run.output_path,\n        )\n        self.action_run.watch.assert_called_with(action_command)\n\n    def test_handler_running(self):\n        attempt = self.action_run.create_attempt()\n        self.action_run.build_action_command(attempt)\n        self.action_run.machine.transition(\"start\")\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.RUNNING,\n        )\n        assert self.action_run.is_running\n\n    def test_handler_failstart(self):\n        attempt = self.action_run.create_attempt()\n        self.action_run.build_action_command(attempt)\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.FAILSTART,\n        )\n        assert self.action_run.is_failed\n\n    def test_handler_exiting_fail(self):\n        attempt = self.action_run.create_attempt()\n        self.action_run.build_action_command(attempt)\n        self.action_run.action_command.exit_status = -1\n        self.action_run.machine.transition(\"start\")\n        assert self.action_run.handler(\n            
self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert self.action_run.is_failed\n        assert self.action_run.exit_status == -1\n\n    def test_handler_exiting_success(self):\n        attempt = self.action_run.create_attempt()\n        self.action_run.build_action_command(attempt)\n        self.action_run.action_command.exit_status = 0\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert self.action_run.is_succeeded\n        assert self.action_run.exit_status == 0\n\n    def test_handler_exiting_failunknown(self):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert self.action_run.is_unknown\n        assert self.action_run.exit_status is None\n        assert self.action_run.end_time is not None\n\n    def test_handler_unhandled(self):\n        attempt = self.action_run.create_attempt()\n        self.action_run.build_action_command(attempt)\n        assert (\n            self.action_run.handler(\n                self.action_run.action_command,\n                ActionCommand.PENDING,\n            )\n            is None\n        )\n        assert self.action_run.is_scheduled\n\n    def test_recover_no_action_runner(self):\n        # Default setup has no action runner\n        assert not self.action_run.recover()\n\n\nclass TestSSHActionRunRecover:\n    @pytest.fixture(autouse=True)\n    def setup_action_run(self, output_path):\n        self.action_runner = SubprocessActionRunnerFactory(\n            status_path=\"/tmp/foo\",\n            exec_path=\"/bin/foo\",\n        )\n        self.command = \"do command {actionname}\"\n        self.action_run = SSHActionRun(\n            job_run_id=\"job_name.5\",\n            name=\"action_name\",\n            command_config=ActionCommandConfig(self.command),\n            node=mock.create_autospec(node.Node),\n            output_path=output_path,\n            action_runner=self.action_runner,\n        )\n\n    def test_recover_incorrect_state(self):\n        # Should return falsy if not UNKNOWN.\n        self.action_run.machine.state = ActionRun.FAILED\n        assert not self.action_run.recover()\n\n    def test_recover_action_runner(self):\n        self.action_run.end_time = 1000\n        self.action_run.exit_status = 0\n        self.action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.end_time = 1000\n        last_attempt.exit_status = 0\n        assert self.action_run.recover()\n        assert self.action_run.machine.state == ActionRun.RUNNING\n        assert self.action_run.end_time is None\n        assert self.action_run.exit_status is None\n        assert last_attempt.end_time is None\n        assert last_attempt.exit_status is None\n        self.action_run.node.submit_command.assert_called_once()\n\n        # Check recovery command\n        submit_args = self.action_run.node.submit_command.call_args[0]\n        assert len(submit_args) == 1\n        recovery_command = 
submit_args[0]\n        assert recovery_command.command == \"/bin/foo/recover_batch.py /tmp/foo/job_name.5.action_name/status\"\n        assert recovery_command.start_time is not None  # already started\n\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_handler_exiting_failunknown(self, mock_reactor):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        delay_deferred = self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert delay_deferred == mock_reactor.callLater.return_value\n        assert self.action_run.is_running\n        assert self.action_run.exit_status is None\n        assert self.action_run.end_time is None\n\n        call_args = mock_reactor.callLater.call_args[0]\n        assert call_args[0] == INITIAL_RECOVER_DELAY\n        assert call_args[1] == self.action_run.submit_recovery_command\n\n        # Check recovery run\n        recovery_run = call_args[2]\n        assert \"recovery\" in recovery_run.name\n        assert isinstance(recovery_run, SSHActionRun)\n        # Recovery run should not be recovering itself, parent run handles its unknown status\n        assert recovery_run.recover() is None\n\n        # Check command\n        recovery_command = call_args[3]\n        assert recovery_command.command == \"/bin/foo/recover_batch.py /tmp/foo/job_name.5.action_name/status\"\n        assert recovery_command.start_time is not None  # already started\n\n    @mock.patch(\"tron.core.actionrun.SSHActionRun.do_recover\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.reactor\", autospec=True)\n    def test_handler_exiting_failunknown_max_retries(self, mock_reactor, mock_do_recover):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n\n        def exit_unknown(*args, **kwargs):\n            self.action_run.handler(\n                self.action_run.action_command,\n                ActionCommand.EXITING,\n            )\n\n        # Each time do_recover is called, end up exiting unknown again\n        mock_do_recover.side_effect = exit_unknown\n\n        # Start the cycle\n        exit_unknown()\n\n        assert mock_do_recover.call_count == MAX_RECOVER_TRIES\n        last_call = mock_do_recover.call_args\n        expected_delay = INITIAL_RECOVER_DELAY * (3 ** (MAX_RECOVER_TRIES - 1))\n        assert last_call == mock.call(self.action_run, delay=expected_delay)\n\n        assert self.action_run.is_unknown\n        assert self.action_run.exit_status is None\n        assert self.action_run.end_time is not None\n\n\nclass TestActionRunStateRestore:\n    now = datetime.datetime(2012, 3, 14, 15, 19)\n\n    @pytest.fixture(autouse=True)\n    def setup_action_run(self, mock_current_time):\n        self.parent_context = {}\n        self.output_path = [\"one\", \"two\"]\n        self.run_node = MagicMock()\n        mock_current_time.return_value = self.now\n        self.command_config = ActionCommandConfig(\n            command=\"do {actionname}\",\n            cpus=1,\n        )\n        self.action_config = mock.Mock(command_config=self.command_config)\n        
self.action_graph = actiongraph.ActionGraph(\n            {\"theaction\": self.action_config},\n            {\"theaction\": set()},\n            {\"theaction\": set()},\n        )\n\n    @pytest.fixture\n    def state_data(self):\n        # State data with command config and retries.\n        yield {\n            \"job_run_id\": \"theid\",\n            \"action_name\": \"theaction\",\n            \"node_name\": \"anode\",\n            \"run_time\": \"the_run_time\",\n            \"start_time\": \"start_time\",\n            \"end_time\": \"end\",\n            \"exit_status\": 0,\n            \"attempts\": [\n                dict(\n                    command_config=self.command_config.state_data,\n                    rendered_command=\"do theaction\",\n                    start_time=\"start\",\n                    end_time=\"end\",\n                    exit_status=0,\n                ),\n            ],\n            \"state\": \"succeeded\",\n        }\n\n    @pytest.fixture\n    def state_data_old(self):\n        # State data before command config and retries are separate.\n        yield {\n            \"job_run_id\": \"theid\",\n            \"action_name\": \"theaction\",\n            \"node_name\": \"anode\",\n            \"command\": \"do {actionname}\",\n            \"start_time\": \"start_time\",\n            \"end_time\": \"end\",\n            \"state\": \"succeeded\",\n        }\n\n    def test_from_state_old(self, state_data_old):\n        state_data = state_data_old\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            list(self.output_path),\n            self.run_node,\n            self.action_graph,\n        )\n\n        for key, value in state_data.items():\n            if key in [\"state\", \"node_name\"]:\n                continue\n            assert getattr(action_run, key) == value\n\n        assert action_run.is_succeeded\n        assert not action_run.is_cleanup\n        assert action_run.output_path[:2] == self.output_path\n        assert action_run.command_config.command == state_data[\"command\"]\n        assert action_run.command == state_data[\"command\"]\n\n    def test_from_state_old_with_mesos_task_id(self, state_data_old):\n        state_data = state_data_old\n        state_data[\"mesos_task_id\"] = \"task\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            list(self.output_path),\n            self.run_node,\n            self.action_graph,\n        )\n\n        for key, value in state_data.items():\n            if key in [\"state\", \"node_name\", \"mesos_task_id\"]:\n                continue\n            assert getattr(action_run, key) == value\n\n        assert action_run.is_succeeded\n        assert action_run.last_attempt.mesos_task_id == state_data[\"mesos_task_id\"]\n\n    def test_from_state_old_not_started(self, state_data_old):\n        state_data = state_data_old\n        state_data[\"start_time\"] = None\n        state_data[\"state\"] = \"scheduled\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            list(self.output_path),\n            self.run_node,\n            self.action_graph,\n        )\n\n        for key, value in state_data.items():\n            if key in [\"state\", \"node_name\"]:\n                continue\n            assert getattr(action_run, key) == value\n\n        assert action_run.is_scheduled\n        assert action_run.exit_statuses == []\n      
  assert len(action_run.attempts) == 0\n\n    def test_from_state_old_rendered_and_exited(self, state_data_old):\n        state_data = state_data_old\n        state_data[\"rendered_command\"] = \"do things theaction\"\n        state_data[\"exit_status\"] = 0\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            list(self.output_path),\n            self.run_node,\n            self.action_graph,\n        )\n\n        for key, value in state_data.items():\n            if key in [\"state\", \"node_name\", \"command\", \"rendered_command\"]:\n                continue\n            assert getattr(action_run, key) == value\n\n        assert action_run.is_succeeded\n        assert action_run.exit_statuses == [0]\n        assert action_run.command_config.command == state_data[\"command\"]\n        assert action_run.command == state_data[\"rendered_command\"]\n\n    def test_from_state_old_retries(self, state_data_old):\n        state_data = state_data_old\n        state_data[\"rendered_command\"] = \"do things theaction\"\n        state_data[\"exit_status\"] = 0\n        state_data[\"exit_statuses\"] = [1]\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            list(self.output_path),\n            self.run_node,\n            self.action_graph,\n        )\n\n        for key, value in state_data.items():\n            if key in [\n                \"state\",\n                \"node_name\",\n                \"command\",\n                \"rendered_command\",\n                \"exit_statuses\",\n            ]:\n                continue\n            assert getattr(action_run, key) == value\n\n        assert action_run.is_succeeded\n        assert action_run.exit_statuses == [1, 0]\n        assert len(action_run.attempts) == 2\n\n    def test_from_state_running(self, state_data):\n        state_data[\"state\"] = \"running\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.is_unknown\n\n    def test_from_state_starting(self, state_data):\n        state_data[\"state\"] = \"starting\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.is_unknown\n\n    def test_from_state_queued(self, state_data):\n        state_data[\"state\"] = \"queued\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.is_queued\n\n    def test_from_state_no_node_name(self, state_data):\n        del state_data[\"node_name\"]\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.node == self.run_node\n\n    @mock.patch(\"tron.core.actionrun.node.NodePoolRepository\", autospec=True)\n    def test_from_state_with_node_exists(self, mock_store, state_data):\n        ActionRun.from_state(\n            
state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        mock_store.get_instance().get_node.assert_called_with(\n            state_data[\"node_name\"],\n            self.run_node,\n        )\n\n    def test_from_state_after_rendered_command(self, state_data):\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.command_config == self.command_config\n        assert len(action_run.attempts) == len(state_data[\"attempts\"])\n        assert action_run.exit_statuses == [0]\n        assert action_run.command == state_data[\"attempts\"][-1][\"rendered_command\"]\n\n    def test_from_state_action_config_gone(self, state_data):\n        state_data[\"action_name\"] = \"old_action\"\n        action_run = ActionRun.from_state(\n            state_data,\n            self.parent_context,\n            self.output_path,\n            self.run_node,\n            self.action_graph,\n            lambda: None,\n        )\n        assert action_run.command_config.command == \"\"\n        assert action_run.command == state_data[\"attempts\"][-1][\"rendered_command\"]\n\n\nclass TestActionRunCollection:\n    def _build_run(self, action):\n        mock_node = mock.create_autospec(node.Node)\n        return ActionRun(\n            \"id\",\n            action.name,\n            mock_node,\n            command_config=action.command_config,\n            output_path=self.output_path,\n        )\n\n    @pytest.fixture(autouse=True)\n    def setup_runs(self, output_path):\n        action_names = [\"action_name\", \"second_name\", \"cleanup\"]\n\n        actions = []\n        for name in action_names:\n            m = mock.Mock(\n                name=name,\n                required_actions=[],\n                command_config=ActionCommandConfig(command=\"old\"),\n            )\n            m.name = name\n            actions.append(m)\n\n        self.action_graph = actiongraph.ActionGraph(\n            {a.name: a for a in actions},\n            {\"action_name\": set(), \"second_name\": set(), \"cleanup\": set()},\n            {\"action_name\": set(), \"second_name\": set(), \"cleanup\": set()},\n        )\n        self.output_path = output_path\n        self.command = \"do command\"\n        self.action_runs = [self._build_run(action) for action in actions]\n        self.run_map = {a.action_name: a for a in self.action_runs}\n        self.run_map[\"cleanup\"].is_cleanup = True\n        self.collection = ActionRunCollection(self.action_graph, self.run_map)\n\n    def test__init__(self):\n        assert self.collection.action_graph == self.action_graph\n        assert self.collection.run_map == self.run_map\n        assert self.collection.proxy_action_runs_with_cleanup\n\n    def test_action_runs_for_actions(self):\n        m = MagicMock()\n        m.name = \"action_name\"\n        actions = [m]\n        action_runs = self.collection.action_runs_for_actions(actions)\n        assert list(action_runs) == self.action_runs[:1]\n\n    def test_get_action_runs_with_cleanup(self):\n        runs = self.collection.get_action_runs_with_cleanup()\n        assert set(runs) == set(self.action_runs)\n\n    def test_get_action_runs(self):\n        runs = self.collection.get_action_runs()\n        assert set(runs) 
== set(self.action_runs[:2])\n\n    def test_cleanup_action_run(self):\n        assert self.action_runs[2] == self.collection.cleanup_action_run\n\n    def test_update_action_config_no_changes(self):\n        assert self.collection.update_action_config(self.action_graph) is False\n\n    def test_update_action_config(self):\n        # Latest config has 'new_name' instead of 'action_name'\n        new_action_names = [\"new_name\", \"second_name\", \"cleanup\"]\n        new_actions = []\n        for name in new_action_names:\n            action = mock.Mock(\n                name=name,\n                required_actions=[],\n                command_config=ActionCommandConfig(command=\"new\"),\n            )\n            action.name = name\n            new_actions.append(action)\n\n        new_action_graph = actiongraph.ActionGraph(\n            {a.name: a for a in new_actions},\n            {\"new_name\": set(), \"second_name\": set(), \"cleanup\": set()},\n            {\"new_name\": set(), \"second_name\": set(), \"cleanup\": set()},\n        )\n        assert self.collection.update_action_config(new_action_graph) is True\n        assert self.collection.action_graph != new_action_graph\n\n        updated_action_runs = self.collection.action_runs_with_cleanup\n        # Action names should be unchanged\n        assert sorted(run.name for run in updated_action_runs) == sorted(run.name for run in self.action_runs)\n\n        for run in updated_action_runs:\n            if run.name == \"action_name\":\n                assert run.command_config.command == \"old\"\n            else:\n                assert run.command_config.command == \"new\"\n\n    def test_state_data(self):\n        state_data = self.collection.state_data\n        assert_length(state_data, len(self.action_runs[:2]))\n\n    def test_cleanup_action_state_data(self):\n        state_data = self.collection.cleanup_action_state_data\n        assert state_data[\"action_name\"] == \"cleanup\"\n\n    def test_cleanup_action_state_data_no_cleanup_action(self):\n        del self.collection.run_map[\"cleanup\"]\n        assert not self.collection.cleanup_action_state_data\n\n    def test_get_startable_action_runs(self):\n        action_runs = self.collection.get_startable_action_runs()\n        assert set(action_runs) == set(self.action_runs[:2])\n\n    def test_get_startable_action_runs_none(self):\n        self.collection.run_map.clear()\n        action_runs = self.collection.get_startable_action_runs()\n        assert set(action_runs) == set()\n\n    def test_has_startable_action_runs(self):\n        assert self.collection.has_startable_action_runs\n\n    def test_has_startable_action_runs_false(self):\n        self.collection.run_map.clear()\n        assert not self.collection.has_startable_action_runs\n\n    def test_is_complete_false(self):\n        assert not self.collection.is_complete\n\n    def test_is_complete_true(self):\n        for action_run in self.collection.action_runs_with_cleanup:\n            action_run.machine.state = ActionRun.SKIPPED\n        assert self.collection.is_complete\n\n    def test_is_done_false(self):\n        assert not self.collection.is_done\n\n    def test_is_done_false_because_of_running(self):\n        action_run = self.collection.run_map[\"action_name\"]\n        action_run.machine.state = ActionRun.RUNNING\n        assert not self.collection.is_done\n\n    def test_is_done_true_because_blocked(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        
self.run_map[\"second_name\"].machine.state = ActionRun.WAITING\n        autospec_method(self.collection._is_run_blocked)\n\n        self.collection._is_run_blocked.return_value = True\n        assert self.collection.is_done\n        assert self.collection.is_failed\n        self.collection._is_run_blocked.assert_called_with(\n            self.run_map[\"second_name\"],\n            in_job_only=True,\n        )\n\n    def test_is_done_true(self):\n        for action_run in self.collection.action_runs_with_cleanup:\n            action_run.machine.state = ActionRun.FAILED\n        assert self.collection.is_done\n\n    def test_is_failed_false_not_done(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        assert not self.collection.is_failed\n\n    def test_is_failed_false_no_failed(self):\n        for action_run in self.collection.action_runs_with_cleanup:\n            action_run.machine.state = ActionRun.SUCCEEDED\n        assert not self.collection.is_failed\n\n    def test_is_failed_true(self):\n        for action_run in self.collection.action_runs_with_cleanup:\n            action_run.machine.state = ActionRun.FAILED\n        assert self.collection.is_failed\n\n    def test__getattr__(self):\n        assert self.collection.is_scheduled\n        assert not self.collection.is_cancelled\n        assert not self.collection.is_running\n        assert self.collection.ready()\n\n    def test__str__(self):\n        self.collection._is_run_blocked = lambda r: r.action_name != \"cleanup\"\n        expected = [\n            \"ActionRunCollection\",\n            \"second_name(scheduled:blocked)\",\n            \"action_name(scheduled:blocked)\",\n            \"cleanup(scheduled)\",\n        ]\n        for expectation in expected:\n            assert expectation in str(self.collection)\n\n    def test_end_time(self):\n        max_end_time = datetime.datetime(2013, 6, 15)\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        self.run_map[\"action_name\"].end_time = datetime.datetime(2013, 5, 12)\n        self.run_map[\"second_name\"].machine.state = ActionRun.SUCCEEDED\n        self.run_map[\"second_name\"].end_time = max_end_time\n        assert self.collection.end_time == max_end_time\n\n    def test_end_time_not_done(self):\n        self.run_map[\"action_name\"].end_time = datetime.datetime(2013, 5, 12)\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        self.run_map[\"second_name\"].end_time = None\n        self.run_map[\"second_name\"].machine.state = ActionRun.RUNNING\n        assert self.collection.end_time is None\n\n    def test_end_time_not_started(self):\n        assert self.collection.end_time is None\n\n\nclass TestActionRunCollectionIsRunBlocked:\n    def _build_run(self, name):\n        mock_node = mock.create_autospec(node.Node)\n        return ActionRun(\n            \"id\",\n            name,\n            mock_node,\n            self.command_config,\n            output_path=self.output_path,\n        )\n\n    @pytest.fixture(autouse=True)\n    def setup_collection(self, output_path):\n        action_names = [\"action_name\", \"second_name\", \"cleanup\"]\n\n        actions = []\n        for name in action_names:\n            m = MagicMock()\n            m.name = name\n            actions.append(m)\n\n        self.second_act = actions[1]\n        action_map = {a.name: a for a in actions}\n        self.action_graph = actiongraph.ActionGraph(\n            action_map,\n            {\"action_name\": 
set(), \"second_name\": {\"action_name\"}, \"cleanup\": set()},\n            {\"action_name\": set(), \"second_name\": set(), \"cleanup\": set()},\n        )\n\n        self.output_path = output_path\n        self.command_config = ActionCommandConfig(command=\"do command\")\n        self.action_runs = [self._build_run(name) for name in action_names]\n        self.run_map = {a.action_name: a for a in self.action_runs}\n        self.run_map[\"cleanup\"].is_cleanup = True\n        self.collection = ActionRunCollection(self.action_graph, self.run_map)\n\n    def test_is_run_blocked_no_required_actions(self):\n        assert not self.collection._is_run_blocked(self.run_map[\"action_name\"])\n\n    def test_is_run_blocked_completed_run(self):\n        self.run_map[\"second_name\"].machine.state = ActionRun.FAILED\n        assert not self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n        self.run_map[\"second_name\"].machine.state = ActionRun.RUNNING\n        assert not self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_completed(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.SKIPPED\n        assert not self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_blocked(self):\n        third_act = MagicMock()\n        third_act.name = \"third_act\"\n        self.action_graph.action_map[\"third_act\"] = third_act\n        self.action_graph.required_actions[\"third_act\"] = {self.second_act.name}\n        self.run_map[\"third_act\"] = self._build_run(\"third_act\")\n\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        assert self.collection._is_run_blocked(self.run_map[\"third_act\"])\n\n    def test_is_run_blocked_required_actions_scheduled(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.SCHEDULED\n        assert self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_starting(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.STARTING\n        assert self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_waiting(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.WAITING\n        assert self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_failed(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.FAILED\n        assert self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_required_actions_missing(self):\n        del self.run_map[\"action_name\"]\n        assert not self.collection._is_run_blocked(self.run_map[\"second_name\"])\n\n    def test_is_run_blocked_in_job_only(self):\n        self.run_map[\"action_name\"].machine.state = ActionRun.SKIPPED\n        self.run_map[\"second_name\"].triggered_by = [\"trigger\"]\n        assert not self.collection._is_run_blocked(self.run_map[\"second_name\"], in_job_only=True)\n        assert self.collection._is_run_blocked(self.run_map[\"second_name\"], in_job_only=False)\n\n\nclass TestMesosActionRun:\n    @pytest.fixture(autouse=True)\n    def setup_action_run(self):\n        self.output_path = mock.MagicMock()\n        self.command = \"do the command\"\n        self.extra_volumes = [ConfigVolume(\"/mnt/foo\", \"/mnt/foo\", \"RO\")]\n        self.constraints = [ConfigConstraint(\"an 
attr\", \"an op\", \"a val\")]\n        self.docker_parameters = [ConfigParameter(\"init\", \"true\")]\n        self.other_task_kwargs = {\n            \"cpus\": 1,\n            \"mem\": 50,\n            \"disk\": 42,\n            \"docker_image\": \"container:v2\",\n            \"env\": {\n                \"TESTING\": \"true\",\n                \"TRON_JOB_NAMESPACE\": \"mynamespace\",\n                \"TRON_JOB_NAME\": \"myjob\",\n                \"TRON_RUN_NUM\": \"42\",\n                \"TRON_ACTION\": \"action_name\",\n            },\n        }\n        command_config = ActionCommandConfig(\n            command=self.command,\n            extra_volumes=self.extra_volumes,\n            constraints=self.constraints,\n            docker_parameters=self.docker_parameters,\n            **self.other_task_kwargs,\n        )\n        self.action_run = MesosActionRun(\n            job_run_id=\"mynamespace.myjob.42\",\n            name=\"action_name\",\n            command_config=command_config,\n            node=mock.create_autospec(node.Node),\n            output_path=self.output_path,\n            executor=ExecutorTypes.mesos.value,\n        )\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_submit_command(self, mock_cluster_repo, mock_filehandler):\n        serializer = mock_filehandler.OutputStreamSerializer.return_value\n        # submit_command should add a new attempt\n        self.action_run.attempts = [\n            ActionRunAttempt(\n                command_config=self.action_run.command_config,\n                rendered_command=self.command,\n                mesos_task_id=\"last_attempt\",\n            ),\n        ]\n        with mock.patch.object(\n            self.action_run,\n            \"watch\",\n            autospec=True,\n        ) as mock_watch:\n            new_attempt = self.action_run.create_attempt()\n            self.action_run.submit_command(new_attempt)\n\n            mock_get_cluster = mock_cluster_repo.get_cluster\n            mock_get_cluster.assert_called_once_with()\n\n            mock_get_cluster.return_value.create_task.assert_called_once_with(\n                action_run_id=self.action_run.id,\n                command=self.command,\n                serializer=serializer,\n                task_id=None,\n                extra_volumes=[e._asdict() for e in self.extra_volumes],\n                constraints=[[\"an attr\", \"an op\", \"a val\"]],\n                docker_parameters=[{\"key\": \"init\", \"value\": \"true\"}],\n                **self.other_task_kwargs,\n            )\n            task = mock_get_cluster.return_value.create_task.return_value\n            mock_get_cluster.return_value.submit.assert_called_once_with(task)\n            mock_watch.assert_called_once_with(task)\n            assert self.action_run.last_attempt.mesos_task_id == task.get_mesos_id.return_value\n\n        mock_filehandler.OutputStreamSerializer.assert_called_with(\n            self.action_run.output_path,\n        )\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_submit_command_task_none(\n        self,\n        mock_cluster_repo,\n        mock_filehandler,\n    ):\n        # Task is None if Mesos is disabled\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        mock_get_cluster.return_value.create_task.return_value = None\n        
new_attempt = self.action_run.create_attempt()\n        self.action_run.submit_command(new_attempt)\n\n        mock_get_cluster.assert_called_once_with()\n        assert mock_get_cluster.return_value.submit.call_count == 0\n        assert self.action_run.is_failed\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_recover(self, mock_cluster_repo, mock_filehandler):\n        self.action_run.machine.state = ActionRun.UNKNOWN\n        self.action_run.end_time = 1000\n        self.action_run.exit_status = 0\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = \"my_mesos_id\"\n        last_attempt.end_time = 1000\n        last_attempt.exit_status = 0\n        serializer = mock_filehandler.OutputStreamSerializer.return_value\n        with mock.patch.object(\n            self.action_run,\n            \"watch\",\n            autospec=True,\n        ) as mock_watch:\n            assert self.action_run.recover()\n\n            mock_get_cluster = mock_cluster_repo.get_cluster\n            mock_get_cluster.assert_called_once_with()\n            mock_get_cluster.return_value.create_task.assert_called_once_with(\n                action_run_id=self.action_run.id,\n                command=self.command,\n                serializer=serializer,\n                task_id=\"my_mesos_id\",\n                extra_volumes=[e._asdict() for e in self.extra_volumes],\n                constraints=[[\"an attr\", \"an op\", \"a val\"]],\n                docker_parameters=[{\"key\": \"init\", \"value\": \"true\"}],\n                **self.other_task_kwargs,\n            )\n            task = mock_get_cluster.return_value.create_task.return_value\n            mock_get_cluster.return_value.recover.assert_called_once_with(task)\n            mock_watch.assert_called_once_with(task)\n\n        assert self.action_run.is_running\n        assert self.action_run.end_time is None\n        assert self.action_run.exit_status is None\n        assert last_attempt.end_time is None\n        assert last_attempt.exit_status is None\n        mock_filehandler.OutputStreamSerializer.assert_called_with(\n            self.action_run.output_path,\n        )\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_recover_done_no_change(self, mock_cluster_repo, mock_filehandler):\n        self.action_run.machine.state = ActionRun.SUCCEEDED\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = \"my_mesos_id\"\n\n        assert not self.action_run.recover()\n        assert mock_cluster_repo.get_cluster.call_count == 0\n        assert self.action_run.is_succeeded\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_recover_no_mesos_task_id(\n        self,\n        mock_cluster_repo,\n        mock_filehandler,\n    ):\n        self.action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = None\n\n        assert not self.action_run.recover()\n        assert mock_cluster_repo.get_cluster.call_count == 0\n        assert self.action_run.is_unknown\n        assert self.action_run.end_time is not None\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", 
autospec=True)\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_recover_task_none(self, mock_cluster_repo, mock_filehandler):\n        self.action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = \"my_mesos_id\"\n        # Task is None if Mesos is disabled\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        mock_get_cluster.return_value.create_task.return_value = None\n        assert not self.action_run.recover()\n\n        mock_get_cluster.assert_called_once_with()\n        assert self.action_run.is_unknown\n        assert mock_get_cluster.return_value.recover.call_count == 0\n        assert self.action_run.end_time is not None\n\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_kill_task(self, mock_cluster_repo):\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = \"fake_task_id\"\n        self.action_run.machine.state = ActionRun.RUNNING\n\n        self.action_run.kill()\n        mock_get_cluster.return_value.kill.assert_called_once_with(\n            last_attempt.mesos_task_id,\n        )\n\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_kill_task_no_task_id(self, mock_cluster_repo):\n        self.action_run.machine.state = ActionRun.RUNNING\n        self.action_run.create_attempt()\n        error_message = self.action_run.kill()\n        assert error_message == \"Error: Can't find task id for the action.\"\n\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_stop_task(self, mock_cluster_repo):\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        last_attempt = self.action_run.create_attempt()\n        last_attempt.mesos_task_id = \"fake_task_id\"\n        self.action_run.machine.state = ActionRun.RUNNING\n\n        self.action_run.stop()\n        mock_get_cluster.return_value.kill.assert_called_once_with(\n            last_attempt.mesos_task_id,\n        )\n\n    @mock.patch(\"tron.core.actionrun.MesosClusterRepository\", autospec=True)\n    def test_stop_task_no_task_id(self, mock_cluster_repo):\n        self.action_run.machine.state = ActionRun.RUNNING\n        self.action_run.create_attempt()\n        error_message = self.action_run.stop()\n        assert error_message == \"Error: Can't find task id for the action.\"\n\n    def test_handler_exiting_unknown(self):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert self.action_run.is_unknown\n        assert self.action_run.exit_status is None\n        assert self.action_run.end_time is not None\n\n    def test_handler_exiting_unknown_retry(self):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        self.action_run.retries_remaining = 1\n        self.action_run.start = mock.Mock()\n\n        self.action_run.machine.transition(\"start\")\n        self.action_run.machine.transition(\"started\")\n    
    assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert self.action_run.retries_remaining == 0\n        assert not self.action_run.is_unknown\n        assert self.action_run.start.call_count == 1\n\n    def test_handler_exiting_failstart_failed(self):\n        self.action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=1,\n        )\n        self.action_run.machine.transition(\"start\")\n        assert self.action_run.handler(\n            self.action_run.action_command,\n            ActionCommand.FAILSTART,\n        )\n        assert self.action_run.is_failed\n\n\nclass TestKubernetesActionRun:\n    @pytest.fixture\n    def mock_k8s_action_run(self):\n        command_config = ActionCommandConfig(\n            command=\"mock_command\",\n            extra_volumes=set(),\n            constraints=set(),\n            docker_parameters=set(),\n            cpus=1,\n            mem=50,\n            disk=42,\n            docker_image=\"container:v2\",\n            env={\n                \"TESTING\": \"true\",\n                \"TRON_JOB_NAMESPACE\": \"mock_namespace\",\n                \"TRON_JOB_NAME\": \"mock_job\",\n                \"TRON_RUN_NUM\": \"42\",\n                \"TRON_ACTION\": \"mock_action_name\",\n            },\n            labels={\n                \"tron.yelp.com/run_num\": \"42\",\n            },\n        )\n\n        return KubernetesActionRun(\n            job_run_id=\"mock_namespace.mock_job.42\",\n            name=\"mock_action_name\",\n            command_config=command_config,\n            node=mock.create_autospec(node.Node),\n            output_path=mock.create_autospec(filehandler.OutputPath),\n            executor=ExecutorTypes.kubernetes.value,\n        )\n\n    def test_k8s_handler_exiting_unknown(self, mock_k8s_action_run):\n        mock_k8s_action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        mock_k8s_action_run.machine.transition(\"start\")\n        mock_k8s_action_run.machine.transition(\"started\")\n        assert mock_k8s_action_run.handler(\n            mock_k8s_action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert mock_k8s_action_run.is_unknown\n        assert mock_k8s_action_run.exit_status is None\n        assert mock_k8s_action_run.end_time is not None\n\n    def test_handler_exiting_unknown_retry(self, mock_k8s_action_run):\n        mock_k8s_action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=None,\n        )\n        mock_k8s_action_run.retries_remaining = 1\n        mock_k8s_action_run.start = mock.Mock()\n\n        mock_k8s_action_run.machine.transition(\"start\")\n        mock_k8s_action_run.machine.transition(\"started\")\n        assert mock_k8s_action_run.handler(\n            mock_k8s_action_run.action_command,\n            ActionCommand.EXITING,\n        )\n        assert mock_k8s_action_run.retries_remaining == 0\n        assert not mock_k8s_action_run.is_unknown\n        assert mock_k8s_action_run.start.call_count == 1\n\n    def test_handler_exiting_failstart_failed(self, mock_k8s_action_run):\n        mock_k8s_action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=1,\n        )\n        mock_k8s_action_run.machine.transition(\"start\")\n       
 assert mock_k8s_action_run.handler(\n            mock_k8s_action_run.action_command,\n            ActionCommand.FAILSTART,\n        )\n        assert mock_k8s_action_run.is_failed\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_recover(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run):\n        mock_k8s_action_run.machine.state = ActionRun.UNKNOWN\n        mock_k8s_action_run.end_time = 1000\n        mock_k8s_action_run.exit_status = 0\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"test-k8s-task-id\"\n        last_attempt.end_time = 1000\n        last_attempt.exit_status = 0\n        serializer = mock_filehandler.OutputStreamSerializer.return_value\n        with mock.patch.object(\n            mock_k8s_action_run,\n            \"watch\",\n            autospec=True,\n        ) as mock_watch:\n            assert mock_k8s_action_run.recover()\n\n            mock_get_cluster = mock_cluster_repo.get_cluster\n            mock_get_cluster.assert_called_once_with()\n            mock_get_cluster.return_value.create_task.assert_called_once_with(\n                action_run_id=mock_k8s_action_run.id,\n                command=last_attempt.rendered_command,\n                cpus=mock_k8s_action_run.command_config.cpus,\n                mem=mock_k8s_action_run.command_config.mem,\n                disk=mock_k8s_action_run.command_config.disk,\n                docker_image=mock_k8s_action_run.command_config.docker_image,\n                env=mock.ANY,\n                secret_env=mock_k8s_action_run.command_config.secret_env,\n                field_selector_env=mock_k8s_action_run.command_config.field_selector_env,\n                serializer=serializer,\n                volumes=mock_k8s_action_run.command_config.extra_volumes,\n                secret_volumes=mock_k8s_action_run.command_config.secret_volumes,\n                projected_sa_volumes=mock_k8s_action_run.command_config.projected_sa_volumes,\n                cap_add=mock_k8s_action_run.command_config.cap_add,\n                cap_drop=mock_k8s_action_run.command_config.cap_drop,\n                task_id=last_attempt.kubernetes_task_id,\n                node_selectors=mock_k8s_action_run.command_config.node_selectors,\n                node_affinities=mock_k8s_action_run.command_config.node_affinities,\n                topology_spread_constraints=mock_k8s_action_run.command_config.topology_spread_constraints,\n                pod_labels={\n                    \"tron.yelp.com/run_num\": \"42\",\n                    \"tron.yelp.com/attempt_number\": \"0\",\n                },\n                pod_annotations=mock_k8s_action_run.command_config.annotations,\n                service_account_name=mock_k8s_action_run.command_config.service_account_name,\n                ports=mock_k8s_action_run.command_config.ports,\n            )\n            task = mock_get_cluster.return_value.create_task.return_value\n            mock_get_cluster.return_value.recover.assert_called_once_with(task)\n            mock_watch.assert_called_once_with(task)\n\n        assert mock_k8s_action_run.is_running\n        assert mock_k8s_action_run.end_time is None\n        assert mock_k8s_action_run.exit_status is None\n        assert last_attempt.end_time is None\n        assert last_attempt.exit_status is None\n        mock_filehandler.OutputStreamSerializer.assert_called_with(\n        
    mock_k8s_action_run.output_path,\n        )\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_recover_done_no_change(\n        self,\n        mock_cluster_repo,\n        mock_filehandler,\n        mock_k8s_action_run,\n    ):\n        mock_k8s_action_run.machine.state = ActionRun.SUCCEEDED\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"test-kubernetes-task-id\"\n\n        assert not mock_k8s_action_run.recover()\n        assert mock_cluster_repo.get_cluster.call_count != 0\n        assert mock_k8s_action_run.is_succeeded\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_recover_no_k8s_task_id(\n        self,\n        mock_cluster_repo,\n        mock_filehandler,\n        mock_k8s_action_run,\n    ):\n        print(f\"cluster: {type(mock_cluster_repo)} filehand: {type(mock_filehandler)} ar: {type(mock_k8s_action_run)}\")\n        mock_k8s_action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = None\n\n        assert not mock_k8s_action_run.recover()\n        assert mock_k8s_action_run.is_unknown\n        assert mock_k8s_action_run.end_time is not None\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_recover_task_none(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run):\n        mock_k8s_action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"test-kubernetes-task-id\"\n        # Task is None e.g. 
if Kubernetes is disabled\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        mock_get_cluster.return_value.create_task.return_value = None\n        assert not mock_k8s_action_run.recover()\n\n        mock_get_cluster.assert_called_once_with()\n        assert mock_k8s_action_run.is_unknown\n        assert mock_get_cluster.return_value.recover.call_count == 0\n        assert mock_k8s_action_run.end_time is not None\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_kill_task_k8s(self, mock_cluster_repo, mock_k8s_action_run):\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"fake_task_id\"\n        mock_k8s_action_run.machine.state = ActionRun.RUNNING\n\n        mock_k8s_action_run.kill()\n        mock_get_cluster.return_value.kill.assert_called_once_with(last_attempt.kubernetes_task_id)\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_kill_task_no_task_id_k8s(self, mock_cluster_repo, mock_k8s_action_run):\n        mock_k8s_action_run.machine.state = ActionRun.RUNNING\n        mock_k8s_action_run.create_attempt()\n        error_message = mock_k8s_action_run.kill()\n        assert error_message == \"Error: Can't find task id for the action.\"\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_stop_task_k8s(self, mock_cluster_repo, mock_k8s_action_run):\n        mock_get_cluster = mock_cluster_repo.get_cluster\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"fake_task_id\"\n        mock_k8s_action_run.machine.state = ActionRun.RUNNING\n\n        mock_k8s_action_run.stop()\n        mock_get_cluster.return_value.kill.assert_called_once_with(last_attempt.kubernetes_task_id)\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_stop_task_no_task_id_k8s(self, mock_cluster_repo, mock_k8s_action_run):\n        mock_k8s_action_run.machine.state = ActionRun.RUNNING\n        mock_k8s_action_run.create_attempt()\n        error_message = mock_k8s_action_run.stop()\n        assert error_message == \"Error: Can't find task id for the action.\"\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_non_retryable_exit(self, mock_cluster_repo, mock_k8s_action_run):\n\n        mock_cluster = mock.Mock()\n        mock_cluster.non_retryable_exit_codes = [13]\n        mock_cluster_repo.get_cluster.return_value = mock_cluster\n\n        mock_k8s_action_run.action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            exit_status=13,\n        )\n        mock_k8s_action_run.retries_remaining = 5\n\n        mock_k8s_action_run.machine.transition(\"start\")\n        mock_k8s_action_run.machine.transition(\"started\")\n        assert mock_k8s_action_run.handler(\n            mock_k8s_action_run.action_command,\n            ActionCommand.EXITING,\n        )\n\n        assert mock_k8s_action_run.retries_remaining == 0\n        assert mock_k8s_action_run.is_unknown\n\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_retryable_exit(self, mock_cluster_repo, mock_k8s_action_run):\n\n        mock_cluster = mock.Mock()\n        mock_cluster.non_retryable_exit_codes = [-12]\n\n        mock_cluster_repo.get_cluster.return_value 
= mock_cluster\n\n        mock_k8s_action_run.retries_remaining = 5\n        mock_k8s_action_run.start = mock.Mock()\n\n        mock_k8s_action_run._exit_unsuccessful(13)\n\n        assert mock_k8s_action_run.retries_remaining == 4\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_submit_command_first_attempt_labels(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run):\n        with mock.patch.object(mock_k8s_action_run, \"watch\", autospec=True):\n            new_attempt = mock_k8s_action_run.create_attempt()\n            mock_k8s_action_run.submit_command(new_attempt)\n\n            create_task_kwargs = mock_cluster_repo.get_cluster.return_value.create_task.call_args[1]\n            assert create_task_kwargs[\"pod_labels\"][\"tron.yelp.com/attempt_number\"] == \"0\"\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_submit_command_retry_attempt_labels(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run):\n        mock_k8s_action_run.attempts = [\n            ActionRunAttempt(\n                command_config=mock_k8s_action_run.command_config,\n                rendered_command=\"mock_command\",\n            ),\n            ActionRunAttempt(\n                command_config=mock_k8s_action_run.command_config,\n                rendered_command=\"mock_command\",\n            ),\n        ]\n        with mock.patch.object(mock_k8s_action_run, \"watch\", autospec=True):\n            new_attempt = mock_k8s_action_run.create_attempt()\n            mock_k8s_action_run.submit_command(new_attempt)\n\n            create_task_kwargs = mock_cluster_repo.get_cluster.return_value.create_task.call_args[1]\n            assert create_task_kwargs[\"pod_labels\"][\"tron.yelp.com/attempt_number\"] == \"2\"\n\n    @mock.patch(\"tron.core.actionrun.filehandler\", autospec=True)\n    @mock.patch(\"tron.core.actionrun.KubernetesClusterRepository\", autospec=True)\n    def test_recover_retry_attempt_labels(self, mock_cluster_repo, mock_filehandler, mock_k8s_action_run):\n        mock_k8s_action_run.attempts = [\n            ActionRunAttempt(\n                command_config=mock_k8s_action_run.command_config,\n                rendered_command=\"mock_command\",\n            ),\n            ActionRunAttempt(\n                command_config=mock_k8s_action_run.command_config,\n                rendered_command=\"mock_command\",\n            ),\n        ]\n        mock_k8s_action_run.machine.state = ActionRun.UNKNOWN\n        last_attempt = mock_k8s_action_run.create_attempt()\n        last_attempt.kubernetes_task_id = \"test-k8s-task-id\"\n        with mock.patch.object(mock_k8s_action_run, \"watch\", autospec=True):\n            assert mock_k8s_action_run.recover()\n\n            create_task_kwargs = mock_cluster_repo.get_cluster.return_value.create_task.call_args[1]\n            assert create_task_kwargs[\"pod_labels\"][\"tron.yelp.com/attempt_number\"] == \"2\"\n"
  },
  {
    "path": "tests/core/job_collection_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.testingutils import autospec_method\nfrom tron.core.job import Job\nfrom tron.core.job_collection import JobCollection\nfrom tron.core.job_scheduler import JobScheduler\nfrom tron.core.job_scheduler import JobSchedulerFactory\n\n\nclass TestJobCollection(TestCase):\n    @setup\n    def setup_collection(self):\n        self.collection = JobCollection()\n\n    def test_update_from_config(self):\n        autospec_method(self.collection.jobs.filter_by_name)\n        autospec_method(self.collection.add)\n        factory = mock.create_autospec(JobSchedulerFactory)\n        job_configs = {\"a\": mock.Mock(), \"b\": mock.Mock()}\n        result = self.collection.update_from_config(job_configs, factory, True)\n        result = list(result)\n        assert len(result) == len(job_configs)\n        self.collection.jobs.filter_by_name.assert_called_with(job_configs)\n        expected_calls = [mock.call(v) for v in job_configs.values()]\n        assert factory.build.call_args_list == expected_calls\n        assert self.collection.add.call_count == 2\n        job_schedulers = [call[1][0] for call in self.collection.add.mock_calls[::2]]\n        for job_scheduler in job_schedulers:\n            job_scheduler.schedule.assert_called_with()\n            job_scheduler.get_job.assert_called_with()\n\n    def test_update_from_config_reconfigure_one_namespace(self):\n        autospec_method(self.collection.jobs.filter_by_name)\n        autospec_method(self.collection.add)\n        factory = mock.create_autospec(JobSchedulerFactory)\n        job_configs = {\n            \"a.foo\": mock.Mock(namespace=\"a\"),\n            \"b.foo\": mock.Mock(namespace=\"b\"),\n        }\n        result = self.collection.update_from_config(job_configs, factory, True, namespace_to_reconfigure=\"a\")\n        result = list(result)\n        assert len(result) == 1\n        self.collection.jobs.filter_by_name.assert_called_with(job_configs)\n        expected_calls = [mock.call(job_configs[\"a.foo\"])]\n        assert factory.build.call_args_list == expected_calls\n        assert self.collection.add.call_count == 1\n        job_schedulers = [call[1][0] for call in self.collection.add.mock_calls[::2]]\n        for job_scheduler in job_schedulers:\n            job_scheduler.schedule.assert_called_with()\n            job_scheduler.get_job.assert_called_with()\n\n    def test_move_running_job(self):\n        with mock.patch(\n            \"tron.core.job_collection.JobCollection.get_by_name\",\n            autospec=None,\n        ) as mock_scheduler:\n            mock_scheduler.return_value.get_job.return_value.status = Job.STATUS_RUNNING\n            result = self.collection.move(\"old.test\", \"new.test\")\n            assert \"Job is still running.\" in result\n\n    def test_move(self):\n        with mock.patch(\n            \"tron.core.job_collection.JobCollection.get_by_name\",\n            autospec=None,\n        ) as mock_scheduler:\n            mock_scheduler.return_value.get_job.return_value.status = Job.STATUS_ENABLED\n            mock_scheduler.get_name.return_value = \"old.test\"\n            self.collection.add(mock_scheduler)\n            result = self.collection.move(\"old.test\", \"new.test\")\n            assert \"succeeded\" in result\n\n    def test_update(self):\n        mock_scheduler = mock.create_autospec(JobScheduler)\n        existing_scheduler = mock.create_autospec(JobScheduler)\n        
autospec_method(\n            self.collection.get_by_name,\n            return_value=existing_scheduler,\n        )\n        assert self.collection.update(mock_scheduler)\n        self.collection.get_by_name.assert_called_with(\n            mock_scheduler.get_name(),\n        )\n        existing_scheduler.update_from_job_scheduler.assert_called_with(\n            mock_scheduler,\n        )\n        existing_scheduler.schedule_reconfigured.assert_called_with()\n"
  },
  {
    "path": "tests/core/job_scheduler_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests import testingutils\nfrom tests.assertions import assert_length\nfrom tron import actioncommand\nfrom tron.core import job\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.job_scheduler import JobScheduler\nfrom tron.core.job_scheduler import JobSchedulerFactory\n\n\nclass TestJobSchedulerGetRunsToSchedule(TestCase):\n    @setup\n    def setup_job(self):\n        self.scheduler = mock.Mock()\n        run_collection = mock.Mock(has_pending=False)\n        node_pool = mock.Mock()\n        self.job = job.Job(\n            \"jobname\",\n            self.scheduler,\n            run_collection=run_collection,\n            node_pool=node_pool,\n        )\n        self.job_scheduler = JobScheduler(self.job)\n        self.job.runs.get_pending.return_value = False\n        self.scheduler.queue_overlapping = True\n\n    def test_get_runs_to_schedule_with_pending(self):\n        self.scheduler.queue_overlapping = False\n        self.job.runs.has_pending = True\n        job_runs = self.job_scheduler.get_runs_to_schedule(None)\n        assert_length(job_runs, 0)\n\n    def test_get_runs_to_schedule_guess(self):\n        job_runs = list(self.job_scheduler.get_runs_to_schedule(None))\n\n        assert self.job.scheduler.next_run_time.call_args_list == [mock.call(None)]\n        assert_length(job_runs, 1)\n        # This should return a JobRun which has the job attached as an observer\n        job_runs[0].attach.assert_any_call(True, self.job)\n\n    def test_get_runs_to_schedule_given(self):\n        now = datetime.datetime.now()\n        job_runs = list(self.job_scheduler.get_runs_to_schedule(now))\n\n        assert self.job.scheduler.next_run_time.call_count == 0\n        assert_length(job_runs, 1)\n        # This should return a JobRun which has the job attached as an observer\n        job_runs[0].attach.assert_any_call(True, self.job)\n\n\nclass JobSchedulerManualStartTestCase(testingutils.MockTimeTestCase):\n\n    now = datetime.datetime.now()\n\n    @setup\n    def setup_job(self):\n        self.scheduler = mock.Mock()\n        run_collection = mock.Mock()\n        node_pool = mock.Mock()\n        self.job = job.Job(\n            \"jobname\",\n            self.scheduler,\n            run_collection=run_collection,\n            node_pool=node_pool,\n        )\n        self.job_scheduler = JobScheduler(self.job)\n        self.manual_run = mock.Mock()\n        self.job.build_new_runs = mock.Mock(return_value=[self.manual_run])\n\n    def test_manual_start(self):\n        manual_runs = self.job_scheduler.manual_start()\n\n        self.job.build_new_runs.assert_called_with(self.now, manual=True)\n        assert_length(manual_runs, 1)\n        self.manual_run.start.assert_called_once_with()\n\n    def test_manual_start_default_with_timezone(self):\n        self.job.time_zone = mock.Mock()\n        with mock.patch(\n            \"tron.core.job_scheduler.timeutils.current_time\",\n            autospec=True,\n        ) as mock_current:\n            manual_runs = self.job_scheduler.manual_start()\n            mock_current.assert_called_with(tz=self.job.time_zone)\n            self.job.build_new_runs.assert_called_with(\n                mock_current.return_value,\n                manual=True,\n            )\n        assert_length(manual_runs, 1)\n        self.manual_run.start.assert_called_once_with()\n\n    def 
test_manual_start_with_run_time(self):\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        manual_runs = self.job_scheduler.manual_start(run_time)\n\n        self.job.build_new_runs.assert_called_with(run_time, manual=True)\n        assert_length(manual_runs, 1)\n        self.manual_run.start.assert_called_once_with()\n\n\nclass TestJobSchedulerSchedule(TestCase):\n    @setup\n    def setup_job(self):\n        self.scheduler = mock.Mock(autospec=True)\n        self.scheduler.next_run_time.return_value = 0\n        mock_run = mock.Mock()\n        mock_run.seconds_until_run_time.return_value = 0\n        run_collection = mock.Mock(\n            has_pending=False,\n            autospec=True,\n            return_value=[mock_run],\n        )\n        mock_build_new_run = mock.Mock()\n        run_collection.build_new_run.return_value = mock_build_new_run\n        mock_build_new_run.seconds_until_run_time.return_value = 0\n        node_pool = mock.Mock()\n        self.job = job.Job(\n            name=\"jobname\",\n            scheduler=self.scheduler,\n            run_collection=run_collection,\n            node_pool=node_pool,\n        )\n        self.job_scheduler = JobScheduler(self.job)\n        self.original_build_new_runs = self.job.build_new_runs\n        self.job.build_new_runs = mock.Mock(return_value=[mock_run])\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_enable(self, reactor):\n        self.job.enabled = False\n        self.job_scheduler.enable()\n        assert self.job.enabled\n        assert_length(reactor.callLater.mock_calls, 1)\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_enable_noop(self, reactor):\n        self.job.enabled = True\n        self.job_scheduler.enable()\n        assert self.job.enabled\n        assert_length(reactor.callLater.mock_calls, 0)\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_schedule(self, reactor):\n        self.job.build_new_runs = self.original_build_new_runs\n        self.job_scheduler.schedule()\n        assert reactor.callLater.call_count == 1\n\n        # Args passed to callLater\n        call_args = reactor.callLater.mock_calls[0][1]\n        assert_equal(call_args[1], self.job_scheduler.run_job)\n        secs = call_args[0]\n        run = call_args[2]\n\n        run.seconds_until_run_time.assert_called_with()\n        # Assert that we use the seconds we get from the run to schedule\n        assert_equal(run.seconds_until_run_time.return_value, secs)\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_schedule_disabled_job(self, reactor):\n        self.job.enabled = False\n        self.job_scheduler.schedule()\n        assert reactor.callLater.call_count == 0\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_handle_job_events_no_schedule_on_complete(self, reactor):\n        self.job_scheduler.run_job = mock.Mock()\n        self.job.scheduler.schedule_on_complete = False\n        queued_job_run = mock.Mock()\n        self.job.runs.get_first_queued = lambda: queued_job_run\n        self.job_scheduler.handle_job_events(self.job, job.Job.NOTIFY_RUN_DONE)\n        reactor.callLater.assert_any_call(\n            0,\n            self.job_scheduler.run_job,\n            queued_job_run,\n            run_queued=True,\n        )\n\n    def test_handle_job_events_schedule_on_complete(self):\n        self.job_scheduler.schedule = mock.Mock()\n        
self.job.scheduler.schedule_on_complete = True\n        self.job_scheduler.handle_job_events(self.job, job.Job.NOTIFY_RUN_DONE)\n        self.job_scheduler.schedule.assert_called_with()\n\n    def test_handler_unknown_event(self):\n        self.job.runs.get_runs_by_state = mock.Mock()\n        self.job_scheduler.handler(self.job, \"some_other_event\")\n        self.job.runs.get_runs_by_state.assert_not_called()\n\n    def test_handler_no_queued(self):\n        self.job_scheduler.run_job = mock.Mock()\n\n        def get_queued(state):\n            if state == ActionRun.QUEUED:\n                return []\n\n        self.job.runs.get_runs_by_state = get_queued\n        self.job_scheduler.handler(self.job, job.Job.NOTIFY_RUN_DONE)\n        self.job_scheduler.run_job.assert_not_called()\n\n    @mock.patch(\"tron.core.job_scheduler.reactor\", autospec=True)\n    def test_run_queue_schedule(self, reactor):\n        with mock.patch.object(\n            self.job_scheduler,\n            \"schedule\",\n        ) as mock_schedule:\n            self.job_scheduler.run_job = mock.Mock()\n            self.job.scheduler.schedule_on_complete = False\n            queued_job_run = mock.Mock()\n            self.job.runs.get_first_queued = lambda: queued_job_run\n            self.job_scheduler.run_queue_schedule()\n            reactor.callLater.assert_called_once_with(\n                0,\n                self.job_scheduler.run_job,\n                queued_job_run,\n                run_queued=True,\n            )\n            mock_schedule.assert_called_once_with()\n\n\nclass TestJobSchedulerOther(TestCase):\n    \"\"\"Test other JobScheduler functions\"\"\"\n\n    def _make_job_scheduler(self, job_name, enabled=True):\n        scheduler = mock.Mock()\n        run_collection = mock.Mock()\n        node_pool = mock.Mock()\n        new_job = job.Job(\n            job_name,\n            scheduler,\n            run_collection=run_collection,\n            node_pool=node_pool,\n            enabled=enabled,\n        )\n        return new_job, JobScheduler(new_job)\n\n    @setup\n    def setup_job(self):\n        self.job, self.job_scheduler = self._make_job_scheduler(\n            \"jobname\",\n            True,\n        )\n\n    def test_disable(self):\n        self.job.runs.cancel_pending = mock.Mock()\n\n        self.job_scheduler.disable()\n\n        assert not self.job.enabled\n        assert self.job.runs.cancel_pending.call_count == 1\n\n    def test_update_from_job_scheduler_disable(self):\n        new_job, new_job_scheduler = self._make_job_scheduler(\"jobname\", False)\n        self.job.update_from_job = mock.Mock()\n        self.job_scheduler.disable = mock.Mock()\n\n        self.job_scheduler.update_from_job_scheduler(new_job_scheduler)\n\n        assert self.job.update_from_job.call_args == mock.call(\n            new_job_scheduler.get_job(),\n        )\n        assert self.job_scheduler.disable.call_count == 1\n\n    def test_update_from_job_scheduler_enable(self):\n        new_job, new_job_scheduler = self._make_job_scheduler(\"jobname\", True)\n        self.job.update_from_job = mock.Mock()\n        self.job.enabled = False\n        self.job.config_enabled = False\n        self.job_scheduler.enable = mock.Mock()\n\n        self.job_scheduler.update_from_job_scheduler(new_job_scheduler)\n\n        assert self.job.update_from_job.call_args == mock.call(\n            new_job_scheduler.get_job(),\n        )\n        assert self.job_scheduler.enable.call_count == 1\n\n    def 
test_update_from_job_scheduler_no_config_change(self):\n        new_job, new_job_scheduler = self._make_job_scheduler(\"jobname\", True)\n        self.job.enabled = False\n        self.job.update_from_job = mock.Mock()\n        self.job_scheduler.enable = mock.Mock()\n        self.job_scheduler.disable = mock.Mock()\n\n        self.job_scheduler.update_from_job_scheduler(new_job_scheduler)\n\n        assert self.job.update_from_job.call_args == mock.call(\n            new_job_scheduler.get_job(),\n        )\n        assert self.job_scheduler.enable.call_count == 0\n        assert self.job_scheduler.disable.call_count == 0\n        assert self.job.config_enabled == new_job.config_enabled\n        assert not self.job.enabled\n\n\nclass TestJobSchedulerFactory(TestCase):\n    @setup\n    def setup_factory(self):\n        self.context = mock.Mock()\n        self.output_stream_dir = mock.Mock()\n        self.time_zone = mock.Mock()\n        self.action_runner = mock.create_autospec(\n            actioncommand.SubprocessActionRunnerFactory,\n        )\n        self.factory = JobSchedulerFactory(\n            self.context,\n            self.output_stream_dir,\n            self.time_zone,\n            self.action_runner,\n            mock.Mock(),\n        )\n\n    def test_build(self):\n        config = mock.Mock()\n        with mock.patch(\n            \"tron.core.job_scheduler.Job\",\n            autospec=True,\n        ) as mock_job:\n            job_scheduler = self.factory.build(config)\n            _, kwargs = mock_job.from_config.call_args\n            assert_equal(kwargs[\"job_config\"], config)\n            assert_equal(\n                job_scheduler.get_job(),\n                mock_job.from_config.return_value,\n            )\n            assert_equal(kwargs[\"parent_context\"], self.context)\n            assert_equal(kwargs[\"output_path\"].base, self.output_stream_dir)\n            assert_equal(kwargs[\"action_runner\"], self.action_runner)\n"
  },
  {
    "path": "tests/core/job_test.py",
    "content": "import collections\nimport datetime\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_not_equal\nfrom tests.assertions import assert_call\nfrom tests.assertions import assert_length\nfrom tests.testingutils import autospec_method\nfrom tron import actioncommand\nfrom tron import node\nfrom tron.core import job\nfrom tron.core import jobrun\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.job_scheduler import JobScheduler\n\n\n@pytest.fixture\ndef mock_node_repo():\n    with mock.patch(\n        \"tron.core.job.node.NodePoolRepository\",\n        autospec=True,\n    ) as mock_node_repo:\n        yield mock_node_repo\n\n\n@pytest.fixture\ndef mock_job(mock_node_repo):\n    action_graph = mock.Mock(names=lambda: [\"one\", \"two\"])\n    scheduler = mock.Mock()\n    run_collection = MagicMock()\n    nodes = mock.create_autospec(node.NodePool)\n    mock_job = job.Job(\n        \"jobname\",\n        scheduler,\n        run_collection=run_collection,\n        action_graph=action_graph,\n        node_pool=nodes,\n        action_runner=actioncommand.NoActionRunnerFactory,\n    )\n    yield mock_job\n\n\nclass TestJob:\n    @pytest.fixture(autouse=True)\n    def setup_job(self, mock_job):\n        self.job = mock_job\n        autospec_method(self.job.notify)\n        autospec_method(self.job.watch)\n        yield\n\n    def test__init__(self):\n        assert str(self.job.output_path).endswith(self.job.name)\n\n    def test_from_config(self, mock_node_repo):\n        action = mock.MagicMock(\n            name=\"first\",\n            command=\"doit\",\n            node=None,\n            requires=[],\n        )\n        job_config = mock.Mock(\n            node=\"thenodepool\",\n            monitoring={\n                \"team\": \"foo\",\n                \"page\": True,\n            },\n            all_nodes=False,\n            queueing=True,\n            enabled=True,\n            run_limit=20,\n            actions={action.name: action},\n            cleanup_action=None,\n        )\n        job_config.name = \"ajob\"  # set this after mock creation to give it a \"real\" name attribute\n        scheduler = \"scheduler_token\"\n        parent_context = \"parent_context_token\"\n        output_path = [\"base_path\"]\n        mock_action_runner = mock.create_autospec(\n            actioncommand.SubprocessActionRunnerFactory,\n        )\n        new_job = job.Job.from_config(\n            job_config,\n            scheduler,\n            parent_context=parent_context,\n            output_path=output_path,\n            action_runner=mock_action_runner,\n            action_graph=mock.Mock(),\n        )\n\n        assert_equal(new_job.scheduler, scheduler)\n        assert_equal(new_job.context.next, parent_context)\n        mock_node_repo.get_instance().get_by_name.assert_called_with(\n            job_config.node,\n        )\n        assert_equal(new_job.enabled, True)\n        assert_equal(new_job.get_monitoring()[\"team\"], \"foo\")\n        assert new_job.action_graph\n\n    def test_update_from_job(self):\n        action_runner = mock.Mock()\n        other_job = job.Job(\n            \"otherjob\",\n            \"scheduler\",\n            action_runner=action_runner,\n            run_limit=10,\n        )\n        self.job.update_from_job(other_job)\n        assert_equal(self.job.name, \"otherjob\")\n        assert_equal(self.job.scheduler, \"scheduler\")\n        
assert_equal(self.job, other_job)\n        assert_equal(self.job.runs.run_limit, 10)\n\n    def test_status_disabled(self):\n        self.job.enabled = False\n        assert_equal(self.job.status, self.job.STATUS_DISABLED)\n\n    def test_status_enabled(self):\n        self.job.runs.get_run_by_state = lambda state: MagicMock() if state == ActionRun.SCHEDULED else None\n        self.job.runs.get_active.return_value = []\n        assert_equal(self.job.status, self.job.STATUS_ENABLED)\n\n    def test_status_running(self):\n        self.job.runs.get_active.return_value = [MagicMock()]\n        assert_equal(self.job.status, self.job.STATUS_RUNNING)\n\n    def test_status_unknown(self):\n        self.job.runs.get_active.return_value = []\n        self.job.runs.get_run_by_state = lambda s: None\n        assert_equal(self.job.status, self.job.STATUS_UNKNOWN)\n\n    def test_state_data(self):\n        state_data = self.job.state_data\n        assert_equal(state_data[\"run_nums\"], self.job.runs.get_run_nums.return_value)\n        assert state_data[\"enabled\"]\n\n    def test_get_job_runs_from_state(self):\n        job_runs = [\n            dict(\n                run_num=i,\n                job_name=\"thename\",\n                run_time=\"sometime\",\n                start_time=\"start_time\",\n                end_time=\"sometime\",\n                cleanup_run=None,\n                runs=[],\n            )\n            for i in range(0, 3)\n        ]\n        state_data = {\"enabled\": False, \"runs\": job_runs}\n        self.job.get_job_runs_from_state(state_data)\n        assert not self.job.enabled\n\n    def test_build_new_runs(self):\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        runs = list(self.job.build_new_runs(run_time))\n\n        self.job.node_pool.next.assert_called_with()\n        node = self.job.node_pool.next.return_value\n        assert_call(\n            self.job.runs.build_new_run,\n            0,\n            self.job,\n            run_time,\n            node,\n            manual=False,\n        )\n        assert_length(runs, 1)\n        self.job.watch.assert_called_with(runs[0])\n\n    def test_build_new_runs_all_nodes(self):\n        self.job.all_nodes = True\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        node_count = 2\n        self.job.node_pool.nodes = [mock.Mock()] * node_count\n        runs = list(self.job.build_new_runs(run_time))\n\n        assert_length(runs, node_count)\n        for i in range(len(runs)):\n            node = self.job.node_pool.nodes[i]\n            assert_call(\n                self.job.runs.build_new_run,\n                i,\n                self.job,\n                run_time,\n                node,\n                manual=False,\n            )\n\n        calls = []\n        for r in runs:\n            calls.extend(r.mock_calls)\n        self.job.watch.assert_has_calls(calls)\n\n    def test_build_new_runs_manual(self):\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        runs = list(self.job.build_new_runs(run_time, manual=True))\n\n        self.job.node_pool.next.assert_called_with()\n        node = self.job.node_pool.next.return_value\n        assert_length(runs, 1)\n        assert_call(\n            self.job.runs.build_new_run,\n            0,\n            self.job,\n            run_time,\n            node,\n            manual=True,\n        )\n        self.job.watch.assert_called_with(runs[0])\n\n    def test_handler(self):\n        self.job.handler(None, 
jobrun.JobRun.NOTIFY_STATE_CHANGED)\n        self.job.notify.assert_called_with(self.job.NOTIFY_STATE_CHANGE)\n\n        self.job.handler(None, jobrun.JobRun.NOTIFY_DONE)\n        self.job.notify.assert_called_with(self.job.NOTIFY_RUN_DONE)\n\n    def test__eq__(self):\n        other_job = job.Job(\"jobname\", \"scheduler\", run_collection=MagicMock())\n        assert not self.job == other_job\n        other_job.update_from_job(self.job)\n        assert_equal(self.job, other_job)\n\n    def test__ne__(self):\n        other_job = job.Job(\"jobname\", \"scheduler\", run_collection=MagicMock())\n        assert self.job != other_job\n        other_job.update_from_job(self.job)\n        assert not self.job != other_job\n\n    def test__eq__true(self):\n        action_runner = mock.Mock()\n        first = job.Job(\"jobname\", \"scheduler\", action_runner=action_runner)\n        second = job.Job(\"jobname\", \"scheduler\", action_runner=action_runner)\n        assert_equal(first, second)\n\n    def test__eq__false(self):\n        first = job.Job(\"jobname\", \"scheduler\", action_runner=mock.Mock())\n        second = job.Job(\"jobname\", \"scheduler\", action_runner=mock.Mock())\n        assert_not_equal(first, second)\n\n\ndef test_job_watch_notifies_about_runs(mock_job):\n    # Separate from the above tests because we don't want\n    # watch to be mocked here.\n    new_run = jobrun.JobRun(\n        job_name=\"test\",\n        run_num=1,\n        run_time=\"some_time\",\n        node=\"node\",\n    )\n    with mock.patch.object(mock_job, \"handler\",) as mock_handler, mock.patch.object(\n        mock_job,\n        \"notify\",\n    ) as mock_notify:\n        mock_job.watch(new_run)\n\n        # Make sure that the job is still watching correctly\n        # by checking it handles events\n        new_run.notify(\"test_event\", \"test_data\")\n        assert mock_handler.call_args_list == [mock.call(new_run, \"test_event\", \"test_data\")]\n\n        # Check that the job notifies its watchers about a new run\n        assert mock_notify.call_args_list == [mock.call(job.Job.NOTIFY_NEW_RUN, event_data=new_run)]\n\n\nclass TestJobScheduler:\n    @pytest.fixture(autouse=True)\n    def setup_job(self):\n        mock_graph = mock.Mock(autospec=True)\n        mock_graph.get_action_map.return_value = {}\n        mock_graph.action_map = {}\n        self.job = mock.Mock(autospec=True)\n        self.job.allow_overlap = False\n        self.job.max_runtime = datetime.timedelta(days=1)\n        self.job_scheduler = JobScheduler(job=self.job)\n\n    def test_restore_state_sets_job_runs(self):\n        self.job.enabled = False\n        mock_runs = [mock.Mock(), mock.Mock()]\n        mock_action_runner = mock.Mock()\n        job_state_data = {\"runs\": mock_runs, \"enabled\": True}\n\n        self.job_scheduler._set_callback = lambda x: x\n\n        self.job.runs.runs = collections.deque()\n        self.job.runs.get_scheduled.return_value = [mock.Mock()]\n        self.job.get_job_runs_from_state.return_value = mock_runs\n\n        with mock.patch(\n            \"tron.core.job_scheduler.recovery.launch_recovery_actionruns_for_job_runs\",\n            autospec=True,\n        ) as mock_launch_recovery:\n            mock_launch_recovery.return_value = mock.Mock(autospec=True)\n            self.job_scheduler.restore_state(\n                job_state_data,\n                mock_action_runner,\n            )\n            assert self.job.runs.runs == collections.deque(mock_runs)\n            
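# restoring state should launch recovery for the restored runs and watch each of them\n            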
mock_launch_recovery.assert_called_once_with(\n                job_runs=mock_runs,\n                master_action_runner=mock_action_runner,\n            )\n            calls = [mock.call(mock_runs[i]) for i in range(0, len(mock_runs))]\n            self.job.watch.assert_has_calls(calls)\n\n    def test_create_and_schedule_runs_specific_time(self):\n        self.job_scheduler.get_runs_to_schedule = mock.Mock(return_value=[mock.Mock()])\n        self.job_scheduler._set_callback = mock.Mock()\n        self.job_scheduler.create_and_schedule_runs(next_run_time=\"a_datetime\")\n        assert self.job_scheduler.get_runs_to_schedule.call_args_list == [mock.call(\"a_datetime\")]\n\n    def test_create_and_schedule_runs_guess(self):\n        self.job_scheduler.get_runs_to_schedule = mock.Mock(return_value=[mock.Mock()])\n        self.job_scheduler._set_callback = mock.Mock()\n        self.job_scheduler.create_and_schedule_runs(next_run_time=None)\n        assert self.job_scheduler.get_runs_to_schedule.call_args_list == [mock.call(None)]\n\n    def test_disable(self):\n        self.job_scheduler.disable()\n        assert self.job_scheduler.job.enabled is False\n        self.job_scheduler.job.runs.cancel_pending.assert_called_once()\n\n    def test_schedule_reconfigured(self):\n        pending_run = mock.Mock()\n        pending_run.run_time = \"a_run_time\"\n        self.job.runs.get_pending.return_value = [pending_run]\n        self.job_scheduler.create_and_schedule_runs = mock.Mock()\n\n        self.job_scheduler.schedule_reconfigured()\n\n        assert self.job.runs.remove_pending.call_count == 1\n        assert self.job_scheduler.create_and_schedule_runs.call_args_list == [\n            mock.call(\n                next_run_time=\"a_run_time\",\n            ),\n        ]\n\n    def test_schedule(self):\n        self.job.enabled = True\n        last_run = mock.Mock()\n        last_run.run_time = \"a_run_time\"\n        self.job.runs.get_newest = mock.Mock(return_value=last_run)\n        self.job_scheduler.create_and_schedule_runs = mock.Mock()\n\n        self.job_scheduler.schedule()\n\n        self.job.scheduler.next_run_time.assert_called_once_with(\"a_run_time\")\n        assert self.job_scheduler.create_and_schedule_runs.call_args_list == [\n            mock.call(next_run_time=self.job.scheduler.next_run_time.return_value),\n        ]\n\n    def test_run_job(self):\n        self.job_scheduler.schedule = mock.Mock(autospec=True)\n        self.job.scheduler.schedule_on_complete = False\n        self.job.runs.get_active = lambda n: []\n        job_run = mock.Mock(autospec=True)\n        job_run.is_cancelled = False\n        self.job_scheduler.run_job(job_run)\n        job_run.start.assert_called_once()\n        self.job_scheduler.schedule.assert_called_once()\n\n    def test_run_job_job_disabled(self):\n        self.job_scheduler.schedule = MagicMock()\n        job_run = MagicMock()\n        self.job.enabled = False\n        self.job_scheduler.run_job(job_run)\n        assert_length(self.job_scheduler.schedule.mock_calls, 0)\n        assert_length(job_run.start.mock_calls, 0)\n        assert_length(job_run.cancel.mock_calls, 1)\n\n    def test_run_job_cancelled(self):\n        self.job_scheduler.schedule = MagicMock()\n        job_run = MagicMock(is_scheduled=False)\n        self.job_scheduler.run_job(job_run)\n        assert_length(job_run.start.mock_calls, 0)\n        assert_length(self.job_scheduler.schedule.mock_calls, 1)\n\n    def test_run_job_already_running_queuing(self):\n        
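# an active run plus the job's (truthy) queueing attribute means run_job queues this run instead of starting it\n        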
self.job_scheduler.schedule = mock.Mock(autospec=True)\n        self.job.runs.get_active = lambda s: [mock.Mock(autospec=True)]\n        job_run = mock.Mock(autospec=True)\n        job_run.is_cancelled = False\n        self.job_scheduler.run_job(job_run)\n        assert not job_run.start.called\n        job_run.queue.assert_called_once()\n        assert not self.job_scheduler.schedule.called\n\n    def test_run_job_already_running_cancel(self):\n        self.job_scheduler.schedule = mock.Mock(autospec=True)\n        self.job.runs.get_active = lambda s: [mock.Mock(autospec=True)]\n        self.job.queueing = False\n        job_run = mock.Mock(autospec=True)\n        job_run.is_cancelled = False\n        self.job_scheduler.run_job(job_run)\n        assert not job_run.start.called\n        job_run.cancel.assert_called_once()\n        self.job_scheduler.schedule.assert_called_once()\n\n    def test_run_job_already_running_allow_overlap(self):\n        self.job_scheduler.schedule = mock.Mock()\n        self.job.runs.get_active = lambda s: [mock.Mock()]\n        self.job.allow_overlap = True\n        job_run = MagicMock(is_cancelled=False)\n        self.job_scheduler.run_job(job_run)\n        job_run.start.assert_called_with()\n\n    def test_run_job_has_starting_queueing(self):\n        self.job_scheduler.schedule = mock.Mock(autospec=True)\n        self.job.runs.get_active = lambda s: [mock.Mock(autospec=True)]\n        job_run = mock.Mock(autospec=True)\n        job_run.is_cancelled = False\n        self.job_scheduler.run_job(job_run)\n        assert not job_run.start.called\n        job_run.queue.assert_called_once()\n        assert not self.job_scheduler.schedule.called\n\n    def test_run_job_schedule_on_complete(self):\n        self.job_scheduler.schedule = MagicMock()\n        self.job.scheduler.schedule_on_complete = True\n        self.job.runs.get_active = lambda s: []\n        job_run = MagicMock(is_cancelled=False)\n        self.job_scheduler.run_job(job_run)\n        assert_length(job_run.start.mock_calls, 1)\n        assert_length(self.job_scheduler.schedule.mock_calls, 0)\n"
  },
  {
    "path": "tests/core/jobgraph_test.py",
    "content": "from unittest import mock\n\nimport pytest\n\nfrom tron.config.schema import ConfigAction\nfrom tron.config.schema import ConfigJob\nfrom tron.core.jobgraph import AdjListEntry\nfrom tron.core.jobgraph import JobGraph\n\n\nMISSING_DEPENDENCY_ERR_MSG = \"\"\"The following actions are dependencies of other actions but missing:\nAction other.job2.action3 is dependency of actions:\n  - MASTER.job3.action5\nPlease check if you have deleted/renamed any of them or their containing jobs.\"\"\"\n\n\ndef _setup_job_graph_config_container():\n    action1 = ConfigAction(\n        name=\"action1\",\n        command=\"do something\",\n    )\n    action2 = ConfigAction(\n        name=\"action2\",\n        command=\"do something\",\n        requires=[\"action1\"],\n    )\n    job1_config = ConfigJob(\n        name=\"job1\",\n        node=\"default\",\n        schedule=mock.Mock(),\n        actions={\"action1\": action1, \"action2\": action2},\n        namespace=\"MASTER\",\n    )\n\n    action3 = ConfigAction(\n        name=\"action3\",\n        command=\"do something\",\n        triggered_by=[\"MASTER.job1.action2.shortdate.{shortdate}\"],\n    )\n    job2_config = ConfigJob(\n        name=\"job1\",\n        node=\"default\",\n        schedule=mock.Mock(),\n        actions={\"action3\": action3},\n        namespace=\"other\",\n    )\n\n    action4 = ConfigAction(\n        name=\"action4\",\n        command=\"do something\",\n    )\n    action5 = ConfigAction(\n        name=\"action5\",\n        command=\"do something\",\n        requires=[\"action4\"],\n        triggered_by=[\"other.job2.action3.shortdate.{shortdate}\"],\n    )\n    job3_config = ConfigJob(\n        name=\"job1\",\n        node=\"default\",\n        schedule=mock.Mock(),\n        actions={\"action4\": action4, \"action5\": action5},\n        namespace=\"MASTER\",\n    )\n    config_container = mock.Mock()\n    config_container.get_jobs.return_value = {\n        \"MASTER.job1\": job1_config,\n        \"other.job2\": job2_config,\n        \"MASTER.job3\": job3_config,\n    }\n    return config_container\n\n\nclass TestJobGraph:\n    def setup_method(self):\n        self.job_graph = JobGraph(_setup_job_graph_config_container(), should_validate_missing_dependency=True)\n\n    def test_job_graph_missing_dependency(self):\n        missing_dependency_config_container = _setup_job_graph_config_container()\n        missing_dependency_config_container.get_jobs.return_value.pop(\"other.job2\")\n        with pytest.raises(ValueError) as e:\n            JobGraph(\n                missing_dependency_config_container,\n                should_validate_missing_dependency=True,\n            )\n        assert str(e.value) == MISSING_DEPENDENCY_ERR_MSG\n\n    def test_job_graph(self):\n        assert sorted(list(self.job_graph.action_map.keys())) == [\n            \"MASTER.job1.action1\",\n            \"MASTER.job1.action2\",\n            \"MASTER.job3.action4\",\n            \"MASTER.job3.action5\",\n            \"other.job2.action3\",\n        ]\n        assert self.job_graph._actions_for_job == {\n            \"MASTER.job1\": [\"MASTER.job1.action1\", \"MASTER.job1.action2\"],\n            \"other.job2\": [\"other.job2.action3\"],\n            \"MASTER.job3\": [\"MASTER.job3.action4\", \"MASTER.job3.action5\"],\n        }\n        assert self.job_graph._adj_list == {\n            \"MASTER.job1.action1\": [AdjListEntry(\"MASTER.job1.action2\", False)],\n            \"MASTER.job1.action2\": [AdjListEntry(\"other.job2.action3\", True)],\n    
        \"other.job2.action3\": [AdjListEntry(\"MASTER.job3.action5\", True)],\n            \"MASTER.job3.action4\": [AdjListEntry(\"MASTER.job3.action5\", False)],\n        }\n        assert self.job_graph._rev_adj_list == {\n            \"MASTER.job1.action1\": [],\n            \"MASTER.job1.action2\": [AdjListEntry(\"MASTER.job1.action1\", False)],\n            \"other.job2.action3\": [AdjListEntry(\"MASTER.job1.action2\", True)],\n            \"MASTER.job3.action4\": [],\n            \"MASTER.job3.action5\": [\n                AdjListEntry(\"MASTER.job3.action4\", False),\n                AdjListEntry(\"other.job2.action3\", True),\n            ],\n        }\n\n    def test_get_action_graph_for_job(self):\n        action_graph_1 = self.job_graph.get_action_graph_for_job(\"MASTER.job1\")\n        assert sorted(action_graph_1.action_map.keys()) == [\n            \"action1\",\n            \"action2\",\n        ]\n        assert action_graph_1.required_actions == {\n            \"action1\": set(),\n            \"action2\": {\"action1\"},\n        }\n        assert action_graph_1.required_triggers == {\n            \"other.job2.action3\": {\"action2\"},\n            \"MASTER.job3.action5\": {\"other.job2.action3\"},\n        }\n\n        action_graph_2 = self.job_graph.get_action_graph_for_job(\"other.job2\")\n        assert sorted(action_graph_2.action_map.keys()) == [\n            \"action3\",\n        ]\n        assert action_graph_2.required_actions == {\n            \"action3\": set(),\n        }\n        assert action_graph_2.required_triggers == {\n            \"action3\": {\"MASTER.job1.action2\"},\n            \"MASTER.job3.action5\": {\"action3\"},\n        }\n\n        action_graph_3 = self.job_graph.get_action_graph_for_job(\"MASTER.job3\")\n        assert sorted(action_graph_3.action_map.keys()) == [\n            \"action4\",\n            \"action5\",\n        ]\n        assert action_graph_3.required_actions == {\n            \"action4\": set(),\n            \"action5\": {\"action4\"},\n        }\n        assert action_graph_3.required_triggers == {\n            \"action5\": {\"other.job2.action3\"},\n            \"other.job2.action3\": {\"MASTER.job1.action2\"},\n        }\n"
  },
  {
    "path": "tests/core/jobrun_test.py",
    "content": "import datetime\nimport json\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport pytz\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_call\nfrom tests.assertions import assert_length\nfrom tests.assertions import assert_raises\nfrom tests.testingutils import autospec_method\nfrom tron import actioncommand\nfrom tron import node\nfrom tron.core import action\nfrom tron.core import actiongraph\nfrom tron.core import actionrun\nfrom tron.core import job\nfrom tron.core import jobrun\nfrom tron.serialize import filehandler\n\n\ndef build_mock_job():\n    action_graph = mock.create_autospec(actiongraph.ActionGraph)\n    action_graph.action_map = {\n        \"foo\": mock.Mock(\n            triggered_by=[],\n            trigger_timeout=datetime.timedelta(days=1),\n        ),\n    }\n    runner = mock.create_autospec(actioncommand.SubprocessActionRunnerFactory)\n    return mock.create_autospec(\n        job.Job,\n        action_graph=action_graph,\n        output_path=mock.Mock(),\n        context=mock.Mock(),\n        action_runner=runner,\n    )\n\n\nclass TestJobRun:\n\n    now = datetime.datetime(2012, 3, 14, 15, 9, 20, tzinfo=None)\n    now_with_tz = datetime.datetime(2012, 3, 14, 15, 9, 20, tzinfo=pytz.utc)\n\n    @setup\n    def setup_jobrun(self):\n        self.job = build_mock_job()\n        self.action_graph = self.job.action_graph\n        self.run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        mock_node = mock.create_autospec(node.Node)\n        self.job_run = jobrun.JobRun(\n            \"jobname\",\n            7,\n            self.run_time,\n            mock_node,\n            action_runs=MagicMock(\n                action_runs_with_cleanup=[],\n                get_startable_action_runs=lambda: [],\n            ),\n        )\n        autospec_method(self.job_run.watch)\n        autospec_method(self.job_run.notify)\n        self.action_run = mock.create_autospec(\n            actionrun.ActionRun,\n            is_skipped=False,\n        )\n\n    def test__init__(self):\n        assert_equal(self.job_run.job_name, \"jobname\")\n        assert_equal(self.job_run.run_time, self.run_time)\n        assert str(self.job_run.output_path).endswith(str(self.job_run.run_num))\n\n    def test_for_job(self):\n        run_num = 6\n        mock_node = mock.create_autospec(node.Node)\n        run = jobrun.JobRun.for_job(\n            self.job,\n            run_num,\n            self.run_time,\n            mock_node,\n            False,\n        )\n\n        assert_equal(run.action_runs.action_graph, self.action_graph)\n        assert_equal(run.job_name, self.job.get_name.return_value)\n        assert_equal(run.run_num, run_num)\n        assert_equal(run.node, mock_node)\n        assert not run.manual\n\n    def test_for_job_manual(self):\n        run_num = 6\n        mock_node = mock.create_autospec(node.Node)\n        run = jobrun.JobRun.for_job(\n            self.job,\n            run_num,\n            self.run_time,\n            mock_node,\n            True,\n        )\n        assert_equal(run.action_runs.action_graph, self.action_graph)\n        assert run.manual\n\n    def test_state_data(self):\n        state_data = self.job_run.state_data\n        assert_equal(state_data[\"run_num\"], 7)\n        assert not state_data[\"manual\"]\n        assert_equal(state_data[\"run_time\"], self.run_time)\n\n 
   def test_set_action_runs(self):\n        self.job_run._action_runs = None\n        count = 2\n        action_runs = [mock.create_autospec(actionrun.ActionRun) for _ in range(count)]\n        run_collection = mock.create_autospec(\n            actionrun.ActionRunCollection,\n            action_runs_with_cleanup=action_runs,\n        )\n        self.job_run._set_action_runs(run_collection)\n        assert_equal(self.job_run.watch.call_count, count)\n\n        expected = [mock.call(run) for run in action_runs]\n        assert_equal(self.job_run.watch.mock_calls, expected)\n        assert_equal(self.job_run.action_runs, run_collection)\n        assert self.job_run.action_runs_proxy\n\n    def test_set_action_runs_none(self):\n        self.job_run._action_runs = None\n        run_collection = mock.create_autospec(actionrun.ActionRunCollection)\n        self.job_run._set_action_runs(run_collection)\n        assert not self.job_run.watch.mock_calls\n        assert_equal(self.job_run.action_runs, run_collection)\n\n    def test_set_action_runs_duplicate(self):\n        run_collection = mock.create_autospec(actionrun.ActionRunCollection)\n        assert_raises(\n            ValueError,\n            self.job_run._set_action_runs,\n            run_collection,\n        )\n\n    @mock.patch(\"tron.core.jobrun.timeutils.current_time\", autospec=True)\n    def test_seconds_until_run_time(self, mock_current_time):\n        mock_current_time.return_value = self.now\n        seconds = self.job_run.seconds_until_run_time()\n        assert_equal(seconds, 6)\n\n    @mock.patch(\"tron.core.jobrun.timeutils.current_time\", autospec=True)\n    def test_seconds_until_run_time_with_tz(self, mock_current_time):\n        mock_current_time.return_value = self.now_with_tz\n        self.job_run.run_time = self.run_time.replace(tzinfo=pytz.utc)\n        seconds = self.job_run.seconds_until_run_time()\n        assert_equal(seconds, 6)\n\n    def test_start(self):\n        autospec_method(self.job_run._do_start)\n        assert self.job_run.start()\n        self.job_run._do_start.assert_called_with()\n\n    def test_start_failed(self):\n        autospec_method(self.job_run._do_start, return_value=False)\n        assert not self.job_run.start()\n\n    def test_do_start(self):\n        startable_runs = [mock.create_autospec(actionrun.ActionRun) for _ in range(3)]\n        self.job_run.action_runs.get_startable_action_runs = lambda: startable_runs\n\n        assert self.job_run._do_start()\n        self.job_run.action_runs.ready.assert_called_with()\n        for startable_run in startable_runs:\n            startable_run.start.assert_called_with()\n\n    def test_do_start_all_failed(self):\n        autospec_method(self.job_run._start_action_runs, return_value=[None])\n        assert not self.job_run._do_start()\n\n    def test_do_start_some_failed(self):\n        returns = [True, None]\n        autospec_method(self.job_run._start_action_runs, return_value=returns)\n        assert self.job_run._do_start()\n\n    def test_do_start_no_runs(self):\n        assert not self.job_run._do_start()\n\n    def test_start_action_runs(self):\n        startable_runs = [mock.create_autospec(actionrun.ActionRun) for _ in range(3)]\n        self.job_run.action_runs.get_startable_action_runs = lambda: startable_runs\n\n        started_runs = self.job_run._start_action_runs()\n        assert_equal(started_runs, startable_runs)\n\n    def test_start_action_runs_failed(self):\n        startable_runs = [mock.create_autospec(actionrun.ActionRun) 
for _ in range(3)]\n        startable_runs[0].start.return_value = False\n        self.job_run.action_runs.get_startable_action_runs = lambda: startable_runs\n\n        started_runs = self.job_run._start_action_runs()\n        assert_equal(started_runs, startable_runs[1:])\n\n    @pytest.fixture\n    def jobrun_json(self):\n        runs = [\n            {\n                \"job_run_id\": \"compute-infra-test-service.test_load_foo1.5910\",\n                \"action_name\": \"example_action\",\n                \"state\": \"succeeded\",\n                \"original_command\": \"date; sleep 150; date\",\n                \"start_time\": \"2023-10-01T12:00:00\",\n                \"end_time\": \"2023-10-01T12:30:00\",\n                \"node_name\": \"paasta\",\n                \"exit_status\": 0,\n                \"attempts\": [],\n                \"retries_remaining\": 2,\n                \"retries_delay\": 60,\n                \"action_runner\": '{\"status_path\": \"/tmp/tron\", \"exec_path\": \"/opt/venvs/tron/bin\"}',\n                \"executor\": \"kubernetes\",\n                \"trigger_timeout_timestamp\": 1731584100,\n                \"trigger_downstreams\": False,\n                \"triggered_by\": [],\n                \"on_upstream_rerun\": None,\n            }\n        ]\n        cleanup = {\n            \"job_run_id\": \"compute-infra-test-service.test_load_foo1.5910\",\n            \"action_name\": \"cleanup_action\",\n            \"state\": \"succeeded\",\n            \"original_command\": \"date; sleep 150; date\",\n            \"start_time\": \"2023-10-01T12:00:00\",\n            \"end_time\": \"2023-10-01T12:30:00\",\n            \"node_name\": \"paasta\",\n            \"exit_status\": 0,\n            \"attempts\": [],\n            \"retries_remaining\": 2,\n            \"retries_delay\": 60,\n            \"action_runner\": '{\"status_path\": \"/tmp/tron\", \"exec_path\": \"/opt/venvs/tron/bin\"}',\n            \"executor\": \"kubernetes\",\n            \"trigger_timeout_timestamp\": 1731584100,\n            \"trigger_downstreams\": False,\n            \"triggered_by\": [],\n            \"on_upstream_rerun\": None,\n        }\n        serialized_cleanup = json.dumps(cleanup)\n        serialized_runs = [json.dumps(run) for run in runs]\n        return json.dumps(\n            {\n                \"job_name\": \"example_job\",\n                \"run_num\": 1,\n                \"run_time\": \"2023-10-01T12:00:00\",\n                \"time_zone\": None,\n                \"node_name\": \"example_node\",\n                \"runs\": serialized_runs,\n                \"cleanup_run\": serialized_cleanup,\n                \"manual\": False,\n            }\n        )\n\n    def test_from_json(self, jobrun_json):\n        result = jobrun.JobRun.from_json(jobrun_json)\n        expected = {\n            \"job_name\": \"example_job\",\n            \"run_num\": 1,\n            \"run_time\": datetime.datetime(2023, 10, 1, 12, 0, 0),\n            \"node_name\": \"example_node\",\n            \"runs\": [\n                {\n                    \"job_run_id\": \"compute-infra-test-service.test_load_foo1.5910\",\n                    \"action_name\": \"example_action\",\n                    \"state\": \"succeeded\",\n                    \"original_command\": \"date; sleep 150; date\",\n                    \"start_time\": datetime.datetime(2023, 10, 1, 12, 0, 0),\n                    \"end_time\": datetime.datetime(2023, 10, 1, 12, 30, 0),\n                    \"node_name\": \"paasta\",\n               
     \"exit_status\": 0,\n                    \"attempts\": [],\n                    \"retries_remaining\": 2,\n                    \"retries_delay\": datetime.timedelta(seconds=60),\n                    \"action_runner\": {\"status_path\": \"/tmp/tron\", \"exec_path\": \"/opt/venvs/tron/bin\"},\n                    \"executor\": \"kubernetes\",\n                    \"trigger_timeout_timestamp\": 1731584100,\n                    \"trigger_downstreams\": False,\n                    \"triggered_by\": [],\n                    \"on_upstream_rerun\": None,\n                }\n            ],\n            \"cleanup_run\": {\n                \"job_run_id\": \"compute-infra-test-service.test_load_foo1.5910\",\n                \"action_name\": \"cleanup_action\",\n                \"state\": \"succeeded\",\n                \"original_command\": \"date; sleep 150; date\",\n                \"start_time\": datetime.datetime(2023, 10, 1, 12, 0, 0),\n                \"end_time\": datetime.datetime(2023, 10, 1, 12, 30, 0),\n                \"node_name\": \"paasta\",\n                \"exit_status\": 0,\n                \"attempts\": [],\n                \"retries_remaining\": 2,\n                \"retries_delay\": datetime.timedelta(seconds=60),\n                \"action_runner\": {\"status_path\": \"/tmp/tron\", \"exec_path\": \"/opt/venvs/tron/bin\"},\n                \"executor\": \"kubernetes\",\n                \"trigger_timeout_timestamp\": 1731584100,\n                \"trigger_downstreams\": False,\n                \"triggered_by\": [],\n                \"on_upstream_rerun\": None,\n            },\n            \"manual\": False,\n            \"time_zone\": None,\n        }\n        assert result == expected\n\n    def test_start_action_runs_all_failed(self):\n        startable_runs = [mock.create_autospec(actionrun.ActionRun) for _ in range(2)]\n        for startable_run in startable_runs:\n            startable_run.start.return_value = False\n        self.job_run.action_runs.get_startable_action_runs = lambda: startable_runs\n\n        started_runs = self.job_run._start_action_runs()\n        assert_equal(started_runs, [])\n\n    def test_handler_trigger_ready_still_scheduled(self):\n        autospec_method(self.job_run._start_action_runs)\n        self.job_run.is_scheduled = True\n        self.job_run.handler(self.action_run, actionrun.ActionRun.NOTIFY_TRIGGER_READY)\n        assert not self.job_run._start_action_runs.mock_calls\n\n    def test_handler_trigger_ready_started(self):\n        autospec_method(self.job_run._start_action_runs)\n        self.job_run.is_scheduled = False\n        self.job_run.is_queued = False\n        self.job_run.handler(self.action_run, actionrun.ActionRun.NOTIFY_TRIGGER_READY)\n        assert self.job_run._start_action_runs.call_count == 1\n\n    def test_handler_not_end_state_event(self):\n        autospec_method(self.job_run.finalize)\n        autospec_method(self.job_run._start_action_runs)\n        self.action_run.is_done = False\n        self.job_run.handler(self.action_run, mock.Mock())\n        assert not self.job_run.finalize.mock_calls\n        assert not self.job_run._start_action_runs.mock_calls\n\n    def test_handler_with_startable(self):\n        startable_run = mock.create_autospec(actionrun.ActionRun)\n        self.job_run.action_runs.get_startable_action_runs = lambda: [\n            startable_run,\n        ]\n        autospec_method(self.job_run.finalize)\n        self.action_run.is_broken = False\n\n        self.job_run.handler(self.action_run, 
mock.Mock())\n        self.job_run.notify.assert_called_with(\n            self.job_run.NOTIFY_STATE_CHANGED,\n        )\n        startable_run.start.assert_called_with()\n        assert not self.job_run.finalize.mock_calls\n\n    def test_handler_runs_not_done(self):\n        self.job_run.action_runs.is_done = False\n        autospec_method(self.job_run._start_action_runs, return_value=[])\n        autospec_method(self.job_run.finalize)\n        self.job_run.handler(self.action_run, mock.Mock())\n        assert not self.job_run.finalize.mock_calls\n\n    def test_handler_finished_without_cleanup(self):\n        self.job_run.action_runs.is_active = False\n        self.job_run.action_runs.is_scheduled = False\n        self.job_run.action_runs.cleanup_action_run = None\n        autospec_method(self.job_run.finalize)\n        self.job_run.handler(self.action_run, mock.Mock())\n        self.job_run.finalize.assert_called_with()\n\n    def test_handler_finished_with_cleanup_done(self):\n        self.job_run.action_runs.is_active = False\n        self.job_run.action_runs.is_scheduled = False\n        self.job_run.action_runs.cleanup_action_run = mock.Mock(is_done=True)\n        autospec_method(self.job_run.finalize)\n        self.job_run.handler(self.action_run, mock.Mock())\n        self.job_run.finalize.assert_called_with()\n\n    def test_handler_finished_with_cleanup(self):\n        self.job_run.action_runs.is_active = False\n        self.job_run.action_runs.is_scheduled = False\n        self.job_run.action_runs.cleanup_action_run = mock.Mock(is_done=False)\n        autospec_method(self.job_run.finalize)\n        self.job_run.handler(self.action_run, mock.Mock())\n        assert not self.job_run.finalize.mock_calls\n        self.job_run.action_runs.cleanup_action_run.start.assert_called_with()\n\n    def test_handler_action_run_cancelled(self):\n        self.action_run.is_broken = True\n        autospec_method(self.job_run._start_action_runs)\n        self.job_run.handler(self.action_run, mock.Mock())\n        assert not self.job_run._start_action_runs.mock_calls\n\n    def test_handler_action_run_skipped(self):\n        self.action_run.is_broken = False\n        self.action_run.is_skipped = True\n        self.job_run.action_runs.is_scheduled = True\n        autospec_method(self.job_run._start_action_runs)\n        self.job_run.handler(self.action_run, mock.Mock())\n        assert not self.job_run._start_action_runs.mock_calls\n\n    def test_state(self):\n        assert_equal(self.job_run.state, actionrun.ActionRun.SUCCEEDED)\n\n    def test_state_with_no_action_runs(self):\n        self.job_run._action_runs = None\n        assert_equal(self.job_run.state, actionrun.ActionRun.UNKNOWN)\n\n    def test_finalize(self):\n        self.job_run.action_runs.is_failed = False\n        self.job_run.finalize()\n        self.job_run.notify.assert_called_with(self.job_run.NOTIFY_DONE)\n\n    def test_finalize_failure(self):\n        self.job_run.finalize()\n        self.job_run.notify.assert_called_with(self.job_run.NOTIFY_DONE)\n\n    def test_cleanup(self):\n        autospec_method(self.job_run.clear_observers)\n        self.job_run.output_path = mock.create_autospec(filehandler.OutputPath)\n        self.job_run.cleanup()\n\n        self.job_run.notify.assert_called_with(jobrun.JobRun.NOTIFY_REMOVED)\n        self.job_run.clear_observers.assert_called_with()\n        self.job_run.output_path.delete.assert_called_with()\n        assert not self.job_run.node\n        assert not 
self.job_run.action_graph\n        assert not self.job_run.action_runs\n\n    def test__getattr__(self):\n        assert self.job_run.cancel\n        assert self.job_run.state == \"succeeded\"\n        assert self.job_run.is_succeeded\n\n    def test__getattr__miss(self):\n        assert_raises(AttributeError, lambda: self.job_run.bogus)\n\n\nclass TestJobRunFromState(TestCase):\n    @setup\n    def setup_jobrun(self):\n        self.action_graph = mock.create_autospec(actiongraph.ActionGraph, action_map={})\n        self.run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        self.path = [\"base\", \"path\"]\n        self.output_path = mock.create_autospec(filehandler.OutputPath)\n        self.node_pool = mock.create_autospec(node.NodePool)\n        self.action_run_state_data = [\n            {\n                \"job_run_id\": \"thejobname.22\",\n                \"action_name\": \"blingaction\",\n                \"state\": \"succeeded\",\n                \"run_time\": \"sometime\",\n                \"start_time\": \"sometime\",\n                \"end_time\": \"sometime\",\n                \"command\": \"doit\",\n                \"node_name\": \"thenode\",\n            }\n        ]\n        self.state_data = {\n            \"job_name\": \"thejobname\",\n            \"run_num\": 22,\n            \"run_time\": self.run_time,\n            \"node_name\": \"thebox\",\n            \"end_time\": \"the_end\",\n            \"start_time\": \"start_time\",\n            \"runs\": self.action_run_state_data,\n            \"cleanup_run\": None,\n            \"manual\": True,\n        }\n        self.context = mock.Mock()\n\n    def test_from_state(self):\n        run = jobrun.JobRun.from_state(\n            self.state_data,\n            self.action_graph,\n            self.output_path,\n            self.context,\n            self.node_pool,\n        )\n        assert_length(run.action_runs.run_map, 1)\n        assert_equal(run.job_name, self.state_data[\"job_name\"])\n        assert_equal(run.run_time, self.run_time)\n        assert run.manual\n        assert_equal(run.output_path, self.output_path)\n        assert run.context.next\n        assert run.action_graph\n\n    def test_from_state_node_no_longer_exists(self):\n        run = jobrun.JobRun.from_state(\n            self.state_data,\n            self.action_graph,\n            self.output_path,\n            self.context,\n            self.node_pool,\n        )\n        assert_length(run.action_runs.run_map, 1)\n        assert_equal(run.job_name, \"thejobname\")\n        assert_equal(run.run_time, self.run_time)\n        assert_equal(run.node, self.node_pool)\n\n\nclass MockJobRun(MagicMock):\n\n    manual = False\n\n    node = \"anode\"\n\n    @property\n    def is_scheduled(self):\n        return self.state == actionrun.ActionRun.SCHEDULED\n\n    @property\n    def is_queued(self):\n        return self.state == actionrun.ActionRun.QUEUED\n\n    @property\n    def is_running(self):\n        return self.state == actionrun.ActionRun.RUNNING\n\n    @property\n    def is_starting(self):\n        return self.state == actionrun.ActionRun.STARTING\n\n    @property\n    def is_waiting(self):\n        return self.state == actionrun.ActionRun.WAITING\n\n    def __repr__(self):\n        return str(self.__dict__)\n\n\nclass TestJobRunCollection(TestCase):\n    def _mock_run(self, **kwargs):\n        return MockJobRun(**kwargs)\n\n    @setup\n    def setup_runs(self):\n        self.run_collection = jobrun.JobRunCollection(6)\n        self.job_runs = [\n   
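         # newest runs first: queued (5), waiting (4), running (3), then two succeeded (2, 1)\n   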
         self._mock_run(state=actionrun.ActionRun.QUEUED, run_num=5),\n            self._mock_run(state=actionrun.ActionRun.WAITING, run_num=4),\n            self._mock_run(state=actionrun.ActionRun.RUNNING, run_num=3),\n        ] + [\n            self._mock_run(\n                state=actionrun.ActionRun.SUCCEEDED,\n                run_num=i,\n            )\n            for i in range(2, 0, -1)\n        ]\n        self.run_collection.runs.extend(self.job_runs)\n        self.mock_node = mock.create_autospec(node.Node)\n\n    def test__init__(self):\n        assert_equal(self.run_collection.run_limit, 6)\n\n    def test_from_config(self):\n        job_config = mock.Mock(run_limit=20)\n        runs = jobrun.JobRunCollection.from_config(job_config)\n        assert_equal(runs.run_limit, 20)\n\n    def test_job_runs_from_state(self):\n        state_data = [\n            dict(\n                run_num=i,\n                job_name=\"thename\",\n                run_time=\"sometime\",\n                start_time=\"start_time\",\n                end_time=\"sometime\",\n                cleanup_run=None,\n                runs=[],\n            )\n            for i in range(3, -1, -1)\n        ]\n        action_graph = mock.create_autospec(actiongraph.ActionGraph)\n        output_path = mock.create_autospec(filehandler.OutputPath)\n        context = mock.Mock()\n        node_pool = mock.create_autospec(node.NodePool)\n        runs = jobrun.job_runs_from_state(\n            state_data,\n            action_graph,\n            output_path,\n            context,\n            node_pool,\n        )\n        assert len(runs) == 4\n        assert all([type(job) == jobrun.JobRun for job in runs])\n\n    def test_build_new_run(self):\n        autospec_method(self.run_collection.remove_old_runs)\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        mock_job = build_mock_job()\n        job_run = self.run_collection.build_new_run(\n            mock_job,\n            run_time,\n            self.mock_node,\n        )\n        assert_in(job_run, self.run_collection.runs)\n        self.run_collection.remove_old_runs.assert_called_with()\n        assert job_run.run_num == 6\n        assert job_run.job_name == mock_job.get_name.return_value\n\n    def test_build_new_run_manual(self):\n        autospec_method(self.run_collection.remove_old_runs)\n        run_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        mock_job = build_mock_job()\n        job_run = self.run_collection.build_new_run(\n            mock_job,\n            run_time,\n            self.mock_node,\n            True,\n        )\n        assert_in(job_run, self.run_collection.runs)\n        self.run_collection.remove_old_runs.assert_called_with()\n        assert job_run.run_num == 6\n        assert job_run.manual\n\n    def test_cancel_pending(self):\n        pending_runs = [mock.Mock() for _ in range(2)]\n        autospec_method(\n            self.run_collection.get_pending,\n            return_value=pending_runs,\n        )\n        self.run_collection.cancel_pending()\n        for pending_run in pending_runs:\n            pending_run.cancel.assert_called_with()\n\n    def test_cancel_pending_no_pending(self):\n        autospec_method(self.run_collection.get_pending, return_value=[])\n        self.run_collection.cancel_pending()\n\n    def test_remove_pending(self):\n        self.run_collection.remove_pending()\n        assert_length(self.run_collection.runs, 4)\n        assert_equal(self.run_collection.runs[0], self.job_runs[1])\n    
    assert_call(self.job_runs[0].cleanup, 0)\n\n    def test_get_run_by_state(self):\n        state = actionrun.ActionRun.SUCCEEDED\n        run = self.run_collection.get_run_by_state(state)\n        assert_equal(run, self.job_runs[3])\n\n    def test_get_run_by_state_no_match(self):\n        state = actionrun.ActionRun.UNKNOWN\n        run = self.run_collection.get_run_by_state(state)\n        assert_equal(run, None)\n\n    def test_get_run_by_num(self):\n        run = self.run_collection.get_run_by_num(1)\n        assert_equal(run.run_num, 1)\n\n    def test_get_run_by_num_no_match(self):\n        run = self.run_collection.get_run_by_num(7)\n        assert_equal(run, None)\n\n    def test_get_run_by_index(self):\n        run = self.run_collection.get_run_by_index(-1)\n        assert_equal(run, self.job_runs[0])\n        run = self.run_collection.get_run_by_index(-2)\n        assert_equal(run, self.job_runs[1])\n        run = self.run_collection.get_run_by_index(0)\n        assert_equal(run, self.job_runs[-1])\n        run = self.run_collection.get_run_by_index(1)\n        assert_equal(run, self.job_runs[-2])\n\n    def test_get_run_by_index_invalid_index(self):\n        run = self.run_collection.get_run_by_index(-6)\n        assert_equal(run, None)\n        run = self.run_collection.get_run_by_index(5)\n        assert_equal(run, None)\n\n    def test_get_newest(self):\n        run = self.run_collection.get_newest()\n        assert_equal(run, self.job_runs[0])\n\n    def test_get_newest_exclude_manual(self):\n        run = self._mock_run(\n            state=actionrun.ActionRun.RUNNING,\n            run_num=5,\n            manual=True,\n        )\n        self.job_runs.insert(0, run)\n        newest_run = self.run_collection.get_newest(include_manual=False)\n        assert_equal(newest_run, self.job_runs[1])\n\n    def test_get_newest_no_runs(self):\n        run_collection = jobrun.JobRunCollection(5)\n        assert_equal(run_collection.get_newest(), None)\n\n    def test_pending(self):\n        run_num = self.run_collection.next_run_num()\n        scheduled_run = self._mock_run(\n            run_num=run_num,\n            state=actionrun.ActionRun.SCHEDULED,\n        )\n        self.run_collection.runs.appendleft(scheduled_run)\n        pending = list(self.run_collection.get_pending())\n        assert_length(pending, 2)\n        assert_equal(pending, [scheduled_run, self.job_runs[0]])\n\n    def test_get_active(self):\n        starting_run = self._mock_run(\n            run_num=self.run_collection.next_run_num(),\n            state=actionrun.ActionRun.STARTING,\n        )\n        self.run_collection.runs.appendleft(starting_run)\n        active = list(self.run_collection.get_active())\n        assert_length(active, 3)\n        assert_equal(active, [starting_run, self.job_runs[1], self.job_runs[2]])\n\n    def test_get_active_with_node(self):\n        starting_run = self._mock_run(\n            run_num=self.run_collection.next_run_num(),\n            state=actionrun.ActionRun.STARTING,\n        )\n        starting_run.node = \"differentnode\"\n        self.run_collection.runs.appendleft(starting_run)\n        active = list(self.run_collection.get_active(\"anode\"))\n        assert_length(active, 2)\n        assert_equal(active, [self.job_runs[1], self.job_runs[2]])\n\n    def test_get_active_none(self):\n        active = list(self.run_collection.get_active(\"bogus\"))\n        assert_length(active, 0)\n\n    def test_get_first_queued(self):\n        run_num = 
self.run_collection.next_run_num()\n        second_queued = self._mock_run(\n            run_num=run_num,\n            state=actionrun.ActionRun.QUEUED,\n        )\n        self.run_collection.runs.appendleft(second_queued)\n\n        first_queued = self.run_collection.get_first_queued()\n        assert_equal(first_queued, self.job_runs[0])\n\n    def test_get_first_queued_no_match(self):\n        self.job_runs[0].state = actionrun.ActionRun.CANCELLED\n        first_queued = self.run_collection.get_first_queued()\n        assert not first_queued\n\n    def test_get_next_run_num(self):\n        assert_equal(self.run_collection.next_run_num(), 6)\n\n    def test_get_next_run_num_first(self):\n        run_collection = jobrun.JobRunCollection(5)\n        assert_equal(run_collection.next_run_num(), 0)\n\n    def test_remove_old_runs(self):\n        self.run_collection.run_limit = 1\n        self.run_collection.remove_old_runs()\n\n        assert_length(self.run_collection.runs, 1)\n        assert_call(self.job_runs[-1].cleanup, 0)\n        for job_run in self.run_collection.runs:\n            assert_length(job_run.cancel.calls, 0)\n\n    def test_remove_old_runs_none(self):\n        self.run_collection.remove_old_runs()\n        for job_run in self.job_runs:\n            assert_length(job_run.cancel.calls, 0)\n\n    def test_remove_old_runs_no_runs(self):\n        run_collection = jobrun.JobRunCollection(4)\n        run_collection.remove_old_runs()\n\n    def test_state_data(self):\n        assert_length(self.run_collection.state_data, len(self.job_runs))\n\n    def test_last_success(self):\n        assert_equal(self.run_collection.last_success, self.job_runs[3])\n\n    def test__str__(self):\n        expected = \"JobRunCollection[5(queued), 4(waiting), 3(running), 2(succeeded), 1(succeeded)]\"\n        assert_equal(str(self.run_collection), expected)\n\n    def test_get_action_runs(self):\n        action_name = \"action_name\"\n        self.run_collection.runs = job_runs = [mock.Mock(), mock.Mock()]\n        runs = self.run_collection.get_action_runs(action_name)\n        expected = [job_run.get_action_run.return_value for job_run in job_runs]\n        assert_equal(runs, expected)\n        for job_run in job_runs:\n            job_run.get_action_run.assert_called_with(action_name)\n\n    def test_get_run_nums(self):\n        assert self.run_collection.get_run_nums() == [5, 4, 3, 2, 1]\n\n\nclass TestJobRunStateTransitions:\n    \"\"\"Integration test for the state of a job run when actions change state in various ways.\"\"\"\n\n    @pytest.fixture\n    def mock_event_bus(self):\n        with mock.patch(\n            \"tron.core.actionrun.EventBus\",\n            autospec=True,\n        ) as mock_event_bus:\n            mock_event_bus.has_event.return_value = True\n            yield mock_event_bus\n\n    @pytest.fixture\n    def job_run(self, tmpdir, mock_event_bus):\n        action_foo = action.Action(\"foo\", action.ActionCommandConfig(\"command\"), None)\n        action_after_foo = action.Action(\"after_foo\", action.ActionCommandConfig(\"command\"), None)\n        action_bar = action.Action(\"bar\", action.ActionCommandConfig(\"command\"), None, triggered_by={\"trigger\"})\n        action_graph = actiongraph.ActionGraph(\n            action_map={\n                \"foo\": action_foo,\n                \"after_foo\": action_after_foo,\n                \"bar\": action_bar,\n            },\n            required_actions={\"foo\": set(), \"after_foo\": {\"foo\"}, \"bar\": set()},\n            
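# after_foo depends on foo; bar instead waits on an external trigger\n            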
required_triggers={\"foo\": set(), \"after_foo\": set(), \"bar\": {\"trigger\"}},\n        )\n        mock_job = mock.Mock(\n            output_path=filehandler.OutputPath(tmpdir),\n            action_graph=action_graph,\n            action_runner=actioncommand.NoActionRunnerFactory(),\n        )\n        job_run = jobrun.JobRun.for_job(\n            mock_job,\n            run_num=1,\n            run_time=datetime.datetime.now(),\n            node=mock.Mock(),\n            manual=False,\n        )\n        return job_run\n\n    def test_success_path(self, job_run):\n        # Check expected states as actions run normally and succeed.\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        # Run is initially SCHEDULED\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n\n        # After starting, both actions without dependencies start.\n        # Run is STARTING\n        job_run.start()\n        assert foo.is_starting\n        assert bar.is_starting\n        assert job_run.state == actionrun.ActionRun.STARTING\n\n        # Commands start successfully, run is RUNNING.\n        foo.action_command.started()\n        bar.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # Still RUNNING after one of two running actions succeeds\n        bar.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # after_foo starts after its dependency succeeds\n        foo.action_command.exited(0)\n        assert after_foo.is_starting\n        after_foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # SUCCEEDED after all actions succeed\n        after_foo.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.SUCCEEDED\n\n    def test_one_action_fails(self, job_run):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        # bar action fails, job is RUNNING because foo is still running\n        job_run.start()\n        foo.action_command.started()\n        bar.action_command.started()\n        bar.action_command.exited(1)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # after_foo still starts after its dependency succeeds\n        foo.action_command.exited(0)\n        assert after_foo.is_starting\n        after_foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # After running actions finish, run enters FAILED terminal state\n        after_foo.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.FAILED\n\n        # If we skip the failed action, run becomes SUCCEEDED\n        bar.skip()\n        assert job_run.state == actionrun.ActionRun.SUCCEEDED\n\n    def test_one_action_unknown(self, job_run):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n\n        # bar action becomes unknown, job is RUNNING because foo is still running\n        job_run.start()\n        foo.action_command.started()\n        bar.action_command.started()\n        bar.action_command.exited(None)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # after_foo still starts 
after its dependency succeeds\n        foo.action_command.exited(0)\n        assert after_foo.is_starting\n        after_foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # UNKNOWN after running actions finish\n        after_foo.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.UNKNOWN\n\n    def test_both_unknown_and_failed(self, job_run):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        # bar action becomes unknown, job is RUNNING because foo is still running\n        job_run.start()\n        foo.action_command.started()\n        bar.action_command.started()\n        bar.action_command.exited(None)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # after_foo still starts after its dependency succeeds\n        foo.action_command.exited(0)\n        assert after_foo.is_starting\n        after_foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # A different action fails\n        # Overall run is FAILED\n        after_foo.action_command.exited(1)\n        assert job_run.state == actionrun.ActionRun.FAILED\n\n    def test_required_action_fails(self, job_run):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n\n        # An action (foo) required by another action fails\n        # Run is RUNNING while the other action, bar, is running\n        job_run.start()\n        foo.action_command.started()\n        bar.action_command.started()\n        foo.action_command.exited(1)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # bar action succeeds\n        # after_foo cannot run because its required action failed\n        # So run is FAILED even though after_foo is waiting\n        bar.action_command.exited(0)\n        assert after_foo.is_waiting\n        assert job_run.state == actionrun.ActionRun.FAILED\n\n        # Pretend we reconfigured and after_foo doesn't depend on foo anymore\n        # Run should not be WAITING\n        # Ideally it would still be FAILED, but for now it's UNKNOWN in this case\n        job_run.action_runs.action_graph.required_actions[\"after_foo\"] = {}\n        assert job_run.state == actionrun.ActionRun.UNKNOWN\n\n    def test_required_action_unknown(self, job_run):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        # An action (foo) required by another action becomes unknown\n        # Run is RUNNING while the other action, bar, is running\n        job_run.start()\n        foo.action_command.started()\n        bar.action_command.started()\n        foo.action_command.exited(None)\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # Other action succeeds\n        # after_foo cannot run because its required action is unknown\n        # So run is UNKNOWN even though after_foo is waiting\n        bar.action_command.exited(0)\n        assert after_foo.is_waiting\n        assert job_run.state == actionrun.ActionRun.UNKNOWN\n\n        # Pretend we reconfigured and after_foo doesn't depend on foo anymore\n        # Run should not be waiting\n        
job_run.action_runs.action_graph.required_actions[\"after_foo\"] = {}\n        assert job_run.state == actionrun.ActionRun.UNKNOWN\n\n    def test_with_trigger(self, job_run, mock_event_bus):\n        foo = job_run.get_action_run(\"foo\")\n        after_foo = job_run.get_action_run(\"after_foo\")\n        bar = job_run.get_action_run(\"bar\")\n\n        # Start without trigger for bar\n        mock_event_bus.has_event.return_value = False\n        # Job should still start in scheduled state\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n\n        # Only foo is able to start\n        job_run.start()\n        assert foo.is_starting\n        assert bar.is_waiting\n        assert job_run.state == actionrun.ActionRun.STARTING\n\n        foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # after_foo runs normally after foo succeeds\n        foo.action_command.exited(0)\n        assert after_foo.is_starting\n        after_foo.action_command.started()\n        assert job_run.state == actionrun.ActionRun.RUNNING\n\n        # After after_foo succeeds, run is not done\n        # WAITING because bar is still waiting for a trigger\n        after_foo.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.WAITING\n\n        # After trigger is available, job run finishes as normal\n        mock_event_bus.has_event.return_value = True\n        bar.trigger_notify()\n        assert bar.is_starting\n        bar.action_command.started()\n        bar.action_command.exited(0)\n        assert job_run.state == actionrun.ActionRun.SUCCEEDED\n\n    def test_queued(self, job_run):\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n        job_run.queue()\n        assert job_run.state == actionrun.ActionRun.QUEUED\n        job_run.start()\n        assert job_run.state == actionrun.ActionRun.STARTING\n\n    def test_cancel_one(self, job_run):\n        assert job_run.state == actionrun.ActionRun.SCHEDULED\n        job_run.start()\n        assert job_run.state == actionrun.ActionRun.STARTING\n        job_run.get_action_run(\"after_foo\").cancel()\n        assert job_run.state == actionrun.ActionRun.CANCELLED\n"
  },
  {
    "path": "tests/core/recovery_test.py",
    "content": "from unittest import mock\nfrom unittest.mock import Mock\n\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.actioncommand import NoActionRunnerFactory\nfrom tron.actioncommand import SubprocessActionRunnerFactory\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.actionrun import KubernetesActionRun\nfrom tron.core.actionrun import MesosActionRun\nfrom tron.core.actionrun import SSHActionRun\nfrom tron.core.recovery import filter_action_runs_needing_recovery\nfrom tron.core.recovery import launch_recovery_actionruns_for_job_runs\nfrom tron.utils import timeutils\n\n\nclass TestRecovery(TestCase):\n    @setup\n    def fake_action_runs(self):\n        mock_unknown_machine = Mock(autospec=True)\n        mock_ok_machine = Mock(autospec=True)\n\n        mock_unknown_machine.state = ActionRun.UNKNOWN\n        mock_ok_machine.state = ActionRun.SUCCEEDED\n        self.action_runs = [\n            SSHActionRun(\n                job_run_id=\"test.unknown\",\n                name=\"test.unknown\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_unknown_machine,\n                end_time=timeutils.current_time(),\n            ),\n            SSHActionRun(\n                job_run_id=\"test.succeeded\",\n                name=\"test.succeeded\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_ok_machine,\n            ),\n            MesosActionRun(\n                job_run_id=\"test.succeeded\",\n                name=\"test.succeeded\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_ok_machine,\n            ),\n            MesosActionRun(\n                job_run_id=\"test.unknown-mesos\",\n                name=\"test.unknown-mesos\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_unknown_machine,\n            ),\n            MesosActionRun(\n                job_run_id=\"test.unknown-mesos-done\",\n                name=\"test.unknown-mesos-done\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_unknown_machine,\n                end_time=timeutils.current_time(),\n            ),\n            # TODO: Convert to all KubernetesActionRuns after deprecating mesos\n            #  A job will normally only ever have MesosActionRuns or KubernetsActionRuns\n            KubernetesActionRun(\n                job_run_id=\"test.k8s-done\",\n                name=\"test.k8s-done\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_unknown_machine,\n                end_time=timeutils.current_time(),\n            ),\n            KubernetesActionRun(\n                job_run_id=\"test.k8s-unknown\",\n                name=\"test.k8s-unknown\",\n                node=Mock(),\n                command_config=Mock(),\n                machine=mock_unknown_machine,\n            ),\n        ]\n\n    def test_filter_action_runs_needing_recovery(self):\n        assert filter_action_runs_needing_recovery(self.action_runs) == (\n            [self.action_runs[0]],\n            [self.action_runs[3]],\n            [self.action_runs[6]],\n        )\n\n    @mock.patch(\"tron.core.recovery.filter_action_runs_needing_recovery\", autospec=True)\n    def test_launch_recovery_actionruns_for_job_runs(self, mock_filter):\n        mock_actions = (\n            [\n                
mock.Mock(\n                    action_runner=NoActionRunnerFactory(),\n                    spec=SSHActionRun,\n                ),\n                mock.Mock(\n                    action_runner=SubprocessActionRunnerFactory(\n                        status_path=\"/tmp/foo\",\n                        exec_path=(\"/tmp/foo\"),\n                    ),\n                    spec=SSHActionRun,\n                ),\n            ],\n            [\n                mock.Mock(\n                    action_runner=NoActionRunnerFactory(),\n                    spec=MesosActionRun,\n                ),\n            ],\n            [\n                mock.Mock(\n                    action_runner=NoActionRunnerFactory(),\n                    spec=KubernetesActionRun,\n                ),\n            ],\n        )\n\n        mock_filter.return_value = mock_actions\n        mock_action_runner = mock.Mock(autospec=True)\n\n        mock_job_run = mock.Mock()\n        launch_recovery_actionruns_for_job_runs(\n            [mock_job_run],\n            mock_action_runner,\n        )\n        ssh_runs = mock_actions[0]\n        for run in ssh_runs:\n            assert run.recover.call_count == 1\n\n        mesos_run = mock_actions[1][0]\n        assert mesos_run.recover.call_count == 1\n\n        kubernetes_run = mock_actions[2][0]\n        assert kubernetes_run.recover.call_count == 1\n\n    @mock.patch(\"tron.core.recovery.filter_action_runs_needing_recovery\", autospec=True)\n    def test_launch_recovery_actionruns_empty_job_run(self, mock_filter):\n        \"\"\"_action_runs=None shouldn't prevent other job runs from being recovered\"\"\"\n        empty_job_run = mock.Mock(_action_runs=None)\n        other_job_run = mock.Mock(_action_runs=[mock.Mock()])\n        mock_action_runner = mock.Mock()\n        mock_filter.return_value = ([], [], [])\n\n        launch_recovery_actionruns_for_job_runs(\n            [empty_job_run, other_job_run],\n            mock_action_runner,\n        )\n        mock_filter.assert_called_with(other_job_run._action_runs)\n"
  },
  {
    "path": "tests/data/logging.conf",
    "content": "[loggers]\nkeys=root, twisted, tron\n\n[handlers]\nkeys=fileHandler\n\n[formatters]\nkeys=defaultFormatter\n\n[logger_root]\nlevel=WARN\nhandlers=fileHandler\n\n[logger_twisted]\nlevel=WARN\nhandlers=fileHandler\nqualname=twisted\npropagate=0\n\n[logger_tron]\nlevel=WARN\nhandlers=fileHandler\nqualname=tron\npropagate=0\n\n[handler_fileHandler]\nclass=logging.FileHandler\nlevel=WARN\nformatter=defaultFormatter\nargs=('{0}',)\n\n[formatter_defaultFormatter]\nformat=%(asctime)s %(name)s %(levelname)s %(message)s\n"
  },
  {
    "path": "tests/data/test_config.yaml",
    "content": "# This test config is intended to cover most common configuration cases\n\nssh_options:\n    agent: true\n\nstate_persistence:\n    store_type: shelve\n    name: tron_state.shelve\n    buffer_size: 1\n\nnodes:\n    - hostname: localhost\n    - name: box1\n      hostname: localhost\n    - name: box2\n      hostname: localhost\n    - name: box3\n      hostname: localhost\n    - name: box4\n      hostname: 127.0.0.1\n\nnode_pools:\n    - name: pool0\n      nodes: [localhost, box2]\n    - name: pool1\n      nodes: [box1]\n    - nodes: [box1, box2]\n    - name: pool2\n      nodes: [box3, box4]\n\ncommand_context:\n    THE_JOB_DIR: \"testconfig/jobs\"\n    ECHO:        \"echo\"\n    # Change this to repo root\n    PYTHON: \"cd /home/user/code/Tron && PYTHONPATH=. python\"\n\n\njobs:\n    # IntervalScheduler no dependent Actions, single node\n    -   name: interval_job0\n        run_limit: 3\n        node: localhost\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: \"task0\"\n                command: \"%(ECHO)s %(actionname)s\"\n            -   name: \"task1\"\n                command: \"sleep 10 && %(ECHO)s %(actionname)s\"\n\n\n    # IntervalScheduler dependent successful Actions, node pool\n    -   name: interval_job1\n        node: pool0\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: task1\n                command: \"%(ECHO)s %(actionname)s\"\n                requires: [task0]\n            -   name: task0\n                command: \"sleep 3 && %(ECHO)s %(actionname)s %(last_success:shortdate)s\"\n\n\n    # IntervalScheduler dependent failure Actions\n    -   name: interval_job2\n        node: box1_box2\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: task1\n                command: \"%(ECHO)s %(actionname)s\"\n                requires: [task0]\n            -   name: task0\n                command: \"%(ECHO)s %(actionname)s && sleep 7 && false\"\n\n    # Multiple dependent failure Actions\n    -   name: interval_job3\n        node: box1_box2\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: task0\n                command: \"%(ECHO)s %(actionname)s && sleep 7 && false\"\n            -   name: task1\n                command: \"%(ECHO)s %(actionname)s\"\n                requires: [task0]\n            -   name: task2\n                command: \"%(ECHO)s %(actionname)s && sleep 10\"\n            -   name: task3\n                command: \"%(ECHO)s %(actionname)s\"\n                requires: [task2]\n\n\n    # Multiple dependent failure Actions with cleanup\n    -   name: interval_job4\n        node: box2\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: task0\n                command: \"%(ECHO)s %(actionname)s && sleep 7 && false\"\n            -   name: task1\n                command: \"%(ECHO)s %(actionname)s\"\n                requires: [task0]\n            -   name: task2\n                command: \"%(ECHO)s %(actionname)s && sleep 10\"\n            -   name: task3\n                command: \"%(ECHO)s %(actionname)s && sleep 3\"\n                requires: [task2]\n        cleanup_action:\n            command: \"%(ECHO)s %(actionname)s %(cleanup_job_status)s\"\n\n\n    # No failures, with cleanup, different node for action\n    -   name: interval_job5\n        node: box1\n        schedule: \"cron * * * * *\"\n        actions:\n            -   name: \"task0\"\n                command: \"%(ECHO)s %(actionname)s\"\n                node: 
box2\n            -   name: \"task1\"\n                command: \"sleep 10 && %(ECHO)s %(actionname)s\"\n                node: pool0\n        cleanup_action:\n            command: \"%(ECHO)s %(actionname)s %(cleanup_job_status)s\"\n\n\n    # all_nodes Job\n    -   name: allnodes_job8\n        node: pool2\n        schedule: \"cron * * * * *\"\n        all_nodes: true\n        actions:\n            -   name: \"task0\"\n                command: \"%(ECHO)s %(actionname)s\"\n            -   name: \"task1\"\n                command: \"sleep 10 && %(ECHO)s %(actionname)s\"\n\n\n    # Job failing bad action\n\n    # DailyScheduler\n    -   name: daily_job9\n        node: box1\n        schedule: \"daily 16:00:00\"\n        actions:\n            -   name: \"task0\"\n                command: \"%(ECHO)s %(actionname)s 1 && false\"\n                node: box2\n                requires: [\"task1\"]\n            -   name: \"task1\"\n                command: \"sleep 10 && %(ECHO)s %(actionname)s %(last_success:shortdate)s\"\n                node: pool0\n        cleanup_action:\n            command: \"%(ECHO)s %(actionname)s %(cleanup_job_status)s\"\n\n    # Overlapping, queueing\n    -   name: overlap_cancel\n        node: pool2\n        schedule: \"cron * * * * *\"\n        queueing: false\n        actions:\n            -   name: \"task0\"\n                command: \"sleep 30s && %(ECHO)s %(actionname)s\"\n"
  },
  {
    "path": "tests/eventbus_test.py",
    "content": "import os\nimport tempfile\nfrom collections import defaultdict\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.eventbus import EventBus\n\n\nclass MakeEventBusTestCase(TestCase):\n    @setup\n    def setup(self):\n        self.logdir = tempfile.TemporaryDirectory()\n\n    @teardown\n    def teardown(self):\n        EventBus.shutdown()\n        self.logdir.cleanup()\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    def test_setup_eventbus_dir(self, time):\n        os.rmdir(self.logdir.name)\n\n        time.time = mock.Mock(return_value=1.0)\n        eb = EventBus.create(self.logdir.name)\n        assert os.path.exists(self.logdir.name)\n        assert os.path.exists(os.path.join(self.logdir.name, \"current\"))\n\n        time.time = mock.Mock(return_value=2.0)\n        eb.event_log = {\"foo\": \"bar\"}\n        eb.sync_save_log(\"test\")\n\n        new_eb = EventBus.create(self.logdir.name)\n        new_eb.sync_load_log()\n        assert new_eb.event_log == eb.event_log\n\n\nclass EventBusTestCase(TestCase):\n    @setup\n    def setup(self):\n        self.log_dir = tempfile.TemporaryDirectory(prefix=\"tron_eventbus_test\")\n        self.eventbus = EventBus.create(self.log_dir.name)\n        self.eventbus.enabled = True\n\n    @teardown\n    def teardown(self):\n        EventBus.shutdown()\n        self.log_dir.cleanup()\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_start(self, reactor):\n        self.eventbus.sync_load_log = mock.Mock()\n        reactor.callLater = mock.Mock()\n        self.eventbus.start()\n        assert self.eventbus.sync_load_log.call_count == 1\n        assert reactor.callLater.call_count == 1\n\n    def test_shutdown(self):\n        assert self.eventbus.enabled\n        self.eventbus.sync_save_log = mock.Mock()\n        self.eventbus.shutdown()\n        assert not self.eventbus.enabled\n        assert self.eventbus.sync_save_log.call_count == 1\n\n    def test_publish(self):\n        evt = {\"id\": \"foo\"}\n        self.eventbus.publish(evt)\n        assert self.eventbus.publish_queue.pop() == evt\n\n    def test_subscribe(self):\n        ps = (\"foo\", \"bar\", \"cb\")\n        self.eventbus.subscribe(*ps)\n        assert self.eventbus.subscribe_queue.pop() == ps\n\n    def test_has_event(self):\n        assert not self.eventbus.has_event(\"foo\")\n        self.eventbus.event_log[\"foo\"] = \"bar\"\n        assert self.eventbus.has_event(\"foo\")\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    def test_sync_load_log(self, time):\n        time.time = mock.Mock(return_value=1.0)\n        self.eventbus.event_log = {\"foo\": \"bar\"}\n        self.eventbus.sync_save_log(\"test\")\n        self.eventbus.event_log = {}\n        self.eventbus.sync_load_log()\n        assert self.eventbus.event_log == {\"foo\": \"bar\"}\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    def test_sync_save_log_time(self, time):\n        time.time = mock.Mock(return_value=1.0)\n        self.eventbus.sync_save_log(\"test\")\n        current_link = os.readlink(self.eventbus.log_current)\n        assert_equal(current_link, os.path.join(self.log_dir.name, \"1.pickle\"))\n\n        time.time = mock.Mock(return_value=2.0)\n        self.eventbus.sync_save_log(\"test\")\n        new_link = os.readlink(self.eventbus.log_current)\n        assert_equal(new_link, 
os.path.join(self.log_dir.name, \"2.pickle\"))\n        # we clean up the previous link so as not to have a million pickles\n        # on disk\n        assert not os.path.exists(current_link)\n        # so at this point, we should only have the new link\n        assert os.path.exists(new_link)\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_loop(self, reactor, time):\n        time.time = mock.Mock(return_value=0)\n        reactor.callLater = mock.Mock()\n        self.eventbus.enabled = True\n        self.eventbus.sync_shutdown = mock.Mock()\n        self.eventbus.sync_loop()\n        assert reactor.callLater.call_count == 1\n        assert self.eventbus.sync_shutdown.call_count == 0\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_loop_shutdown(self, reactor):\n        reactor.callLater = mock.Mock()\n        self.eventbus.enabled = False\n        self.eventbus.sync_save_log = mock.Mock()\n        self.eventbus.sync_loop()\n        assert reactor.callLater.call_count == 0\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    def test_sync_process_save_log(self, time):\n        time.time = mock.Mock(return_value=10)\n        self.eventbus.log_updates = 1\n        self.eventbus.log_last_save = 0\n        self.eventbus.log_save_interval = 20\n        self.eventbus.sync_save_log = mock.Mock()\n        self.eventbus.sync_process()\n        assert self.eventbus.sync_save_log.call_count == 0\n\n        time.time = mock.Mock(return_value=21)\n        self.eventbus.sync_process()\n        assert self.eventbus.sync_save_log.call_count == 1\n\n        time.time = mock.Mock(return_value=0)\n        self.eventbus.log_updates = 0\n        self.eventbus.log_save_updates = 20\n        self.eventbus.sync_save_log = mock.Mock()\n        self.eventbus.sync_process()\n        assert self.eventbus.sync_save_log.call_count == 0\n\n        self.eventbus.log_updates = 21\n        self.eventbus.sync_process()\n        assert self.eventbus.sync_save_log.call_count == 1\n        assert self.eventbus.log_updates == 0\n\n    @mock.patch(\"tron.eventbus.time\", autospec=True)\n    def test_sync_process_flush_queues(self, time):\n        time.time = mock.Mock(return_value=10)\n        self.eventbus.sync_subscribe = mock.Mock()\n        self.eventbus.sync_publish = mock.Mock()\n\n        for _ in range(5):\n            self.eventbus.publish_queue.append(mock.Mock())\n            self.eventbus.subscribe_queue.append(mock.Mock())\n\n        self.eventbus.sync_process()\n\n        assert_equal(self.eventbus.sync_subscribe.call_count, 5)\n        assert_equal(self.eventbus.sync_publish.call_count, 5)\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_publish(self, reactor):\n        reactor.callLater = mock.Mock()\n        evt = {\"id\": \"foo\", \"bar\": \"baz\"}\n        self.eventbus.event_log = {}\n        self.eventbus.log_save_updates = 0\n        self.eventbus.sync_publish(evt)\n        assert self.eventbus.log_updates == 1\n        assert reactor.callLater.call_count == 1\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_publish_replace(self, reactor):\n        evt1 = {\"id\": \"foo\", \"bar\": \"baz\"}\n        evt2 = {\"id\": \"foo\", \"bar\": \"quux\"}\n        self.eventbus.event_log = {}\n        self.eventbus.log_save_updates = 0\n        self.eventbus.sync_publish(evt1)\n        self.eventbus.sync_publish(evt2)\n        assert 
self.eventbus.log_updates == 2\n        assert reactor.callLater.call_count == 2\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_publish_duplicate(self, reactor):\n        evt = {\"id\": \"foo\", \"bar\": \"baz\"}\n        self.eventbus.event_log = {\"foo\": {\"bar\": \"baz\"}}\n        self.eventbus.log_save_updates = 0\n        self.eventbus.sync_publish(evt)\n        assert self.eventbus.log_updates == 0\n        assert reactor.callLater.call_count == 0\n\n    def test_sync_subscribe(self):\n        self.eventbus.event_subscribers = defaultdict(list)\n        self.eventbus.sync_subscribe((\"pre\", \"sub\", \"cb\"))\n        assert self.eventbus.event_subscribers == {\"pre\": [(\"sub\", \"cb\")]}\n\n        self.eventbus.sync_subscribe((\"pre\", \"sub2\", \"cb2\"))\n        assert self.eventbus.event_subscribers == {\n            \"pre\": [(\"sub\", \"cb\"), (\"sub2\", \"cb2\")],\n        }\n\n    def test_sync_unsubscribe(self):\n        self.eventbus.event_subscribers = defaultdict(list)\n        self.eventbus.sync_subscribe((\"pre\", \"sub\", \"cb\"))\n        self.eventbus.sync_subscribe((\"pre\", \"sub2\", \"cb2\"))\n        assert self.eventbus.event_subscribers == {\n            \"pre\": [(\"sub\", \"cb\"), (\"sub2\", \"cb2\")],\n        }\n\n        self.eventbus.sync_unsubscribe((\"pre\", \"sub\"))\n        assert self.eventbus.event_subscribers == {\"pre\": [(\"sub2\", \"cb2\")]}\n        self.eventbus.sync_unsubscribe((\"pre\", \"sub2\"))\n        assert self.eventbus.event_subscribers == {}\n\n    @mock.patch(\"tron.eventbus.reactor\", autospec=True)\n    def test_sync_notify(self, reactor):\n        reactor.callLater = mock.Mock()\n        self.eventbus.event_log = {\"p\": {}, \"pre\": {}, \"prefix\": {}}\n        self.eventbus.event_subscribers = {\n            \"pre\": [(\"sub\", \"m1\")],\n            \"prefix\": [(\"sub\", \"m2\"), (\"sub2\", \"m3\")],\n        }\n\n        self.eventbus.sync_notify(\"p\")\n        assert reactor.callLater.call_count == 0\n\n        self.eventbus.sync_notify(\"pre\")\n        assert reactor.callLater.call_count == 1\n\n        self.eventbus.sync_notify(\"prefix\")\n        assert reactor.callLater.call_count == 4\n"
  },
  {
    "path": "tests/kubernetes_test.py",
    "content": "from typing import Any\nfrom unittest import mock\n\nimport pytest\nfrom task_processing.interfaces.event import Event\nfrom task_processing.plugins.kubernetes.task_config import KubernetesTaskConfig\n\nfrom tron.config.schema import ConfigFieldSelectorSource\nfrom tron.config.schema import ConfigKubernetes\nfrom tron.config.schema import ConfigProjectedSAVolume\nfrom tron.config.schema import ConfigSecretSource\nfrom tron.config.schema import ConfigSecretVolume\nfrom tron.config.schema import ConfigSecretVolumeItem\nfrom tron.config.schema import ConfigVolume\nfrom tron.kubernetes import DEFAULT_DISK_LIMIT\nfrom tron.kubernetes import KubernetesCluster\nfrom tron.kubernetes import KubernetesClusterRepository\nfrom tron.kubernetes import KubernetesTask\nfrom tron.utils import exitcode\n\n\n@pytest.fixture\ndef mock_kubernetes_task():\n    with mock.patch(\n        \"tron.kubernetes.logging.getLogger\",\n        return_value=mock.Mock(handlers=[mock.Mock()]),\n        autospec=None,\n    ):\n        yield KubernetesTask(\n            action_run_id=\"mock_service.mock_job.1.mock_action\",\n            task_config=KubernetesTaskConfig(\n                name=\"mock--service-mock-job-mock--action\", uuid=\"123456\", image=\"some_image\", command=\"echo test\"\n            ),\n        )\n\n\n@pytest.fixture\ndef mock_kubernetes_cluster():\n    with mock.patch(\"tron.kubernetes.PyDeferredQueue\", autospec=True,), mock.patch(\n        \"tron.kubernetes.TaskProcessor\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.kubernetes.Subscription\",\n        autospec=True,\n    ) as mock_runner:\n        mock_runner.return_value.configure_mock(\n            stopping=False, TASK_CONFIG_INTERFACE=mock.Mock(spec=KubernetesTaskConfig)\n        )\n        yield KubernetesCluster(\"kube-cluster-a:1234\")\n\n\n@pytest.fixture\ndef mock_disabled_kubernetes_cluster():\n    with mock.patch(\"tron.kubernetes.PyDeferredQueue\", autospec=True,), mock.patch(\n        \"tron.kubernetes.TaskProcessor\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.kubernetes.Subscription\",\n        autospec=True,\n    ):\n        yield KubernetesCluster(\"kube-cluster-a:1234\", enabled=False)\n\n\ndef mock_event_factory(\n    task_id: str,\n    platform_type: str,\n    message: str = None,\n    raw: dict[str, Any] = None,\n    success: bool = False,\n    terminal: bool = False,\n) -> Event:\n    return Event(\n        kind=\"task\",\n        task_id=task_id,\n        platform_type=platform_type,\n        raw=raw or {},\n        terminal=terminal,\n        success=success,\n        message=message,\n    )\n\n\ndef test_get_event_logger_add_unique_handlers(mock_kubernetes_task):\n    \"\"\"\n    Ensures that only a single handler (for stderr) is added to the\n    Kubernetes Taskevent logger, to prevent duplicate log output.\n    \"\"\"\n    # Call 2 times to make sure 2nd call doesn't add another handler\n    logger = mock_kubernetes_task.get_event_logger()\n    logger = mock_kubernetes_task.get_event_logger()\n\n    assert len(logger.handlers) == 1\n\n\ndef test_handle_event_log_event_info_exception(mock_kubernetes_task):\n    with mock.patch.object(\n        mock_kubernetes_task, \"log_event_info\", autospec=True, side_effect=Exception\n    ) as mock_log_event_info:\n        mock_kubernetes_task.handle_event(\n            mock_event_factory(task_id=mock_kubernetes_task.get_kubernetes_id(), platform_type=\"running\")\n        )\n\n    # TODO: should also assert that the task is in the expected 
state once that's hooked up\n    assert mock_log_event_info.called\n\n\ndef test_handle_event_exit_early_on_misrouted_event(mock_kubernetes_task):\n    with mock.patch.object(\n        mock_kubernetes_task,\n        \"log_event_info\",\n        autospec=True,\n    ) as mock_log_event_info:\n        mock_kubernetes_task.handle_event(\n            mock_event_factory(task_id=\"not-the-pods-youre-looking-for\", platform_type=\"finished\")\n        )\n\n    # TODO: should also assert that the task is in the expected state once that's hooked up\n    # we log before actually doing anything with an event, so this not being called means\n    # we exited early\n    assert not mock_log_event_info.called\n\n\ndef test_handle_event_running(mock_kubernetes_task):\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(task_id=mock_kubernetes_task.get_kubernetes_id(), platform_type=\"running\")\n    )\n\n    assert mock_kubernetes_task.state == mock_kubernetes_task.RUNNING\n\n\ndef test_handle_event_exit_on_finished(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {\"running\": None, \"terminated\": None, \"waiting\": None},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": {\n                            \"containerID\": \"docker://asdf\",\n                            \"exitCode\": 0,\n                            \"finishedAt\": \"2022-11-19 00:11:02+00:00\",\n                            \"message\": None,\n                            \"reason\": \"Completed\",\n                            \"signal\": None,\n                            \"startedAt\": None,\n                        },\n                        \"waiting\": None,\n                    },\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"finished\",\n            terminal=True,\n            success=True,\n        )\n    )\n    assert mock_kubernetes_task.state == mock_kubernetes_task.COMPLETE\n    assert mock_kubernetes_task.is_complete\n\n\ndef test_handle_event_exit_on_failed(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(), platform_type=\"failed\", terminal=True, success=False\n        )\n    )\n\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_spot_interruption_exit(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": None,\n                    \"image\": \"someimage\",\n                    \"imageID\": None,\n                    \"lastState\": {\n                        \"running\": None,\n                        \"terminated\": {\n      
                      \"containerID\": None,\n                            \"exitCode\": 137,\n                            \"finishedAt\": None,\n                            \"message\": \"The container could not be located when the pod was deleted.  The container used to be Running\",\n                            \"reason\": \"ContainerStatusUnknown\",\n                            \"signal\": None,\n                            \"startedAt\": None,\n                        },\n                        \"waiting\": None,\n                    },\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": None,\n                        \"waiting\": {\"message\": None, \"reason\": \"ContainerCreating\"},\n                    },\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"killed\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_SPOT_INTERRUPTION\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n    # Test again, but with no lastState\n    raw_event_data[\"status\"][\"containerStatuses\"][0][\"state\"][\"terminated\"] = raw_event_data[\"status\"][\n        \"containerStatuses\"\n    ][0][\"lastState\"][\"terminated\"]\n    raw_event_data[\"status\"][\"containerStatuses\"][0][\"lastState\"] = {}\n\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"killed\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_SPOT_INTERRUPTION\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_node_scaledown_exit(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {\"running\": None, \"terminated\": None, \"waiting\": None},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": {\n                            \"containerID\": \"docker://asdf\",\n                            \"exitCode\": 143,\n                            \"finishedAt\": \"2022-11-19 00:11:02+00:00\",\n                            \"message\": None,\n                            \"reason\": \"Error\",\n                            \"signal\": None,\n                            \"startedAt\": None,\n                        },\n                        \"waiting\": None,\n                    },\n                },\n            ],\n        }\n    }\n    
mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"failed\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_NODE_SCALEDOWN\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_exit_not_terminated(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": None,\n                        \"waiting\": {\"reason\": \"ContainerCreating\"},\n                    },\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"killed\",\n            terminal=True,\n            success=False,\n        )\n    )\n\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_NODE_SCALEDOWN\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_abnormal_exit(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {\"running\": None, \"terminated\": None, \"waiting\": None},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": {\n                            \"containerID\": \"docker://asdf\",\n                            \"exitCode\": 0,\n                            \"finishedAt\": None,\n                            \"message\": None,\n                            \"reason\": None,\n                            \"signal\": None,\n                            \"startedAt\": None,\n                        },\n                        \"waiting\": None,\n                    },\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"finished\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_ABNORMAL\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_missing_state(mock_kubernetes_task):\n    
mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": None,\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"killed\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_ABNORMAL\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_code_from_state(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    raw_event_data = {\n        \"status\": {\n            \"containerStatuses\": [\n                {\n                    \"containerID\": \"docker://asdf\",\n                    \"image\": \"someimage\",\n                    \"imageID\": \"docker-pullable://someimage:sometag\",\n                    \"lastState\": {},\n                    \"name\": \"main\",\n                    \"ready\": False,\n                    \"restartCount\": 0,\n                    \"started\": False,\n                    \"state\": {\n                        \"running\": None,\n                        \"terminated\": {\n                            \"containerID\": \"docker://asdf\",\n                            \"exitCode\": 1337,\n                            \"finishedAt\": None,\n                            \"message\": None,\n                            \"reason\": None,\n                            \"signal\": None,\n                            \"startedAt\": None,\n                        },\n                        \"waiting\": None,\n                    },\n                },\n            ],\n        }\n    }\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            raw=raw_event_data,\n            platform_type=\"failed\",\n            terminal=True,\n            success=False,\n        )\n    )\n    assert mock_kubernetes_task.exit_status == 1337\n    assert mock_kubernetes_task.is_failed\n    assert mock_kubernetes_task.is_done\n\n\ndef test_handle_event_lost(mock_kubernetes_task):\n    mock_kubernetes_task.started()\n    mock_kubernetes_task.handle_event(\n        mock_event_factory(\n            task_id=mock_kubernetes_task.get_kubernetes_id(),\n            platform_type=\"lost\",\n        )\n    )\n\n    assert mock_kubernetes_task.exit_status == exitcode.EXIT_KUBERNETES_TASK_LOST\n\n\ndef test_create_task_disabled():\n    cluster = KubernetesCluster(\"kube-cluster-a:1234\", enabled=False)\n    mock_serializer = mock.MagicMock()\n\n    task = cluster.create_task(\n        action_run_id=\"action_a\",\n        serializer=mock_serializer,\n        command=\"ls\",\n        cpus=1,\n        mem=1024,\n        disk=None,\n        docker_image=\"docker-paasta.yelpcorp.com:443/bionic_yelp\",\n        env={},\n        secret_env={},\n        secret_volumes=[],\n        
projected_sa_volumes=[],\n        field_selector_env={},\n        volumes=[],\n        cap_add=[],\n        cap_drop=[],\n        node_selectors={\"yelp.com/pool\": \"default\"},\n        node_affinities=[],\n        topology_spread_constraints=[],\n        pod_labels={},\n        pod_annotations={},\n        service_account_name=None,\n        ports=[],\n    )\n\n    assert task is None\n\n\ndef test_create_task(mock_kubernetes_cluster):\n    mock_serializer = mock.MagicMock()\n\n    task = mock_kubernetes_cluster.create_task(\n        action_run_id=\"action_a\",\n        serializer=mock_serializer,\n        command=\"ls\",\n        cpus=1,\n        mem=1024,\n        disk=None,\n        docker_image=\"docker-paasta.yelpcorp.com:443/bionic_yelp\",\n        env={},\n        secret_env={},\n        secret_volumes=[],\n        projected_sa_volumes=[],\n        field_selector_env={},\n        volumes=[],\n        cap_add=[],\n        cap_drop=[],\n        node_selectors={\"yelp.com/pool\": \"default\"},\n        node_affinities=[],\n        topology_spread_constraints=[],\n        pod_labels={},\n        pod_annotations={},\n        service_account_name=None,\n        ports=[],\n    )\n\n    assert task is not None\n\n\ndef test_create_task_with_task_id(mock_kubernetes_cluster):\n    mock_serializer = mock.MagicMock()\n\n    task = mock_kubernetes_cluster.create_task(\n        action_run_id=\"action_a\",\n        serializer=mock_serializer,\n        task_id=\"yay.1234\",\n        command=\"ls\",\n        cpus=1,\n        mem=1024,\n        disk=None,\n        docker_image=\"docker-paasta.yelpcorp.com:443/bionic_yelp\",\n        env={},\n        secret_env={},\n        secret_volumes=[],\n        projected_sa_volumes=[],\n        field_selector_env={},\n        volumes=[],\n        cap_add=[],\n        cap_drop=[],\n        node_selectors={\"yelp.com/pool\": \"default\"},\n        node_affinities=[],\n        topology_spread_constraints=[],\n        pod_labels={},\n        pod_annotations={},\n        service_account_name=None,\n        ports=[],\n    )\n\n    mock_kubernetes_cluster.runner.TASK_CONFIG_INTERFACE().set_pod_name.assert_called_once_with(\"yay.1234\")\n    assert task is not None\n\n\ndef test_create_task_with_invalid_task_id(mock_kubernetes_cluster):\n    mock_serializer = mock.MagicMock()\n\n    with mock.patch.object(mock_kubernetes_cluster, \"runner\") as mock_runner:\n        mock_runner.TASK_CONFIG_INTERFACE.return_value.set_pod_name = mock.MagicMock(side_effect=ValueError)\n        task = mock_kubernetes_cluster.create_task(\n            action_run_id=\"action_a\",\n            serializer=mock_serializer,\n            task_id=\"boo\",\n            command=\"ls\",\n            cpus=1,\n            mem=1024,\n            disk=None,\n            docker_image=\"docker-paasta.yelpcorp.com:443/bionic_yelp\",\n            env={},\n            secret_env={},\n            secret_volumes=[],\n            projected_sa_volumes=[],\n            field_selector_env={},\n            volumes=[],\n            cap_add=[],\n            cap_drop=[],\n            node_selectors={\"yelp.com/pool\": \"default\"},\n            node_affinities=[],\n            topology_spread_constraints=[],\n            pod_labels={},\n            pod_annotations={},\n            service_account_name=None,\n            ports=[],\n        )\n\n    assert task is None\n\n\ndef test_create_task_with_config(mock_kubernetes_cluster):\n    # Validate we pass all expected args to taskproc\n    default_volumes = 
[ConfigVolume(container_path=\"/nail/tmp\", host_path=\"/nail/tmp\", mode=\"RO\")]\n\n    mock_kubernetes_cluster.default_volumes = default_volumes\n    mock_serializer = mock.MagicMock()\n\n    config_volumes = [ConfigVolume(container_path=\"/tmp\", host_path=\"/host\", mode=\"RO\")]\n    config_secret_volumes = [\n        ConfigSecretVolume(\n            secret_volume_name=\"secretvolumename\",\n            secret_name=\"secret\",\n            container_path=\"/b\",\n            default_mode=\"0644\",\n            items=[ConfigSecretVolumeItem(key=\"key\", path=\"path\", mode=\"0755\")],\n        ),\n    ]\n    config_secrets = {\"TEST_SECRET\": ConfigSecretSource(secret_name=\"tron-secret-test-secret--A\", key=\"secret_A\")}\n    config_field_selector = {\"POD_IP\": ConfigFieldSelectorSource(field_path=\"status.podIP\")}\n    config_sa_volumes = [ConfigProjectedSAVolume(audience=\"for.bar.com\", container_path=\"/var/run/secrets/whatever\")]\n\n    expected_args = {\n        \"name\": mock.ANY,\n        \"command\": \"ls\",\n        \"image\": \"docker-paasta.yelpcorp.com:443/bionic_yelp\",\n        \"cpus\": 1,\n        \"memory\": 1024,\n        \"disk\": DEFAULT_DISK_LIMIT,\n        \"environment\": {\"TEST_ENV\": \"foo\"},\n        \"secret_environment\": {k: v._asdict() for k, v in config_secrets.items()},\n        \"secret_volumes\": [v._asdict() for v in config_secret_volumes],\n        \"projected_sa_volumes\": [v._asdict() for v in config_sa_volumes],\n        \"field_selector_environment\": {k: v._asdict() for k, v in config_field_selector.items()},\n        \"volumes\": [v._asdict() for v in default_volumes + config_volumes],\n        \"cap_add\": [\"KILL\"],\n        \"cap_drop\": [\"KILL\", \"CHOWN\"],\n        \"node_selectors\": {\"yelp.com/pool\": \"default\"},\n        \"node_affinities\": [],\n        \"topology_spread_constraints\": [],\n        \"labels\": {},\n        \"annotations\": {},\n        \"service_account_name\": None,\n        \"ports\": [],\n    }\n\n    task = mock_kubernetes_cluster.create_task(\n        action_run_id=\"action_a\",\n        serializer=mock_serializer,\n        task_id=\"yay.1234\",\n        command=expected_args[\"command\"],\n        cpus=expected_args[\"cpus\"],\n        mem=expected_args[\"memory\"],\n        disk=None,\n        docker_image=expected_args[\"image\"],\n        env=expected_args[\"environment\"],\n        secret_env=config_secrets,\n        secret_volumes=config_secret_volumes,\n        projected_sa_volumes=config_sa_volumes,\n        field_selector_env=config_field_selector,\n        volumes=config_volumes,\n        cap_add=[\"KILL\"],\n        cap_drop=[\"KILL\", \"CHOWN\"],\n        node_selectors={\"yelp.com/pool\": \"default\"},\n        node_affinities=[],\n        topology_spread_constraints=[],\n        pod_labels={},\n        pod_annotations={},\n        service_account_name=None,\n        ports=expected_args[\"ports\"],\n    )\n\n    assert task is not None\n    mock_kubernetes_cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with(**expected_args)\n\n\ndef test_process_event_task(mock_kubernetes_cluster):\n    event = mock_event_factory(task_id=\"abc.123\", platform_type=\"mock_type\")\n    mock_kubernetes_task = mock.MagicMock(spec_set=KubernetesTask)\n    mock_kubernetes_task.get_kubernetes_id.return_value = \"abc.123\"\n    mock_kubernetes_cluster.tasks[\"abc.123\"] = mock_kubernetes_task\n\n    mock_kubernetes_cluster.process_event(event)\n\n    
mock_kubernetes_task.handle_event.assert_called_once_with(event)\n\n\ndef test_process_event_task_invalid_id(mock_kubernetes_cluster):\n    event = mock_event_factory(task_id=\"hwat.dis\", platform_type=\"mock_type\")\n    mock_kubernetes_task = mock.MagicMock(spec_set=KubernetesTask)\n    mock_kubernetes_task.get_kubernetes_id.return_value = \"abc.123\"\n    mock_kubernetes_cluster.tasks[\"abc.123\"] = mock_kubernetes_task\n\n    mock_kubernetes_cluster.process_event(event)\n\n    assert mock_kubernetes_task.handle_event.call_count == 0\n\n\ndef test_stop_default(mock_kubernetes_cluster):\n    # When stopping, tasks should not exit. They will be recovered\n    mock_task = mock.MagicMock()\n    mock_kubernetes_cluster.tasks = {\"task_id\": mock_task}\n    mock_kubernetes_cluster.stop()\n    assert mock_kubernetes_cluster.deferred is None\n    assert mock_task.exited.call_count == 0\n    assert len(mock_kubernetes_cluster.tasks) == 1\n\n\ndef test_stop_disabled():\n    # Shouldn't raise an error\n    mock_kubernetes_cluster = KubernetesCluster(\"kube-cluster-a:1234\", enabled=False)\n    mock_kubernetes_cluster.stop()\n\n\ndef test_set_enabled_enable_already_on(mock_kubernetes_cluster):\n    mock_kubernetes_cluster.set_enabled(is_enabled=True)\n\n    assert mock_kubernetes_cluster.enabled is True\n    # only called once as part of creating the cluster object\n    mock_kubernetes_cluster.processor.executor_from_config.assert_called_once()\n    assert mock_kubernetes_cluster.runner is not None\n    assert mock_kubernetes_cluster.deferred is not None\n    mock_kubernetes_cluster.deferred.addCallback.assert_has_calls(\n        [\n            mock.call(mock_kubernetes_cluster.process_event),\n            mock.call(mock_kubernetes_cluster.handle_next_event),\n        ]\n    )\n\n\ndef test_set_enabled_enable(mock_disabled_kubernetes_cluster):\n    mock_disabled_kubernetes_cluster.set_enabled(is_enabled=True)\n\n    assert mock_disabled_kubernetes_cluster.enabled is True\n    # only called once as part of enabling\n    mock_disabled_kubernetes_cluster.processor.executor_from_config.assert_called_once()\n    assert mock_disabled_kubernetes_cluster.runner is not None\n    assert mock_disabled_kubernetes_cluster.deferred is not None\n    mock_disabled_kubernetes_cluster.deferred.addCallback.assert_has_calls(\n        [\n            mock.call(mock_disabled_kubernetes_cluster.process_event),\n            mock.call(mock_disabled_kubernetes_cluster.handle_next_event),\n        ]\n    )\n\n\ndef test_set_enabled_disable(mock_kubernetes_cluster):\n    mock_task = mock.Mock(spec=KubernetesTask)\n    mock_kubernetes_cluster.tasks = {\"a.b\": mock_task}\n\n    mock_kubernetes_cluster.set_enabled(is_enabled=False)\n\n    assert mock_kubernetes_cluster.enabled is False\n    mock_kubernetes_cluster.runner.stop.assert_called_once()\n    assert mock_kubernetes_cluster.deferred is None\n    assert mock_kubernetes_cluster.tasks == {}\n\n\ndef test_configure_default_volumes():\n    # default_volume validation is done at config time, we just need to validate we are setting it\n    with mock.patch(\"tron.kubernetes.PyDeferredQueue\", autospec=True,), mock.patch(\n        \"tron.kubernetes.TaskProcessor\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.kubernetes.Subscription\",\n        autospec=True,\n    ):\n        mock_kubernetes_cluster = KubernetesCluster(\"kube-cluster-a:1234\", default_volumes=[])\n    assert mock_kubernetes_cluster.default_volumes == []\n    expected_volumes = [\n        
ConfigVolume(\n            container_path=\"/tmp\",\n            host_path=\"/host/tmp\",\n            mode=\"RO\",\n        ),\n    ]\n    mock_kubernetes_cluster.configure_tasks(default_volumes=expected_volumes)\n    assert mock_kubernetes_cluster.default_volumes == expected_volumes\n\n\ndef test_submit_disabled(mock_disabled_kubernetes_cluster, mock_kubernetes_task):\n    with mock.patch.object(mock_kubernetes_task, \"exited\", autospec=True) as mock_exited:\n        mock_disabled_kubernetes_cluster.submit(mock_kubernetes_task)\n\n    assert mock_kubernetes_task.get_kubernetes_id() not in mock_disabled_kubernetes_cluster.tasks\n    mock_exited.assert_called_once_with(1)\n\n\ndef test_submit(mock_kubernetes_cluster, mock_kubernetes_task):\n    mock_kubernetes_cluster.submit(mock_kubernetes_task)\n\n    assert mock_kubernetes_task.get_kubernetes_id() in mock_kubernetes_cluster.tasks\n    assert mock_kubernetes_cluster.tasks[mock_kubernetes_task.get_kubernetes_id()] == mock_kubernetes_task\n    mock_kubernetes_cluster.runner.run.assert_called_once_with(mock_kubernetes_task.get_config())\n\n\ndef test_recover(mock_kubernetes_cluster, mock_kubernetes_task):\n    with mock.patch.object(mock_kubernetes_task, \"started\", autospec=True) as mock_started:\n        mock_kubernetes_cluster.recover(mock_kubernetes_task)\n\n    assert mock_kubernetes_task.get_kubernetes_id() in mock_kubernetes_cluster.tasks\n    mock_kubernetes_cluster.runner.reconcile.assert_called_once_with(mock_kubernetes_task.get_config())\n    assert mock_started.call_count == 1\n\n\ndef test_kubernetes_cluster_repository():\n    # Check we are passing k8s_options from mcp/KubernetesClusterRepository.configure to KubernetesCluster calls\n\n    mock_k8s_options = {\n        \"enabled\": True,\n        \"kubeconfig_path\": \"/tmp/kubeconfig.conf\",\n        \"watcher_kubeconfig_paths\": [\"/tmp/kubeconfig_old.conf\"],\n        \"non_retryable_exit_codes\": [13],\n        \"default_volumes\": [\n            ConfigVolume(\n                container_path=\"/tmp\",\n                host_path=\"/host/tmp\",\n                mode=\"RO\",\n            )\n        ],\n    }\n    mock_k8s_options_obj = ConfigKubernetes(**mock_k8s_options)\n\n    with mock.patch(\"tron.kubernetes.KubernetesCluster\", autospec=True) as mock_cluster:\n        KubernetesClusterRepository.configure(mock_k8s_options_obj)\n        KubernetesClusterRepository.get_cluster()\n\n    mock_cluster.assert_called_once_with(**mock_k8s_options)\n"
  },
  {
    "path": "tests/mcp_reconfigure_test.py",
    "content": "\"\"\"Tests for reconfiguring mcp.\"\"\"\nimport os\nimport tempfile\nimport time\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import suite\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_length\nfrom tron import mcp\nfrom tron.config import config_parse\nfrom tron.config import schema\nfrom tron.serialize import filehandler\n\n\nclass TestMCPReconfigure(TestCase):\n\n    os.environ[\"SSH_AUTH_SOCK\"] = \"test-socket\"\n    pre_config = dict(\n        ssh_options=dict(\n            agent=True,\n            identities=[\"tests/test_id_rsa\"],\n        ),\n        nodes=[\n            dict(name=\"node0\", hostname=\"batch0\"),\n            dict(name=\"node1\", hostname=\"batch1\"),\n        ],\n        node_pools=[dict(name=\"nodePool\", nodes=[\"node0\", \"node1\"])],\n        command_context={\n            \"thischanges\": \"froma\",\n        },\n        jobs=[\n            dict(\n                name=\"test_unchanged\",\n                node=\"node0\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_unchanged\",\n                        command=\"command_unchanged\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_remove\",\n                node=\"node1\",\n                schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n                actions=[\n                    dict(\n                        name=\"action_remove\",\n                        command=\"command_remove\",\n                    ),\n                ],\n                cleanup_action=dict(name=\"cleanup\", command=\"doit\"),\n            ),\n            dict(\n                name=\"test_change\",\n                node=\"nodePool\",\n                schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n                actions=[\n                    dict(\n                        name=\"action_change\",\n                        command=\"command_change\",\n                    ),\n                    dict(\n                        name=\"action_remove2\",\n                        command=\"command_remove2\",\n                        requires=[\"action_change\"],\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_daily_change\",\n                node=\"node0\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_daily_change\",\n                        command=\"command\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_action_added\",\n                node=\"node0\",\n                schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n                actions=[\n                    dict(name=\"action_first\", command=\"command_do_it\"),\n                ],\n            ),\n        ],\n    )\n\n    post_config = dict(\n        ssh_options=dict(\n            agent=True,\n            identities=[\"tests/test_id_rsa\"],\n        ),\n        nodes=[\n            dict(name=\"node0\", hostname=\"batch0\"),\n            dict(name=\"node1\", hostname=\"batch1\"),\n        ],\n        node_pools=[dict(name=\"nodePool\", nodes=[\"node0\", \"node1\"])],\n        command_context={\n            \"a_variable\": 
\"is_constant\",\n            \"thischanges\": \"tob\",\n        },\n        jobs=[\n            dict(\n                name=\"test_unchanged\",\n                node=\"node0\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_unchanged\",\n                        command=\"command_unchanged\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_change\",\n                node=\"nodePool\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_change\",\n                        command=\"command_changed\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_daily_change\",\n                node=\"node0\",\n                schedule=\"daily\",\n                actions=[\n                    dict(\n                        name=\"action_daily_change\",\n                        command=\"command_changed\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_new\",\n                node=\"nodePool\",\n                schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n                actions=[\n                    dict(\n                        name=\"action_new\",\n                        command=\"command_new\",\n                    ),\n                ],\n            ),\n            dict(\n                name=\"test_action_added\",\n                node=\"node0\",\n                schedule={\"type\": \"cron\", \"value\": \"* * * * *\"},\n                actions=[\n                    dict(name=\"action_first\", command=\"command_do_it\"),\n                    dict(name=\"action_second\", command=\"command_ok\"),\n                ],\n            ),\n        ],\n    )\n\n    def _get_config(self, idx, output_dir):\n        config = dict(self.post_config if idx else self.pre_config)\n        config[\"output_stream_dir\"] = output_dir\n        return config\n\n    def _get_runs_to_schedule(self, sched):\n        last_run = sched.job.runs.get_newest(include_manual=False)\n        last_run_time = last_run.run_time if last_run else None\n        return sched.get_runs_to_schedule(last_run_time)\n\n    @setup\n    def setup_mcp(self):\n        self.test_dir = tempfile.mkdtemp()\n        self.mcp = mcp.MasterControlProgram(self.test_dir, \"config\", time.time())\n        config = {schema.MASTER_NAMESPACE: self._get_config(0, self.test_dir)}\n        container = config_parse.ConfigContainer.create(config)\n        self.mcp.apply_config(container)\n\n    @teardown\n    def teardown_mcp(self):\n        filehandler.OutputPath(self.test_dir).delete()\n        filehandler.FileHandleManager.reset()\n\n    def reconfigure(self):\n        config = {schema.MASTER_NAMESPACE: self._get_config(1, self.test_dir)}\n        container = config_parse.ConfigContainer.create(config)\n        self.mcp.apply_config(container, reconfigure=True)\n\n    @suite(\"integration\")\n    def test_job_list(self):\n        count = len(self.pre_config[\"jobs\"])\n        assert_equal(len(self.mcp.jobs.get_names()), count)\n        self.reconfigure()\n        assert_equal(len(self.mcp.jobs.get_names()), count)\n\n    @pytest.mark.skip(\n        reason=\"This test doesn't currently as run1 is not scheduled.\",\n    )\n    @suite(\"integration\")\n    def test_job_unchanged(self):\n        assert 
\"MASTER.test_unchanged\" in self.mcp.jobs\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n        orig_job = job_sched.job\n        run0 = next(self._get_runs_to_schedule(job_sched))\n        run0.start()\n        run1 = next(self._get_runs_to_schedule(job_sched))\n\n        assert_equal(job_sched.job.name, \"MASTER.test_unchanged\")\n        action_map = job_sched.job.action_graph.action_map\n        assert_equal(len(action_map), 1)\n        assert_equal(action_map[\"action_unchanged\"].name, \"action_unchanged\")\n        assert_equal(str(job_sched.job.scheduler), \"daily 00:00:00 \")\n\n        self.reconfigure()\n        assert job_sched is self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n        assert job_sched.job is orig_job\n\n        assert_equal(len(job_sched.job.runs.runs), 2)\n        assert_equal(job_sched.job.runs.runs[1], run0)\n        assert_equal(job_sched.job.runs.runs[0], run1)\n        assert run1.is_scheduled\n        assert_equal(job_sched.job.context[\"a_variable\"], \"is_constant\")\n        assert_equal(job_sched.job.context[\"thischanges\"], \"tob\")\n\n    @suite(\"integration\")\n    def test_job_unchanged_disabled(self):\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n        orig_job = job_sched.job\n        next(self._get_runs_to_schedule(job_sched))\n        job_sched.disable()\n\n        self.reconfigure()\n        assert job_sched is self.mcp.jobs.get_by_name(\"MASTER.test_unchanged\")\n        assert job_sched.job is orig_job\n        assert not job_sched.job.enabled\n\n    @suite(\"integration\")\n    def test_job_removed(self):\n        assert \"MASTER.test_remove\" in self.mcp.jobs\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_remove\")\n        run0 = next(self._get_runs_to_schedule(job_sched))\n        run0.start()\n        run1 = next(self._get_runs_to_schedule(job_sched))\n\n        assert_equal(job_sched.job.name, \"MASTER.test_remove\")\n        action_map = job_sched.job.action_graph.action_map\n        assert_equal(len(action_map), 2)\n        assert_equal(action_map[\"action_remove\"].name, \"action_remove\")\n\n        self.reconfigure()\n        assert \"test_remove\" not in self.mcp.jobs\n        assert not job_sched.job.enabled\n        assert not run1.is_scheduled\n\n    @suite(\"integration\")\n    def test_job_changed(self):\n        assert \"MASTER.test_change\" in self.mcp.jobs\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n        run0 = next(self._get_runs_to_schedule(job_sched))\n        run0.start()\n        next(self._get_runs_to_schedule(job_sched))\n        assert_equal(len(job_sched.job.runs.runs), 2)\n\n        assert_equal(job_sched.job.name, \"MASTER.test_change\")\n        action_map = job_sched.job.action_graph.action_map\n        assert_equal(len(action_map), 2)\n\n        self.reconfigure()\n        new_job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n        assert new_job_sched is job_sched\n        assert new_job_sched.job is job_sched.job\n\n        assert_equal(new_job_sched.job.name, \"MASTER.test_change\")\n        action_map = job_sched.job.action_graph.action_map\n        assert_equal(len(action_map), 1)\n\n        assert_equal(len(new_job_sched.job.runs.runs), 2)\n        assert new_job_sched.job.runs.runs[1].is_starting\n        assert new_job_sched.job.runs.runs[0].is_scheduled\n        assert_equal(job_sched.job.context[\"a_variable\"], \"is_constant\")\n        assert 
new_job_sched.job.context.base.job is new_job_sched.job\n\n    @suite(\"integration\")\n    def test_job_changed_disabled(self):\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n        job_sched.disable()\n        assert not job_sched.job.enabled\n\n        self.reconfigure()\n        new_job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_change\")\n        assert not new_job_sched.job.enabled\n\n    @suite(\"integration\")\n    def test_job_new(self):\n        assert \"test_new\" not in self.mcp.jobs\n        self.reconfigure()\n\n        assert \"MASTER.test_new\" in self.mcp.jobs\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_new\")\n\n        assert_equal(job_sched.job.name, \"MASTER.test_new\")\n        action_map = job_sched.job.action_graph.action_map\n        assert_equal(len(action_map), 1)\n        assert_equal(action_map[\"action_new\"].name, \"action_new\")\n        assert_equal(action_map[\"action_new\"].command, \"command_new\")\n        assert_equal(len(job_sched.job.runs.runs), 1)\n        assert job_sched.job.runs.runs[0].is_scheduled\n\n    @suite(\"integration\")\n    def test_daily_reschedule(self):\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_daily_change\")\n\n        next(self._get_runs_to_schedule(job_sched))\n\n        assert_equal(len(job_sched.job.runs.runs), 1)\n        run = job_sched.job.runs.runs[0]\n        assert run.is_scheduled\n\n        action_runs = run.action_runs\n        self.reconfigure()\n        assert action_runs.is_cancelled\n\n        assert_equal(len(job_sched.job.runs.runs), 1)\n        new_run = job_sched.job.runs.runs[0]\n        assert new_run is not run\n        assert new_run.is_scheduled\n        assert_equal(run.run_time, new_run.run_time)\n\n    @suite(\"integration\")\n    def test_action_added(self):\n        self.reconfigure()\n        job_sched = self.mcp.jobs.get_by_name(\"MASTER.test_action_added\")\n        assert_length(job_sched.job.action_graph.action_map, 2)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/mcp_test.py",
    "content": "import shutil\nimport tempfile\nimport time\nfrom unittest import mock\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.testingutils import autospec_method\nfrom tron import mcp\nfrom tron.config import config_parse\nfrom tron.config import manager\nfrom tron.core.job_collection import JobCollection\nfrom tron.serialize.runstate import statemanager\n\n\nclass TestMasterControlProgram:\n\n    TEST_CONFIG = \"tests/data/test_config.yaml\"\n\n    @pytest.fixture(autouse=True)\n    def setup_mcp(self):\n        self.working_dir = tempfile.mkdtemp()\n        self.config_path = tempfile.mkdtemp()\n        self.boot_time = time.time()\n        self.mcp = mcp.MasterControlProgram(self.working_dir, self.config_path, self.boot_time)\n        self.mcp.state_watcher = mock.create_autospec(\n            statemanager.StateChangeWatcher,\n        )\n        yield\n        shutil.rmtree(self.config_path)\n        shutil.rmtree(self.working_dir)\n\n    def test_reconfigure_default(self):\n        autospec_method(self.mcp._load_config)\n        self.mcp.state_watcher = mock.MagicMock()\n        self.mcp.reconfigure()\n        self.mcp._load_config.assert_called_with(reconfigure=True, namespace_to_reconfigure=None)\n\n    def test_reconfigure_namespace(self):\n        autospec_method(self.mcp._load_config)\n        self.mcp.state_watcher = mock.MagicMock()\n        self.mcp.reconfigure(namespace=\"foo\")\n        self.mcp._load_config.assert_called_with(reconfigure=True, namespace_to_reconfigure=\"foo\")\n\n    @pytest.mark.parametrize(\n        \"reconfigure,namespace\",\n        [\n            (False, None),\n            (True, None),\n            (True, \"foo\"),\n        ],\n    )\n    def test_load_config(self, reconfigure, namespace):\n        autospec_method(self.mcp.apply_config)\n        self.mcp.config = mock.create_autospec(manager.ConfigManager)\n        self.mcp._load_config(reconfigure, namespace)\n        self.mcp.state_watcher.disabled.assert_called_with()\n        self.mcp.apply_config.assert_called_with(\n            self.mcp.config.load.return_value,\n            reconfigure=reconfigure,\n            namespace_to_reconfigure=namespace,\n        )\n\n    @pytest.mark.parametrize(\n        \"reconfigure,namespace\",\n        [\n            (False, None),\n            (True, None),\n            (True, \"foo\"),\n            (True, \"MASTER\"),\n        ],\n    )\n    @mock.patch(\"tron.mcp.KubernetesClusterRepository\", autospec=True)\n    @mock.patch(\"tron.mcp.MesosClusterRepository\", autospec=True)\n    @mock.patch(\"tron.mcp.node.NodePoolRepository\", autospec=True)\n    def test_apply_config(self, mock_repo, mock_cluster_repo, mock_k8s_cluster_repo, reconfigure, namespace):\n        config_container = mock.create_autospec(config_parse.ConfigContainer)\n        master_config = config_container.get_master.return_value\n        autospec_method(self.mcp.jobs.update_from_config)\n        autospec_method(self.mcp.build_job_scheduler_factory)\n        self.mcp.apply_config(config_container, reconfigure, namespace)\n        self.mcp.state_watcher.update_from_config.assert_called_with(\n            master_config.state_persistence,\n        )\n        assert_equal(self.mcp.context.base, master_config.command_context)\n\n        mock_repo.update_from_config.assert_called_with(\n            master_config.nodes,\n            
master_config.node_pools,\n            master_config.ssh_options,\n        )\n        mock_cluster_repo.configure.assert_called_with(\n            master_config.mesos_options,\n        )\n        mock_k8s_cluster_repo.configure.assert_called_with(\n            master_config.k8s_options,\n        )\n        self.mcp.build_job_scheduler_factory(master_config, mock.Mock())\n\n        expected_namespace_to_update = None if namespace == \"MASTER\" else namespace\n        self.mcp.jobs.update_from_config.assert_called_once_with(\n            config_container.get_jobs(),\n            self.mcp.build_job_scheduler_factory.return_value,\n            reconfigure,\n            expected_namespace_to_update,\n        )\n        self.mcp.state_watcher.watch_all.assert_called_once_with(\n            self.mcp.jobs.update_from_config.return_value,\n            mock.ANY,\n        )\n\n    def test_update_state_watcher_config_changed(self):\n        self.mcp.state_watcher.update_from_config.return_value = True\n        self.mcp.jobs = mock.create_autospec(JobCollection)\n        self.mcp.jobs.__iter__.return_values = {\n            \"a\": mock.Mock(),\n            \"b\": mock.Mock(),\n        }\n        state_config = mock.Mock()\n        self.mcp.update_state_watcher_config(state_config)\n        self.mcp.state_watcher.update_from_config.assert_called_with(\n            state_config,\n        )\n        assert_equal(\n            self.mcp.state_watcher.save_job.mock_calls,\n            [mock.call(j.job) for j in self.mcp.jobs],\n        )\n\n    def test_update_state_watcher_config_no_change(self):\n        self.mcp.state_watcher.update_from_config.return_value = False\n        self.mcp.jobs = {\"a\": mock.Mock(), \"b\": mock.Mock()}\n        state_config = mock.Mock()\n        self.mcp.update_state_watcher_config(state_config)\n        assert not self.mcp.state_watcher.save_job.mock_calls\n\n\nclass TestMasterControlProgramRestoreState(TestCase):\n    @setup\n    def setup_mcp(self):\n        self.working_dir = tempfile.mkdtemp()\n        self.config_path = tempfile.mkdtemp()\n        self.boot_time = time.time()\n        self.mcp = mcp.MasterControlProgram(self.working_dir, self.config_path, self.boot_time)\n        self.mcp.jobs = mock.create_autospec(JobCollection)\n        self.mcp.state_watcher = mock.create_autospec(\n            statemanager.StateChangeWatcher,\n        )\n\n    @teardown\n    def teardown_mcp(self):\n        shutil.rmtree(self.working_dir)\n        shutil.rmtree(self.config_path)\n\n    def test_restore_state(self):\n        job_state_data = {\"1\": \"things\", \"2\": \"things\"}\n        state_data = {\n            \"job_state\": job_state_data,\n        }\n        self.mcp.state_watcher.restore.return_value = state_data\n        action_runner = mock.Mock()\n        self.mcp.restore_state(action_runner)\n        self.mcp.jobs.restore_state.assert_called_with(job_state_data, action_runner)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/mesos_test.py",
    "content": "from collections import namedtuple\nfrom unittest import mock\n\nimport staticconf.testing\n\nfrom testifycompat import assert_equal\nfrom testifycompat import setup_teardown\nfrom testifycompat import TestCase\nfrom tron.mesos import MesosCluster\nfrom tron.mesos import MesosClusterRepository\nfrom tron.mesos import MesosTask\n\n\nclass TestMesosClusterRepository(TestCase):\n    @setup_teardown\n    def mock_cluster(self):\n        # Ensure different mock is returned each time class is instantiated\n        def init_cluster(*args, **kwargs):\n            return mock.MagicMock(spec_set=MesosCluster)\n\n        with mock.patch(\n            \"tron.mesos.MesosCluster\",\n            side_effect=init_cluster,\n            autospec=True,\n        ) as self.cluster_cls:\n            yield\n\n    def test_get_cluster_repeated_mesos_address(self):\n        first = MesosClusterRepository.get_cluster(\"master-a.com\")\n        second = MesosClusterRepository.get_cluster(\"master-a.com\")\n        assert_equal(first, second)\n        assert_equal(self.cluster_cls.call_count, 1)\n\n    def test_shutdown(self):\n        clusters = [MesosClusterRepository.get_cluster(address) for address in [\"a\", \"b\", \"c\"]]\n        assert_equal(self.cluster_cls.call_count, 3)\n        MesosClusterRepository.shutdown()\n        for cluster in clusters:\n            assert_equal(cluster.stop.call_count, 1)\n\n    def test_configure(self):\n        clusters = [MesosClusterRepository.get_cluster(address) for address in [\"d\", \"e\"]]\n        mock_volume = mock.Mock()\n        options = mock.Mock(\n            master_port=5000,\n            secret=\"/dev/null\",\n            principal=\"fake-principal\",\n            role=\"tron\",\n            enabled=False,\n            default_volumes=[mock_volume],\n            dockercfg_location=\"auth\",\n            offer_timeout=1000,\n        )\n        with mock.patch(\n            \"tron.mesos.get_secret_from_file\",\n            autospec=True,\n            return_value=\"test-secret\",\n        ):\n            MesosClusterRepository.configure(options)\n\n        expected_volume = mock_volume._asdict.return_value\n        for cluster in clusters:\n            cluster.set_enabled.assert_called_once_with(False)\n            cluster.configure_tasks.assert_called_once_with(\n                default_volumes=[expected_volume],\n                dockercfg_location=\"auth\",\n                offer_timeout=1000,\n            )\n\n        # Next cluster we get should be initialized with the same settings\n        MesosClusterRepository.get_cluster(\"f\")\n        self.cluster_cls.assert_called_with(\n            mesos_address=\"f\",\n            mesos_master_port=5000,\n            secret=\"test-secret\",\n            principal=\"fake-principal\",\n            mesos_role=\"tron\",\n            framework_id=None,\n            enabled=False,\n            default_volumes=[expected_volume],\n            dockercfg_location=\"auth\",\n            offer_timeout=1000,\n        )\n\n\ndef mock_task_event(\n    task_id,\n    platform_type,\n    raw=None,\n    terminal=False,\n    success=False,\n    **kwargs,\n):\n    return mock.MagicMock(\n        kind=\"task\",\n        task_id=task_id,\n        platform_type=platform_type,\n        raw=raw or {},\n        terminal=terminal,\n        success=success,\n        **kwargs,\n    )\n\n\nclass TestMesosTask(TestCase):\n    @setup_teardown\n    def setup(self):\n        TaskConfig = namedtuple(\"TaskConfig\", \"cmd task_id cpus 
mem disk env\")\n        self.action_run_id = \"my_service.job.1.action\"\n        self.task_id = \"123abcuuid\"\n        with mock.patch(\n            \"tron.mesos.logging.getLogger\",\n            return_value=mock.Mock(handlers=[mock.Mock()]),\n            autospec=None,\n        ):\n            self.task = MesosTask(\n                id=self.action_run_id,\n                task_config=TaskConfig(\n                    cmd=\"echo hello world\",\n                    task_id=self.task_id,\n                    cpus=0.1,\n                    mem=100,\n                    disk=100,\n                    env={\n                        \"INITIAL_VAR\": \"baz\",\n                        \"AWS_SECRET_ACCESS_KEY\": \"THISISASECRET\",\n                        \"SOME_VAR\": \"bar\",\n                        \"AWS_ACCESS_KEY_ID\": \"THISISASECRETTOO\",\n                        \"SOME_OTHER_VAR\": \"foo\",\n                    },\n                ),\n            )\n            yield\n\n    def test_aws_credentials_redacted(self):\n        assert all([\"THISISASECRET\" not in text[0][0] for text in self.task.log.info.call_args_list])\n        assert all([\"foo\" in text[0][0] for text in self.task.log.info.call_args_list])\n        assert all([\"bar\" in text[0][0] for text in self.task.log.info.call_args_list])\n        assert all([\"baz\" in text[0][0] for text in self.task.log.info.call_args_list])\n\n    def test_handle_staging(self):\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"staging\",\n        )\n        self.task.handle_event(event)\n        assert self.task.state == MesosTask.PENDING\n\n    def test_handle_starting(self):\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"starting\",\n        )\n        self.task.handle_event(event)\n        assert self.task.state == MesosTask.RUNNING\n\n    def test_handle_running(self):\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"running\",\n        )\n        self.task.handle_event(event)\n        assert self.task.state == MesosTask.RUNNING\n\n    def test_handle_running_for_other_task(self):\n        event = mock_task_event(\n            task_id=\"other321\",\n            platform_type=\"running\",\n        )\n        self.task.handle_event(event)\n        assert self.task.state == MesosTask.PENDING\n\n    def test_handle_finished(self):\n        self.task.started()\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"finished\",\n            terminal=True,\n            success=True,\n        )\n        self.task.handle_event(event)\n        assert self.task.is_complete\n\n    def test_handle_failed(self):\n        self.task.started()\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"failed\",\n            terminal=True,\n            success=False,\n        )\n        self.task.handle_event(event)\n        assert self.task.is_failed\n        assert self.task.is_done\n\n    def test_handle_killed(self):\n        self.task.started()\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"killed\",\n            terminal=True,\n            success=False,\n        )\n        self.task.handle_event(event)\n        assert self.task.is_failed\n        assert self.task.is_done\n\n    def test_handle_lost(self):\n        self.task.started()\n        event = mock_task_event(\n       
     task_id=self.task_id,\n            platform_type=\"lost\",\n            terminal=True,\n            success=False,\n        )\n        self.task.handle_event(event)\n        assert self.task.is_unknown\n        assert self.task.is_done\n\n    def test_handle_error(self):\n        self.task.started()\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=\"error\",\n            terminal=True,\n            success=False,\n        )\n        self.task.handle_event(event)\n        assert self.task.is_failed\n        assert self.task.is_done\n\n    def test_handle_terminal_event_offer_timeout(self):\n        self.task.started()\n        event = mock_task_event(\n            task_id=self.task_id,\n            platform_type=None,\n            terminal=True,\n            success=False,\n            raw=\"failed due to offer timeout\",\n            message=\"stop\",\n        )\n        self.task.handle_event(event)\n        assert self.task.is_failed\n        assert self.task.is_done\n\n    def test_handle_success_sequence(self):\n        self.task.handle_event(\n            mock_task_event(\n                task_id=self.task_id,\n                platform_type=\"staging\",\n            ),\n        )\n        self.task.handle_event(\n            mock_task_event(\n                task_id=self.task_id,\n                platform_type=\"starting\",\n            ),\n        )\n        self.task.handle_event(\n            mock_task_event(\n                task_id=self.task_id,\n                platform_type=\"running\",\n            ),\n        )\n        self.task.handle_event(\n            mock_task_event(\n                task_id=self.task_id,\n                platform_type=\"finished\",\n                terminal=True,\n                success=True,\n            ),\n        )\n        assert self.task.is_complete\n\n    def test_log_event_error(self):\n        with mock.patch.object(self.task, \"log_event_info\",) as mock_log_event, mock.patch.object(\n            self.task.log,\n            \"warning\",\n        ) as mock_log:\n            mock_log_event.side_effect = Exception\n            self.task.handle_event(\n                mock_task_event(\n                    task_id=self.task_id,\n                    platform_type=\"running\",\n                ),\n            )\n            assert mock_log_event.called\n            assert mock_log.called\n        assert self.task.state == MesosTask.RUNNING\n\n    def test_get_event_logger_add_unique_handlers(self):\n        \"\"\"\n        Ensures that only a single handler (for stderr) is added to the\n        MesosTask event logger, to prevent duplicate log output.\n        \"\"\"\n        # Call 2 times to make sure 2nd call doesn't add another handler\n        logger = self.task.get_event_logger()\n        logger = self.task.get_event_logger()\n\n        assert len(logger.handlers) == 1\n\n\nclass TestMesosCluster(TestCase):\n    @setup_teardown\n    def setup_mocks(self):\n        with mock.patch(\"tron.mesos.PyDeferredQueue\", autospec=True,) as queue_cls, mock.patch(\n            \"tron.mesos.TaskProcessor\",\n            autospec=True,\n        ) as processor_cls, mock.patch(\"tron.mesos.Subscription\", autospec=True,) as runner_cls, mock.patch(\n            \"tron.mesos.get_mesos_leader\",\n            autospec=True,\n        ) as mock_get_leader:\n            self.mock_queue = queue_cls.return_value\n            self.mock_processor = processor_cls.return_value\n            self.mock_runner_cls = 
runner_cls\n            self.mock_runner_cls.return_value.configure_mock(\n                stopping=False,\n                TASK_CONFIG_INTERFACE=mock.Mock(),\n            )\n            self.mock_get_leader = mock_get_leader\n            yield\n\n    @mock.patch(\"tron.mesos.socket\", autospec=True)\n    def test_init(self, mock_socket):\n        mock_socket.gethostname.return_value = \"hostname\"\n        cluster = MesosCluster(\n            mesos_address=\"mesos-cluster-a.me\",\n            mesos_master_port=5000,\n            secret=\"my_secret\",\n            mesos_role=\"tron\",\n            framework_id=\"fake_framework_id\",\n            principal=\"fake-principal\",\n        )\n\n        assert_equal(cluster.queue, self.mock_queue)\n        assert_equal(cluster.processor, self.mock_processor)\n\n        self.mock_get_leader.assert_called_once_with(\n            \"mesos-cluster-a.me\",\n            5000,\n        )\n        self.mock_processor.executor_from_config.assert_has_calls(\n            [\n                mock.call(\n                    provider=\"mesos_task\",\n                    provider_config={\n                        \"secret\": \"my_secret\",\n                        \"principal\": \"fake-principal\",\n                        \"mesos_address\": self.mock_get_leader.return_value,\n                        \"role\": \"tron\",\n                        \"framework_name\": \"tron-hostname\",\n                        \"framework_id\": \"fake_framework_id\",\n                        \"failover\": True,\n                    },\n                ),\n                mock.call(\n                    provider=\"logging\",\n                    provider_config=mock.ANY,\n                ),\n            ]\n        )\n        self.mock_runner_cls.assert_called_once_with(\n            self.mock_processor.executor_from_config.return_value,\n            self.mock_queue,\n        )\n        assert_equal(cluster.runner, self.mock_runner_cls.return_value)\n\n        get_event_deferred = cluster.deferred\n        assert_equal(get_event_deferred, self.mock_queue.get.return_value)\n        get_event_deferred.addCallback.assert_has_calls(\n            [\n                mock.call(cluster._process_event),\n                mock.call(cluster.handle_next_event),\n            ]\n        )\n\n    def test_init_disabled(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n\n        assert_equal(cluster.queue, self.mock_queue)\n        assert_equal(cluster.processor, self.mock_processor)\n        assert_equal(self.mock_processor.executor_from_config.call_count, 0)\n        assert cluster.runner is None\n\n    def test_set_enabled_off(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=True)\n        mock_task = mock.Mock()\n        cluster.tasks = {\"task\": mock_task}\n        cluster.set_enabled(False)\n        assert not cluster.enabled\n        assert cluster.runner.stop.call_count == 1\n        assert cluster.tasks == {}\n        assert mock_task.exited.call_count == 1\n\n    def test_set_enabled_on(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n        cluster.set_enabled(True)\n        assert_equal(cluster.enabled, True)\n        # Basically the same as regular initialization\n        assert_equal(self.mock_processor.executor_from_config.call_count, 2)\n        self.mock_runner_cls.assert_called_once_with(\n            self.mock_processor.executor_from_config.return_value,\n            self.mock_queue,\n        )\n      
  assert_equal(cluster.runner, self.mock_runner_cls.return_value)\n\n        get_event_deferred = cluster.deferred\n        assert_equal(get_event_deferred, self.mock_queue.get.return_value)\n        get_event_deferred.addCallback.assert_has_calls(\n            [\n                mock.call(cluster._process_event),\n                mock.call(cluster.handle_next_event),\n            ]\n        )\n\n    def test_set_enabled_on_already(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=True)\n        cluster.set_enabled(True)\n        assert_equal(cluster.enabled, True)\n        # Runner should have only be created once\n        assert_equal(self.mock_runner_cls.call_count, 1)\n\n    def test_configure_tasks(self):\n        cluster = MesosCluster(\n            \"mesos-cluster-a.me\",\n            default_volumes=[],\n            dockercfg_location=\"first\",\n            offer_timeout=60,\n        )\n        assert_equal(cluster.default_volumes, [])\n        assert_equal(cluster.dockercfg_location, \"first\")\n        assert_equal(cluster.offer_timeout, 60)\n\n        expected_volumes = [\n            {\n                \"container_path\": \"/tmp\",\n                \"host_path\": \"/host\",\n                \"mode\": \"RO\",\n            }\n        ]\n        cluster.configure_tasks(\n            default_volumes=expected_volumes,\n            dockercfg_location=\"second\",\n            offer_timeout=300,\n        )\n        assert_equal(cluster.default_volumes, expected_volumes)\n        assert_equal(cluster.dockercfg_location, \"second\")\n        assert_equal(cluster.offer_timeout, 300)\n\n    def test_submit(self):\n        mock_clusterman_metrics = mock.MagicMock()\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock(get_config=mock.Mock(return_value={\"environment\": {}}))\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        with mock.patch(\n            \"tron.mesos.get_clusterman_metrics\",\n            return_value=(mock_clusterman_metrics),\n            autospec=True,\n        ):\n            cluster.submit(mock_task)\n\n        assert \"this_task\" in cluster.tasks\n        assert cluster.tasks[\"this_task\"] == mock_task\n        cluster.runner.run.assert_called_once_with(\n            mock_task.get_config.return_value,\n        )\n        assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 0\n\n    def test_submit_with_clusterman(self):\n        mock_clusterman_metrics = mock.MagicMock()\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock(\n            get_config=mock.Mock(\n                return_value={\n                    \"environment\": {\n                        \"CLUSTERMAN_RESOURCES\": '{\"required_cpus|blah=x\": 4}',\n                        \"EXECUTOR_CLUSTER\": \"fake-cluster\",\n                        \"EXECUTOR_POOL\": \"fake-pool\",\n                    },\n                },\n            ),\n        )\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        with mock.patch(\n            \"tron.mesos.get_clusterman_metrics\",\n            return_value=mock_clusterman_metrics,\n            autospec=True,\n        ), staticconf.testing.MockConfiguration(\n            {\"clusters\": {\"fake-cluster\": {\"aws_region\": \"fake-region\"}}},\n            namespace=\"clusterman\",\n        ):\n            cluster.submit(mock_task)\n\n        assert \"this_task\" in cluster.tasks\n        assert cluster.tasks[\"this_task\"] == 
mock_task\n        cluster.runner.run.assert_called_once_with(\n            mock_task.get_config.return_value,\n        )\n        assert mock_clusterman_metrics.ClustermanMetricsBotoClient.call_count == 1\n\n    def test_submit_disabled(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n        mock_task = mock.MagicMock()\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        with mock.patch(\n            \"tron.mesos.get_clusterman_metrics\",\n            return_value=(None, None),\n            autospec=True,\n        ):\n            cluster.submit(mock_task)\n\n        assert \"this_task\" not in cluster.tasks\n        mock_task.exited.assert_called_once_with(1)\n\n    def test_recover(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock()\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        cluster.recover(mock_task)\n\n        assert \"this_task\" in cluster.tasks\n        assert cluster.tasks[\"this_task\"] == mock_task\n        cluster.runner.reconcile.assert_called_once_with(\n            mock_task.get_config.return_value,\n        )\n        assert mock_task.started.call_count == 1\n\n    def test_recover_disabled(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n        mock_task = mock.MagicMock()\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        cluster.recover(mock_task)\n\n        assert \"this_task\" not in cluster.tasks\n        mock_task.exited.assert_called_once_with(None)\n\n    @mock.patch(\"tron.mesos.MesosTask\", autospec=True)\n    def test_create_task_defaults(self, mock_task):\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_serializer = mock.MagicMock()\n        task = cluster.create_task(\n            action_run_id=\"action_c\",\n            command=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            docker_image=\"container:latest\",\n            docker_parameters=[],\n            env={\"TESTING\": \"true\"},\n            extra_volumes=[],\n            serializer=mock_serializer,\n        )\n        cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with(\n            name=\"action_c\",\n            cmd=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            image=\"container:latest\",\n            docker_parameters=[],\n            environment={\"TESTING\": \"true\"},\n            volumes=[],\n            uris=[],\n            offer_timeout=None,\n        )\n        assert_equal(task, mock_task.return_value)\n        mock_task.assert_called_once_with(\n            \"action_c\",\n            cluster.runner.TASK_CONFIG_INTERFACE.return_value,\n            mock_serializer,\n        )\n\n    @mock.patch(\"tron.mesos.MesosTask\", autospec=True)\n    def test_create_task_with_task_id(self, mock_task):\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_serializer = mock.MagicMock()\n        task_id = \"task.0123-fabc\"\n        task = cluster.create_task(\n            action_run_id=\"action_c\",\n            command=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            docker_image=\"container:latest\",\n            docker_parameters=[],\n            env={\"TESTING\": \"true\"},\n            extra_volumes=[],\n            serializer=mock_serializer,\n            
task_id=task_id,\n        )\n        assert cluster.runner.TASK_CONFIG_INTERFACE.call_count == 1\n        assert task == mock_task.return_value\n        task_config = cluster.runner.TASK_CONFIG_INTERFACE.return_value\n        task_config.set_task_id.assert_called_once_with(task_id)\n        mock_task.assert_called_once_with(\n            \"action_c\",\n            task_config.set_task_id.return_value,\n            mock_serializer,\n        )\n\n    @mock.patch(\"tron.mesos.MesosTask\", autospec=True)\n    def test_create_task_disabled(self, mock_task):\n        # If Mesos is disabled, should return None\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n        mock_serializer = mock.MagicMock()\n        task = cluster.create_task(\n            action_run_id=\"action_c\",\n            command=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            docker_image=\"container:latest\",\n            docker_parameters=[],\n            env={\"TESTING\": \"true\"},\n            extra_volumes=[],\n            serializer=mock_serializer,\n        )\n        assert task is None\n\n    @mock.patch(\"tron.mesos.MesosTask\", autospec=True)\n    def test_create_task_with_configuration(self, mock_task):\n        cluster = MesosCluster(\n            \"mesos-cluster-a.me\",\n            default_volumes=[\n                {\n                    \"container_path\": \"/tmp\",\n                    \"host_path\": \"/host\",\n                    \"mode\": \"RO\",\n                },\n                {\n                    \"container_path\": \"/other\",\n                    \"host_path\": \"/other\",\n                    \"mode\": \"RW\",\n                },\n            ],\n            dockercfg_location=\"some_place\",\n            offer_timeout=202,\n        )\n        mock_serializer = mock.MagicMock()\n        task = cluster.create_task(\n            action_run_id=\"action_c\",\n            command=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            docker_image=\"container:latest\",\n            docker_parameters=[],\n            env={\"TESTING\": \"true\"},\n            # This should override the default volume for /tmp\n            extra_volumes=[\n                {\n                    \"container_path\": \"/tmp\",\n                    \"host_path\": \"/custom\",\n                    \"mode\": \"RW\",\n                },\n            ],\n            serializer=mock_serializer,\n        )\n        cluster.runner.TASK_CONFIG_INTERFACE.assert_called_once_with(\n            name=\"action_c\",\n            cmd=\"echo hi\",\n            cpus=1,\n            mem=10,\n            disk=20,\n            constraints=[],\n            image=\"container:latest\",\n            docker_parameters=[],\n            environment={\"TESTING\": \"true\"},\n            volumes=[\n                {\n                    \"container_path\": \"/tmp\",\n                    \"host_path\": \"/custom\",\n                    \"mode\": \"RW\",\n                },\n                {\n                    \"container_path\": \"/other\",\n                    \"host_path\": \"/other\",\n                    \"mode\": \"RW\",\n                },\n            ],\n            uris=[\"some_place\"],\n            offer_timeout=202,\n        )\n        assert_equal(task, mock_task.return_value)\n        mock_task.assert_called_once_with(\n            \"action_c\",\n            
cluster.runner.TASK_CONFIG_INTERFACE.return_value,\n            mock_serializer,\n        )\n\n    def test_process_event_task(self):\n        event = mock_task_event(\"this_task\", \"some_platform_type\")\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock(spec_set=MesosTask)\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        cluster.tasks[\"this_task\"] = mock_task\n\n        cluster._process_event(event)\n        mock_task.handle_event.assert_called_once_with(event)\n\n    def test_process_event_task_id_invalid(self):\n        event = mock_task_event(\"other_task\", \"some_platform_type\")\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock(spec_set=MesosTask)\n        mock_task.get_mesos_id.return_value = \"this_task\"\n        cluster.tasks[\"this_task\"] = mock_task\n\n        cluster._process_event(event)\n        assert_equal(mock_task.handle_event.call_count, 0)\n\n    def test_process_event_control_stop(self):\n        event = mock.MagicMock(\n            kind=\"control\",\n            message=\"stop\",\n        )\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        cluster._process_event(event)\n        assert cluster.runner.stop.call_count == 1\n        assert cluster.deferred is None\n\n    def test_stop_default(self):\n        # When stopping, tasks should not exit. They will be recovered\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        mock_task = mock.MagicMock()\n        cluster.tasks = {\"task_id\": mock_task}\n        cluster.stop()\n        assert cluster.runner.stop.call_count == 1\n        assert cluster.deferred is None\n        assert mock_task.exited.call_count == 0\n        assert len(cluster.tasks) == 1\n\n    def test_stop_disabled(self):\n        # Shouldn't raise an error\n        cluster = MesosCluster(\"mesos-cluster-a.me\", enabled=False)\n        cluster.stop()\n\n    def test_kill(self):\n        cluster = MesosCluster(\"mesos-cluster-a.me\")\n        cluster.kill(\"fake_task_id\")\n        cluster.runner.kill.assert_called_once_with(\"fake_task_id\")\n"
  },
  {
    "path": "tests/metrics_test.py",
    "content": "from unittest import mock\n\nimport pytest\n\nimport tron.metrics as metrics\n\n\n@pytest.fixture(autouse=True)\ndef all_metrics():\n    with mock.patch.object(metrics, \"all_metrics\", new=dict()) as mock_all:\n        yield mock_all\n\n\ndef test_get_metric(all_metrics):\n    timer = metrics.get_metric(\n        \"timer\",\n        \"api.requests\",\n        {\"method\": \"GET\"},\n        mock.Mock(),\n    )\n    same_timer = metrics.get_metric(\n        \"timer\",\n        \"api.requests\",\n        {\"method\": \"GET\"},\n        mock.Mock(),\n    )\n    other_timer = metrics.get_metric(\n        \"timer\",\n        \"api.requests\",\n        {\"method\": \"POST\"},\n        mock.Mock(),\n    )\n    metrics.get_metric(\"something\", \"name\", None, mock.Mock())\n    assert timer == same_timer\n    assert other_timer != timer\n    assert len(all_metrics) == 3\n\n\n@mock.patch(\"tron.metrics.get_metric\", autospec=True)\ndef test_timer(mock_get_metric):\n    test_metric = metrics.Timer()\n    mock_get_metric.return_value = test_metric\n    metrics.timer(\"my_metric\", 110)\n    metrics.timer(\"my_metric\", 84)\n    mock_get_metric.assert_called_with(\n        \"timer\",\n        \"my_metric\",\n        None,\n        mock.ANY,\n    )\n    result = metrics.view_timer(test_metric)\n    assert result[\"count\"] == 2\n\n\n@mock.patch(\"tron.metrics.get_metric\", autospec=True)\ndef test_count(mock_get_metric):\n    test_metric = metrics.Counter()\n    mock_get_metric.return_value = test_metric\n    metrics.count(\"my_metric\", 13)\n    metrics.count(\"my_metric\", -1)\n    mock_get_metric.assert_called_with(\n        \"counter\",\n        \"my_metric\",\n        None,\n        mock.ANY,\n    )\n    result = metrics.view_counter(test_metric)\n    assert result[\"count\"] == 12\n\n\n@mock.patch(\"tron.metrics.get_metric\", autospec=True)\ndef test_meter(mock_get_metric):\n    test_metric = metrics.Meter()\n    mock_get_metric.return_value = test_metric\n    metrics.meter(\"my_metric\")\n    metrics.meter(\"my_metric\")\n    mock_get_metric.assert_called_with(\n        \"meter\",\n        \"my_metric\",\n        None,\n        mock.ANY,\n    )\n    result = metrics.view_meter(test_metric)\n    assert result[\"count\"] == 2\n\n\n@mock.patch(\"tron.metrics.get_metric\", autospec=True)\ndef test_gauge(mock_get_metric):\n    test_metric = metrics.SimpleGauge()\n    mock_get_metric.return_value = test_metric\n    metrics.gauge(\"my_metric\", 23)\n    metrics.gauge(\"my_metric\", 102)\n    mock_get_metric.assert_called_with(\n        \"gauge\",\n        \"my_metric\",\n        None,\n        mock.ANY,\n    )\n    result = metrics.view_gauge(test_metric)\n    assert result[\"value\"] == 102\n\n\n@mock.patch(\"tron.metrics.get_metric\", autospec=True)\ndef test_histogram(mock_get_metric):\n    test_metric = metrics.Histogram()\n    mock_get_metric.return_value = test_metric\n    metrics.histogram(\"my_metric\", 2)\n    metrics.histogram(\"my_metric\", 92)\n    mock_get_metric.assert_called_with(\n        \"histogram\",\n        \"my_metric\",\n        None,\n        mock.ANY,\n    )\n    result = metrics.view_histogram(test_metric)\n    assert result[\"count\"] == 2\n\n\ndef test_view_all_metrics_empty():\n    result = metrics.view_all_metrics()\n    assert result == {\n        \"counter\": [],\n        \"gauge\": [],\n        \"histogram\": [],\n        \"meter\": [],\n        \"timer\": [],\n    }\n\n\ndef test_view_all_metrics():\n    metrics.timer(\"a\", 1)\n    
metrics.count(\"b\", 9, dimensions={\"method\": \"GET\"})\n    metrics.meter(\"c\")\n    metrics.gauge(\"d\", 3)\n    metrics.histogram(\"e\", 2)\n    metrics.histogram(\"f\", 3)\n    result = metrics.view_all_metrics()\n\n    assert len(result[\"timer\"]) == 1\n    assert result[\"timer\"][0][\"name\"] == \"a\"\n\n    assert len(result[\"counter\"]) == 1\n    assert result[\"counter\"][0][\"name\"] == \"b\"\n    assert result[\"counter\"][0][\"dimensions\"] == {\"method\": \"GET\"}\n\n    assert len(result[\"meter\"]) == 1\n    assert result[\"meter\"][0][\"name\"] == \"c\"\n\n    assert len(result[\"gauge\"]) == 1\n    assert result[\"gauge\"][0][\"name\"] == \"d\"\n\n    assert len(result[\"histogram\"]) == 2\n    names = {metric[\"name\"] for metric in result[\"histogram\"]}\n    assert names == {\"e\", \"f\"}\n"
  },
  {
    "path": "tests/mocks.py",
    "content": "import atexit\nimport datetime\nimport itertools\nimport shutil\nimport tempfile\nfrom unittest.mock import MagicMock\n\n\nclass MockAction(MagicMock):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"name\", \"action_name\")\n        kwargs.setdefault(\"required_actions\", [])\n        kwargs.setdefault(\"dependent_actions\", [])\n        super().__init__(*args, **kwargs)\n\n\nclass MockActionGraph(MagicMock):\n    def __init__(self, *args, **kwargs):\n        action = MockAction()\n        kwargs.setdefault(\"graph\", [action])\n        kwargs.setdefault(\"action_map\", {action.name: action})\n        super().__init__(*args, **kwargs)\n\n    def __getitem__(self, item):\n        action = MockAction(name=item)\n        self.action_map.setdefault(item, action)\n        return self.action_map[item]\n\n    def get_required_actions(self, name):\n        return []\n\n\nclass MockActionRun(MagicMock):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"output_path\", [tempfile.mkdtemp()])\n        kwargs.setdefault(\"start_time\", datetime.datetime.now())\n        kwargs.setdefault(\"end_time\", datetime.datetime.now())\n        atexit.register(lambda: shutil.rmtree(kwargs[\"output_path\"][0]))\n        super().__init__(*args, **kwargs)\n\n\nclass MockActionRunCollection(MagicMock):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"action_graph\", MockActionGraph())\n        kwargs.setdefault(\"run_map\", {})\n        super().__init__(*args, **kwargs)\n\n    def __getitem__(self, item):\n        action_run = MockActionRun(name=item)\n        self.run_map.setdefault(item, action_run)\n        return self.run_map[item]\n\n\nclass MockJobRun(MagicMock):\n    def __init__(self, *args, **kwargs):\n        kwargs.setdefault(\"output_path\", [tempfile.mkdtemp()])\n        kwargs.setdefault(\"action_graph\", MockActionGraph())\n        action_runs = MockActionRunCollection(\n            action_graph=kwargs[\"action_graph\"],\n        )\n        kwargs.setdefault(\"action_runs\", action_runs)\n        atexit.register(lambda: shutil.rmtree(kwargs[\"output_path\"][0]))\n        super().__init__(*args, **kwargs)\n\n\nclass MockNode(MagicMock):\n    def __init__(self, hostname=None):\n        super().__init__()\n        self.name = self.hostname = hostname\n\n    def run(self, runnable):\n        runnable.started()\n        return type(self)()\n\n\nclass MockNodePool:\n    _node = None\n\n    def __init__(self, *node_names):\n        self.nodes = []\n        self._ndx_cycle = None\n        for hostname in node_names:\n            self.nodes.append(MockNode(hostname=hostname))\n\n        if self.nodes:\n            self._ndx_cycle = itertools.cycle(range(0, len(self.nodes)))\n\n    def __getitem__(self, value):\n        for node in self.nodes:\n            if node.hostname == value:\n                return node\n        else:\n            raise KeyError\n\n    def next(self):\n        if not self.nodes:\n            self.nodes.append(MockNode())\n\n        if self._ndx_cycle:\n            return self.nodes[next(self._ndx_cycle)]\n        else:\n            return self.nodes[0]\n\n    next_round_robin = next\n\n\nclass MockJobRunCollection(MagicMock):\n    def __iter__(self):\n        return iter(self.runs)\n"
  },
  {
    "path": "tests/node_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import assert_not_equal\nfrom testifycompat import assert_not_in\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import setup_teardown\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tests.testingutils import autospec_method\nfrom tron import actioncommand\nfrom tron import node\nfrom tron import ssh\nfrom tron.config import schema\nfrom tron.core import actionrun\nfrom tron.serialize import filehandler\n\n\ndef create_mock_node(name=None):\n    mock_node = mock.create_autospec(node.Node)\n    if name:\n        mock_node.get_name.return_value = name\n    return mock_node\n\n\ndef create_mock_pool():\n    return mock.create_autospec(node.NodePool)\n\n\nclass TestNodePoolRepository(TestCase):\n    @setup\n    def setup_store(self):\n        self.node = create_mock_node()\n        self.repo = node.NodePoolRepository.get_instance()\n        self.repo.add_node(self.node)\n\n    @teardown\n    def teardown_store(self):\n        self.repo.clear()\n\n    def test_single_instance(self):\n        assert_raises(ValueError, node.NodePoolRepository)\n        assert self.repo is node.NodePoolRepository.get_instance()\n\n    def test_get_by_name(self):\n        node_pool = self.repo.get_by_name(self.node.get_name())\n        assert_equal(self.node, node_pool.next())\n\n    def test_get_by_name_miss(self):\n        assert_equal(None, self.repo.get_by_name(\"bogus\"))\n\n    def test_clear(self):\n        self.repo.clear()\n        assert_not_in(self.node, self.repo.nodes)\n        assert_not_in(self.node, self.repo.pools)\n\n    def test_update_from_config(self):\n        mock_nodes = {\"a\": create_mock_node(\"a\"), \"b\": create_mock_node(\"b\")}\n        self.repo.nodes.update(mock_nodes)\n        node_config = {\"a\": mock.Mock(), \"b\": mock.Mock()}\n        node_pool_config = {\"c\": mock.Mock(nodes=[\"a\", \"b\"])}\n        ssh_options = mock.Mock(identities=[], known_hosts_file=None)\n        node.NodePoolRepository.update_from_config(\n            node_config,\n            node_pool_config,\n            ssh_options,\n        )\n        node_names = [node_config[\"a\"].name, node_config[\"b\"].name]\n        assert_equal(\n            set(self.repo.pools),\n            set(node_names + [node_pool_config[\"c\"].name]),\n        )\n        assert_equal(\n            set(self.repo.nodes),\n            set(list(node_names) + list(mock_nodes.keys())),\n        )\n\n    def test_nodes_by_name(self):\n        mock_nodes = {\"a\": mock.Mock(), \"b\": mock.Mock()}\n        self.repo.nodes.update(mock_nodes)\n        nodes = self.repo._get_nodes_by_name([\"a\", \"b\"])\n        assert_equal(nodes, list(mock_nodes.values()))\n\n    def test_get_node(self):\n        returned_node = self.repo.get_node(self.node.get_name())\n        assert_equal(returned_node, self.node)\n\n\nclass TestKnownHost(TestCase):\n    @setup\n    def setup_known_hosts(self):\n        self.known_hosts = node.KnownHosts(None)\n        self.entry = mock.Mock()\n        self.known_hosts._added.append(self.entry)\n\n    def test_get_public_key(self):\n        hostname = \"hostname\"\n        pub_key = self.known_hosts.get_public_key(hostname)\n        self.entry.matchesHost.assert_called_with(hostname)\n        assert_equal(pub_key, self.entry.publicKey)\n\n    def 
test_get_public_key_not_found(self):\n        self.entry.matchesHost.return_value = False\n        assert not self.known_hosts.get_public_key(\"hostname\")\n\n\nclass TestDetermineJitter(TestCase):\n    @setup\n    def setup_node_settings(self):\n        self.settings = mock.Mock(\n            jitter_load_factor=1,\n            jitter_min_load=4,\n            jitter_max_delay=20,\n        )\n\n    @setup_teardown\n    def patch_random(self):\n        with mock.patch(\"tron.node.random\", autospec=True) as mock_random:\n            mock_random.random.return_value = 1\n            yield\n\n    def test_jitter_under_min_load(self):\n        assert_equal(node.determine_jitter(3, self.settings), 0)\n        assert_equal(node.determine_jitter(4, self.settings), 0)\n\n    def test_jitter_with_load_factor(self):\n        self.settings.jitter_load_factor = 2\n        assert_equal(node.determine_jitter(3, self.settings), 2.0)\n        assert_equal(node.determine_jitter(2, self.settings), 0)\n\n    def test_jitter_with_max_delay(self):\n        self.settings.jitter_max_delay = 15\n        assert_equal(node.determine_jitter(20, self.settings), 15.0)\n        assert_equal(node.determine_jitter(100, self.settings), 15.0)\n\n\ndef build_node(\n    hostname=\"localhost\",\n    username=\"theuser\",\n    name=\"thename\",\n    pub_key=None,\n):\n    config = mock.Mock(hostname=hostname, username=username, name=name)\n    ssh_opts = mock.create_autospec(ssh.SSHAuthOptions)\n    node_settings = mock.create_autospec(schema.ConfigSSHOptions)\n    return node.Node(config, ssh_opts, pub_key, node_settings)\n\n\nclass TestNode(TestCase):\n    class TestConnection:\n        def openChannel(self, chan):\n            self.chan = chan\n\n    @setup\n    def setup_node(self):\n        self.node = build_node()\n\n    def test_output_logging(self):\n        test_node = build_node()\n        serializer = mock.create_autospec(filehandler.FileHandleManager)\n        action_cmd = actionrun.ActionCommand(\"test\", \"false\", serializer)\n\n        test_node.connection = self.TestConnection()\n        test_node.run_states = {action_cmd.id: mock.Mock(state=0)}\n        test_node.run_states[action_cmd.id].state = node.RUN_STATE_CONNECTING\n        test_node.run_states[action_cmd.id].run = action_cmd\n\n        test_node._open_channel(action_cmd)\n        assert test_node.connection.chan is not None\n        test_node.connection.chan.dataReceived(\"test\")\n        serializer.open.return_value.write.assert_called_with(\"test\")\n\n    def test_from_config(self):\n        ssh_options = self.node.conch_options\n        node_config = mock.Mock(\n            hostname=\"localhost\",\n            username=\"theuser\",\n            name=\"thename\",\n        )\n        ssh_options.__getitem__.return_value = \"something\"\n        public_key = mock.Mock()\n        node_settings = mock.Mock()\n        new_node = node.Node.from_config(\n            node_config,\n            ssh_options,\n            public_key,\n            node_settings,\n        )\n        assert_equal(new_node.name, node_config.name)\n        assert_equal(new_node.hostname, node_config.hostname)\n        assert_equal(new_node.username, node_config.username)\n        assert_equal(new_node.pub_key, public_key)\n        assert_equal(new_node.node_settings, node_settings)\n\n    def test__eq__true(self):\n        other_node = build_node()\n        other_node.conch_options = self.node.conch_options\n        other_node.node_settings = self.node.node_settings\n        
other_node.config = self.node.config\n        assert_equal(other_node, self.node)\n\n    def test__eq__false_config_changed(self):\n        other_node = build_node(username=\"different\")\n        assert_not_equal(other_node, self.node)\n\n    def test__eq__false_pub_key_changed(self):\n        other_node = build_node(pub_key=\"something\")\n        assert_not_equal(other_node, self.node)\n\n    def test__eq__false_ssh_options_changed(self):\n        other_node = build_node()\n        other_node.conch_options = mock.create_autospec(ssh.SSHAuthOptions)\n        assert_not_equal(other_node, self.node)\n\n    def test_stop_not_tracked(self):\n        action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            id=mock.Mock(),\n        )\n        self.node.stop(action_command)\n\n    def test_stop(self):\n        autospec_method(self.node._fail_run)\n        action_command = mock.create_autospec(\n            actioncommand.ActionCommand,\n            id=mock.Mock(),\n        )\n        self.node.run_states[action_command.id] = mock.Mock()\n        self.node.stop(action_command)\n        assert_equal(self.node._fail_run.call_count, 1)\n\n\nclass TestNodePool(TestCase):\n    @setup\n    def setup_nodes(self):\n        self.nodes = [build_node(name=\"node%s\" % i) for i in range(5)]\n        self.node_pool = node.NodePool(self.nodes, \"thename\")\n\n    def test_from_config(self):\n        name = \"the pool name\"\n        nodes = [create_mock_node(), create_mock_node()]\n        config = mock.Mock(name=name)\n        new_pool = node.NodePool.from_config(config, nodes)\n        assert_equal(new_pool.name, config.name)\n        assert_equal(new_pool.nodes, nodes)\n\n    def test__init__(self):\n        new_node = node.NodePool(self.nodes, \"thename\")\n        assert_equal(new_node.name, \"thename\")\n\n    def test__eq__(self):\n        other_pool = node.NodePool(self.nodes, \"othername\")\n        assert_equal(self.node_pool, other_pool)\n\n    def test_next(self):\n        # Call next many times\n        for _ in range(len(self.nodes) * 2 + 1):\n            assert_in(self.node_pool.next(), self.nodes)\n\n    def test_next_round_robin(self):\n        node_order = [self.node_pool.next_round_robin() for _ in range(len(self.nodes) * 2)]\n        assert_equal(node_order, self.nodes + self.nodes)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/sandbox.py",
    "content": "import contextlib\nimport functools\nimport logging\nimport os\nimport shutil\nimport signal\nimport socket\nimport sys\nimport tempfile\nimport time\nfrom subprocess import CalledProcessError\nfrom subprocess import PIPE\nfrom subprocess import Popen\nfrom unittest import mock\n\nfrom testifycompat import assert_not_equal\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.commands import client\nfrom tron.config import manager\nfrom tron.config import schema\n\n# Used for getting the locations of the executable\ntest_dir, _ = os.path.split(__file__)\nrepo_root, _ = os.path.split(test_dir)\n\nlog = logging.getLogger(__name__)\n\n\ndef wait_on_sandbox(func, delay=0.1, max_wait=5.0):\n    \"\"\"Poll for func() to return True. Sleeps `delay` seconds between polls\n    up to a max of `max_wait` seconds.\n    \"\"\"\n    start_time = time.time()\n    while time.time() - start_time < max_wait:\n        time.sleep(delay)\n        if func():\n            return\n    raise TronSandboxException(\"Failed %s\" % func.__name__)\n\n\ndef wait_on_state(client_func, url, state, field=\"state\"):\n    \"\"\"Use client_func(url) to wait until the resource changes to state.\"\"\"\n\n    def wait_func():\n        return client_func(url)[field] == state\n\n    wait_func.__name__ = f\"{url} wait on {state}\"\n    wait_on_sandbox(wait_func)\n\n\ndef wait_on_proc_terminate(pid):\n    def wait_on_terminate():\n        try:\n            os.kill(pid, 0)\n        except Exception:\n            return True\n\n    wait_on_terminate.__name__ = \"Wait on %s to terminate\" % pid\n    wait_on_sandbox(wait_on_terminate)\n\n\ndef build_waiter_func(client_func, url):\n    return functools.partial(wait_on_state, client_func, url)\n\n\ndef handle_output(cmd, out_err, returncode):\n    \"\"\"Log process output before it is parsed. 
Raise exception if exit code\n    is nonzero.\n    \"\"\"\n    stdout, stderr = out_err\n    cmd = \" \".join(cmd)\n    if stdout:\n        log.warn(\"%s STDOUT: %s\", cmd, stdout)\n    if stderr:\n        log.warn(\"%s STDERR: %s\", cmd, stderr)\n    if returncode:\n        raise CalledProcessError(returncode, cmd)\n\n\ndef find_unused_port():\n    \"\"\"Return a port number that is not in use.\"\"\"\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    with contextlib.closing(sock) as sock:\n        sock.bind((\"localhost\", 0))\n        _, port = sock.getsockname()\n    return port\n\n\nclass TronSandboxException(Exception):\n    pass\n\n\nclass SandboxTestCase(TestCase):\n\n    _suites = [\"sandbox\"]\n\n    sandbox = None\n\n    @setup\n    def make_sandbox(self):\n        verify_environment()\n        self.sandbox = TronSandbox()\n        self.client = self.sandbox.client\n\n    @teardown\n    def delete_sandbox(self):\n        if self.sandbox:\n            self.sandbox.delete()\n            self.sandbox = None\n\n    def start_with_config(self, config):\n        self.sandbox.save_config(config)\n        self.sandbox.trond()\n\n    def restart_trond(self):\n        old_pid = self.sandbox.get_trond_pid()\n        self.sandbox.shutdown_trond()\n        wait_on_proc_terminate(self.sandbox.get_trond_pid())\n\n        self.sandbox.trond()\n        assert_not_equal(old_pid, self.sandbox.get_trond_pid())\n\n\nclass ClientProxy:\n    \"\"\"Wrap calls to client and raise a TronSandboxException on connection\n    failures.\n    \"\"\"\n\n    def __init__(self, client, log_filename):\n        self.client = client\n        self.log_filename = log_filename\n\n    def log_contents(self):\n        \"\"\"Return the contents of the log file.\"\"\"\n        with open(self.log_filename) as f:\n            return f.read()\n\n    def wrap(self, func, *args, **kwargs):\n        with mock.patch(\"tron.commands.client.log\", autospec=True):\n            try:\n                return func(*args, **kwargs)\n            except (client.RequestError, ValueError) as e:\n                # ValueError for JSONDecode errors\n                log_contents = self.log_contents()\n                if log_contents:\n                    log.warn(f\"{e!r}, Log:\\n{log_contents}\")\n                return False\n\n    def __getattr__(self, name):\n        attr = getattr(self.client, name)\n        if not callable(attr):\n            return attr\n\n        return functools.partial(self.wrap, attr)\n\n\ndef verify_environment():\n    for env_var in [\"SSH_AUTH_SOCK\", \"PYTHONPATH\"]:\n        if not os.environ.get(env_var):\n            raise TronSandboxException(\n                \"Missing $%s in test environment.\" % env_var,\n            )\n\n\nclass TronSandbox:\n    \"\"\"A sandbox for running trond and tron commands in subprocesses.\"\"\"\n\n    def __init__(self):\n        \"\"\"Set up a temp directory and store paths to relevant binaries\"\"\"\n        self.tmp_dir = tempfile.mkdtemp(prefix=\"tron-\")\n        cmd_path_func = functools.partial(os.path.join, repo_root, \"bin\")\n        cmds = \"tronctl\", \"trond\", \"tronfig\", \"tronview\"\n        self.commands = {cmd: cmd_path_func(cmd) for cmd in cmds}\n        self.log_file = self.abs_path(\"tron.log\")\n        self.log_conf = self.abs_path(\"logging.conf\")\n        self.pid_file = self.abs_path(\"tron.pid\")\n        self.config_path = self.abs_path(\"configs/\")\n        self.port = find_unused_port()\n        self.host = \"localhost\"\n        
self.api_uri = f\"http://{self.host}:{self.port}\"\n        cclient = client.Client(self.api_uri)\n        self.client = ClientProxy(cclient, self.log_file)\n        self.setup_logging_conf()\n\n    def abs_path(self, filename):\n        \"\"\"Return the absolute path for a file in the sandbox.\"\"\"\n        return os.path.join(self.tmp_dir, filename)\n\n    def setup_logging_conf(self):\n        config_template = os.path.join(repo_root, \"tests/data/logging.conf\")\n        with open(config_template) as fh:\n            config = fh.read()\n\n        with open(self.log_conf, \"w\") as fh:\n            fh.write(config.format(self.log_file))\n\n    def delete(self):\n        \"\"\"Delete the temp directory and shutdown trond.\"\"\"\n        self.shutdown_trond(sig_num=signal.SIGKILL)\n        shutil.rmtree(self.tmp_dir)\n\n    def save_config(self, config_text):\n        \"\"\"Save the initial tron configuration.\"\"\"\n        manager.create_new_config(self.config_path, config_text)\n\n    def run_command(self, command_name, args=None, stdin_lines=None):\n        \"\"\"Run the command by name and return (stdout, stderr).\"\"\"\n        args = args or []\n        command = [sys.executable, self.commands[command_name]] + args\n        stdin = PIPE if stdin_lines else None\n        proc = Popen(command, stdout=PIPE, stderr=PIPE, stdin=stdin)\n        streams = proc.communicate(stdin_lines)\n        try:\n            handle_output(command, streams, proc.returncode)\n        except CalledProcessError:\n            log.warn(self.client.log_contents())\n            raise\n        return streams\n\n    def tronctl(self, *args):\n        args = list(args) if args else []\n        return self.run_command(\"tronctl\", args + [\"--server\", self.api_uri])\n\n    def tronview(self, *args):\n        args = list(args) if args else []\n        args += [\"--nocolor\", \"--server\", self.api_uri]\n        return self.run_command(\"tronview\", args)\n\n    def trond(self, *args):\n        args = list(args) if args else []\n        args += [\n            \"--working-dir=%s\" % self.tmp_dir,\n            \"--pid-file=%s\" % self.pid_file,\n            \"--port=%d\" % self.port,\n            \"--host=%s\" % self.host,\n            \"--config-path=%s\" % self.config_path,\n            \"--log-conf=%s\" % self.log_conf,\n        ]\n\n        self.run_command(\"trond\", args)\n        wait_on_sandbox(lambda: bool(self.client.home()))\n\n    def tronfig(\n        self,\n        config_content=None,\n        name=schema.MASTER_NAMESPACE,\n    ):\n        args = [\"--server\", self.api_uri, name]\n        args += [\"-\"] if config_content else [\"-p\"]\n        return self.run_command(\"tronfig\", args, stdin_lines=config_content)\n\n    def get_trond_pid(self):\n        if not os.path.exists(self.pid_file):\n            return None\n        with open(self.pid_file) as f:\n            return int(f.read())\n\n    def shutdown_trond(self, sig_num=signal.SIGTERM):\n        trond_pid = self.get_trond_pid()\n        if trond_pid:\n            os.kill(trond_pid, sig_num)\n"
  },
  {
    "path": "tests/scheduler_test.py",
    "content": "import calendar\nimport datetime\nfrom unittest import mock\n\nimport pytz\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_gt\nfrom testifycompat import assert_gte\nfrom testifycompat import assert_lt\nfrom testifycompat import assert_lte\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests import testingutils\nfrom tron import scheduler\nfrom tron.config import config_utils\nfrom tron.config import schedule_parse\nfrom tron.config.config_utils import NullConfigContext\nfrom tron.config.schedule_parse import parse_groc_expression\nfrom tron.utils import timeutils\n\n\nclass TestSchedulerFromConfig(TestCase):\n    def test_cron_scheduler(self):\n        line = \"cron */5 * * 7,8 *\"\n        config_context = mock.Mock(path=\"test\")\n        config = schedule_parse.valid_schedule(line, config_context)\n        sched = scheduler.scheduler_from_config(config=config, time_zone=None)\n        start_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        next_time = sched.next_run_time(start_time)\n        assert_equal(next_time, datetime.datetime(2012, 7, 1, 0))\n        assert_equal(str(sched), \"cron */5 * * 7,8 *\")\n\n    def test_daily_scheduler(self):\n        config_context = config_utils.NullConfigContext\n        line = \"daily 17:32 MWF\"\n        config = schedule_parse.valid_schedule(line, config_context)\n        sched = scheduler.scheduler_from_config(config=config, time_zone=None)\n        assert_equal(sched.time_spec.hours, [17])\n        assert_equal(sched.time_spec.minutes, [32])\n        start_time = datetime.datetime(2012, 3, 14, 15, 9, 26)\n        for day in [14, 16, 19]:\n            next_time = sched.next_run_time(start_time)\n            assert_equal(next_time, datetime.datetime(2012, 3, day, 17, 32))\n            start_time = next_time\n\n        assert_equal(str(sched), \"daily 17:32 MWF\")\n\n\nclass GeneralSchedulerTestCase(testingutils.MockTimeTestCase):\n\n    now = datetime.datetime.now().replace(hour=15, minute=0)\n\n    def expected_time(self, date):\n        return datetime.datetime.combine(date, datetime.time(14, 30))\n\n    @setup\n    def build_scheduler(self):\n        self.scheduler = scheduler.GeneralScheduler(timestr=\"14:30\")\n        one_day = datetime.timedelta(days=1)\n        self.today = self.now.date()\n        self.yesterday = self.now - one_day\n        self.tomorrow = self.now + one_day\n\n    def test_next_run_time(self):\n        next_run = self.scheduler.next_run_time(timeutils.current_time())\n        assert_equal(self.expected_time(self.tomorrow), next_run)\n\n        next_run = self.scheduler.next_run_time(self.yesterday)\n        assert_equal(self.expected_time(self.today), next_run)\n\n    @mock.patch(\"tron.scheduler.get_jitter\", autospec=True)\n    def test_next_run_time_with_jitter(self, mock_jitter):\n        mock_jitter.return_value = delta = datetime.timedelta(seconds=-300)\n        self.scheduler.jitter = datetime.timedelta(seconds=400)\n        expected = self.expected_time(self.tomorrow) + delta\n        next_run_time = self.scheduler.next_run_time(None)\n        assert_equal(next_run_time, expected)\n\n    def test__str__(self):\n        assert_equal(str(self.scheduler), \"daily \")\n\n    def test__str__with_jitter(self):\n        self.scheduler.jitter = datetime.timedelta(seconds=300)\n        assert_equal(str(self.scheduler), \"daily  (+/- 0:05:00)\")\n\n\nclass 
GeneralSchedulerTimeTestBase(testingutils.MockTimeTestCase):\n\n    now = datetime.datetime(2012, 3, 14, 15, 9, 26)\n\n    @setup\n    def build_scheduler(self):\n        self.scheduler = scheduler.GeneralScheduler(timestr=\"14:30\")\n\n\nclass GeneralSchedulerTodayTest(GeneralSchedulerTimeTestBase):\n\n    now = datetime.datetime.now().replace(hour=12, minute=0)\n\n    def test(self):\n        # If we schedule a job for later today, it should run today\n        run_time = self.scheduler.next_run_time(self.now)\n        next_run_date = run_time.date()\n\n        assert_equal(next_run_date, self.now.date())\n        earlier_time = datetime.datetime(\n            self.now.year,\n            self.now.month,\n            self.now.day,\n            hour=13,\n        )\n        assert_lte(earlier_time, run_time)\n\n\nclass GeneralSchedulerTomorrowTest(GeneralSchedulerTimeTestBase):\n\n    now = datetime.datetime.now().replace(hour=15, minute=0)\n\n    def test(self):\n        # If we schedule a job for earlier today, it should run tomorrow\n        run_time = self.scheduler.next_run_time(self.now)\n        next_run_date = run_time.date()\n        tomorrow = self.now.date() + datetime.timedelta(days=1)\n\n        assert_equal(next_run_date, tomorrow)\n        earlier_time = datetime.datetime(\n            year=tomorrow.year,\n            month=tomorrow.month,\n            day=tomorrow.day,\n            hour=13,\n        )\n        assert_lte(earlier_time, run_time)\n\n\nclass GeneralSchedulerLongJobRunTest(GeneralSchedulerTimeTestBase):\n\n    now = datetime.datetime.now().replace(hour=12, minute=0)\n\n    def test_long_jobs_dont_wedge_scheduler(self):\n        # Advance days twice as fast as they are scheduled, demonstrating\n        # that the scheduler will put things in the past if that's where\n        # they belong, and run them as fast as possible\n\n        last_run = self.scheduler.next_run_time(None)\n        for i in range(10):\n            next_run = self.scheduler.next_run_time(last_run)\n            assert_equal(next_run, last_run + datetime.timedelta(days=1))\n\n            self.now += datetime.timedelta(days=2)\n            last_run = next_run\n\n\nclass GeneralSchedulerDSTTest(testingutils.MockTimeTestCase):\n\n    now = datetime.datetime(2011, 11, 6, 1, 10, 0)\n    now_utc = timeutils.current_time(tz=pytz.timezone(\"UTC\"))\n\n    def hours_until_time(self, run_time, sch):\n        tz = sch.time_zone\n        now = timeutils.current_time()\n        now = tz.localize(now) if tz else now\n        seconds = timeutils.delta_total_seconds(run_time - now)\n        return round(max(0, seconds) / 60 / 60, 1)\n\n    def hours_diff_at_datetime(self, sch, *args, **kwargs):\n        \"\"\"Return the number of hours until the next *two* runs of a job with\n        the given scheduler\n        \"\"\"\n        self.now = datetime.datetime(*args, **kwargs)\n        next_run = sch.next_run_time(self.now)\n        t1 = self.hours_until_time(next_run, sch)\n        next_run = sch.next_run_time(next_run.replace(tzinfo=None))\n        t2 = self.hours_until_time(next_run, sch)\n        return t1, t2\n\n    def _assert_range(self, x, lower, upper):\n        assert_gt(x, lower)\n        assert_lt(x, upper)\n\n    def test_fall_back(self):\n        \"\"\"This test checks the behavior of the scheduler at the daylight\n        savings time 'fall back' point, when the system time zone changes\n        from (e.g.) 
PDT to PST.\n        \"\"\"\n        sch = scheduler.GeneralScheduler(time_zone=pytz.timezone(\"US/Pacific\"))\n\n        # Exact crossover time:\n        # datetime.datetime(2011, 11, 6, 9, 0, 0, tzinfo=pytz.utc)\n        # This test will use times on either side of it.\n\n        # From the PDT vantage point, the run time is 24.2 hours away:\n        s1a, s1b = self.hours_diff_at_datetime(sch, 2011, 11, 6, 0, 50, 0)\n\n        # From the PST vantage point, the run time is 22.8 hours away:\n        # (this is measured from the point in absolute time 20 minutes after\n        # the other measurement)\n        s2a, s2b = self.hours_diff_at_datetime(sch, 2011, 11, 6, 1, 10, 0)\n\n        self._assert_range(s1b - s1a, 23.99, 24.11)\n        self._assert_range(s2b - s2a, 23.99, 24.11)\n        self._assert_range(s1a - s2a, 1.39, 1.41)\n\n    def test_correct_time(self):\n        sch = scheduler.GeneralScheduler(time_zone=pytz.timezone(\"US/Pacific\"))\n        next_run_time = sch.next_run_time(self.now)\n        assert_equal(next_run_time.hour, 0)\n\n    def test_spring_forward(self):\n        \"\"\"This test checks the behavior of the scheduler at the daylight\n        savings time 'spring forward' point, when the system time zone changes\n        from (e.g.) PST to PDT.\n        \"\"\"\n        sch = scheduler.GeneralScheduler(time_zone=pytz.timezone(\"US/Pacific\"))\n\n        # Exact crossover time:\n        # datetime.datetime(2011, 3, 13, 2, 0, 0, tzinfo=pytz.utc)\n        # This test will use times on either side of it.\n\n        # From the PST vantage point, the run time is 20.2 hours away:\n        s1a, s1b = self.hours_diff_at_datetime(sch, 2011, 3, 13, 2, 50, 0)\n\n        # From the PDT vantage point, the run time is 20.8 hours away:\n        # (this is measured from the point in absolute time 20 minutes after\n        # the other measurement)\n        s2a, s2b = self.hours_diff_at_datetime(sch, 2011, 3, 13, 3, 10, 0)\n\n        self._assert_range(s1b - s1a, 23.99, 24.11)\n        self._assert_range(s2b - s2a, 23.99, 24.11)\n        self._assert_range(s1a - s2a, -0.61, -0.59)\n\n    def test_handles_tz_specific_jobs_with_tz_specific_start_time(self):\n        sch = scheduler.GeneralScheduler(time_zone=pytz.timezone(\"UTC\"))\n        next_run_time = sch.next_run_time(self.now_utc)\n        assert_equal(next_run_time.hour, 0)\n\n    def test_handles_unsetting_the_time_zone(self):\n        sch = scheduler.GeneralScheduler(time_zone=None)\n        next_run_time = sch.next_run_time(self.now_utc)\n        assert_equal(next_run_time.hour, 0)\n\n    def test_handles_changing_the_time_zone(self):\n        pacific_now = datetime.datetime.now(pytz.timezone(\"US/Pacific\"))\n        pacific_offset = pacific_now.utcoffset().total_seconds() / 60 / 60\n        sch = scheduler.GeneralScheduler(time_zone=pytz.timezone(\"US/Pacific\"))\n        next_run_time = sch.next_run_time(self.now_utc)\n        assert_equal(next_run_time.hour, -pacific_offset)\n\n\ndef parse_groc(config):\n    config = schedule_parse.ConfigGenericSchedule(\"groc daily\", config, None)\n    return parse_groc_expression(config, NullConfigContext)\n\n\ndef scheduler_from_config(config):\n    return scheduler.scheduler_from_config(parse_groc(config), None)\n\n\nclass ComplexParserTest(testingutils.MockTimeTestCase):\n\n    now = datetime.datetime(2011, 6, 1)\n\n    def test_parse_all(self):\n        config_string = \"1st,2nd,3rd,4th monday,Tue of march,apr,September at 00:00\"\n        cfg = parse_groc(config_string)\n        
assert_equal(cfg.ordinals, {1, 2, 3, 4})\n        assert_equal(cfg.monthdays, None)\n        assert_equal(cfg.weekdays, {1, 2})\n        assert_equal(cfg.months, {3, 4, 9})\n        assert_equal(cfg.timestr, \"00:00\")\n        assert_equal(\n            scheduler_from_config(config_string),\n            scheduler_from_config(config_string),\n        )\n\n    def test_parse_no_weekday(self):\n        cfg = parse_groc(\n            \"1st,2nd,3rd,10th day of march,apr,September at 00:00\",\n        )\n        assert_equal(cfg.ordinals, None)\n        assert_equal(cfg.monthdays, {1, 2, 3, 10})\n        assert_equal(cfg.weekdays, None)\n        assert_equal(cfg.months, {3, 4, 9})\n        assert_equal(cfg.timestr, \"00:00\")\n\n    def test_parse_no_month(self):\n        cfg = parse_groc(\"1st,2nd,3rd,10th day at 00:00\")\n        assert_equal(cfg.ordinals, None)\n        assert_equal(cfg.monthdays, {1, 2, 3, 10})\n        assert_equal(cfg.weekdays, None)\n        assert_equal(cfg.months, None)\n        assert_equal(cfg.timestr, \"00:00\")\n\n    def test_parse_monthly(self):\n        for test_str in (\"1st day\", \"1st day of month\"):\n            cfg = parse_groc(test_str)\n            assert_equal(cfg.ordinals, None)\n            assert_equal(cfg.monthdays, {1})\n            assert_equal(cfg.weekdays, None)\n            assert_equal(cfg.months, None)\n            assert_equal(cfg.timestr, \"00:00\")\n\n    def test_wildcards(self):\n        cfg = parse_groc(\"every day\")\n        assert_equal(cfg.ordinals, None)\n        assert_equal(cfg.monthdays, None)\n        assert_equal(cfg.weekdays, None)\n        assert_equal(cfg.months, None)\n        assert_equal(cfg.timestr, \"00:00\")\n\n    def test_daily(self):\n        sch = scheduler_from_config(\"every day\")\n        next_run_date = sch.next_run_time(None)\n\n        assert_gte(next_run_date, self.now)\n        assert_equal(next_run_date.month, 6)\n        assert_equal(next_run_date.day, 2)\n        assert_equal(next_run_date.hour, 0)\n\n    def test_daily_with_time(self):\n        sch = scheduler_from_config(\"every day at 02:00\")\n        next_run_date = sch.next_run_time(None)\n\n        assert_gte(next_run_date, self.now)\n        assert_equal(next_run_date.year, self.now.year)\n        assert_equal(next_run_date.month, 6)\n        assert_equal(next_run_date.day, 1)\n        assert_equal(next_run_date.hour, 2)\n        assert_equal(next_run_date.minute, 0)\n\n    def test_weekly(self):\n        sch = scheduler_from_config(\"every monday at 01:00\")\n        next_run_date = sch.next_run_time(None)\n\n        assert_gte(next_run_date, self.now)\n        assert_equal(\n            calendar.weekday(\n                next_run_date.year,\n                next_run_date.month,\n                next_run_date.day,\n            ),\n            0,\n        )\n\n    def test_weekly_in_month(self):\n        sch = scheduler_from_config(\"every monday of January at 00:01\")\n        next_run_date = sch.next_run_time(None)\n\n        assert_gte(next_run_date, self.now)\n        assert_equal(next_run_date.year, self.now.year + 1)\n        assert_equal(next_run_date.month, 1)\n        assert_equal(next_run_date.hour, 0)\n        assert_equal(next_run_date.minute, 1)\n        assert_equal(\n            calendar.weekday(\n                next_run_date.year,\n                next_run_date.month,\n                next_run_date.day,\n            ),\n            0,\n        )\n\n    def test_monthly(self):\n        sch = scheduler_from_config(\"1st day\")\n 
       next_run_date = sch.next_run_time(None)\n\n        assert_gt(next_run_date, self.now)\n        assert_equal(next_run_date.month, 7)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/serialize/__init__.py",
    "content": ""
  },
  {
    "path": "tests/serialize/filehandler_test.py",
    "content": "import os\nimport shutil\nimport time\nfrom tempfile import mkdtemp\nfrom tempfile import NamedTemporaryFile\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import assert_not_equal\nfrom testifycompat import assert_not_in\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import suite\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.serialize.filehandler import FileHandleManager\nfrom tron.serialize.filehandler import NullFileHandle\nfrom tron.serialize.filehandler import OutputPath\nfrom tron.serialize.filehandler import OutputStreamSerializer\n\n\nclass TestFileHandleWrapper(TestCase):\n    @setup\n    def setup_fh_wrapper(self):\n        self.file = NamedTemporaryFile(\"r\")\n        self.manager = FileHandleManager.get_instance()\n        self.fh_wrapper = self.manager.open(self.file.name)\n\n    @teardown\n    def teardown_fh_wrapper(self):\n        self.fh_wrapper.close()\n        FileHandleManager.reset()\n\n    def test_init(self):\n        assert_equal(self.fh_wrapper._fh, NullFileHandle)\n\n    def test_close(self):\n        # Test close without a write, no exception is good\n        self.fh_wrapper.close()\n        # Test close again, after already closed\n        self.fh_wrapper.close()\n\n    def test_close_with_write(self):\n        # Test close with a write\n        self.fh_wrapper.write(\"some things\")\n        self.fh_wrapper.close()\n        assert_equal(self.fh_wrapper._fh, NullFileHandle)\n        assert_equal(self.fh_wrapper.manager, self.manager)\n        # This is somewhat coupled\n        assert_not_in(self.fh_wrapper, self.manager.cache)\n\n    def test_write(self):\n        # Test write without a previous open\n        before_time = time.time()\n        self.fh_wrapper.write(\"some things\")\n        after_time = time.time()\n\n        assert self.fh_wrapper._fh\n        assert_equal(self.fh_wrapper._fh.closed, False)\n        assert before_time <= self.fh_wrapper.last_accessed <= after_time\n\n        # Test write after previous open\n        before_time = time.time()\n        self.fh_wrapper.write(\"\\nmore things\")\n        after_time = time.time()\n        assert before_time <= self.fh_wrapper.last_accessed <= after_time\n        self.fh_wrapper.close()\n        with open(self.file.name) as fh:\n            assert_equal(fh.read(), \"some things\\nmore things\")\n\n    def test_close_many(self):\n        self.fh_wrapper.write(\"some things\")\n        self.fh_wrapper.close()\n        self.fh_wrapper.close()\n\n    def test_context_manager(self):\n        with self.fh_wrapper as fh:\n            fh.write(\"123\")\n        assert fh._fh is None\n        with open(self.file.name) as fh:\n            assert_equal(fh.read(), \"123\")\n\n\nclass TestFileHandleManager(TestCase):\n    @setup\n    def setup_fh_manager(self):\n        FileHandleManager.reset()\n        self.file1 = NamedTemporaryFile(\"r\")\n        self.file2 = NamedTemporaryFile(\"r\")\n        FileHandleManager.set_max_idle_time(2)\n        self.manager = FileHandleManager.get_instance()\n\n    @teardown\n    def teardown_fh_manager(self):\n        FileHandleManager.reset()\n\n    def test_get_instance(self):\n        assert_equal(self.manager, FileHandleManager.get_instance())\n        # Repeat for good measure\n        assert_equal(self.manager, FileHandleManager.get_instance())\n\n    def test_set_max_idle_time(self):\n        
max_idle_time = 300\n        FileHandleManager.set_max_idle_time(max_idle_time)\n        assert_equal(max_idle_time, self.manager.max_idle_time)\n\n    def test_open(self):\n        # Not yet in cache\n        fh_wrapper = self.manager.open(self.file1.name)\n        assert_in(fh_wrapper.name, self.manager.cache)\n\n        # Should now be in cache\n        fh_wrapper2 = self.manager.open(self.file1.name)\n\n        # Same wrapper\n        assert_equal(fh_wrapper, fh_wrapper2)\n\n        # Different wrapper\n        assert_not_equal(fh_wrapper, self.manager.open(self.file2.name))\n\n    def test_cleanup_none(self):\n        # Nothing to remove\n        fh_wrapper = self.manager.open(self.file1.name)\n        self.manager.cleanup()\n        assert_in(fh_wrapper.name, self.manager.cache)\n\n    def test_cleanup_single(self):\n        fh_wrapper = self.manager.open(self.file1.name)\n        fh_wrapper.last_accessed = 123456\n\n        def time_func():\n            return 123458.1\n\n        self.manager.cleanup(time_func)\n        assert_not_in(fh_wrapper.name, self.manager.cache)\n        assert_equal(len(self.manager.cache), 0)\n\n    def test_cleanup_many(self):\n        fh_wrappers = [\n            self.manager.open(self.file1.name),\n            self.manager.open(self.file2.name),\n            self.manager.open(NamedTemporaryFile(\"r\").name),\n            self.manager.open(NamedTemporaryFile(\"r\").name),\n            self.manager.open(NamedTemporaryFile(\"r\").name),\n        ]\n        for i, fh_wrapper in enumerate(fh_wrappers):\n            fh_wrapper.last_accessed = 123456 + i\n\n        def time_func():\n            return 123460.1\n\n        self.manager.cleanup(time_func)\n        assert_equal(len(self.manager.cache), 2)\n\n        for fh_wrapper in fh_wrappers[:3]:\n            assert_not_in(fh_wrapper.name, self.manager.cache)\n\n        for fh_wrapper in fh_wrappers[3:]:\n            assert_in(fh_wrapper.name, self.manager.cache)\n\n    def test_cleanup_opened(self):\n        fh_wrapper = self.manager.open(self.file1.name)\n        fh_wrapper.write(\"Some things\")\n\n        fh_wrapper.last_accessed = 123456\n\n        def time_func():\n            return 123458.1\n\n        self.manager.cleanup(time_func)\n        assert_not_in(fh_wrapper.name, self.manager.cache)\n        assert_equal(len(self.manager.cache), 0)\n\n    def test_cleanup_natural(self):\n        FileHandleManager.set_max_idle_time(1)\n        fh_wrapper1 = self.manager.open(self.file1.name)\n        fh_wrapper2 = self.manager.open(self.file2.name)\n        fh_wrapper1.write(\"Some things\")\n\n        time.sleep(1.5)\n        fh_wrapper2.write(\"Other things.\")\n\n        assert_not_in(fh_wrapper1.name, self.manager.cache)\n        assert_in(fh_wrapper2.name, self.manager.cache)\n\n        # Now that 1 is closed, try writing again\n        fh_wrapper1.write(\"Some things\")\n        assert_in(fh_wrapper1.name, self.manager.cache)\n        assert not fh_wrapper1._fh.closed\n\n    def test_remove(self):\n        # In cache\n        fh_wrapper = self.manager.open(self.file1.name)\n        assert_in(fh_wrapper.name, self.manager.cache)\n        self.manager.remove(fh_wrapper)\n        assert_not_in(fh_wrapper.name, self.manager.cache)\n\n        # Not in cache\n        self.manager.remove(fh_wrapper)\n        assert_not_in(fh_wrapper.name, self.manager.cache)\n\n    def test_update(self):\n        fh_wrapper1 = self.manager.open(self.file1.name)\n        fh_wrapper2 = self.manager.open(self.file2.name)\n        
assert_equal(\n            list(self.manager.cache.keys()),\n            [fh_wrapper1.name, fh_wrapper2.name],\n        )\n\n        self.manager.update(fh_wrapper1)\n        assert_equal(\n            list(self.manager.cache.keys()),\n            [fh_wrapper2.name, fh_wrapper1.name],\n        )\n\n\nclass TestOutputStreamSerializer(TestCase):\n    @setup\n    def setup_serializer(self):\n        self.test_dir = mkdtemp()\n        self.serial = OutputStreamSerializer([self.test_dir])\n        self.filename = \"STARS\"\n        self.content = \"123\\n456\\n789\"\n        self.expected = [line for line in self.content.split(\"\\n\")]\n\n    @teardown\n    def teardown_test_dir(self):\n        shutil.rmtree(self.test_dir)\n\n    def _write_contents(self):\n        with open(self.serial.full_path(self.filename), \"w\") as f:\n            f.write(self.content)\n\n    def test_open(self):\n        with self.serial.open(self.filename) as fh:\n            fh.write(self.content)\n\n        with open(self.serial.full_path(self.filename)) as f:\n            assert_equal(f.read(), self.content)\n\n    @suite(\"integration\")\n    def test_init_with_output_path(self):\n        path = OutputPath(self.test_dir, \"one\", \"two\", \"three\")\n        stream = OutputStreamSerializer(path)\n        assert_equal(stream.base_path, str(path))\n\n    def test_tail(self):\n        self._write_contents()\n        assert_equal(self.serial.tail(self.filename), self.expected)\n\n    def test_tail_num_lines(self):\n        self._write_contents()\n        assert_equal(self.serial.tail(self.filename, 1), self.expected[-1:])\n\n    def test_tail_file_does_not_exist(self):\n        file_dne = \"bogusfile123\"\n        assert_equal(self.serial.tail(file_dne), [])\n\n\nclass TestOutputPath(TestCase):\n    @setup\n    def setup_path(self):\n        self.path = OutputPath(\"one\", \"two\", \"three\")\n\n    def test__init__(self):\n        assert_equal(self.path.base, \"one\")\n        assert_equal(self.path.parts, [\"two\", \"three\"])\n\n        path = OutputPath(\"base\")\n        assert_equal(path.base, \"base\")\n        assert_equal(path.parts, [])\n\n    def test__iter__(self):\n        assert_equal(list(self.path), [\"one\", \"two\", \"three\"])\n\n    def test__str__(self):\n        # Breaks in windows probably,\n        assert_equal(\"one/two/three\", str(self.path))\n\n    def test_append(self):\n        self.path.append(\"four\")\n        assert_equal(self.path.parts, [\"two\", \"three\", \"four\"])\n\n    def test_clone(self):\n        new_path = self.path.clone()\n        assert_equal(str(new_path), str(self.path))\n\n        self.path.append(\"alpha\")\n        assert_equal(str(new_path), \"one/two/three\")\n\n        new_path.append(\"beta\")\n        assert_equal(str(self.path), \"one/two/three/alpha\")\n\n    def test_clone_with_parts(self):\n        new_path = self.path.clone(\"seven\", \"ten\")\n        assert_equal(list(new_path), [\"one/two/three\", \"seven\", \"ten\"])\n\n    def test_delete(self):\n        tmp_dir = mkdtemp()\n        path = OutputPath(tmp_dir)\n        path.delete()\n        assert not os.path.exists(tmp_dir)\n\n    def test__eq__(self):\n        other = mock.MagicMock(base=\"one\", parts=[\"two\", \"three\"])\n        assert_equal(self.path, other)\n\n    def test__ne__(self):\n        other = mock.MagicMock(base=\"one/two\", parts=[\"three\"])\n        assert_not_equal(self.path, other)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/serialize/runstate/__init__.py",
    "content": ""
  },
  {
    "path": "tests/serialize/runstate/dynamodb_state_store_test.py",
    "content": "import gzip\nimport json\nfrom unittest import mock\n\nimport boto3\nimport pytest\nfrom boto3.dynamodb.types import Binary\nfrom moto import mock_dynamodb\nfrom moto.dynamodb.responses import dynamo_json_dump\n\nfrom tron.serialize.runstate.dynamodb_state_store import DynamoDBStateStore\nfrom tron.serialize.runstate.dynamodb_state_store import MAX_UNPROCESSED_KEYS_RETRIES\n\n\ndef mock_transact_write_items(self):\n    \"\"\"\n    This mocks moto.dynamodb2.responses.DynamoHandler.transact_write_items,\n    which is used to mock dynamodb client. This function calls put_item,\n    update_item, and delete_item based on the arguments of transact_write_item.\n    \"\"\"\n\n    def put_item(item):\n        name = item[\"TableName\"]\n        record = item[\"Item\"]\n        return self.dynamodb_backend.put_item(name, record)\n\n    def delete_item(item):\n        name = item[\"TableName\"]\n        keys = item[\"Key\"]\n        return self.dynamodb_backend.delete_item(name, keys)\n\n    def update_item(item):\n        name = item[\"TableName\"]\n        key = item[\"Key\"]\n        update_expression = item.get(\"UpdateExpression\")\n        attribute_updates = item.get(\"AttributeUpdates\")\n        expression_attribute_names = item.get(\"ExpressionAttributeNames\", {})\n        expression_attribute_values = item.get(\"ExpressionAttributeValues\", {})\n        return self.dynamodb_backend.update_item(\n            name,\n            key,\n            update_expression,\n            attribute_updates,\n            expression_attribute_names,\n            expression_attribute_values,\n        )\n\n    transact_items = self.body[\"TransactItems\"]\n\n    for transact_item in transact_items:\n        if \"Put\" in transact_item:\n            put_item(transact_item[\"Put\"])\n        elif \"Update\" in transact_item:\n            update_item(transact_item[\"Update\"])\n        elif \"Delete\" in transact_item:\n            delete_item(transact_item[\"Delete\"])\n\n    return dynamo_json_dump({})\n\n\n@pytest.fixture(autouse=True)\ndef store():\n    with mock.patch(\n        \"moto.dynamodb.responses.DynamoHandler.transact_write_items\",\n        new=mock_transact_write_items,\n        create=True,\n    ), mock_dynamodb():\n        dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-west-2\")\n        table_name = \"tmp\"\n        store = DynamoDBStateStore(table_name, \"us-west-2\", stopping=True)\n        store.table = dynamodb.create_table(\n            TableName=table_name,\n            KeySchema=[\n                {\n                    \"AttributeName\": \"key\",\n                    \"KeyType\": \"HASH\",\n                },  # Partition key\n                {\n                    \"AttributeName\": \"index\",\n                    \"KeyType\": \"RANGE\",\n                },  # Sort key\n            ],\n            AttributeDefinitions=[\n                {\n                    \"AttributeName\": \"key\",\n                    \"AttributeType\": \"S\",\n                },\n                {\n                    \"AttributeName\": \"index\",\n                    \"AttributeType\": \"N\",\n                },\n            ],\n            ProvisionedThroughput={\n                \"ReadCapacityUnits\": 10,\n                \"WriteCapacityUnits\": 10,\n            },\n        )\n        store.client = boto3.client(\"dynamodb\", region_name=\"us-west-2\")\n        # Has to be yield here for moto to work\n        yield store\n\n\n@pytest.fixture\ndef small_job():\n    yield {\n        
\"enabled\": True,\n        \"run_nums\": [1],\n    }\n\n\n@pytest.fixture\ndef small_object():\n    yield {\n        \"job_name\": \"example_job\",\n        \"run_num\": 1,\n        \"run_time\": None,\n        \"time_zone\": None,\n        \"node_name\": \"example_node\",\n        \"runs\": [],\n        \"cleanup_run\": None,\n        \"manual\": False,\n    }\n\n\n@pytest.fixture\ndef large_object():\n    # We need this object to exceed OBJECT_SIZE after gzip compression so it\n    # requires multiple partitions. We pad node_name because JobRun.to_json()\n    # only serializes known fields — arbitrary fields like \"large_data\" would\n    # be silently dropped. Ideally we'd use a more realistic field like\n    # \"runs\", but each action run requires ~15 nested fields, making the\n    # fixture unwieldy. node_name is the simplest string field that survives\n    # the JSON round-trip.\n    yield {\n        \"job_name\": \"example_job\",\n        \"run_num\": 1,\n        \"run_time\": None,\n        \"time_zone\": None,\n        \"node_name\": \"\".join(str(i) for i in range(200_000)),\n        \"runs\": [],\n        \"cleanup_run\": None,\n        \"manual\": False,\n    }\n\n\n@pytest.mark.usefixtures(\"store\")\nclass TestDynamoDBStateStore:\n    def test_save(self, store, small_job, small_object):\n        key_value_pairs = [\n            (\n                store.build_key(\"job_state\", \"two\"),\n                small_job,\n            ),\n            (\n                store.build_key(\"job_run_state\", \"four\"),\n                small_object,\n            ),\n        ]\n        store.save(key_value_pairs)\n        store._consume_save_queue()\n\n        assert store.save_errors == 0\n        keys = [\n            store.build_key(\"job_state\", \"two\"),\n            store.build_key(\"job_run_state\", \"four\"),\n        ]\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore(keys)\n        for key, value in key_value_pairs:\n            assert vals[key] == value\n\n        expected_values = {keys[0]: small_job, keys[1]: small_object}\n        for key in keys:\n            item = store.table.get_item(Key={\"key\": key, \"index\": 0})\n            assert \"Item\" in item\n            assert \"json_val\" in item[\"Item\"]\n\n            compressed_val = item[\"Item\"][\"json_val\"]\n            assert isinstance(compressed_val, Binary)\n\n            decompressed_json = gzip.decompress(compressed_val.value)\n            assert json.loads(decompressed_json) == expected_values[key]\n\n    def test_save_multi_partition_object(self, store, large_object):\n        key_value_pairs = [\n            (\n                store.build_key(\"job_run_state\", \"two\"),\n                large_object,\n            ),\n        ]\n        store.save(key_value_pairs)\n        store._consume_save_queue()\n\n        assert store.save_errors == 0\n        keys = [store.build_key(\"job_run_state\", \"two\")]\n\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore(keys)\n        for key, value in key_value_pairs:\n            assert vals[key] == value\n\n    def test_restore(self, store, small_object):\n        keys = [store.build_key(\"job_run_state\", i) for 
i in range(3)]\n        value = small_object\n        pairs = zip(keys, (value for i in range(len(keys))))\n        store.save(pairs)\n        store._consume_save_queue()\n\n        assert store.save_errors == 0\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore(keys)\n        for key in keys:\n            assert vals[key] == small_object\n\n    def test_restore_multi_partition_object(self, store, large_object):\n        keys = [store.build_key(\"job_run_state\", i) for i in range(3)]\n        value = large_object\n        pairs = zip(keys, (value for i in range(len(keys))))\n        store.save(pairs)\n        store._consume_save_queue()\n\n        assert store.save_errors == 0\n\n        for key in keys:\n            num_partitions, num_json_val_partitions = store._get_num_of_partitions(key)\n            assert num_json_val_partitions > 1\n            assert num_partitions > 1\n\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore(keys)\n        for key in keys:\n            assert vals[key] == large_object\n\n    def test_delete_item(self, store, small_object):\n        keys = [store.build_key(\"job_state\", i) for i in range(3)]\n        pairs = list(zip(keys, (small_object for _ in range(len(keys)))))\n\n        store.save(pairs)\n        store._consume_save_queue()\n\n        for key, _ in pairs:\n            store._delete_item(key)\n\n        for key, _ in pairs:\n            num_partitions, num_json_val_partitions = store._get_num_of_partitions(key)\n            assert num_partitions == 0\n            assert num_json_val_partitions == 0\n\n    def test_delete_multi_partition_item(self, store, large_object):\n        keys = [store.build_key(\"job_state\", i) for i in range(3)]\n        pairs = list(zip(keys, (large_object for _ in range(len(keys)))))\n\n        store.save(pairs)\n        store._consume_save_queue()\n\n        for key, _ in pairs:\n            num_partitions, num_json_val_partitions = store._get_num_of_partitions(key)\n            assert num_partitions > 1\n            assert num_json_val_partitions > 1\n\n        for key, _ in pairs:\n            store._delete_item(key)\n\n        for key, _ in pairs:\n            num_partitions, num_json_val_partitions = store._get_num_of_partitions(key)\n            assert num_partitions == 0\n            assert num_json_val_partitions == 0\n\n    def test_delete_if_val_is_none(self, store, small_object):\n        key_value_pairs = [\n            (\n                store.build_key(\"job_state\", \"two\"),\n                small_object,\n            ),\n            (\n                store.build_key(\"job_run_state\", \"four\"),\n                small_object,\n            ),\n        ]\n        store.save(key_value_pairs)\n        store._consume_save_queue()\n\n        delete = [\n            (\n                store.build_key(\"job_state\", \"two\"),\n                None,\n            ),\n        ]\n        store.save(delete)\n        store._consume_save_queue()\n\n        assert store.save_errors == 0\n        # Try to restore both, we should just get one back\n        keys = [\n            store.build_key(\"job_state\", \"two\"),\n            store.build_key(\"job_run_state\", 
\"four\"),\n        ]\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore(keys)\n        assert vals == {\"job_run_state four\": small_object}\n\n    @pytest.mark.parametrize(\n        \"test_object, side_effects, expected_save_errors, expected_queue_length\",\n        [\n            # All attempts fail\n            (\"small_object\", [KeyError(\"foo\")] * 3, 3, 1),\n            (\"large_object\", [KeyError(\"foo\")] * 3, 3, 1),\n            # Failure followed by success\n            (\"small_object\", [KeyError(\"foo\"), {}], 0, 0),\n            (\"large_object\", [KeyError(\"foo\")] + [{}] * 10, 0, 0),\n        ],\n    )\n    def test_retry_saving(\n        self, test_object, side_effects, expected_save_errors, expected_queue_length, store, small_object, large_object\n    ):\n        object_mapping = {\n            \"small_object\": small_object,\n            \"large_object\": large_object,\n        }\n        value = object_mapping[test_object]\n\n        with mock.patch.object(\n            store.client,\n            \"transact_write_items\",\n            side_effect=side_effects,\n        ) as mock_transact_write:\n            keys = [store.build_key(\"job_state\", 0)]\n            pairs = zip(keys, [value])\n            store.save(pairs)\n\n            for _ in side_effects:\n                store._consume_save_queue()\n\n            assert mock_transact_write.called\n            assert store.save_errors == expected_save_errors\n            assert len(store.save_queue) == expected_queue_length\n\n    @pytest.mark.parametrize(\n        \"attempt, expected_delay\",\n        [\n            (1, 1),\n            (2, 2),\n            (3, 4),\n            (4, 8),\n            (5, 10),\n            (6, 10),\n            (7, 10),\n        ],\n    )\n    def test_calculate_backoff_delay(self, store, attempt, expected_delay):\n        delay = store._calculate_backoff_delay(attempt)\n        assert delay == expected_delay\n\n    def test_retry_reading(self, store):\n        unprocessed_value = {\n            \"Responses\": {},\n            \"UnprocessedKeys\": {\n                store.name: {\n                    \"Keys\": [{\"key\": {\"S\": store.build_key(\"job_state\", 0)}, \"index\": {\"N\": \"0\"}}],\n                    \"ConsistentRead\": True,\n                }\n            },\n        }\n\n        keys = [store.build_key(\"job_state\", 0)]\n\n        with mock.patch.object(\n            store.client,\n            \"batch_get_item\",\n            return_value=unprocessed_value,\n        ) as mock_batch_get_item, mock.patch(\"time.sleep\") as mock_sleep, pytest.raises(Exception) as exec_info:\n            store.restore(keys)\n        assert \"failed to retrieve items with keys\" in str(exec_info.value)\n        assert mock_batch_get_item.call_count == MAX_UNPROCESSED_KEYS_RETRIES\n        assert mock_sleep.call_count == MAX_UNPROCESSED_KEYS_RETRIES\n\n    def test_restore_exception_propagation(self, store):\n        # This test is to ensure that restore propagates exceptions upwards: see DAR-2328\n        keys = [store.build_key(\"job_state\", i) for i in range(3)]\n\n        mock_future = mock.MagicMock()\n        mock_future.result.side_effect = Exception(\"mocked exception\")\n        with mock.patch(\"concurrent.futures.Future\", return_value=mock_future, autospec=True):\n            with 
mock.patch(\"concurrent.futures.as_completed\", return_value=[mock_future], autospec=True):\n                with pytest.raises(Exception) as exec_info, mock.patch(\n                    \"tron.config.static_config.load_yaml_file\", autospec=True\n                ), mock.patch(\"tron.config.static_config.build_configuration_watcher\", autospec=True):\n                    store.restore(keys)\n                assert str(exec_info.value) == \"mocked exception\"\n\n    def test_serialization_failure_preserves_existing_row(self, store, small_object):\n        \"\"\"When json serialization fails, the existing DynamoDB row should not be deleted.\"\"\"\n        key = store.build_key(\"job_run_state\", \"preserve_me\")\n\n        store.save([(key, small_object)])\n        store._consume_save_queue()\n        assert store.save_errors == 0\n\n        new_val = {**small_object, \"run_num\": 999}\n        with mock.patch.object(store, \"_serialize_item\", return_value=None):\n            store.save([(key, new_val)])\n\n        # The save() call above already called _serialize_item (which returned None),\n        # so the queue has (new_val, None). _consume_save_queue should retry and still fail.\n        with mock.patch.object(store, \"_serialize_item\", return_value=None):\n            store._consume_save_queue()\n\n        # The item should be requeued\n        assert len(store.save_queue) == 1\n        assert store.save_errors == 1\n\n        # The original key should still be intact\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore([key])\n        assert vals[key] == small_object\n\n    def test_serialization_retry_succeeds(self, store, small_object):\n        \"\"\"When initial json_val is None but retry succeeds, the item should be saved normally.\"\"\"\n        key = store.build_key(\"job_run_state\", \"retry_ok\")\n\n        with mock.patch.object(store, \"_serialize_item\", return_value=None):\n            store.save([(key, small_object)])\n\n        queued_val, queued_json = store.save_queue[key]\n        assert queued_val == small_object\n        assert queued_json is None\n\n        # This time it should succeed\n        store._consume_save_queue()\n\n        assert len(store.save_queue) == 0\n        assert store.save_errors == 0\n\n        with mock.patch(\"tron.config.static_config.load_yaml_file\", autospec=True), mock.patch(\n            \"tron.config.static_config.build_configuration_watcher\", autospec=True\n        ):\n            vals = store.restore([key])\n        assert vals[key] == small_object\n\n    def test_delete_sentinel_proceeds_with_deletion(self, store, small_object):\n        \"\"\"When val is None the row should be deleted.\"\"\"\n        key = store.build_key(\"job_run_state\", \"delete_me\")\n\n        store.save([(key, small_object)])\n        store._consume_save_queue()\n        assert store.save_errors == 0\n\n        store.save([(key, None)])\n        store._consume_save_queue()\n        assert store.save_errors == 0\n\n        num_partitions, num_json_val_partitions = store._get_num_of_partitions(key)\n        assert num_partitions == 0\n        assert num_json_val_partitions == 0\n"
  },
  {
    "path": "tests/serialize/runstate/shelvestore_test.py",
    "content": "import os\nimport shutil\nimport tempfile\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.serialize.runstate.shelvestore import Py2Shelf\nfrom tron.serialize.runstate.shelvestore import ShelveKey\nfrom tron.serialize.runstate.shelvestore import ShelveStateStore\n\n\nclass TestShelveStateStore(TestCase):\n    @setup\n    def setup_store(self):\n        self.tmpdir = tempfile.mkdtemp()\n        self.filename = os.path.join(self.tmpdir, \"state\")\n        self.store = ShelveStateStore(self.filename)\n\n    @teardown\n    def teardown_store(self):\n        shutil.rmtree(self.tmpdir)\n\n    def test__init__(self):\n        assert_equal(self.filename, self.store.filename)\n\n    def test_save(self):\n        key_value_pairs = [\n            (\n                ShelveKey(\"one\", \"two\"),\n                {\n                    \"this\": \"data\",\n                },\n            ),\n            (\n                ShelveKey(\"three\", \"four\"),\n                {\n                    \"this\": \"data2\",\n                },\n            ),\n        ]\n        self.store.save(key_value_pairs)\n        self.store.cleanup()\n\n        stored_data = Py2Shelf(self.filename)\n        for key, value in key_value_pairs:\n            assert_equal(stored_data[str(key.key)], value)\n        stored_data.close()\n\n    def test_delete(self):\n        key_value_pairs = [\n            (\n                ShelveKey(\"one\", \"two\"),\n                {\n                    \"this\": \"data\",\n                },\n            ),\n            (\n                ShelveKey(\"three\", \"four\"),\n                {\n                    \"this\": \"data2\",\n                },\n            ),\n            # Delete first key\n            (\n                ShelveKey(\"one\", \"two\"),\n                None,\n            ),\n        ]\n        self.store.save(key_value_pairs)\n        self.store.cleanup()\n\n        stored_data = Py2Shelf(self.filename)\n        assert stored_data == {\n            str(ShelveKey(\"three\", \"four\").key): {\"this\": \"data2\"},\n        }\n        stored_data.close()\n\n    def test_restore(self):\n        self.store.cleanup()\n        keys = [ShelveKey(\"thing\", i) for i in range(5)]\n        value = {\"this\": \"data\"}\n        store = Py2Shelf(self.filename)\n        for key in keys:\n            store[str(key.key)] = value\n        store.close()\n\n        self.store.shelve = Py2Shelf(self.filename)\n        retrieved_data = self.store.restore(keys)\n        for key in keys:\n            assert_equal(retrieved_data[key], value)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/serialize/runstate/statemanager_test.py",
    "content": "import os\nimport shutil\nimport tempfile\nfrom unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_raises\nfrom tests.testingutils import autospec_method\nfrom tron.config import schema\nfrom tron.core.job import Job\nfrom tron.core.jobrun import JobRun\nfrom tron.mesos import MesosClusterRepository\nfrom tron.serialize import runstate\nfrom tron.serialize.runstate.shelvestore import ShelveStateStore\nfrom tron.serialize.runstate.statemanager import PersistenceManagerFactory\nfrom tron.serialize.runstate.statemanager import PersistenceStoreError\nfrom tron.serialize.runstate.statemanager import PersistentStateManager\nfrom tron.serialize.runstate.statemanager import StateChangeWatcher\nfrom tron.serialize.runstate.statemanager import StateSaveBuffer\n\n\nclass TestPersistenceManagerFactory(TestCase):\n    def test_from_config_shelve(self):\n        tmpdir = tempfile.mkdtemp()\n        try:\n            fname = os.path.join(tmpdir, \"state\")\n            config = schema.ConfigState(\n                store_type=\"shelve\",\n                name=fname,\n                buffer_size=0,\n            )\n            manager = PersistenceManagerFactory.from_config(config)\n            store = manager._impl\n            assert_equal(store.filename, config.name)\n            assert isinstance(store, ShelveStateStore)\n        finally:\n            shutil.rmtree(tmpdir)\n\n\nclass TestStateSaveBuffer(TestCase):\n    @setup\n    def setup_buffer(self):\n        self.buffer_size = 5\n        self.buffer = StateSaveBuffer(self.buffer_size)\n\n    def test_save(self):\n        assert self.buffer.save(1, 2)\n        assert not self.buffer.save(1, 3)\n        assert not self.buffer.save(1, 4)\n        assert not self.buffer.save(1, 5)\n        assert not self.buffer.save(1, 6)\n        assert self.buffer.save(1, 7)\n        assert_equal(self.buffer.buffer[1], 7)\n\n    def test__iter__(self):\n        self.buffer.save(1, 2)\n        self.buffer.save(2, 3)\n        items = list(self.buffer)\n        assert not self.buffer.buffer\n        assert_equal(items, [(1, 2), (2, 3)])\n\n\nclass TestPersistentStateManager(TestCase):\n    @setup\n    def setup_manager(self):\n        self.store = mock.Mock()\n        self.store.build_key.side_effect = lambda t, i: f\"{t}{i}\"\n        self.buffer = StateSaveBuffer(1)\n        self.manager = PersistentStateManager(self.store, self.buffer)\n\n    def test__init__(self):\n        assert_equal(self.manager._impl, self.store)\n\n    def test_keys_for_items(self):\n        names = [\"namea\", \"nameb\"]\n        key_to_item_map = self.manager._keys_for_items(\"type\", names)\n\n        keys = [\"type%s\" % name for name in names]\n        assert_equal(key_to_item_map, dict(zip(keys, names)))\n\n    def test_restore(self):\n        job_names = [\"one\", \"two\"]\n        with mock.patch.object(self.manager, \"_restore_dicts\", autospec=True,) as mock_restore_dicts, mock.patch.object(\n            self.manager,\n            \"_restore_runs_for_job\",\n            autospec=True,\n        ) as mock_restore_runs:\n            mock_restore_dicts.side_effect = [\n                # _restore_dicts for JOB_STATE\n                {\n                    \"one\": {\"key\": \"val1\"},\n                    \"two\": {\"key\": \"val2\"},\n                },\n            ]\n\n            restored_state = 
self.manager.restore(job_names)\n            assert mock_restore_dicts.call_args_list == [\n                mock.call(runstate.JOB_STATE, job_names),\n            ]\n            assert len(mock_restore_runs.call_args_list) == 2\n            assert restored_state == {\n                runstate.JOB_STATE: {\n                    \"one\": {\"key\": \"val1\", \"runs\": mock_restore_runs.return_value},\n                    \"two\": {\"key\": \"val2\", \"runs\": mock_restore_runs.return_value},\n                },\n            }\n\n    def test_restore_runs_for_job(self):\n        job_state = {\"run_nums\": [2, 3], \"enabled\": True}\n        with mock.patch.object(\n            self.manager,\n            \"_restore_dicts\",\n            autospec=True,\n        ) as mock_restore_dicts:\n            mock_restore_dicts.side_effect = [\n                {\"job_a.2\": {\"job_name\": \"job_a\", \"run_num\": 2}, \"job_a.3\": {\"job_name\": \"job_a\", \"run_num\": 3}}\n            ]\n            runs = self.manager._restore_runs_for_job(\"job_a\", job_state)\n\n            assert mock_restore_dicts.call_args_list == [mock.call(runstate.JOB_RUN_STATE, [\"job_a.2\", \"job_a.3\"])]\n            assert runs == [{\"job_name\": \"job_a\", \"run_num\": 3}, {\"job_name\": \"job_a\", \"run_num\": 2}]\n\n    def test_restore_runs_for_job_one_missing(self):\n        job_state = {\"run_nums\": [2, 3], \"enabled\": True}\n        with mock.patch.object(\n            self.manager,\n            \"_restore_dicts\",\n            autospec=True,\n        ) as mock_restore_dicts:\n            mock_restore_dicts.side_effect = [{\"job_a.3\": {\"job_name\": \"job_a\", \"run_num\": 3}, \"job_b\": {}}]\n            runs = self.manager._restore_runs_for_job(\"job_a\", job_state)\n\n            assert mock_restore_dicts.call_args_list == [\n                mock.call(runstate.JOB_RUN_STATE, [\"job_a.2\", \"job_a.3\"]),\n            ]\n            assert runs == [{\"job_name\": \"job_a\", \"run_num\": 3}]\n\n    def test_restore_dicts(self):\n        names = [\"namea\", \"nameb\"]\n        autospec_method(self.manager._keys_for_items)\n        self.manager._keys_for_items.return_value = dict(enumerate(names))\n        self.store.restore.return_value = {\n            0: {\n                \"state\": \"data\",\n            },\n            1: {\n                \"state\": \"2data\",\n            },\n        }\n        state_data = self.manager._restore_dicts(\"type\", names)\n        expected = {\n            names[0]: {\n                \"state\": \"data\",\n            },\n            names[1]: {\n                \"state\": \"2data\",\n            },\n        }\n        assert_equal(expected, state_data)\n\n    def test_save(self):\n        name, state_data = \"name\", mock.Mock()\n        self.manager.save(runstate.JOB_STATE, name, state_data)\n        key = f\"{runstate.JOB_STATE}{name}\"\n        self.store.save.assert_called_with([(key, state_data)])\n\n    def test_save_failed(self):\n        self.store.save.side_effect = PersistenceStoreError(\"blah\")\n        assert_raises(\n            PersistenceStoreError,\n            self.manager.save,\n            None,\n            None,\n            None,\n        )\n\n    def test_save_while_disabled(self):\n        with self.manager.disabled():\n            self.manager.save(\"something\", \"name\", mock.Mock())\n        assert not self.store.save.mock_calls\n\n    def test_delete(self):\n        name = \"name\"\n        self.manager.delete(runstate.JOB_STATE, name)\n        key = 
f\"{runstate.JOB_STATE}{name}\"\n        self.store.save.assert_called_with([(key, None)])\n\n    def test_cleanup(self):\n        self.manager.cleanup()\n        self.store.cleanup.assert_called_with()\n\n    def test_disabled(self):\n        with self.manager.disabled():\n            assert not self.manager.enabled\n        assert self.manager.enabled\n\n    def test_disabled_with_exception(self):\n        def testfunc():\n            with self.manager.disabled():\n                raise ValueError()\n\n        assert_raises(ValueError, testfunc)\n        assert self.manager.enabled\n\n    def test_disabled_nested(self):\n        self.manager.enabled = False\n        with self.manager.disabled():\n            pass\n        assert not self.manager.enabled\n\n\nclass TestStateChangeWatcher(TestCase):\n    @setup\n    def setup_watcher(self):\n        self.watcher = StateChangeWatcher()\n        self.state_manager = mock.create_autospec(PersistentStateManager)\n        self.watcher.state_manager = self.state_manager\n\n    def test_update_from_config_no_change(self):\n        self.watcher.config = state_config = mock.Mock()\n        assert not self.watcher.update_from_config(state_config)\n        autospec_method(self.watcher.shutdown)\n        assert_equal(self.watcher.state_manager, self.state_manager)\n        assert not self.watcher.shutdown.mock_calls\n\n    @mock.patch(\n        \"tron.serialize.runstate.statemanager.PersistenceManagerFactory\",\n        autospec=True,\n    )\n    def test_update_from_config_changed(self, mock_factory):\n        state_config = mock.Mock()\n        autospec_method(self.watcher.shutdown)\n        assert self.watcher.update_from_config(state_config)\n        assert_equal(self.watcher.config, state_config)\n        self.watcher.shutdown.assert_called_with()\n        assert_equal(\n            self.watcher.state_manager,\n            mock_factory.from_config.return_value,\n        )\n        mock_factory.from_config.assert_called_with(state_config)\n\n    def test_save_job(self):\n        mock_job = mock.Mock()\n        self.watcher.save_job(mock_job)\n        self.watcher.state_manager.save.assert_called_with(\n            runstate.JOB_STATE,\n            mock_job.name,\n            mock_job.state_data,\n        )\n\n    def test_shutdown(self):\n        self.watcher.shutdown()\n        assert not self.watcher.state_manager.enabled\n        self.watcher.state_manager.cleanup.assert_called_with()\n\n    def test_disabled(self):\n        context = self.watcher.disabled()\n        assert_equal(self.watcher.state_manager.disabled.return_value, context)\n\n    def test_restore(self):\n        jobs = mock.Mock()\n        self.watcher.restore(jobs)\n        self.watcher.state_manager.restore.assert_called_with(jobs)\n\n    def test_handler_mesos_change(self):\n        self.watcher.handler(\n            observable=MesosClusterRepository,\n            event=None,\n        )\n        self.watcher.state_manager.save.assert_called_with(\n            runstate.MESOS_STATE,\n            MesosClusterRepository.name,\n            MesosClusterRepository.state_data,\n        )\n\n    def test_handler_job_state_change(self):\n        mock_job = mock.Mock(spec_set=Job)\n        with mock.patch.object(self.watcher, \"save_job\") as mock_save_job:\n            self.watcher.handler(\n                observable=mock_job,\n                event=Job.NOTIFY_STATE_CHANGE,\n            )\n            mock_save_job.assert_called_with(mock_job)\n\n    def 
test_handler_job_new_run(self):\n        mock_job = mock.Mock(spec_set=Job)\n        mock_job_run = mock.Mock(spec_set=JobRun)\n        with mock.patch.object(self.watcher, \"save_job\",) as mock_save_job, mock.patch.object(\n            self.watcher,\n            \"watch\",\n        ) as mock_watch:\n            # Error: No job run in event data, do nothing\n            self.watcher.handler(\n                observable=mock_job,\n                event=Job.NOTIFY_NEW_RUN,\n            )\n            assert mock_watch.call_count == 0\n            assert mock_save_job.call_count == 0\n\n            # Correct case\n            self.watcher.handler(\n                observable=mock_job,\n                event=Job.NOTIFY_NEW_RUN,\n                event_data=mock_job_run,\n            )\n            mock_watch.assert_called_with(mock_job_run)\n            assert mock_save_job.call_count == 0\n\n    def test_handler_job_run_state_change(self):\n        mock_job_run = mock.MagicMock(spec_set=JobRun)\n        self.watcher.handler(\n            observable=mock_job_run,\n            event=JobRun.NOTIFY_STATE_CHANGED,\n        )\n        self.watcher.state_manager.save.assert_called_with(\n            runstate.JOB_RUN_STATE,\n            mock_job_run.name,\n            mock_job_run.state_data,\n        )\n\n    def test_handler_job_run_removed(self):\n        mock_job_run = mock.MagicMock(spec_set=JobRun)\n        self.watcher.handler(\n            observable=mock_job_run,\n            event=JobRun.NOTIFY_REMOVED,\n        )\n        self.watcher.state_manager.delete.assert_called_with(\n            runstate.JOB_RUN_STATE,\n            mock_job_run.name,\n        )\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/serialize/runstate/yamlstore_test.py",
    "content": "import os\nimport tempfile\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron import yaml\nfrom tron.serialize.runstate import yamlstore\n\n\nclass TestYamlStateStore(TestCase):\n    @setup\n    def setup_store(self):\n        self.filename = os.path.join(tempfile.gettempdir(), \"yaml_state\")\n        self.store = yamlstore.YamlStateStore(self.filename)\n        self.test_data = {\n            \"one\": {\n                \"a\": 1,\n            },\n            \"two\": {\n                \"b\": 2,\n            },\n            \"three\": {\n                \"c\": 3,\n            },\n        }\n\n    @teardown\n    def teardown_store(self):\n        try:\n            os.unlink(self.filename)\n        except OSError:\n            pass\n\n    def test_restore(self):\n        with open(self.filename, \"w\") as fh:\n            yaml.dump(self.test_data, fh)\n\n        keys = [yamlstore.YamlKey(\"one\", \"a\"), yamlstore.YamlKey(\"three\", \"c\")]\n        state_data = self.store.restore(keys)\n        assert_equal(self.store.buffer, self.test_data)\n\n        expected = {keys[0]: 1, keys[1]: 3}\n        assert_equal(expected, state_data)\n\n    def test_restore_missing_type_key(self):\n        with open(self.filename, \"w\") as fh:\n            yaml.dump(self.test_data, fh)\n\n        keys = [yamlstore.YamlKey(\"seven\", \"a\")]\n        state_data = self.store.restore(keys)\n        assert_equal(self.store.buffer, self.test_data)\n        assert_equal({}, state_data)\n\n    def test_restore_file_missing(self):\n        state_data = self.store.restore([\"some\", \"keys\"])\n        assert_equal(state_data, {})\n\n    def test_save(self):\n        expected = {\"one\": {\"five\": \"dataz\"}, \"two\": {\"seven\": \"stars\"}}\n\n        key_value_pairs = [\n            (yamlstore.YamlKey(\"one\", \"five\"), \"barz\"),\n        ]\n        # Save first\n        self.store.save(key_value_pairs)\n\n        # Save second\n        key_value_pairs = [\n            (yamlstore.YamlKey(\"two\", \"seven\"), \"stars\"),\n            (yamlstore.YamlKey(\"one\", \"five\"), \"dataz\"),\n        ]\n        self.store.save(key_value_pairs)\n\n        assert_equal(self.store.buffer, expected)\n        with open(self.filename) as fh:\n            actual = yaml.load(fh)\n        assert_equal(actual, expected)\n\n    def test_delete(self):\n        expected = {\"state_a\": {\"five\": \"barz\"}}\n\n        key_value_pairs = [\n            (yamlstore.YamlKey(\"state_a\", \"five\"), \"barz\"),\n            (yamlstore.YamlKey(\"state_c\", \"five\"), \"delete_all_c\"),\n            (yamlstore.YamlKey(\"state_a\", \"six\"), \"delete_one_a\"),\n        ]\n        # Save first\n        self.store.save(key_value_pairs)\n\n        # Save second\n        key_value_pairs = [\n            (yamlstore.YamlKey(\"state_c\", \"five\"), None),\n            (yamlstore.YamlKey(\"state_a\", \"six\"), None),\n        ]\n        self.store.save(key_value_pairs)\n\n        assert_equal(self.store.buffer, expected)\n        with open(self.filename) as fh:\n            actual = yaml.load(fh)\n        assert_equal(actual, expected)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/ssh_test.py",
    "content": "from unittest import mock\n\nfrom twisted.python import failure\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_not_equal\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.testingutils import autospec_method\nfrom tron import ssh\n\n\nclass TestClientTransport(TestCase):\n    @setup\n    def setup_transport(self):\n        self.username = \"username\"\n        self.options = mock.Mock()\n        self.expected_pub_key = mock.Mock()\n        self.transport = ssh.ClientTransport(\n            self.username,\n            self.options,\n            self.expected_pub_key,\n        )\n\n    def test_verifyHostKey_missing_pub_key(self):\n        self.transport.expected_pub_key = None\n        result = self.transport.verifyHostKey(mock.Mock(), mock.Mock())\n        assert_equal(result.result, 1)\n\n    @mock.patch(\"tron.ssh.keys\", autospec=True)\n    def test_verifyHostKey_matching_pub_key(self, mock_keys):\n        mock_keys.Key.fromString.return_value = self.expected_pub_key\n        public_key = mock.Mock()\n        result = self.transport.verifyHostKey(public_key, mock.Mock())\n        assert_equal(result.result, 2)\n        mock_keys.Key.fromString.assert_called_with(public_key)\n\n    @mock.patch(\"tron.ssh.keys\", autospec=True)\n    def test_verifyHostKey_mismatch_pub_key(self, _):\n        public_key = mock.Mock()\n        result = self.transport.verifyHostKey(public_key, mock.Mock())\n        assert isinstance(result.result, failure.Failure)\n\n    def test_connnectionSecure(self):\n        self.transport.connection_defer = mock.Mock()\n        autospec_method(self.transport.requestService)\n        self.transport.connectionSecure()\n        conn = self.transport.connection_defer.mock_calls[0][1][0]\n        assert isinstance(conn, ssh.ClientConnection)\n        auth_service = self.transport.requestService.mock_calls[0][1][0]\n        assert isinstance(auth_service, ssh.NoPasswordAuthClient)\n\n\nclass TestSSHAuthOptions(TestCase):\n    def test_from_config_none(self):\n        ssh_conf = mock.Mock(agent=False, identities=[])\n        ssh_options = ssh.SSHAuthOptions.from_config(ssh_conf)\n        assert_equal(ssh_options[\"noagent\"], True)\n        assert_equal(ssh_options.identitys, [])\n\n    def test_from_config_both(self):\n        identities = [\"one\", \"two\"]\n        ssh_conf = mock.Mock(agent=True, identities=identities)\n        ssh_options = ssh.SSHAuthOptions.from_config(ssh_conf)\n        assert_equal(ssh_options[\"noagent\"], False)\n        assert_equal(ssh_options.identitys, identities)\n\n    def test__eq__true(self):\n        config = mock.Mock(agent=True, identities=[\"one\", \"two\"])\n        assert_equal(\n            ssh.SSHAuthOptions.from_config(config),\n            ssh.SSHAuthOptions.from_config(config),\n        )\n\n    def test__eq__false(self):\n        config = mock.Mock(agent=True, identities=[\"one\", \"two\"])\n        second_config = mock.Mock(agent=True, identities=[\"two\"])\n        assert_not_equal(\n            ssh.SSHAuthOptions.from_config(config),\n            ssh.SSHAuthOptions.from_config(second_config),\n        )\n"
  },
  {
    "path": "tests/test_id_rsa",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAz+pjFOAHRLcQc6X51SyysJurwBTben3OWCG46CB81faxTVrC\ngcVEM3HCHz5MU8jI0Wb+DK9AXU229yQ8OCRPt3CrzxyI031ZrzuagVNRb/hmiBN+\n+SNmIFQl97/bqht5DXKykUJQmP31crLz3+G0rGXONeJjZAqFFIA1NfMMAnRZMRMo\n417Xf/p3yYXCV0AbWqbMWYA8aQKWFs4EY36vOzUJyftJI1cGttvskcCd3dce4DpR\n8JEnI/rkRKOR19eCLCtftV6CmC3igLoSF1JDeQgkVZRdVG/pcwfV1a/SEAlxAO0e\nRcZccMVoPi/Ans2CxIeukQ6ThFyRMcaD+iku6QIBIwKCAQBw3lMLVQtCj0Nx+wP3\nYWhRPpBvlktCfs8Z5muxNjUjsc32yt6eN+Mx3qs1iDgQOcwZ53P4QeEctSjPTi9R\nrU/YnEACt7f9x7RXz+Yo8rcuJ8KhpC78Rmqj1ei5sUtcWA6DpKoUVzMROWf8b8Y3\ntQpO9W/xXaOrVir8gBzixcSw3xgBiobkxWeiwCrD8LsPQ7qrP27nf252wrIa8BJx\n7qFrRSa05I6Hinj3jMwXUiUPVXBG5m+t8Z4c9WITvnr91uwHuHZc9TV6HedqznmA\n0n6iq2rPHPj4D0gWZFKvTFXN06KD+ldAiyWWGg3nob7gzFkGZkbAk2KLUN7PLuv3\n0KRPAoGBAPg8DkyznKNVgenaKt5Jc9YdLCVd4uvvjuTgmBiezvE4iwVRkIm6Q0jq\nFLgN4c4fUd10Fa4QPwIFXQBK5UYTwoklvDttZ+LnwY5SEzGeaXHdi8suOPlcDvDp\nL5Fpbi6wGJk04MXcApIwLIQCQp+pJeIHbKoq7c2ABMuqp+QPbUwrAoGBANZrcXfo\np2fIxbhrdwS7yMT0jInX6HeDvDM6e67X3TZYAjZuj9PNtlg6qOT6mFWUzOvcEFzj\nRDfA2d53DpGfpALdQ8OBxyI7QrdcbeW0Br/pGCicW+ovbT3Z3Bfl+GOhav1EuHRR\nL0GlW2xN3h+oZRexUSPAA80ep9o/XGaAjGM7AoGBAOoMvQZ9dm4da9x9PlyOZeci\n0doWsWIcYihBeXZMlzs1T+BxeaZtymlRu8N6zZZ1TS/ietdRJXbvHSwpW9RbxgxI\nJoEso8dPipwhf8+yne8EFhdXd4wGVzrqfU6WmxYTv2vhZjblYYKFMUlD9ax66TQy\n4sxUXI6OpW+SRoaSM9oZAoGBAKVo0+AogSQtKtAYYyDoojGJc7rLIQu9ZUwXLDZs\nAmvAPDiebvPZNOT6DULtM6+77ooQKeFBmwZwMwqznYZH840uWNil8WOMzREbariD\nkC2lL+TQZCmvjslQSrNZolt8hbwQcQlF8UFFDAMXf3eB55XvL/cB1wvzE8Wel7zJ\nkN7VAoGBAIISjKvjU3VpKNX9CEQ7V6eb7OuUKo3gUTCMaunWwpVYZ4pR5UiOtib+\nlcDpTQybKQOqSgHKQ13L/7Nu0GY9ILlXhfNhRlflSNUNaGcwMykxm0tzh3TsQ3vv\n8cQYV+W+24bwK39tSzl+n/nnuDdly7aTS2nDrhwWIsL45DU1QXK8\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "tests/test_id_rsa.pub",
    "content": "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAz+pjFOAHRLcQc6X51SyysJurwBTben3OWCG46CB81faxTVrCgcVEM3HCHz5MU8jI0Wb+DK9AXU229yQ8OCRPt3CrzxyI031ZrzuagVNRb/hmiBN++SNmIFQl97/bqht5DXKykUJQmP31crLz3+G0rGXONeJjZAqFFIA1NfMMAnRZMRMo417Xf/p3yYXCV0AbWqbMWYA8aQKWFs4EY36vOzUJyftJI1cGttvskcCd3dce4DpR8JEnI/rkRKOR19eCLCtftV6CmC3igLoSF1JDeQgkVZRdVG/pcwfV1a/SEAlxAO0eRcZccMVoPi/Ans2CxIeukQ6ThFyRMcaD+iku6Q== rhettg@devvm1\n"
  },
  {
    "path": "tests/testingutils.py",
    "content": "import functools\nimport logging\nimport time\nfrom unittest import mock\n\nfrom testifycompat import class_setup\nfrom testifycompat import class_teardown\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.utils import timeutils\n\nlog = logging.getLogger(__name__)\n\n# TODO: remove when replaced with tron.eventloop\n\n\nclass MockReactorTestCase(TestCase):\n    \"\"\"Patch the reactor to a MockReactor.\"\"\"\n\n    # Override this in subclasses\n    module_to_mock = None\n\n    @class_setup\n    def class_setup_patched_reactor(self):\n        msg = \"%s must set a module_to_mock field\" % self.__class__\n        assert self.module_to_mock, msg\n        self.old_reactor = getattr(self.module_to_mock, \"reactor\")\n\n    @class_teardown\n    def teardown_patched_reactor(self):\n        setattr(self.module_to_mock, \"reactor\", self.old_reactor)\n\n    @setup\n    def setup_mock_reactor(self):\n        self.reactor = mock.MagicMock()\n        setattr(self.module_to_mock, \"reactor\", self.reactor)\n\n\n# TODO: remove\nclass MockTimeTestCase(TestCase):\n\n    now = None\n\n    @setup\n    def setup_current_time(self):\n        assert self.now, \"%s must set a now field\" % self.__class__\n        self.old_current_time = timeutils.current_time\n        timeutils.current_time = lambda tz=None: self.now\n\n    @teardown\n    def teardown_current_time(self):\n        timeutils.current_time = self.old_current_time\n        # Reset 'now' back to what was set on the class because some test may\n        # have changed it\n        self.now = self.__class__.now\n\n\ndef retry(max_tries=3, delay=0.1, exceptions=(KeyError, IndexError)):\n    \"\"\"A function decorator for re-trying an operation. Useful for MongoDB\n    which is only eventually consistent.\n    \"\"\"\n\n    def wrapper(f):\n        @functools.wraps(f)\n        def wrap(*args, **kwargs):\n            for _ in range(max_tries):\n                try:\n                    return f(*args, **kwargs)\n                except exceptions:\n                    time.sleep(delay)\n            raise\n\n        return wrap\n\n    return wrapper\n\n\ndef autospec_method(method, *args, **kwargs):\n    \"\"\"create an autospec for an instance method.\"\"\"\n    mocked_method = mock.create_autospec(method, *args, **kwargs)\n    setattr(method.__self__, method.__name__, mocked_method)\n"
  },
  {
    "path": "tests/tools/sync_tron_state_from_k8s_test.py",
    "content": "from unittest import mock\n\nimport pytest\nfrom kubernetes.client import V1ObjectMeta\nfrom kubernetes.client import V1Pod\nfrom kubernetes.client import V1PodStatus\n\nfrom tools.sync_tron_state_from_k8s import get_matching_pod\nfrom tools.sync_tron_state_from_k8s import get_tron_state_from_api\nfrom tools.sync_tron_state_from_k8s import update_tron_from_pods\n\n\ndef create_mock_pod(name: str, phase: str, labels: dict[str, str], creation_timestamp: str):\n    metadata = V1ObjectMeta(name=name, creation_timestamp=creation_timestamp, labels=labels)\n    status = V1PodStatus(phase=phase)\n    return V1Pod(metadata=metadata, status=status)\n\n\nclass TestSyncTronStateFromK8s:\n    @pytest.fixture(autouse=True)\n    def setup_test_data(self):\n        self.pods = {\n            p.metadata.name: p\n            for p in [\n                create_mock_pod(\n                    \"service.job.2.action\",\n                    \"Succeeded\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job.action\",\n                        \"tron.yelp.com/run_num\": \"2\",\n                    },\n                    \"2024-01-01T00:00:00\",\n                ),\n                create_mock_pod(\n                    \"service.job.3.action-nomatch\",\n                    \"Failed\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job.action\",\n                        \"tron.yelp.com/run_num\": \"3\",\n                    },\n                    \"2024-01-01T00:00:00\",\n                ),\n                create_mock_pod(\n                    \"service.job.4.action-nomatch\",\n                    \"Failed\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job.action\",\n                        \"tron.yelp.com/run_num\": \"4\",\n                    },\n                    \"2024-01-01T00:00:00\",\n                ),\n                create_mock_pod(\n                    \"service.job.4.action-nomatch-retry2\",\n                    \"Succeeded\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job.action\",\n                        \"tron.yelp.com/run_num\": \"4\",\n                    },\n                    \"2024-01-01T01:00:00\",\n                ),\n                create_mock_pod(\n                    \"service.job2.10.action\",\n                    \"Failed\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job2.action\",\n                        \"tron.yelp.com/run_num\": \"10\",\n                    },\n                    \"2024-01-01T01:00:00\",\n                ),\n                create_mock_pod(\n                    \"service.job2.10.action\",\n                    \"Running\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        \"paasta.yelp.com/instance\": \"job2.action\",\n                        \"tron.yelp.com/run_num\": \"10\",\n                    },\n                    \"2024-01-01T01:05:00\",\n                ),\n                create_mock_pod(\n                    # Technically this pod name would not actually exist\n          
          \"service.job_with_an_extremely_extremely_extremely_extremely_extremely_long_name.10.action\",\n                    \"Succeeded\",\n                    {\n                        \"paasta.yelp.com/service\": \"service\",\n                        # If PaaSTA's setup_tron_namespace changes how we create these labels, this test will need updating\n                        \"paasta.yelp.com/instance\": \"job_with_an_extremely_extremely_extremely_extremely_extrem-26i4\",\n                        \"tron.yelp.com/run_num\": \"10\",\n                    },\n                    \"2024-01-01T01:05:00\",\n                ),\n            ]\n        }\n\n    # 1 matching pod by labels\n    # 2 matching pod by labels\n    # no matching pod\n    # test matching by hashed instance name\n    @pytest.mark.parametrize(\n        \"job_name,run_num,expected_pod_name\",\n        [\n            (\"service.job\", \"3\", \"service.job.3.action-nomatch\"),\n            (\"service.job\", \"4\", \"service.job.4.action-nomatch-retry2\"),\n            (\"service.job2\", \"10\", None),\n            (\"service2.job\", \"1\", None),\n            (\n                \"service.job_with_an_extremely_extremely_extremely_extremely_extremely_long_name\",\n                \"10\",\n                \"service.job_with_an_extremely_extremely_extremely_extremely_extremely_long_name.10.action\",\n            ),\n        ],\n    )\n    def test_get_matching_pod(self, job_name, run_num, expected_pod_name):\n        test_action_run = {\"action_name\": \"action\", \"job_name\": f\"{job_name}\", \"run_num\": run_num}\n        matching_pod = get_matching_pod(test_action_run, self.pods)\n        assert matching_pod == self.pods.get(expected_pod_name)\n\n    # verify we send correct num_runs\n    # verify we are sending request for jobs + one for each job\n    @mock.patch(\"tools.sync_tron_state_from_k8s.get_client_config\", autospec=True)\n    @mock.patch(\"tools.sync_tron_state_from_k8s.Client\", autospec=True)\n    def test_get_tron_state_from_api(self, mock_client, mock_get_client_config):\n        mock_client.return_value = mock.Mock()\n        mock_client.return_value.jobs.return_value = [{\"url\": \"/uri\", \"name\": \"some job\"}]\n        mock_client.return_value.job.return_value = {\"runs\": []}\n        mock_get_client_config.return_value = {\"server\": \"https://localhost:8888\"}\n        get_tron_state_from_api(None, num_runs=10)\n\n        mock_client.assert_called_with(\"https://localhost:8888\")\n        mock_client.return_value.jobs.assert_called_with(\n            include_job_runs=False, include_action_runs=False, include_action_graph=False, include_node_pool=False\n        )\n\n        mock_client.return_value.job.assert_called_with(\"/api/uri\", include_action_runs=True, count=10)\n\n    @mock.patch(\"tools.sync_tron_state_from_k8s.subprocess.run\", autospec=True)\n    def test_update_tron(self, mock_subprocess_run):\n        # sorry for the blob of test data\n        tron_state = [\n            {\n                \"name\": \"service.job\",\n                \"runs\": [\n                    {\n                        \"runs\": [\n                            {\n                                \"id\": \"service.job.2.action\",\n                                \"action_name\": \"action\",\n                                \"run_num\": \"2\",\n                                \"job_name\": \"service.job\",\n                                \"state\": \"unknown\",\n                            }\n                        ]\n  
                  },\n                    {\n                        \"runs\": [\n                            {\n                                \"id\": \"service.job.3.action\",\n                                \"action_name\": \"action\",\n                                \"run_num\": \"3\",\n                                \"job_name\": \"service.job\",\n                                \"state\": \"running\",\n                            },\n                            {\n                                \"id\": \"service.job.3.action2\",\n                                \"action_name\": \"action2\",\n                                \"run_num\": \"3\",\n                                \"job_name\": \"service.job\",\n                                \"state\": \"running\",\n                            },\n                        ]\n                    },\n                    {\n                        \"runs\": [\n                            {\n                                \"id\": \"service.job.4.action\",\n                                \"action_name\": \"action\",\n                                \"run_num\": \"4\",\n                                \"job_name\": \"service.job\",\n                                \"state\": \"starting\",\n                            }\n                        ]\n                    },\n                    {\n                        \"runs\": [\n                            {\n                                \"id\": \"service.job.5.action\",\n                                \"action_name\": \"action\",\n                                \"run_num\": \"5\",\n                                \"job_name\": \"service.job\",\n                                \"state\": \"starting\",\n                            }\n                        ]\n                    },\n                ],\n            },\n            {\n                \"name\": \"service.job2\",\n                \"runs\": [\n                    {\n                        \"runs\": [\n                            {\n                                \"id\": \"service.job2.10.action\",\n                                \"action_name\": \"action\",\n                                \"run_num\": \"10\",\n                                \"job_name\": \"service.job2\",\n                                \"state\": \"succeeded\",\n                            },\n                        ]\n                    },\n                ],\n            },\n        ]\n\n        good_subprocess_run = mock.Mock(returncode=0)\n        bad_subprocess_run = mock.Mock(returncode=1)\n\n        expected_calls = [\n            mock.call([\"tronctl\", \"success\", \"service.job.2.action\"], capture_output=True, text=True),\n            mock.call([\"tronctl\", \"fail\", \"service.job.3.action\"], capture_output=True, text=True),\n            mock.call([\"tronctl\", \"success\", \"service.job.4.action\"], capture_output=True, text=True),\n        ]\n        mock_subprocess_run.return_value = good_subprocess_run\n\n        result = update_tron_from_pods(tron_state, self.pods, tronctl_wrapper=\"tronctl\", do_work=True)\n\n        assert result[\"updated\"] == [\"service.job.2.action\", \"service.job.3.action\", \"service.job.4.action\"]\n        assert result[\"error\"] == []\n        mock_subprocess_run.assert_has_calls(expected_calls, any_order=True)\n\n        mock_subprocess_run.return_value = bad_subprocess_run\n        result = update_tron_from_pods(tron_state, self.pods, tronctl_wrapper=\"tronctl\", do_work=True)\n        assert 
result[\"error\"] == [\"service.job.2.action\", \"service.job.3.action\", \"service.job.4.action\"]\n"
  },
  {
    "path": "tests/trond_test.py",
    "content": "import datetime\nimport textwrap\nfrom subprocess import CalledProcessError\nfrom textwrap import dedent\n\nimport pytest\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_gt\nfrom tests import sandbox\nfrom tron.core import actionrun\n\nBASIC_CONFIG = \"\"\"\nssh_options:\n    agent: true\n\nnodes:\n  - name: local\n    hostname: 'localhost'\n\nstate_persistence:\n    name: \"state_data.shelve\"\n    store_type: shelve\n\n\"\"\"\n\nSINGLE_ECHO_CONFIG = (\n    BASIC_CONFIG\n    + \"\"\"\njobs:\n  - name: \"echo_job\"\n    node: local\n    schedule: \"cron 0 * * * *\"\n    actions:\n      - name: \"echo_action\"\n        command: \"echo 'Echo!'\" \"\"\"\n)\n\nDOUBLE_ECHO_CONFIG = (\n    SINGLE_ECHO_CONFIG\n    + \"\"\"\n      - name: \"another_echo_action\"\n        command: \"echo 'Today is %(shortdate)s, which is the same\n                    as %(year)s-%(month)s-%(day)s' && false\" \"\"\"\n)\n\nALT_NAMESPACED_ECHO_CONFIG = \"\"\"\njobs:\n  - name: \"echo_job\"\n    node: local\n    schedule: \"cron 0 * * * *\"\n    actions:\n      - name: \"echo_action\"\n        command: \"echo 'Echo!'\" \"\"\"\n\nTOUCH_CLEANUP_FMT = \"\"\"\n    cleanup_action:\n      command: \"echo 'at last'\"\n\"\"\"\n\n\n@pytest.mark.skip(reason=\"We don't have a setup for sandbox tests yet\")\nclass TrondEndToEndTestCase(sandbox.SandboxTestCase):\n    def test_end_to_end_basic(self):\n        self.start_with_config(SINGLE_ECHO_CONFIG)\n        client = self.sandbox.client\n\n        assert_equal(\n            self.client.config(\"MASTER\")[\"config\"],\n            SINGLE_ECHO_CONFIG,\n        )\n\n        # reconfigure and confirm results\n        second_config = DOUBLE_ECHO_CONFIG + TOUCH_CLEANUP_FMT\n        self.sandbox.tronfig(second_config)\n        assert_equal(client.config(\"MASTER\")[\"config\"], second_config)\n\n        # reconfigure, by uploading a third configuration\n        self.sandbox.tronfig(ALT_NAMESPACED_ECHO_CONFIG, name=\"ohce\")\n        self.sandbox.client.home()\n\n        # run the job and check its output\n        echo_job_name = \"MASTER.echo_job\"\n        job_url = client.get_url(echo_job_name)\n        action_url = client.get_url(\"MASTER.echo_job.1.echo_action\")\n\n        self.sandbox.tronctl(\"start\", echo_job_name)\n\n        def wait_on_cleanup():\n            return (\n                len(client.job(job_url)[\"runs\"]) >= 2\n                and client.action_runs(action_url)[\"state\"] == actionrun.ActionRun.SUCCEEDED\n            )\n\n        sandbox.wait_on_sandbox(wait_on_cleanup)\n\n        echo_action_run = client.action_runs(action_url)\n        another_action_url = client.get_url(\n            \"MASTER.echo_job.1.another_echo_action\",\n        )\n        other_act_run = client.action_runs(another_action_url)\n        assert_equal(\n            echo_action_run[\"state\"],\n            actionrun.ActionRun.SUCCEEDED,\n        )\n        assert_equal(echo_action_run[\"stdout\"], [\"Echo!\"])\n        assert_equal(\n            other_act_run[\"state\"],\n            actionrun.ActionRun.FAILED,\n        )\n\n        now = datetime.datetime.now()\n        stdout = now.strftime(\n            \"Today is %Y-%m-%d, which is the same as %Y-%m-%d\",\n        )\n        assert_equal(other_act_run[\"stdout\"], [stdout])\n\n        job_runs_url = client.get_url(\"%s.1\" % echo_job_name)\n        assert_equal(\n            client.job_runs(job_runs_url)[\"state\"],\n            actionrun.ActionRun.FAILED,\n        )\n\n    def 
test_node_reconfig(self):\n        job_config = dedent(\n            \"\"\"\n            jobs:\n                - name: a_job\n                  node: local\n                  schedule: \"cron * * * * *\"\n                  actions:\n                    - name: first_action\n                      command: \"echo something\"\n        \"\"\"\n        )\n        second_config = (\n            dedent(\n                \"\"\"\n            ssh_options:\n                agent: true\n\n            nodes:\n              - name: local\n                hostname: '127.0.0.1'\n\n            state_persistence:\n                name: \"state_data.shelve\"\n                store_type: shelve\n\n        \"\"\"\n            )\n            + job_config\n        )\n        self.start_with_config(BASIC_CONFIG + job_config)\n\n        job_url = self.client.get_url(\"MASTER.a_job.0\")\n        sandbox.wait_on_state(\n            self.client.job_runs,\n            job_url,\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n        self.sandbox.tronfig(second_config)\n\n        job_url = self.client.get_url(\"MASTER.a_job\")\n\n        def wait_on_next_run():\n            last_run = self.client.job(job_url)[\"runs\"][0]\n            return last_run[\"node\"][\"hostname\"] == \"127.0.0.1\"\n\n        sandbox.wait_on_sandbox(wait_on_next_run)\n\n\n@pytest.mark.skip(reason=\"We don't have a setup for sandbox tests yet\")\nclass TronCommandsTestCase(sandbox.SandboxTestCase):\n    def test_tronview(self):\n        self.start_with_config(SINGLE_ECHO_CONFIG)\n        expected = \"\"\"\\nServices:\\nNo Services\\n\\n\\nJobs:\n            Name       State       Scheduler           Last Success\n            MASTER.echo_job   enabled     cron 1:00:00    None\n            \"\"\"\n\n        def remove_line_space(s):\n            return [line.replace(\" \", \"\") for line in s.split(\"\\n\")]\n\n        actual = self.sandbox.tronview()[0]\n        assert_equal(remove_line_space(actual), remove_line_space(expected))\n\n    def test_tronctl_with_job(self):\n        self.start_with_config(SINGLE_ECHO_CONFIG + TOUCH_CLEANUP_FMT)\n        job_name = \"MASTER.echo_job\"\n        job_url = self.client.get_url(job_name)\n        self.sandbox.tronctl(\"start\", job_name)\n\n        cleanup_url = self.client.get_url(\"MASTER.echo_job.1.cleanup\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            cleanup_url,\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n        action_run_url = self.client.get_url(\"MASTER.echo_job.1.echo_action\")\n        assert_equal(\n            self.client.action_runs(action_run_url)[\"state\"],\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n        job_run_url = self.client.get_url(\"MASTER.echo_job.1\")\n        assert_equal(\n            self.client.job_runs(job_run_url)[\"state\"],\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n        assert_equal(self.client.job(job_url)[\"status\"], \"enabled\")\n        self.sandbox.tronctl(\"disable\", job_name)\n        sandbox.wait_on_state(self.client.job, job_url, \"disabled\", \"status\")\n\n    def test_tronfig(self):\n        self.start_with_config(SINGLE_ECHO_CONFIG)\n        stdout, stderr = self.sandbox.tronfig()\n        assert_equal(stdout.rstrip(), SINGLE_ECHO_CONFIG.rstrip())\n\n    def test_tronfig_failure(self):\n        self.start_with_config(SINGLE_ECHO_CONFIG)\n        bad_config = \"this is not valid: yaml: is it?\"\n\n        def test_return_code(exc):\n            
assert_equal(exc.returncode, 1)\n\n        with pytest.raises(CalledProcessError):\n            test_return_code(self.sandbox.tronfig, bad_config)\n\n\n@pytest.mark.skip(reason=\"We don't have a setup for sandbox tests yet\")\nclass JobEndToEndTestCase(sandbox.SandboxTestCase):\n    def test_cleanup_on_failure(self):\n        config = (\n            BASIC_CONFIG\n            + dedent(\n                \"\"\"\n        jobs:\n          - name: \"failjob\"\n            node: local\n            schedule: \"daily 04:20\"\n            actions:\n              - name: \"failaction\"\n                command: \"failplz\"\n        \"\"\"\n            )\n            + TOUCH_CLEANUP_FMT\n        )\n        self.start_with_config(config)\n\n        action_run_url = self.client.get_url(\"MASTER.failjob.0.failaction\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            action_run_url,\n            actionrun.ActionRun.FAILED,\n        )\n\n        action_run_url = self.client.get_url(\"MASTER.failjob.1.cleanup\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            action_run_url,\n            actionrun.ActionRun.SUCCEEDED,\n        )\n        job_runs = self.client.job(\n            self.client.get_url(\"MASTER.failjob\"),\n        )[\"runs\"]\n        assert_gt(len(job_runs), 1)\n\n    def test_skip_failed_actions(self):\n        config = BASIC_CONFIG + dedent(\n            \"\"\"\n        jobs:\n          - name: \"multi_step_job\"\n            node: local\n            schedule: \"daily 04:20\"\n            actions:\n              - name: \"broken\"\n                command: \"failingcommand\"\n              - name: \"works\"\n                command: \"echo ok\"\n                requires: [broken]\n        \"\"\"\n        )\n        self.start_with_config(config)\n        action_run_url = self.client.get_url(\"MASTER.multi_step_job.0.broken\")\n        waiter = sandbox.build_waiter_func(\n            self.client.action_runs,\n            action_run_url,\n        )\n\n        waiter(actionrun.ActionRun.FAILED)\n        self.sandbox.tronctl(\"skip\", \"MASTER.multi_step_job.0.broken\")\n        waiter(actionrun.ActionRun.SKIPPED)\n\n        action_run_url = self.client.get_url(\"MASTER.multi_step_job.0.works\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            action_run_url,\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n        job_run_url = self.client.get_url(\"MASTER.multi_step_job.0\")\n        sandbox.wait_on_state(\n            self.client.job_runs,\n            job_run_url,\n            actionrun.ActionRun.SUCCEEDED,\n        )\n\n    def test_failure_on_multi_step_job_doesnt_wedge_tron(self):\n        config = BASIC_CONFIG + dedent(\n            \"\"\"\n            jobs:\n                -   name: \"random_failure_job\"\n                    node: local\n                    queueing: true\n                    schedule: \"daily 04:20\"\n                    actions:\n                        -   name: \"fa\"\n                            command: \"sleep 0.1; failplz\"\n                        -   name: \"sa\"\n                            command: \"echo 'you will never see this'\"\n                            requires: [fa]\n        \"\"\"\n        )\n        self.start_with_config(config)\n        job_url = self.client.get_url(\"MASTER.random_failure_job\")\n\n        def wait_on_random_failure_job():\n            return len(self.client.job(job_url)[\"runs\"]) >= 4\n\n        
sandbox.wait_on_sandbox(wait_on_random_failure_job)\n\n        job_runs = self.client.job(job_url)[\"runs\"]\n        expected = [actionrun.ActionRun.FAILED for _ in range(3)]\n        assert_equal([run[\"state\"] for run in job_runs[-3:]], expected)\n\n    def test_cancel_schedules_a_new_run(self):\n        config = BASIC_CONFIG + dedent(\n            \"\"\"\n            jobs:\n                -   name: \"a_job\"\n                    node: local\n                    schedule: \"daily 05:00:00\"\n                    actions:\n                        -   name: \"first_action\"\n                            command: \"echo OK\"\n        \"\"\"\n        )\n        self.start_with_config(config)\n        job_name = \"MASTER.a_job\"\n        job_url = self.client.get_url(job_name)\n\n        self.sandbox.tronctl(\"cancel\", \"%s.0\" % job_name)\n\n        def wait_on_cancel():\n            return len(self.client.job(job_url)[\"runs\"]) == 2\n\n        sandbox.wait_on_sandbox(wait_on_cancel)\n\n        run_states = [run[\"state\"] for run in self.client.job(job_url)[\"runs\"]]\n        expected = [\n            actionrun.ActionRun.SCHEDULED,\n            actionrun.ActionRun.CANCELLED,\n        ]\n        assert_equal(run_states, expected)\n\n    def test_job_queueing_false_with_overlap(self):\n        \"\"\"Test that a job that has queueing false properly cancels an\n        overlapping job run.\n        \"\"\"\n        config = BASIC_CONFIG + dedent(\n            \"\"\"\n            jobs:\n                -   name: \"cancel_overlap\"\n                    schedule: \"cron * * * * *\"\n                    queueing: False\n                    node: local\n                    actions:\n                        -   name: \"do_something\"\n                            command: \"sleep 3s\"\n                        -   name: \"do_other\"\n                            command: \"sleep 3s\"\n                    cleanup_action:\n                        command: \"echo done\"\n        \"\"\"\n        )\n        self.start_with_config(config)\n        job_url = self.client.get_url(\"MASTER.cancel_overlap\")\n        job_run_url = self.client.get_url(\"MASTER.cancel_overlap.1\")\n\n        def wait_on_job_schedule():\n            return len(self.client.job(job_url)[\"runs\"]) == 2\n\n        sandbox.wait_on_sandbox(wait_on_job_schedule)\n\n        sandbox.wait_on_state(\n            self.client.job,\n            job_run_url,\n            actionrun.ActionRun.CANCELLED,\n        )\n\n        action_run_states = [action_run[\"state\"] for action_run in self.client.job_runs(job_run_url)[\"runs\"]]\n        expected = [actionrun.ActionRun.CANCELLED for _ in range(len(action_run_states))]\n        assert_equal(action_run_states, expected)\n\n    def test_trond_restart_job_with_run_history(self):\n        config = BASIC_CONFIG + textwrap.dedent(\n            \"\"\"\n           jobs:\n              - name: fast_job\n                node: local\n                schedule: daily 04:20\n                actions:\n                  - name: single_act\n                    command: \"sleep 20 && echo good\"\n        \"\"\"\n        )\n        self.start_with_config(config)\n\n        action_run_url = self.client.get_url(\"MASTER.fast_job.0.single_act\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            action_run_url,\n            actionrun.ActionRun.RUNNING,\n        )\n\n        self.restart_trond()\n\n        assert_equal(\n            self.client.job_runs(action_run_url)[\"state\"],\n       
     actionrun.ActionRun.UNKNOWN,\n        )\n\n        next_run_url = self.client.get_url(\"MASTER.fast_job.-1.single_act\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            next_run_url,\n            actionrun.ActionRun.RUNNING,\n        )\n\n    def test_trond_restart_job_running_with_dependencies(self):\n        config = BASIC_CONFIG + textwrap.dedent(\n            \"\"\"\n            jobs:\n                - name: complex_job\n                  node: local\n                  schedule: cron * * * * *\n                  actions:\n                    - name: first_act\n                      command: sleep 20 && echo \"I'm waiting\"\n                    - name: following_act\n                      command: echo \"thing\"\n                      requires: ['first_act']\n                    - name: last_act\n                      command: echo foo\n                      requires: ['following_act']\n        \"\"\"\n        )\n        self.start_with_config(config)\n        job_name = \"MASTER.complex_job\"\n        self.sandbox.tronctl(\"start\", job_name)\n\n        action_run_url = self.client.get_url(\"MASTER.complex_job.1.first_act\")\n        sandbox.wait_on_state(\n            self.client.action_runs,\n            action_run_url,\n            actionrun.ActionRun.RUNNING,\n        )\n\n        self.restart_trond()\n\n        assert_equal(\n            self.client.job_runs(action_run_url)[\"state\"],\n            actionrun.ActionRun.UNKNOWN,\n        )\n\n        for followup_action_run in (\"following_act\", \"last_act\"):\n            url = self.client.get_url(\n                f\"{job_name}.1.{followup_action_run}\",\n            )\n            assert_equal(\n                self.client.action_runs(url)[\"state\"],\n                actionrun.ActionRun.QUEUED,\n            )\n"
  },
  {
    "path": "tests/trondaemon_test.py",
    "content": "import os\nimport tempfile\nimport time\nfrom unittest import mock\n\nfrom testifycompat import setup\nfrom testifycompat import teardown\nfrom testifycompat import TestCase\nfrom tron.trondaemon import TronDaemon\n\n\nclass TronDaemonTestCase(TestCase):\n    @setup\n    @mock.patch(\"tron.trondaemon.setup_logging\", mock.Mock(), autospec=None)\n    @mock.patch(\"signal.signal\", mock.Mock(), autospec=None)\n    def setup(self):\n        self.tmpdir = tempfile.TemporaryDirectory()\n        trond_opts = mock.Mock()\n        trond_opts.working_dir = self.tmpdir.name\n        trond_opts.lock_file = os.path.join(self.tmpdir.name, \"lockfile\")\n        self.trond = TronDaemon(trond_opts)\n\n    @teardown\n    def teardown(self):\n        self.tmpdir.cleanup()\n\n    @mock.patch(\"tron.trondaemon.setup_logging\", mock.Mock(), autospec=None)\n    @mock.patch(\"signal.signal\", mock.Mock(), autospec=None)\n    def test_init(self):\n        daemon = TronDaemon.__new__(TronDaemon)  # skip __init__\n        options = mock.Mock()\n\n        with mock.patch(\n            \"tron.utils.flock\",\n            autospec=True,\n        ) as mock_flock:\n            daemon.__init__(options)\n            assert mock_flock.call_count == 0\n\n    def test_run_uses_context(self):\n        with mock.patch(\"tron.trondaemon.setup_logging\", mock.Mock(), autospec=None,), mock.patch(\n            \"tron.trondaemon.no_daemon_context\",\n            mock.Mock(),\n            autospec=None,\n        ) as ndc:\n            ndc.return_value = mock.MagicMock()\n            boot_time = time.time()\n            ndc.return_value.__enter__.side_effect = RuntimeError()\n            daemon = TronDaemon(mock.Mock())\n            try:\n                daemon.run(boot_time)\n            except Exception:\n                pass\n            assert ndc.call_count == 1\n\n    def test_run_manhole_new_manhole(self):\n        with open(self.trond.manhole_sock, \"w+\"):\n            pass\n\n        with mock.patch(\n            \"twisted.internet.reactor.listenUNIX\",\n            autospec=True,\n        ) as mock_listenUNIX:\n            self.trond._run_manhole()\n\n            assert mock_listenUNIX.call_count == 1\n            # _run_manhole will remove the old manhole.sock but not recreate\n            # it because we mocked out listenUNIX\n            assert not os.path.exists(self.trond.manhole_sock)\n"
  },
  {
    "path": "tests/utils/__init__.py",
    "content": ""
  },
  {
    "path": "tests/utils/collections_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import assert_not_in\nfrom testifycompat import assert_raises\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_mock_calls\nfrom tests.testingutils import autospec_method\nfrom tron.utils import collections\n\n\nclass TestMappingCollections(TestCase):\n    @setup\n    def setup_collection(self):\n        self.name = \"some_name\"\n        self.collection = collections.MappingCollection(self.name)\n\n    def test_filter_by_name(self):\n        autospec_method(self.collection.remove)\n        self.collection.update(dict.fromkeys([\"c\", \"d\", \"e\"]))\n        self.collection.filter_by_name([\"a\", \"c\"])\n        expected = [mock.call(name) for name in [\"d\", \"e\"]]\n        assert_mock_calls(expected, self.collection.remove.mock_calls)\n\n    def test_remove_missing(self):\n        assert_raises(ValueError, self.collection.remove, \"name\")\n\n    def test_remove(self):\n        name = \"the_name\"\n        self.collection[name] = item = mock.Mock()\n        self.collection.remove(name)\n        assert_not_in(name, self.collection)\n        item.disable.assert_called_with()\n\n    def test_contains_item_false(self):\n        mock_item, mock_func = mock.Mock(), mock.Mock()\n        assert not self.collection.contains_item(mock_item, mock_func)\n        assert not mock_func.mock_calls\n\n    def test_contains_item_not_equal(self):\n        mock_item, mock_func = mock.Mock(), mock.Mock()\n        self.collection[mock_item.get_name()] = \"other item\"\n        result = self.collection.contains_item(mock_item, mock_func)\n        assert_equal(result, mock_func.return_value)\n        mock_func.assert_called_with(mock_item)\n\n    def test_contains_item_true(self):\n        mock_item, mock_func = mock.Mock(), mock.Mock()\n        self.collection[mock_item.get_name()] = mock_item\n        assert self.collection.contains_item(mock_item, mock_func)\n\n    def test_add_contains(self):\n        autospec_method(self.collection.contains_item)\n        item, update_func = mock.Mock(), mock.Mock()\n        assert not self.collection.add(item, update_func)\n        assert_not_in(item.get_name(), self.collection)\n\n    def test_add_new(self):\n        autospec_method(self.collection.contains_item, return_value=False)\n        item, update_func = mock.Mock(), mock.Mock()\n        assert self.collection.add(item, update_func)\n        assert_in(item.get_name(), self.collection)\n\n    def test_replace(self):\n        autospec_method(self.collection.add)\n        item = mock.Mock()\n        self.collection.replace(item)\n        self.collection.add.assert_called_with(\n            item,\n            self.collection.remove_item,\n        )\n"
  },
  {
    "path": "tests/utils/crontab_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.utils import crontab\n\n\nclass TestConvertPredefined(TestCase):\n    def test_convert_predefined_valid(self):\n        expected = crontab.PREDEFINED_SCHEDULE[\"@hourly\"]\n        assert_equal(crontab.convert_predefined(\"@hourly\"), expected)\n\n    def test_convert_predefined_invalid(self):\n        assert_raises(ValueError, crontab.convert_predefined, \"@bogus\")\n\n    def test_convert_predefined_none(self):\n        line = \"something else\"\n        assert_equal(crontab.convert_predefined(line), line)\n\n\nclass TestParseCrontab(TestCase):\n    def test_parse_asterisk(self):\n        line = \"* * * * *\"\n        actual = crontab.parse_crontab(line)\n        assert_equal(actual[\"minutes\"], None)\n        assert_equal(actual[\"hours\"], None)\n        assert_equal(actual[\"months\"], None)\n\n    @mock.patch(\"tron.utils.crontab.MinuteFieldParser.parse\", autospec=True)\n    @mock.patch(\"tron.utils.crontab.HourFieldParser.parse\", autospec=True)\n    @mock.patch(\"tron.utils.crontab.MonthdayFieldParser.parse\", autospec=True)\n    @mock.patch(\"tron.utils.crontab.MonthFieldParser.parse\", autospec=True)\n    @mock.patch(\"tron.utils.crontab.WeekdayFieldParser.parse\", autospec=True)\n    def test_parse(self, mock_dow, mock_month, mock_monthday, mock_hour, mock_min):\n        line = \"* * * * *\"\n        actual = crontab.parse_crontab(line)\n        assert_equal(actual[\"minutes\"], mock_min.return_value)\n        assert_equal(actual[\"hours\"], mock_hour.return_value)\n        assert_equal(actual[\"monthdays\"], mock_monthday.return_value)\n        assert_equal(actual[\"months\"], mock_month.return_value)\n        assert_equal(actual[\"weekdays\"], mock_dow.return_value)\n\n    def test_full_crontab_line(self):\n        line = \"*/15 0 1,15 * 1-5\"\n        expected = {\n            \"minutes\": [0, 15, 30, 45],\n            \"hours\": [0],\n            \"monthdays\": [1, 15],\n            \"months\": None,\n            \"weekdays\": [1, 2, 3, 4, 5],\n            \"ordinals\": None,\n        }\n        assert_equal(crontab.parse_crontab(line), expected)\n\n    def test_full_crontab_line_with_last(self):\n        line = \"0 0 L * *\"\n        expected = {\n            \"minutes\": [0],\n            \"hours\": [0],\n            \"monthdays\": [\"LAST\"],\n            \"months\": None,\n            \"weekdays\": None,\n            \"ordinals\": None,\n        }\n        assert_equal(crontab.parse_crontab(line), expected)\n\n\nclass TestMinuteFieldParser(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MinuteFieldParser()\n\n    def test_validate_bounds(self):\n        assert_equal(self.parser.validate_bounds(0), 0)\n        assert_equal(self.parser.validate_bounds(59), 59)\n        assert_raises(ValueError, self.parser.validate_bounds, 60)\n\n    def test_get_values_asterisk(self):\n        assert_equal(self.parser.get_values(\"*\"), list(range(0, 60)))\n\n    def test_get_values_min_only(self):\n        assert_equal(self.parser.get_values(\"4\"), [4])\n        assert_equal(self.parser.get_values(\"33\"), [33])\n\n    def test_get_values_with_step(self):\n        assert_equal(self.parser.get_values(\"*/10\"), [0, 10, 20, 30, 40, 50])\n\n    def test_get_values_with_step_and_range(self):\n        
assert_equal(self.parser.get_values(\"10-30/10\"), [10, 20, 30])\n\n    def test_get_values_with_step_and_overflow_range(self):\n        assert_equal(self.parser.get_values(\"30-0/10\"), [30, 40, 50, 0])\n\n    def test_parse_with_groups(self):\n        assert_equal(self.parser.parse(\"5,1,7,8,5\"), [1, 5, 7, 8])\n\n    def test_parse_with_groups_and_ranges(self):\n        expected = [0, 1, 11, 13, 15, 17, 19, 20, 21, 40]\n        assert_equal(self.parser.parse(\"1,11-22/2,*/20\"), expected)\n\n\nclass TestMonthFieldParser(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MonthFieldParser()\n\n    def test_parse(self):\n        expected = [1, 2, 3, 7, 12]\n        assert_equal(self.parser.parse(\"DEC, Jan-Feb, jul, MaR\"), expected)\n\n\nclass TestWeekdayFieldParser(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.WeekdayFieldParser()\n\n    def test_parser(self):\n        expected = [0, 3, 5, 6]\n        assert_equal(self.parser.parse(\"Sun, 3, FRI, SaT-Sun\"), expected)\n\n\nclass TestMonthdayFieldParser(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MonthdayFieldParser()\n\n    def test_parse_last(self):\n        expected = [5, 6, \"LAST\"]\n        assert_equal(self.parser.parse(\"5, 6, L\"), expected)\n\n\nclass TestComplexExpressions(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MinuteFieldParser()\n\n    def test_complex_expression(self):\n        expected = [0, 10, 20, 30, 40, 50, 55]\n        assert_equal(self.parser.parse(\"*/10,55\"), expected)\n\n\nclass TestInvalidInputs(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MinuteFieldParser()\n\n    def test_invalid_expression(self):\n        with assert_raises(ValueError):\n            self.parser.parse(\"61\")\n\n\nclass TestBoundaryValues(TestCase):\n    @setup\n    def setup_parser(self):\n        self.parser = crontab.MinuteFieldParser()\n\n    def test_boundary_values(self):\n        assert_equal(self.parser.parse(\"0\"), [0])\n        assert_equal(self.parser.parse(\"59\"), [59])\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/utils/logreader_test.py",
    "content": "import datetime\nfrom unittest import mock\n\nimport pytest\nimport yaml\n\nimport tron.utils.logreader\nfrom tron.utils.logreader import decompose_action_id\nfrom tron.utils.logreader import read_log_stream_for_action_run\n\ntry:\n    from logreader.readers import S3LogsReader  # noqa: F401\nexcept ImportError:\n    pytest.skip(\"yelp logs readers not available, skipping tests\", allow_module_level=True)\n\n\n# used for an explicit patch of staticconf.read return value for an arbitrary namespace\ndef static_conf_patch(args):\n    return lambda arg, namespace, default=None: args.get(arg)\n\n\ndef test_read_log_stream_for_action_run_not_available():\n    with mock.patch(\"tron.utils.logreader.s3reader_available\", False):\n        output = tron.utils.logreader.read_log_stream_for_action_run(\n            \"namespace.job.1234.action\",\n            component=\"stdout\",\n            min_date=datetime.datetime.now(),\n            max_date=datetime.datetime.now(),\n            paasta_cluster=\"fake\",\n        )\n    assert \"unable to display logs\" in output[0]\n\n\ndef test_read_log_stream_for_action_run():\n    with mock.patch(\n        \"staticconf.read\",\n        autospec=True,\n        side_effect=static_conf_patch({\"logging.max_lines_to_display\": 1000}),\n    ), mock.patch(\"tron.config.static_config.build_configuration_watcher\", autospec=True,), mock.patch(\n        \"tron.config.static_config.load_yaml_file\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.utils.logreader.get_superregion\", autospec=True, return_value=\"fake\"\n    ), mock.patch(\n        \"tron.utils.logreader.S3LogsReader\", autospec=True\n    ) as mock_s3_reader:\n\n        mock_s3_reader.return_value.get_log_reader.return_value = iter(\n            [\n                \"\"\"{\n                \"tron_run_number\": 1234,\n                \"component\": \"stdout\",\n                \"message\": \"line 1\",\n                \"timestamp\": \"2021-01-02T18:10:09.169421619Z\",\n                \"cluster\": \"fake\"\n            }\"\"\",\n                \"\"\"{\n                \"tron_run_number\": 1234,\n                \"component\": \"stdout\",\n                \"message\": \"line 2\",\n                \"timestamp\": \"2021-01-02T18:11:09.169421619Z\",\n                \"cluster\": \"fake\"\n            }\"\"\",\n                \"\"\"{\n                \"tron_run_number\": 1234,\n                \"component\": \"stderr\",\n                \"message\": \"line 3\",\n                \"timestamp\": \"2021-01-02T18:12:09.169421619Z\",\n                \"cluster\": \"fake\"\n            }\"\"\",\n            ]\n        )\n\n        output = read_log_stream_for_action_run(\n            \"namespace.job.1234.action\",\n            component=\"stdout\",\n            min_date=datetime.datetime.now(),\n            max_date=datetime.datetime.now(),\n            paasta_cluster=\"fake\",\n        )\n\n    mock_s3_reader.return_value.get_log_reader.assert_called_once_with(\n        log_name=\"stream_paasta_app_output_namespace_job__action\", start_datetime=mock.ANY, end_datetime=mock.ANY\n    )\n    assert output == [\"line 1\", \"line 2\"]\n\n\n@pytest.mark.parametrize(\n    \"local_datetime, expected_datetime\",\n    [\n        (\n            datetime.datetime(2024, 2, 29, 23, 59, 59, tzinfo=datetime.timezone(datetime.timedelta(hours=+3))),\n            datetime.datetime(2024, 2, 29, 20, 59, 59, tzinfo=datetime.timezone.utc),\n        ),\n        (\n            datetime.datetime(2024, 2, 29, 23, 
59, 59, tzinfo=datetime.timezone(datetime.timedelta(hours=-3))),\n            datetime.datetime(2024, 3, 1, 2, 59, 59, tzinfo=datetime.timezone.utc),\n        ),\n    ],\n)\ndef test_read_log_stream_for_action_run_tz(local_datetime, expected_datetime):\n    with mock.patch(\n        \"staticconf.read\",\n        autospec=True,\n        side_effect=static_conf_patch({\"logging.max_lines_to_display\": 1000}),\n    ), mock.patch(\"tron.config.static_config.build_configuration_watcher\", autospec=True,), mock.patch(\n        \"tron.config.static_config.load_yaml_file\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.utils.logreader.get_superregion\", autospec=True, return_value=\"fake\"\n    ), mock.patch(\n        \"tron.utils.logreader.S3LogsReader\", autospec=True\n    ) as mock_s3_log_reader:\n\n        read_log_stream_for_action_run(\n            \"namespace.job.1234.action\",\n            component=\"stdout\",\n            min_date=local_datetime,\n            max_date=local_datetime,\n            paasta_cluster=\"fake\",\n        )\n    mock_s3_log_reader.return_value.get_log_reader.assert_called_once_with(\n        log_name=mock.ANY, start_datetime=expected_datetime, end_datetime=expected_datetime\n    )\n\n\ndef test_read_log_stream_for_action_run_for_long_output():\n    # 1000 represents the number of lines that are expected to be\n    # outputted by the test, which is similar to the logging.max_lines_to_display\n    # in tron.yaml in srv-configs\n    max_lines = 1000\n    with mock.patch(\"tron.utils.logreader.get_superregion\", autospec=True, return_value=\"fake\",), mock.patch(\n        \"tron.config.static_config.build_configuration_watcher\",\n        autospec=True,\n    ), mock.patch(\n        \"staticconf.read\", autospec=True, side_effect=static_conf_patch({\"logging.max_lines_to_display\": 1000})\n    ), mock.patch(\n        \"tron.config.static_config.load_yaml_file\",\n        autospec=True,\n    ), mock.patch(\n        \"tron.utils.logreader.S3LogsReader\", autospec=True\n    ) as mock_s3_reader:\n\n        with open(\"./tests/utils/shortOutputTest.txt\") as f:\n            content_list = f.readlines()\n\n        mock_s3_reader.return_value.get_log_reader.return_value = iter(content_list)\n\n        output = read_log_stream_for_action_run(\n            \"namespace.job.228.action\",\n            component=\"stdout\",\n            min_date=datetime.datetime.now(),\n            max_date=datetime.datetime.now(),\n            paasta_cluster=\"infrastage\",\n        )\n\n    mock_s3_reader.return_value.get_log_reader.assert_called_once_with(\n        log_name=\"stream_paasta_app_output_namespace_job__action\", start_datetime=mock.ANY, end_datetime=mock.ANY\n    )\n    assert len(output) == max_lines + 1\n\n\ndef test_decompose_action_id_file_not_found():\n    action_run_id = \"namespace.job.1234.action\"\n    paasta_cluster = \"fake_cluster\"\n    with mock.patch(\"builtins.open\", side_effect=FileNotFoundError):\n        namespace, job_name, run_num, action = decompose_action_id(action_run_id, paasta_cluster)\n        assert namespace == \"namespace\"\n        assert job_name == \"job\"\n        assert run_num == \"1234\"\n        assert action == \"action\"\n\n\ndef test_decompose_action_id_yaml_error():\n    action_run_id = \"namespace.job.1234.action\"\n    paasta_cluster = \"fake_cluster\"\n    with mock.patch(\"builtins.open\", mock.mock_open(read_data=\"invalid_yaml\")), mock.patch(\n        \"yaml.safe_load\", side_effect=yaml.YAMLError\n    ):\n        
namespace, job_name, run_num, action = decompose_action_id(action_run_id, paasta_cluster)\n        assert namespace == \"namespace\"\n        assert job_name == \"job\"\n        assert run_num == \"1234\"\n        assert action == \"action\"\n\n\ndef test_decompose_action_id_generic_error():\n    action_run_id = \"namespace.job.1234.action\"\n    paasta_cluster = \"fake_cluster\"\n    with mock.patch(\"builtins.open\", mock.mock_open(read_data=\"some_data\")), mock.patch(\n        \"yaml.safe_load\", side_effect=Exception\n    ):\n        namespace, job_name, run_num, action = decompose_action_id(action_run_id, paasta_cluster)\n        assert namespace == \"namespace\"\n        assert job_name == \"job\"\n        assert run_num == \"1234\"\n        assert action == \"action\"\n\n\ndef test_decompose_action_id_service_not_found():\n    action_run_id = \"namespace.job.1234.action\"\n    paasta_cluster = \"fake_cluster\"\n    config_content = \"\"\"\n    job:\n        actions:\n            action:\n                command: \"sleep 10\"\n    \"\"\"\n    with mock.patch(\"builtins.open\", mock.mock_open(read_data=config_content)), mock.patch(\n        \"yaml.safe_load\", return_value=yaml.safe_load(config_content)\n    ):\n        namespace, job_name, run_num, action = decompose_action_id(action_run_id, paasta_cluster)\n        assert namespace == \"namespace\"\n        assert job_name == \"job\"\n        assert run_num == \"1234\"\n        assert action == \"action\"\n"
  },
  {
    "path": "tests/utils/observer_test.py",
    "content": "from unittest import mock\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests.assertions import assert_length\nfrom tron.utils.observer import Observable\nfrom tron.utils.observer import Observer\n\n\nclass TestObservable(TestCase):\n    @setup\n    def setup_observer(self):\n        self.obs = Observable()\n\n    def test_attach(self):\n        def func():\n            return 1\n\n        self.obs.attach(\"a\", func)\n        assert_equal(len(self.obs._observers), 1)\n        assert_equal(self.obs._observers[\"a\"], [func])\n\n    def test_listen_seq(self):\n        def func():\n            return 1\n\n        self.obs.attach([\"a\", \"b\"], func)\n        assert_equal(len(self.obs._observers), 2)\n        assert_equal(self.obs._observers[\"a\"], [func])\n        assert_equal(self.obs._observers[\"b\"], [func])\n\n    def test_notify(self):\n        handler = mock.MagicMock()\n        self.obs.attach([\"a\", \"b\"], handler)\n        self.obs.notify(\"a\")\n        assert_equal(len(handler.handler.mock_calls), 1)\n        self.obs.notify(\"b\")\n        assert_equal(len(handler.handler.mock_calls), 2)\n\n\nclass TestObserverClear(TestCase):\n    @setup\n    def setup_observer(self):\n        self.obs = Observable()\n\n        def func():\n            return 1\n\n        self.obs.attach(\"a\", func)\n        self.obs.attach(\"b\", func)\n        self.obs.attach(True, func)\n        self.obs.attach([\"a\", \"b\"], func)\n\n    def test_clear_listeners_all(self):\n        self.obs.clear_observers()\n        assert_equal(len(self.obs._observers), 0)\n\n    def test_clear_listeners_some(self):\n        self.obs.clear_observers(\"a\")\n        assert_equal(len(self.obs._observers), 2)\n        assert_equal(set(self.obs._observers.keys()), {True, \"b\"})\n\n    def test_remove_observer_none(self):\n        def observer():\n            return 2\n\n        self.obs.remove_observer(observer)\n        assert_equal(set(self.obs._observers.keys()), {True, \"a\", \"b\"})\n        assert_length(self.obs._observers[\"a\"], 2)\n        assert_length(self.obs._observers[\"b\"], 2)\n        assert_length(self.obs._observers[True], 1)\n\n    def test_remove_observer(self):\n        def observer():\n            return 2\n\n        self.obs.attach(\"a\", observer)\n        self.obs.attach(\"c\", observer)\n        self.obs.remove_observer(observer)\n        assert_length(self.obs._observers[\"a\"], 2)\n        assert_length(self.obs._observers[\"b\"], 2)\n        assert_length(self.obs._observers[True], 1)\n        assert_length(self.obs._observers[\"c\"], 0)\n\n\nclass MockObserver(Observer):\n    def __init__(self, obs, event):\n        self.obs = obs\n        self.event = event\n        self.watch(obs, event)\n        self.has_watched = 0\n        self.event_data = None\n\n    def handler(self, obs, event, event_data):\n        assert_equal(obs, self.obs)\n        assert_equal(event, self.event)\n        self.has_watched += 1\n        self.event_data = event_data\n\n\nclass TestObserver(TestCase):\n    @setup\n    def setup_observer(self):\n        self.obs = Observable()\n\n    def test_watch(self):\n        event = \"FIVE\"\n        handler = MockObserver(self.obs, event)\n\n        self.obs.notify(event)\n        assert_equal(handler.has_watched, 1)\n        assert handler.event_data is None\n        self.obs.notify(\"other event\")\n        assert_equal(handler.has_watched, 1)\n        
self.obs.notify(event, \"event_data\")\n        assert_equal(handler.has_watched, 2)\n        assert handler.event_data == \"event_data\"\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/utils/proxy_test.py",
    "content": "from testifycompat import assert_equal\nfrom testifycompat import assert_in\nfrom testifycompat import assert_raises\nfrom testifycompat import run\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.utils.proxy import AttributeProxy\nfrom tron.utils.proxy import CollectionProxy\n\n\nclass DummyTarget:\n    def __init__(self, v):\n        self.v = v\n\n    def foo(self):\n        return self.v\n\n    @property\n    def not_foo(self):\n        return not self.v\n\n    def equals(self, b, sometimes=False):\n        if sometimes:\n            return \"sometimes\"\n        return self.v == b\n\n\nclass DummyObject:\n    def __init__(self, proxy):\n        self.proxy = proxy\n\n    def __getattr__(self, item):\n        return self.proxy.perform(item)\n\n\nclass TestCollectionProxy(TestCase):\n    @setup\n    def setup_proxy(self):\n        self.target_list = [DummyTarget(1), DummyTarget(2), DummyTarget(0)]\n        self.proxy = CollectionProxy(\n            lambda: self.target_list,\n            [\n                (\"foo\", any, True),\n                (\"not_foo\", all, False),\n                (\"equals\", lambda a: list(a), True),\n            ],\n        )\n        self.dummy = DummyObject(self.proxy)\n\n    def test_add(self):\n        self.proxy.add(\"foo\", any, True)\n        assert_equal(self.proxy._defs[\"foo\"], (any, True))\n\n    def test_perform(self):\n        assert self.dummy.foo()\n        assert not self.dummy.not_foo\n\n    def test_perform_not_defined(self):\n        assert_raises(AttributeError, self.dummy.proxy.perform, \"bar\")\n\n    def test_perform_with_params(self):\n        assert_equal(self.proxy.perform(\"equals\")(2), [False, True, False])\n        sometimes = [\"sometimes\"] * 3\n        assert_equal(\n            self.proxy.perform(\"equals\")(3, sometimes=True),\n            sometimes,\n        )\n\n\nclass TestAttributeProxy(TestCase):\n    @setup\n    def setup_proxy(self):\n        self.target = DummyTarget(1)\n        self.proxy = AttributeProxy(self.target, [\"foo\", \"not_foo\"])\n        self.dummy = DummyObject(self.proxy)\n\n    def test_add(self):\n        self.proxy.add(\"bar\")\n        assert_in(\"bar\", self.proxy._attributes)\n\n    def test_perform(self):\n        assert_equal(self.dummy.foo(), 1)\n        assert_equal(self.dummy.not_foo, False)\n\n    def test_perform_not_defined(self):\n        assert_raises(AttributeError, self.dummy.proxy.perform, \"zzz\")\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tests/utils/shortOutputTest.txt",
    "content": "{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1\",\"timestamp\":\"2022-08-12T13:30:04.686637906Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"5\",\"timestamp\":\"2022-08-12T13:30:04.686730242Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"2\",\"timestamp\":\"2022-08-12T13:30:04.686653695Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"3\",\"timestamp\":\"2022-08-12T13:30:04.68670797Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"6\",\"timestamp\":\"2022-08-12T13:30:04.686799509Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"4\",\"timestamp\":\"2022-08-12T13:30:04.686710726Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"7\",\"timestamp\":\"2022-08-12T13:30:04.68702548Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"8\",\"timestamp\":\"2022-08-12T13:30:04.687027597Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"9\",\"timestamp\":\"2022-08-12T13:30:04.687077736Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instanc
e\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"10\",\"timestamp\":\"2022-08-12T13:30:04.687161226Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"11\",\"timestamp\":\"2022-08-12T13:30:04.687273728Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"12\",\"timestamp\":\"2022-08-12T13:30:04.687277071Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"13\",\"timestamp\":\"2022-08-12T13:30:04.687306184Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"14\",\"timestamp\":\"2022-08-12T13:30:04.687341593Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"15\",\"timestamp\":\"2022-08-12T13:30:04.687488029Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"16\",\"timestamp\":\"2022-08-12T13:30:04.687489495Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"17\",\"timestamp\":\"2022-08-12T13:30:04.687502175Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"18\",\"timestamp\":\"2022-08-12T13:30:04.687542498Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"21\",\"timestamp\":\"2022-08-12T13:30:04.687638635Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.
foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"19\",\"timestamp\":\"2022-08-12T13:30:04.687618198Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"22\",\"timestamp\":\"2022-08-12T13:30:04.687670676Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"20\",\"timestamp\":\"2022-08-12T13:30:04.687620336Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"23\",\"timestamp\":\"2022-08-12T13:30:04.687813739Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"24\",\"timestamp\":\"2022-08-12T13:30:04.687817306Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"25\",\"timestamp\":\"2022-08-12T13:30:04.687839161Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"26\",\"timestamp\":\"2022-08-12T13:30:04.687878011Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"27\",\"timestamp\":\"2022-08-12T13:30:04.688001387Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"28\",\"timestamp\":\"2022-08-12T13:30:04.688003044Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10
-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"29\",\"timestamp\":\"2022-08-12T13:30:04.688014036Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"30\",\"timestamp\":\"2022-08-12T13:30:04.688051272Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"31\",\"timestamp\":\"2022-08-12T13:30:04.688126709Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"32\",\"timestamp\":\"2022-08-12T13:30:04.688128322Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"33\",\"timestamp\":\"2022-08-12T13:30:04.688139583Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"34\",\"timestamp\":\"2022-08-12T13:30:04.68816411Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"35\",\"timestamp\":\"2022-08-12T13:30:04.688234038Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"36\",\"timestamp\":\"2022-08-12T13:30:04.688235549Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"37\",\"timestamp\":\"2022-08-12T13:30:04.68824796Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"38\",\"timestamp\":\"2022-08-12T13:30:04.688278753Z\",\"pod_name\":\"compute-infr
a-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"39\",\"timestamp\":\"2022-08-12T13:30:04.688332263Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"40\",\"timestamp\":\"2022-08-12T13:30:04.688333783Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"41\",\"timestamp\":\"2022-08-12T13:30:04.688349331Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"42\",\"timestamp\":\"2022-08-12T13:30:04.688377769Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"43\",\"timestamp\":\"2022-08-12T13:30:04.688437161Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"44\",\"timestamp\":\"2022-08-12T13:30:04.688439485Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"45\",\"timestamp\":\"2022-08-12T13:30:04.688456515Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"46\",\"timestamp\":\"2022-08-12T13:30:04.68848179Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"47\",\"timestamp\":\"2022-08-12T13:30:04.688559693Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_na
me\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"48\",\"timestamp\":\"2022-08-12T13:30:04.688561281Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"53\",\"timestamp\":\"2022-08-12T13:30:04.688689148Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"54\",\"timestamp\":\"2022-08-12T13:30:04.688714557Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"55\",\"timestamp\":\"2022-08-12T13:30:04.688860562Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"49\",\"timestamp\":\"2022-08-12T13:30:04.688573177Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"56\",\"timestamp\":\"2022-08-12T13:30:04.68886246Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"50\",\"timestamp\":\"2022-08-12T13:30:04.688606432Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"51\",\"timestamp\":\"2022-08-12T13:30:04.688673085Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"52\",\"timestamp\":\"2022-08-12T13:30:04.688675724Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"57\",\"timestamp\":\"2022-08-12T13:30:04.68887522
7Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"58\",\"timestamp\":\"2022-08-12T13:30:04.688930281Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"59\",\"timestamp\":\"2022-08-12T13:30:04.688975956Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"60\",\"timestamp\":\"2022-08-12T13:30:04.688986533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"61\",\"timestamp\":\"2022-08-12T13:30:04.688994591Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"62\",\"timestamp\":\"2022-08-12T13:30:04.689021591Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"63\",\"timestamp\":\"2022-08-12T13:30:04.689171143Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"64\",\"timestamp\":\"2022-08-12T13:30:04.689173043Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"65\",\"timestamp\":\"2022-08-12T13:30:04.68918868Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"66\",\"timestamp\":\"2022-08-12T13:30:04.689231633Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\"
:\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"67\",\"timestamp\":\"2022-08-12T13:30:04.689332791Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"68\",\"timestamp\":\"2022-08-12T13:30:04.689334352Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"69\",\"timestamp\":\"2022-08-12T13:30:04.689345198Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"70\",\"timestamp\":\"2022-08-12T13:30:04.68937079Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"71\",\"timestamp\":\"2022-08-12T13:30:04.689463921Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"77\",\"timestamp\":\"2022-08-12T13:30:04.689602894Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"72\",\"timestamp\":\"2022-08-12T13:30:04.689466356Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"78\",\"timestamp\":\"2022-08-12T13:30:04.68963501Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"79\",\"timestamp\":\"2022-08-12T13:30:04.689700114Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"80\",\"timestamp\"
:\"2022-08-12T13:30:04.689702805Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"73\",\"timestamp\":\"2022-08-12T13:30:04.689484797Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"74\",\"timestamp\":\"2022-08-12T13:30:04.689509529Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"75\",\"timestamp\":\"2022-08-12T13:30:04.689590003Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"76\",\"timestamp\":\"2022-08-12T13:30:04.689591492Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"85\",\"timestamp\":\"2022-08-12T13:30:04.689884469Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"81\",\"timestamp\":\"2022-08-12T13:30:04.689718471Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"86\",\"timestamp\":\"2022-08-12T13:30:04.689930036Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"82\",\"timestamp\":\"2022-08-12T13:30:04.689755218Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"87\",\"timestamp\":\"2022-08-12T13:30:04.690078096Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"compone
nt\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"83\",\"timestamp\":\"2022-08-12T13:30:04.689868169Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"89\",\"timestamp\":\"2022-08-12T13:30:04.690121194Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"88\",\"timestamp\":\"2022-08-12T13:30:04.690081266Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"84\",\"timestamp\":\"2022-08-12T13:30:04.689870538Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"90\",\"timestamp\":\"2022-08-12T13:30:04.690123522Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"93\",\"timestamp\":\"2022-08-12T13:30:04.690276406Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"91\",\"timestamp\":\"2022-08-12T13:30:04.690263137Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"92\",\"timestamp\":\"2022-08-12T13:30:04.690264843Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"97\",\"timestamp\":\"2022-08-12T13:30:04.690407214Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\
",\"message\":\"101\",\"timestamp\":\"2022-08-12T13:30:04.690536078Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"98\",\"timestamp\":\"2022-08-12T13:30:04.690433197Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"102\",\"timestamp\":\"2022-08-12T13:30:04.690560435Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"99\",\"timestamp\":\"2022-08-12T13:30:04.690522808Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"103\",\"timestamp\":\"2022-08-12T13:30:04.690633135Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"100\",\"timestamp\":\"2022-08-12T13:30:04.690524308Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"104\",\"timestamp\":\"2022-08-12T13:30:04.690634614Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"94\",\"timestamp\":\"2022-08-12T13:30:04.690304932Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"95\",\"timestamp\":\"2022-08-12T13:30:04.690392724Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"96\",\"timestamp\":\"2022-08-12T13:30:04.690394266Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}
\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"105\",\"timestamp\":\"2022-08-12T13:30:04.69064582Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"106\",\"timestamp\":\"2022-08-12T13:30:04.690669619Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"107\",\"timestamp\":\"2022-08-12T13:30:04.690753237Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"108\",\"timestamp\":\"2022-08-12T13:30:04.690755122Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"109\",\"timestamp\":\"2022-08-12T13:30:04.690772753Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"110\",\"timestamp\":\"2022-08-12T13:30:04.69080219Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"111\",\"timestamp\":\"2022-08-12T13:30:04.690877097Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"112\",\"timestamp\":\"2022-08-12T13:30:04.690878543Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"113\",\"timestamp\":\"2022-08-12T13:30:04.690893601Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"ins
tance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"114\",\"timestamp\":\"2022-08-12T13:30:04.690919509Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"117\",\"timestamp\":\"2022-08-12T13:30:04.691041068Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"118\",\"timestamp\":\"2022-08-12T13:30:04.691063975Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"119\",\"timestamp\":\"2022-08-12T13:30:04.691140054Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"129\",\"timestamp\":\"2022-08-12T13:30:04.691387414Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"120\",\"timestamp\":\"2022-08-12T13:30:04.691141565Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"130\",\"timestamp\":\"2022-08-12T13:30:04.691410889Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"131\",\"timestamp\":\"2022-08-12T13:30:04.691497238Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"132\",\"timestamp\":\"2022-08-12T13:30:04.691498657Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"125\",\"timestamp\":\"2022-08-12T13:30:04.691282187Z\",\"pod_name\":\"compute-infra-test-service.tes
t--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"126\",\"timestamp\":\"2022-08-12T13:30:04.691305594Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"121\",\"timestamp\":\"2022-08-12T13:30:04.691153235Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"127\",\"timestamp\":\"2022-08-12T13:30:04.691372941Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"122\",\"timestamp\":\"2022-08-12T13:30:04.691176001Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"128\",\"timestamp\":\"2022-08-12T13:30:04.69137442Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"123\",\"timestamp\":\"2022-08-12T13:30:04.691270038Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"124\",\"timestamp\":\"2022-08-12T13:30:04.691271381Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"115\",\"timestamp\":\"2022-08-12T13:30:04.691026241Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"116\",\"timestamp\":\"2022-08-12T13:30:04.691028254Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"ma
in\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"133\",\"timestamp\":\"2022-08-12T13:30:04.691509865Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"134\",\"timestamp\":\"2022-08-12T13:30:04.691531912Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"135\",\"timestamp\":\"2022-08-12T13:30:04.691603332Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"136\",\"timestamp\":\"2022-08-12T13:30:04.691604736Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"137\",\"timestamp\":\"2022-08-12T13:30:04.691618847Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"138\",\"timestamp\":\"2022-08-12T13:30:04.691642663Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"139\",\"timestamp\":\"2022-08-12T13:30:04.691731628Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"140\",\"timestamp\":\"2022-08-12T13:30:04.691732919Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"141\",\"timestamp\":\"2022-08-12T13:30:04.691743131Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"142\",\"timestamp\":\"2022-08-12T13:30:04.691765
184Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"143\",\"timestamp\":\"2022-08-12T13:30:04.691832098Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"144\",\"timestamp\":\"2022-08-12T13:30:04.691834198Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"145\",\"timestamp\":\"2022-08-12T13:30:04.691845287Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"146\",\"timestamp\":\"2022-08-12T13:30:04.69187195Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"147\",\"timestamp\":\"2022-08-12T13:30:04.691956577Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"148\",\"timestamp\":\"2022-08-12T13:30:04.691958009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"149\",\"timestamp\":\"2022-08-12T13:30:04.691969147Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"150\",\"timestamp\":\"2022-08-12T13:30:04.69199273Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"151\",\"timestamp\":\"2022-08-12T13:30:04.692062796Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"cont
ainer_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"152\",\"timestamp\":\"2022-08-12T13:30:04.692064213Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"153\",\"timestamp\":\"2022-08-12T13:30:04.692076476Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"157\",\"timestamp\":\"2022-08-12T13:30:04.692195395Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"154\",\"timestamp\":\"2022-08-12T13:30:04.692100205Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"158\",\"timestamp\":\"2022-08-12T13:30:04.692241973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"155\",\"timestamp\":\"2022-08-12T13:30:04.692178771Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"159\",\"timestamp\":\"2022-08-12T13:30:04.692298151Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"156\",\"timestamp\":\"2022-08-12T13:30:04.692180225Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"160\",\"timestamp\":\"2022-08-12T13:30:04.692301626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\"
:\"161\",\"timestamp\":\"2022-08-12T13:30:04.692317326Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"176\",\"timestamp\":\"2022-08-12T13:30:04.692875814Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"162\",\"timestamp\":\"2022-08-12T13:30:04.692345913Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"172\",\"timestamp\":\"2022-08-12T13:30:04.692728279Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"163\",\"timestamp\":\"2022-08-12T13:30:04.69242628Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"173\",\"timestamp\":\"2022-08-12T13:30:04.692741323Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"164\",\"timestamp\":\"2022-08-12T13:30:04.692427856Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"174\",\"timestamp\":\"2022-08-12T13:30:04.692862163Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"175\",\"timestamp\":\"2022-08-12T13:30:04.692863706Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"169\",\"timestamp\":\"2022-08-12T13:30:04.69266267Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"clust
er\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"170\",\"timestamp\":\"2022-08-12T13:30:04.692677926Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"171\",\"timestamp\":\"2022-08-12T13:30:04.692726759Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"165\",\"timestamp\":\"2022-08-12T13:30:04.69243928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"166\",\"timestamp\":\"2022-08-12T13:30:04.692466086Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"167\",\"timestamp\":\"2022-08-12T13:30:04.692537679Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"177\",\"timestamp\":\"2022-08-12T13:30:04.692907917Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"168\",\"timestamp\":\"2022-08-12T13:30:04.692572346Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"178\",\"timestamp\":\"2022-08-12T13:30:04.692983835Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"179\",\"timestamp\":\"2022-08-12T13:30:04.692985498Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\
"test_output.foo\",\"level\":\"debug\",\"message\":\"180\",\"timestamp\":\"2022-08-12T13:30:04.69299595Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"181\",\"timestamp\":\"2022-08-12T13:30:04.693022057Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"182\",\"timestamp\":\"2022-08-12T13:30:04.693098237Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"184\",\"timestamp\":\"2022-08-12T13:30:04.693111014Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"183\",\"timestamp\":\"2022-08-12T13:30:04.693100161Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"192\",\"timestamp\":\"2022-08-12T13:30:04.693415579Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"193\",\"timestamp\":\"2022-08-12T13:30:04.693417373Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"196\",\"timestamp\":\"2022-08-12T13:30:04.693524935Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"194\",\"timestamp\":\"2022-08-12T13:30:04.693510485Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"197\",\"timestamp\":\"2022-08-12T13:30:04.693553832Z\",\"pod_name\":\"compute-infra-test-service.test--output.
228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"195\",\"timestamp\":\"2022-08-12T13:30:04.693512123Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"198\",\"timestamp\":\"2022-08-12T13:30:04.693635481Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"199\",\"timestamp\":\"2022-08-12T13:30:04.693637098Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"188\",\"timestamp\":\"2022-08-12T13:30:04.693243745Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"189\",\"timestamp\":\"2022-08-12T13:30:04.69328921Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"190\",\"timestamp\":\"2022-08-12T13:30:04.693356023Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"185\",\"timestamp\":\"2022-08-12T13:30:04.693141543Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"191\",\"timestamp\":\"2022-08-12T13:30:04.693357893Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"186\",\"timestamp\":\"2022-08-12T13:30:04.693224619Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hos
tname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"200\",\"timestamp\":\"2022-08-12T13:30:04.693648973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"187\",\"timestamp\":\"2022-08-12T13:30:04.693226683Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"201\",\"timestamp\":\"2022-08-12T13:30:04.693679566Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"202\",\"timestamp\":\"2022-08-12T13:30:04.693749798Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"203\",\"timestamp\":\"2022-08-12T13:30:04.69375125Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"204\",\"timestamp\":\"2022-08-12T13:30:04.693762378Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"205\",\"timestamp\":\"2022-08-12T13:30:04.693787008Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"206\",\"timestamp\":\"2022-08-12T13:30:04.693847183Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"208\",\"timestamp\":\"2022-08-12T13:30:04.693859593Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"209\",\"timestamp\":\"2022-08-12T13:30:04.693885136Z\",\"po
d_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"216\",\"timestamp\":\"2022-08-12T13:30:04.694060214Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"210\",\"timestamp\":\"2022-08-12T13:30:04.693945469Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"217\",\"timestamp\":\"2022-08-12T13:30:04.694083789Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"211\",\"timestamp\":\"2022-08-12T13:30:04.693947282Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"218\",\"timestamp\":\"2022-08-12T13:30:04.694145516Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"219\",\"timestamp\":\"2022-08-12T13:30:04.694146949Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"212\",\"timestamp\":\"2022-08-12T13:30:04.693963187Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"213\",\"timestamp\":\"2022-08-12T13:30:04.693987556Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"214\",\"timestamp\":\"2022-08-12T13:30:04.694046409Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\
":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"207\",\"timestamp\":\"2022-08-12T13:30:04.693848965Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"215\",\"timestamp\":\"2022-08-12T13:30:04.694047924Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"220\",\"timestamp\":\"2022-08-12T13:30:04.694158589Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"221\",\"timestamp\":\"2022-08-12T13:30:04.69418191Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"222\",\"timestamp\":\"2022-08-12T13:30:04.694253672Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"223\",\"timestamp\":\"2022-08-12T13:30:04.694255467Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"224\",\"timestamp\":\"2022-08-12T13:30:04.69426637Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"225\",\"timestamp\":\"2022-08-12T13:30:04.694292993Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"226\",\"timestamp\":\"2022-08-12T13:30:04.694349649Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"228\",\"
timestamp\":\"2022-08-12T13:30:04.694361903Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"227\",\"timestamp\":\"2022-08-12T13:30:04.694351011Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"229\",\"timestamp\":\"2022-08-12T13:30:04.694384807Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"230\",\"timestamp\":\"2022-08-12T13:30:04.694447415Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"231\",\"timestamp\":\"2022-08-12T13:30:04.69444895Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"240\",\"timestamp\":\"2022-08-12T13:30:04.69465744Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"241\",\"timestamp\":\"2022-08-12T13:30:04.694683152Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"236\",\"timestamp\":\"2022-08-12T13:30:04.694558979Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"242\",\"timestamp\":\"2022-08-12T13:30:04.694743634Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"237\",\"timestamp\":\"2022-08-12T13:30:04.69458339Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infra
stage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"243\",\"timestamp\":\"2022-08-12T13:30:04.694744993Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"238\",\"timestamp\":\"2022-08-12T13:30:04.694644799Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"239\",\"timestamp\":\"2022-08-12T13:30:04.694646165Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"232\",\"timestamp\":\"2022-08-12T13:30:04.694460741Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"233\",\"timestamp\":\"2022-08-12T13:30:04.694484631Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"234\",\"timestamp\":\"2022-08-12T13:30:04.6945459Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"235\",\"timestamp\":\"2022-08-12T13:30:04.694547231Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"244\",\"timestamp\":\"2022-08-12T13:30:04.694757562Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"245\",\"timestamp\":\"2022-08-12T13:30:04.694781236Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.
foo\",\"level\":\"debug\",\"message\":\"246\",\"timestamp\":\"2022-08-12T13:30:04.694842613Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"247\",\"timestamp\":\"2022-08-12T13:30:04.694844304Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"248\",\"timestamp\":\"2022-08-12T13:30:04.694855437Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"249\",\"timestamp\":\"2022-08-12T13:30:04.694879208Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"250\",\"timestamp\":\"2022-08-12T13:30:04.695113263Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"251\",\"timestamp\":\"2022-08-12T13:30:04.695114808Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"252\",\"timestamp\":\"2022-08-12T13:30:04.695126598Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"253\",\"timestamp\":\"2022-08-12T13:30:04.695153021Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"254\",\"timestamp\":\"2022-08-12T13:30:04.695215823Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"260\",\"timestamp\":\"2022-08-12T13:30:04.695334059Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawp
vt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"255\",\"timestamp\":\"2022-08-12T13:30:04.695217257Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"261\",\"timestamp\":\"2022-08-12T13:30:04.69535803Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"262\",\"timestamp\":\"2022-08-12T13:30:04.695419545Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"256\",\"timestamp\":\"2022-08-12T13:30:04.695228423Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"263\",\"timestamp\":\"2022-08-12T13:30:04.695420919Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"257\",\"timestamp\":\"2022-08-12T13:30:04.695252559Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"258\",\"timestamp\":\"2022-08-12T13:30:04.695322097Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"259\",\"timestamp\":\"2022-08-12T13:30:04.695323489Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"264\",\"timestamp\":\"2022-08-12T13:30:04.695432895Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10
-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"265\",\"timestamp\":\"2022-08-12T13:30:04.69545634Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"266\",\"timestamp\":\"2022-08-12T13:30:04.695518181Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"268\",\"timestamp\":\"2022-08-12T13:30:04.695531492Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"267\",\"timestamp\":\"2022-08-12T13:30:04.69551958Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"269\",\"timestamp\":\"2022-08-12T13:30:04.695555392Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"270\",\"timestamp\":\"2022-08-12T13:30:04.695618372Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"271\",\"timestamp\":\"2022-08-12T13:30:04.695619772Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"272\",\"timestamp\":\"2022-08-12T13:30:04.695630522Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"273\",\"timestamp\":\"2022-08-12T13:30:04.695655147Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"274\",\"timestamp\":\"2022-08-12T13:30:04.695714559Z\",\"pod_name\":\"co
mpute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"275\",\"timestamp\":\"2022-08-12T13:30:04.695715969Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"280\",\"timestamp\":\"2022-08-12T13:30:04.695825707Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"281\",\"timestamp\":\"2022-08-12T13:30:04.695849009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"282\",\"timestamp\":\"2022-08-12T13:30:04.695911493Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"276\",\"timestamp\":\"2022-08-12T13:30:04.695728339Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"283\",\"timestamp\":\"2022-08-12T13:30:04.695912958Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"277\",\"timestamp\":\"2022-08-12T13:30:04.695751608Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"278\",\"timestamp\":\"2022-08-12T13:30:04.695814162Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"279\",\"timestamp\":\"2022-08-12T13:30:04.69581555Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f
5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"284\",\"timestamp\":\"2022-08-12T13:30:04.695924267Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"285\",\"timestamp\":\"2022-08-12T13:30:04.695947645Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"286\",\"timestamp\":\"2022-08-12T13:30:04.696008189Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"287\",\"timestamp\":\"2022-08-12T13:30:04.696009573Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"288\",\"timestamp\":\"2022-08-12T13:30:04.696020269Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"289\",\"timestamp\":\"2022-08-12T13:30:04.69604242Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"290\",\"timestamp\":\"2022-08-12T13:30:04.696093545Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"291\",\"timestamp\":\"2022-08-12T13:30:04.696095061Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"292\",\"timestamp\":\"2022-08-12T13:30:04.696105202Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"293\",\"timestamp\":\
"2022-08-12T13:30:04.69612826Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"294\",\"timestamp\":\"2022-08-12T13:30:04.696195685Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"296\",\"timestamp\":\"2022-08-12T13:30:04.696227173Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"295\",\"timestamp\":\"2022-08-12T13:30:04.696197817Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"297\",\"timestamp\":\"2022-08-12T13:30:04.696242784Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"298\",\"timestamp\":\"2022-08-12T13:30:04.69638203Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"299\",\"timestamp\":\"2022-08-12T13:30:04.69638378Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"300\",\"timestamp\":\"2022-08-12T13:30:04.696398687Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"301\",\"timestamp\":\"2022-08-12T13:30:04.696430352Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"302\",\"timestamp\":\"2022-08-12T13:30:04.696518311Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"com
ponent\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"303\",\"timestamp\":\"2022-08-12T13:30:04.69652062Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"304\",\"timestamp\":\"2022-08-12T13:30:04.696540129Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"305\",\"timestamp\":\"2022-08-12T13:30:04.696590149Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"306\",\"timestamp\":\"2022-08-12T13:30:04.696660475Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"307\",\"timestamp\":\"2022-08-12T13:30:04.696662012Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"308\",\"timestamp\":\"2022-08-12T13:30:04.696690796Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"309\",\"timestamp\":\"2022-08-12T13:30:04.696730877Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"310\",\"timestamp\":\"2022-08-12T13:30:04.696830771Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"311\",\"timestamp\":\"2022-08-12T13:30:04.696832305Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"leve
l\":\"debug\",\"message\":\"312\",\"timestamp\":\"2022-08-12T13:30:04.696845426Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"313\",\"timestamp\":\"2022-08-12T13:30:04.696873105Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"314\",\"timestamp\":\"2022-08-12T13:30:04.69695739Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"315\",\"timestamp\":\"2022-08-12T13:30:04.696958961Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"316\",\"timestamp\":\"2022-08-12T13:30:04.696972666Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"317\",\"timestamp\":\"2022-08-12T13:30:04.697001155Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"318\",\"timestamp\":\"2022-08-12T13:30:04.697072945Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"319\",\"timestamp\":\"2022-08-12T13:30:04.697074408Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"320\",\"timestamp\":\"2022-08-12T13:30:04.697085567Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"321\",\"timestamp\":\"2022-08-12T13:30:04.697108718Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_r
un_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"322\",\"timestamp\":\"2022-08-12T13:30:04.697166986Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"323\",\"timestamp\":\"2022-08-12T13:30:04.697168337Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"324\",\"timestamp\":\"2022-08-12T13:30:04.697180091Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"325\",\"timestamp\":\"2022-08-12T13:30:04.697202922Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"326\",\"timestamp\":\"2022-08-12T13:30:04.697268647Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"327\",\"timestamp\":\"2022-08-12T13:30:04.697269994Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"328\",\"timestamp\":\"2022-08-12T13:30:04.697281008Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"329\",\"timestamp\":\"2022-08-12T13:30:04.697304114Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"330\",\"timestamp\":\"2022-08-12T13:30:04.69735311Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-usw
est1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"332\",\"timestamp\":\"2022-08-12T13:30:04.697364732Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"331\",\"timestamp\":\"2022-08-12T13:30:04.697354421Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"333\",\"timestamp\":\"2022-08-12T13:30:04.697387362Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"334\",\"timestamp\":\"2022-08-12T13:30:04.697464404Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"335\",\"timestamp\":\"2022-08-12T13:30:04.69746577Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"336\",\"timestamp\":\"2022-08-12T13:30:04.697476992Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"337\",\"timestamp\":\"2022-08-12T13:30:04.697499001Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"338\",\"timestamp\":\"2022-08-12T13:30:04.697548663Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"339\",\"timestamp\":\"2022-08-12T13:30:04.697550048Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"340\",\"timestamp\":\"2022-08-12T13:30:04.697560534Z\",\"pod_name\":\"compute-infra-
test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"341\",\"timestamp\":\"2022-08-12T13:30:04.69758234Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"342\",\"timestamp\":\"2022-08-12T13:30:04.697637478Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"343\",\"timestamp\":\"2022-08-12T13:30:04.697638783Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"344\",\"timestamp\":\"2022-08-12T13:30:04.697652488Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"345\",\"timestamp\":\"2022-08-12T13:30:04.69767455Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"346\",\"timestamp\":\"2022-08-12T13:30:04.697733548Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"347\",\"timestamp\":\"2022-08-12T13:30:04.697734904Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"348\",\"timestamp\":\"2022-08-12T13:30:04.697749361Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"349\",\"timestamp\":\"2022-08-12T13:30:04.697774665Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"contai
ner_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"350\",\"timestamp\":\"2022-08-12T13:30:04.697829909Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"351\",\"timestamp\":\"2022-08-12T13:30:04.697831278Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"352\",\"timestamp\":\"2022-08-12T13:30:04.697843048Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"353\",\"timestamp\":\"2022-08-12T13:30:04.697865318Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"354\",\"timestamp\":\"2022-08-12T13:30:04.697914563Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"355\",\"timestamp\":\"2022-08-12T13:30:04.697915969Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"356\",\"timestamp\":\"2022-08-12T13:30:04.697926155Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"357\",\"timestamp\":\"2022-08-12T13:30:04.697949141Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"358\",\"timestamp\":\"2022-08-12T13:30:04.697999614Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"359\",\"timestamp\":\"2022-08-12T
13:30:04.698000967Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"360\",\"timestamp\":\"2022-08-12T13:30:04.698010865Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"361\",\"timestamp\":\"2022-08-12T13:30:04.698033605Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"362\",\"timestamp\":\"2022-08-12T13:30:04.698083348Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"363\",\"timestamp\":\"2022-08-12T13:30:04.698084693Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"364\",\"timestamp\":\"2022-08-12T13:30:04.698095857Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"365\",\"timestamp\":\"2022-08-12T13:30:04.698117936Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"366\",\"timestamp\":\"2022-08-12T13:30:04.698168574Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"367\",\"timestamp\":\"2022-08-12T13:30:04.698169972Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"368\",\"timestamp\":\"2022-08-12T13:30:04.698180376Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":
\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"369\",\"timestamp\":\"2022-08-12T13:30:04.698201946Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"370\",\"timestamp\":\"2022-08-12T13:30:04.698254245Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"371\",\"timestamp\":\"2022-08-12T13:30:04.698255635Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"372\",\"timestamp\":\"2022-08-12T13:30:04.698266316Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"373\",\"timestamp\":\"2022-08-12T13:30:04.698287594Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"374\",\"timestamp\":\"2022-08-12T13:30:04.698339099Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"375\",\"timestamp\":\"2022-08-12T13:30:04.698340447Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"376\",\"timestamp\":\"2022-08-12T13:30:04.698352046Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"377\",\"timestamp\":\"2022-08-12T13:30:04.698373786Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"de
bug\",\"message\":\"378\",\"timestamp\":\"2022-08-12T13:30:04.698425574Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"379\",\"timestamp\":\"2022-08-12T13:30:04.698426889Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"380\",\"timestamp\":\"2022-08-12T13:30:04.698438744Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"381\",\"timestamp\":\"2022-08-12T13:30:04.698461298Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"382\",\"timestamp\":\"2022-08-12T13:30:04.698527063Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"383\",\"timestamp\":\"2022-08-12T13:30:04.698528604Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"384\",\"timestamp\":\"2022-08-12T13:30:04.698539973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"385\",\"timestamp\":\"2022-08-12T13:30:04.698562009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"386\",\"timestamp\":\"2022-08-12T13:30:04.698610715Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"387\",\"timestamp\":\"2022-08-12T13:30:04.698612126Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_numb
er\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"388\",\"timestamp\":\"2022-08-12T13:30:04.698623135Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"389\",\"timestamp\":\"2022-08-12T13:30:04.698645389Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"390\",\"timestamp\":\"2022-08-12T13:30:04.698697102Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"391\",\"timestamp\":\"2022-08-12T13:30:04.698698497Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"392\",\"timestamp\":\"2022-08-12T13:30:04.698709277Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"393\",\"timestamp\":\"2022-08-12T13:30:04.698731161Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"394\",\"timestamp\":\"2022-08-12T13:30:04.698783297Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"395\",\"timestamp\":\"2022-08-12T13:30:04.698784691Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"396\",\"timestamp\":\"2022-08-12T13:30:04.698795352Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cd
evc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"397\",\"timestamp\":\"2022-08-12T13:30:04.698817557Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"398\",\"timestamp\":\"2022-08-12T13:30:04.698869718Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"399\",\"timestamp\":\"2022-08-12T13:30:04.698871062Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"400\",\"timestamp\":\"2022-08-12T13:30:04.698881666Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"401\",\"timestamp\":\"2022-08-12T13:30:04.698902766Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"402\",\"timestamp\":\"2022-08-12T13:30:04.698954504Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"403\",\"timestamp\":\"2022-08-12T13:30:04.698955895Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"404\",\"timestamp\":\"2022-08-12T13:30:04.698966515Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"405\",\"timestamp\":\"2022-08-12T13:30:04.698988908Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"406\",\"timestamp\":\"2022-08-12T13:30:04.699042398Z\",\"pod_name\":\"compute-infra-test-
service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"407\",\"timestamp\":\"2022-08-12T13:30:04.699043801Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"408\",\"timestamp\":\"2022-08-12T13:30:04.699055661Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"409\",\"timestamp\":\"2022-08-12T13:30:04.699107001Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"410\",\"timestamp\":\"2022-08-12T13:30:04.699186359Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"411\",\"timestamp\":\"2022-08-12T13:30:04.699197581Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"412\",\"timestamp\":\"2022-08-12T13:30:04.699205971Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"413\",\"timestamp\":\"2022-08-12T13:30:04.699217868Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"414\",\"timestamp\":\"2022-08-12T13:30:04.699302302Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"415\",\"timestamp\":\"2022-08-12T13:30:04.699304503Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container
_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"416\",\"timestamp\":\"2022-08-12T13:30:04.699317968Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"417\",\"timestamp\":\"2022-08-12T13:30:04.699346817Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"418\",\"timestamp\":\"2022-08-12T13:30:04.699428241Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"419\",\"timestamp\":\"2022-08-12T13:30:04.69942974Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"420\",\"timestamp\":\"2022-08-12T13:30:04.699441257Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"421\",\"timestamp\":\"2022-08-12T13:30:04.699464746Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"422\",\"timestamp\":\"2022-08-12T13:30:04.699548025Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"423\",\"timestamp\":\"2022-08-12T13:30:04.699549752Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"424\",\"timestamp\":\"2022-08-12T13:30:04.699560608Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"425\",\"timestamp\":\"2022-08-12T13:3
0:04.699584562Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"426\",\"timestamp\":\"2022-08-12T13:30:04.699672833Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"427\",\"timestamp\":\"2022-08-12T13:30:04.699674218Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"428\",\"timestamp\":\"2022-08-12T13:30:04.699686007Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"429\",\"timestamp\":\"2022-08-12T13:30:04.699709147Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"430\",\"timestamp\":\"2022-08-12T13:30:04.699790251Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"431\",\"timestamp\":\"2022-08-12T13:30:04.69979235Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"432\",\"timestamp\":\"2022-08-12T13:30:04.699809916Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"433\",\"timestamp\":\"2022-08-12T13:30:04.699856973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"434\",\"timestamp\":\"2022-08-12T13:30:04.699917961Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"std
out\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"435\",\"timestamp\":\"2022-08-12T13:30:04.699925398Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"436\",\"timestamp\":\"2022-08-12T13:30:04.699959003Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"437\",\"timestamp\":\"2022-08-12T13:30:04.699960449Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"438\",\"timestamp\":\"2022-08-12T13:30:04.700031357Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"439\",\"timestamp\":\"2022-08-12T13:30:04.700032901Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"440\",\"timestamp\":\"2022-08-12T13:30:04.700045687Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"441\",\"timestamp\":\"2022-08-12T13:30:04.700070031Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"442\",\"timestamp\":\"2022-08-12T13:30:04.700146267Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"443\",\"timestamp\":\"2022-08-12T13:30:04.700147735Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\"
,\"message\":\"444\",\"timestamp\":\"2022-08-12T13:30:04.7001586Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"445\",\"timestamp\":\"2022-08-12T13:30:04.70018244Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"446\",\"timestamp\":\"2022-08-12T13:30:04.700243742Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"447\",\"timestamp\":\"2022-08-12T13:30:04.700245134Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"448\",\"timestamp\":\"2022-08-12T13:30:04.700257598Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"449\",\"timestamp\":\"2022-08-12T13:30:04.700280541Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"450\",\"timestamp\":\"2022-08-12T13:30:04.700343448Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"451\",\"timestamp\":\"2022-08-12T13:30:04.700345049Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"452\",\"timestamp\":\"2022-08-12T13:30:04.700357818Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"453\",\"timestamp\":\"2022-08-12T13:30:04.700382802Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228
}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"454\",\"timestamp\":\"2022-08-12T13:30:04.700450589Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"455\",\"timestamp\":\"2022-08-12T13:30:04.700452059Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"456\",\"timestamp\":\"2022-08-12T13:30:04.700466067Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"457\",\"timestamp\":\"2022-08-12T13:30:04.700490112Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"458\",\"timestamp\":\"2022-08-12T13:30:04.700553243Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"459\",\"timestamp\":\"2022-08-12T13:30:04.700554651Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"460\",\"timestamp\":\"2022-08-12T13:30:04.700567196Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"461\",\"timestamp\":\"2022-08-12T13:30:04.70059108Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"462\",\"timestamp\":\"2022-08-12T13:30:04.70067286Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"in
stance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"463\",\"timestamp\":\"2022-08-12T13:30:04.700674334Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"464\",\"timestamp\":\"2022-08-12T13:30:04.700685415Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"465\",\"timestamp\":\"2022-08-12T13:30:04.700710005Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"466\",\"timestamp\":\"2022-08-12T13:30:04.700767726Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"467\",\"timestamp\":\"2022-08-12T13:30:04.70076945Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"468\",\"timestamp\":\"2022-08-12T13:30:04.700804243Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"469\",\"timestamp\":\"2022-08-12T13:30:04.700827468Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"470\",\"timestamp\":\"2022-08-12T13:30:04.700894527Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"471\",\"timestamp\":\"2022-08-12T13:30:04.700896113Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"472\",\"timestamp\":\"2022-08-12T13:30:04.700907008Z\",\"pod_name\":\"compute-infra-test-service.tes
t--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"473\",\"timestamp\":\"2022-08-12T13:30:04.700931202Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"474\",\"timestamp\":\"2022-08-12T13:30:04.7010021Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"475\",\"timestamp\":\"2022-08-12T13:30:04.701003644Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"476\",\"timestamp\":\"2022-08-12T13:30:04.701014805Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"477\",\"timestamp\":\"2022-08-12T13:30:04.701039215Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"478\",\"timestamp\":\"2022-08-12T13:30:04.70110792Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"479\",\"timestamp\":\"2022-08-12T13:30:04.701109477Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"480\",\"timestamp\":\"2022-08-12T13:30:04.701121995Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"481\",\"timestamp\":\"2022-08-12T13:30:04.701146371Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main
\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"482\",\"timestamp\":\"2022-08-12T13:30:04.701201514Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"483\",\"timestamp\":\"2022-08-12T13:30:04.701202942Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"484\",\"timestamp\":\"2022-08-12T13:30:04.701213855Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"485\",\"timestamp\":\"2022-08-12T13:30:04.701238883Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"486\",\"timestamp\":\"2022-08-12T13:30:04.701298431Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"487\",\"timestamp\":\"2022-08-12T13:30:04.701299928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"488\",\"timestamp\":\"2022-08-12T13:30:04.701310874Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"489\",\"timestamp\":\"2022-08-12T13:30:04.701335722Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"490\",\"timestamp\":\"2022-08-12T13:30:04.701381974Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"491\",\"timestamp\":\"2022-08-12T13:30:04.70138328
Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"492\",\"timestamp\":\"2022-08-12T13:30:04.701395934Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"493\",\"timestamp\":\"2022-08-12T13:30:04.701419614Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"494\",\"timestamp\":\"2022-08-12T13:30:04.701467277Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"495\",\"timestamp\":\"2022-08-12T13:30:04.701468843Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"496\",\"timestamp\":\"2022-08-12T13:30:04.701484172Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"497\",\"timestamp\":\"2022-08-12T13:30:04.70150655Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"498\",\"timestamp\":\"2022-08-12T13:30:04.701561836Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"499\",\"timestamp\":\"2022-08-12T13:30:04.701563363Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"500\",\"timestamp\":\"2022-08-12T13:30:04.70157438Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"contain
er_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"501\",\"timestamp\":\"2022-08-12T13:30:04.70159676Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"502\",\"timestamp\":\"2022-08-12T13:30:04.701645134Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"503\",\"timestamp\":\"2022-08-12T13:30:04.701646504Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"504\",\"timestamp\":\"2022-08-12T13:30:04.701657604Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"505\",\"timestamp\":\"2022-08-12T13:30:04.701679223Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"506\",\"timestamp\":\"2022-08-12T13:30:04.701746374Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"507\",\"timestamp\":\"2022-08-12T13:30:04.701747996Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"508\",\"timestamp\":\"2022-08-12T13:30:04.701771132Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"509\",\"timestamp\":\"2022-08-12T13:30:04.701793976Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"5
10\",\"timestamp\":\"2022-08-12T13:30:04.701857158Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"511\",\"timestamp\":\"2022-08-12T13:30:04.701858648Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"512\",\"timestamp\":\"2022-08-12T13:30:04.701869489Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"513\",\"timestamp\":\"2022-08-12T13:30:04.70189186Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"514\",\"timestamp\":\"2022-08-12T13:30:04.70194361Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"515\",\"timestamp\":\"2022-08-12T13:30:04.701944928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"516\",\"timestamp\":\"2022-08-12T13:30:04.701956241Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"517\",\"timestamp\":\"2022-08-12T13:30:04.7019784Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"518\",\"timestamp\":\"2022-08-12T13:30:04.702037463Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"519\",\"timestamp\":\"2022-08-12T13:30:04.70203889Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"
infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"520\",\"timestamp\":\"2022-08-12T13:30:04.702051432Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"521\",\"timestamp\":\"2022-08-12T13:30:04.702074461Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"522\",\"timestamp\":\"2022-08-12T13:30:04.702129706Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"523\",\"timestamp\":\"2022-08-12T13:30:04.702131224Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"524\",\"timestamp\":\"2022-08-12T13:30:04.702159656Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"525\",\"timestamp\":\"2022-08-12T13:30:04.702161136Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"526\",\"timestamp\":\"2022-08-12T13:30:04.702212905Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"527\",\"timestamp\":\"2022-08-12T13:30:04.702214393Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"528\",\"timestamp\":\"2022-08-12T13:30:04.702225942Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_
output.foo\",\"level\":\"debug\",\"message\":\"529\",\"timestamp\":\"2022-08-12T13:30:04.702250332Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"530\",\"timestamp\":\"2022-08-12T13:30:04.70230389Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"531\",\"timestamp\":\"2022-08-12T13:30:04.702305398Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"532\",\"timestamp\":\"2022-08-12T13:30:04.702315851Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"533\",\"timestamp\":\"2022-08-12T13:30:04.702338206Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"534\",\"timestamp\":\"2022-08-12T13:30:04.70239301Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"535\",\"timestamp\":\"2022-08-12T13:30:04.702394533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"536\",\"timestamp\":\"2022-08-12T13:30:04.702405636Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"537\",\"timestamp\":\"2022-08-12T13:30:04.702427381Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"538\",\"timestamp\":\"2022-08-12T13:30:04.702481535Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo
.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"539\",\"timestamp\":\"2022-08-12T13:30:04.702482826Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"540\",\"timestamp\":\"2022-08-12T13:30:04.702493337Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"541\",\"timestamp\":\"2022-08-12T13:30:04.702515871Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"542\",\"timestamp\":\"2022-08-12T13:30:04.702570853Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"543\",\"timestamp\":\"2022-08-12T13:30:04.70257297Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"544\",\"timestamp\":\"2022-08-12T13:30:04.702584051Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"545\",\"timestamp\":\"2022-08-12T13:30:04.702607414Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"546\",\"timestamp\":\"2022-08-12T13:30:04.702661289Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"547\",\"timestamp\":\"2022-08-12T13:30:04.702662817Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\"
:\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"548\",\"timestamp\":\"2022-08-12T13:30:04.702672982Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"549\",\"timestamp\":\"2022-08-12T13:30:04.702696363Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"550\",\"timestamp\":\"2022-08-12T13:30:04.702745796Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"551\",\"timestamp\":\"2022-08-12T13:30:04.702747264Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"552\",\"timestamp\":\"2022-08-12T13:30:04.70275758Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"553\",\"timestamp\":\"2022-08-12T13:30:04.702780471Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"554\",\"timestamp\":\"2022-08-12T13:30:04.702845903Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"555\",\"timestamp\":\"2022-08-12T13:30:04.70284778Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"556\",\"timestamp\":\"2022-08-12T13:30:04.70286042Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"557\",\"timestamp\":\"2022-08-12T13:30:04.70290041Z\",\"pod_name\":\
"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"558\",\"timestamp\":\"2022-08-12T13:30:04.702953273Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"559\",\"timestamp\":\"2022-08-12T13:30:04.702955295Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"560\",\"timestamp\":\"2022-08-12T13:30:04.702984203Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"561\",\"timestamp\":\"2022-08-12T13:30:04.703046168Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"562\",\"timestamp\":\"2022-08-12T13:30:04.703094441Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"563\",\"timestamp\":\"2022-08-12T13:30:04.703096131Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"564\",\"timestamp\":\"2022-08-12T13:30:04.703108827Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"565\",\"timestamp\":\"2022-08-12T13:30:04.703151795Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"566\",\"timestamp\":\"2022-08-12T13:30:04.703209725Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a
653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"567\",\"timestamp\":\"2022-08-12T13:30:04.703211279Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"568\",\"timestamp\":\"2022-08-12T13:30:04.703225085Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"569\",\"timestamp\":\"2022-08-12T13:30:04.703257313Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"570\",\"timestamp\":\"2022-08-12T13:30:04.703332939Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"571\",\"timestamp\":\"2022-08-12T13:30:04.703334475Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"572\",\"timestamp\":\"2022-08-12T13:30:04.703345816Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"573\",\"timestamp\":\"2022-08-12T13:30:04.703371928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"574\",\"timestamp\":\"2022-08-12T13:30:04.703438519Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"575\",\"timestamp\":\"2022-08-12T13:30:04.703440095Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"576\",\"timestam
p\":\"2022-08-12T13:30:04.703453879Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"577\",\"timestamp\":\"2022-08-12T13:30:04.703482659Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"578\",\"timestamp\":\"2022-08-12T13:30:04.70359583Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"579\",\"timestamp\":\"2022-08-12T13:30:04.703597317Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"580\",\"timestamp\":\"2022-08-12T13:30:04.703609203Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"581\",\"timestamp\":\"2022-08-12T13:30:04.703640093Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"582\",\"timestamp\":\"2022-08-12T13:30:04.703742086Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"583\",\"timestamp\":\"2022-08-12T13:30:04.703743503Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"584\",\"timestamp\":\"2022-08-12T13:30:04.70375489Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"585\",\"timestamp\":\"2022-08-12T13:30:04.703779561Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\"
,\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"586\",\"timestamp\":\"2022-08-12T13:30:04.703890052Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"587\",\"timestamp\":\"2022-08-12T13:30:04.703891779Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"588\",\"timestamp\":\"2022-08-12T13:30:04.703907719Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"589\",\"timestamp\":\"2022-08-12T13:30:04.703953119Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"590\",\"timestamp\":\"2022-08-12T13:30:04.704059216Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"591\",\"timestamp\":\"2022-08-12T13:30:04.704060732Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"592\",\"timestamp\":\"2022-08-12T13:30:04.704072287Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"593\",\"timestamp\":\"2022-08-12T13:30:04.70410801Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"594\",\"timestamp\":\"2022-08-12T13:30:04.704180498Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",
\"level\":\"debug\",\"message\":\"595\",\"timestamp\":\"2022-08-12T13:30:04.704182009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"596\",\"timestamp\":\"2022-08-12T13:30:04.704193724Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"597\",\"timestamp\":\"2022-08-12T13:30:04.704217116Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"598\",\"timestamp\":\"2022-08-12T13:30:04.704293989Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"599\",\"timestamp\":\"2022-08-12T13:30:04.704295347Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"600\",\"timestamp\":\"2022-08-12T13:30:04.704308533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"601\",\"timestamp\":\"2022-08-12T13:30:04.704332043Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"602\",\"timestamp\":\"2022-08-12T13:30:04.7044051Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"603\",\"timestamp\":\"2022-08-12T13:30:04.704406453Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"604\",\"timestamp\":\"2022-08-12T13:30:04.704417353Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"t
ron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"605\",\"timestamp\":\"2022-08-12T13:30:04.704440176Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"606\",\"timestamp\":\"2022-08-12T13:30:04.704519933Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"607\",\"timestamp\":\"2022-08-12T13:30:04.70452133Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"608\",\"timestamp\":\"2022-08-12T13:30:04.704534809Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"609\",\"timestamp\":\"2022-08-12T13:30:04.704558533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"610\",\"timestamp\":\"2022-08-12T13:30:04.704624173Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"611\",\"timestamp\":\"2022-08-12T13:30:04.704625573Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"612\",\"timestamp\":\"2022-08-12T13:30:04.704636102Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"613\",\"timestamp\":\"2022-08-12T13:30:04.704657866Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-1
8-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"614\",\"timestamp\":\"2022-08-12T13:30:04.704714659Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"615\",\"timestamp\":\"2022-08-12T13:30:04.70471608Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"616\",\"timestamp\":\"2022-08-12T13:30:04.704727384Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"617\",\"timestamp\":\"2022-08-12T13:30:04.70475086Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"618\",\"timestamp\":\"2022-08-12T13:30:04.704883009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"619\",\"timestamp\":\"2022-08-12T13:30:04.704884973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"620\",\"timestamp\":\"2022-08-12T13:30:04.704896004Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"621\",\"timestamp\":\"2022-08-12T13:30:04.704920169Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"622\",\"timestamp\":\"2022-08-12T13:30:04.704990135Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"623\",\"timestamp\":\"2022-08-12T13:30:04.704991597Z\",\"pod_name\":\"compute-in
fra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"624\",\"timestamp\":\"2022-08-12T13:30:04.705002334Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"625\",\"timestamp\":\"2022-08-12T13:30:04.705023982Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"626\",\"timestamp\":\"2022-08-12T13:30:04.705087983Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"627\",\"timestamp\":\"2022-08-12T13:30:04.705089826Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"628\",\"timestamp\":\"2022-08-12T13:30:04.705100979Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"629\",\"timestamp\":\"2022-08-12T13:30:04.70512312Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"630\",\"timestamp\":\"2022-08-12T13:30:04.705189122Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"631\",\"timestamp\":\"2022-08-12T13:30:04.705190673Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"632\",\"timestamp\":\"2022-08-12T13:30:04.705201969Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"c
ontainer_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"633\",\"timestamp\":\"2022-08-12T13:30:04.705224357Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"634\",\"timestamp\":\"2022-08-12T13:30:04.705310603Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"635\",\"timestamp\":\"2022-08-12T13:30:04.705312048Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"636\",\"timestamp\":\"2022-08-12T13:30:04.705322383Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"637\",\"timestamp\":\"2022-08-12T13:30:04.70534587Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"638\",\"timestamp\":\"2022-08-12T13:30:04.705409513Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"639\",\"timestamp\":\"2022-08-12T13:30:04.705410943Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"640\",\"timestamp\":\"2022-08-12T13:30:04.705425433Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"641\",\"timestamp\":\"2022-08-12T13:30:04.70544809Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"642\",\"timestamp\":\"2022-08-
12T13:30:04.705507883Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"643\",\"timestamp\":\"2022-08-12T13:30:04.705509746Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"644\",\"timestamp\":\"2022-08-12T13:30:04.705521201Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"645\",\"timestamp\":\"2022-08-12T13:30:04.705542864Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"646\",\"timestamp\":\"2022-08-12T13:30:04.705603098Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"647\",\"timestamp\":\"2022-08-12T13:30:04.70560443Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"648\",\"timestamp\":\"2022-08-12T13:30:04.705615912Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"649\",\"timestamp\":\"2022-08-12T13:30:04.705637946Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"650\",\"timestamp\":\"2022-08-12T13:30:04.705766838Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"651\",\"timestamp\":\"2022-08-12T13:30:04.70588959Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\"
:\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"652\",\"timestamp\":\"2022-08-12T13:30:04.705896459Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"653\",\"timestamp\":\"2022-08-12T13:30:04.705910605Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"654\",\"timestamp\":\"2022-08-12T13:30:04.706052465Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"655\",\"timestamp\":\"2022-08-12T13:30:04.706054195Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"656\",\"timestamp\":\"2022-08-12T13:30:04.706065743Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"657\",\"timestamp\":\"2022-08-12T13:30:04.706114943Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"658\",\"timestamp\":\"2022-08-12T13:30:04.706188344Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"659\",\"timestamp\":\"2022-08-12T13:30:04.706190134Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"660\",\"timestamp\":\"2022-08-12T13:30:04.706201666Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"d
ebug\",\"message\":\"661\",\"timestamp\":\"2022-08-12T13:30:04.706234783Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"662\",\"timestamp\":\"2022-08-12T13:30:04.706321398Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"663\",\"timestamp\":\"2022-08-12T13:30:04.70632295Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"664\",\"timestamp\":\"2022-08-12T13:30:04.706334209Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"665\",\"timestamp\":\"2022-08-12T13:30:04.706366095Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"666\",\"timestamp\":\"2022-08-12T13:30:04.706457669Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"667\",\"timestamp\":\"2022-08-12T13:30:04.706459107Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"668\",\"timestamp\":\"2022-08-12T13:30:04.706469863Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"669\",\"timestamp\":\"2022-08-12T13:30:04.706494218Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"670\",\"timestamp\":\"2022-08-12T13:30:04.706578986Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_numb
er\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"671\",\"timestamp\":\"2022-08-12T13:30:04.706581809Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"672\",\"timestamp\":\"2022-08-12T13:30:04.70659846Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"673\",\"timestamp\":\"2022-08-12T13:30:04.706624482Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"674\",\"timestamp\":\"2022-08-12T13:30:04.706698446Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"675\",\"timestamp\":\"2022-08-12T13:30:04.706700361Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"676\",\"timestamp\":\"2022-08-12T13:30:04.706714027Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"677\",\"timestamp\":\"2022-08-12T13:30:04.706743676Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"678\",\"timestamp\":\"2022-08-12T13:30:04.706870599Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"679\",\"timestamp\":\"2022-08-12T13:30:04.706872584Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cde
vc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"680\",\"timestamp\":\"2022-08-12T13:30:04.7068853Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"681\",\"timestamp\":\"2022-08-12T13:30:04.706933435Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"682\",\"timestamp\":\"2022-08-12T13:30:04.707013541Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"683\",\"timestamp\":\"2022-08-12T13:30:04.707015012Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"684\",\"timestamp\":\"2022-08-12T13:30:04.707027844Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"685\",\"timestamp\":\"2022-08-12T13:30:04.707056491Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"686\",\"timestamp\":\"2022-08-12T13:30:04.707146544Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"687\",\"timestamp\":\"2022-08-12T13:30:04.707148249Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"688\",\"timestamp\":\"2022-08-12T13:30:04.707159028Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"689\",\"timestamp\":\"2022-08-12T13:30:04.707186468Z\",\"pod_name\":\"compute-infra-test-ser
vice.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"690\",\"timestamp\":\"2022-08-12T13:30:04.707248597Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"691\",\"timestamp\":\"2022-08-12T13:30:04.707250262Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"692\",\"timestamp\":\"2022-08-12T13:30:04.707261454Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"693\",\"timestamp\":\"2022-08-12T13:30:04.707284654Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"694\",\"timestamp\":\"2022-08-12T13:30:04.707344038Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"695\",\"timestamp\":\"2022-08-12T13:30:04.70734543Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"696\",\"timestamp\":\"2022-08-12T13:30:04.707356688Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"697\",\"timestamp\":\"2022-08-12T13:30:04.707380004Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"698\",\"timestamp\":\"2022-08-12T13:30:04.707444475Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_nam
e\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"699\",\"timestamp\":\"2022-08-12T13:30:04.707452512Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"700\",\"timestamp\":\"2022-08-12T13:30:04.707479109Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"701\",\"timestamp\":\"2022-08-12T13:30:04.707513104Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"702\",\"timestamp\":\"2022-08-12T13:30:04.707573415Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"703\",\"timestamp\":\"2022-08-12T13:30:04.70757489Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"704\",\"timestamp\":\"2022-08-12T13:30:04.707585715Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"705\",\"timestamp\":\"2022-08-12T13:30:04.707609484Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"706\",\"timestamp\":\"2022-08-12T13:30:04.707656123Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"707\",\"timestamp\":\"2022-08-12T13:30:04.707657633Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"708\",\"timestamp\":\"2022-08-12T13:30:04
.707670601Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"709\",\"timestamp\":\"2022-08-12T13:30:04.707693525Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"710\",\"timestamp\":\"2022-08-12T13:30:04.707752155Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"711\",\"timestamp\":\"2022-08-12T13:30:04.707753711Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"712\",\"timestamp\":\"2022-08-12T13:30:04.707764721Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"713\",\"timestamp\":\"2022-08-12T13:30:04.707788294Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"714\",\"timestamp\":\"2022-08-12T13:30:04.707838814Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"715\",\"timestamp\":\"2022-08-12T13:30:04.707840132Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"716\",\"timestamp\":\"2022-08-12T13:30:04.707850656Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"717\",\"timestamp\":\"2022-08-12T13:30:04.707872226Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout
\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"718\",\"timestamp\":\"2022-08-12T13:30:04.707929428Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"719\",\"timestamp\":\"2022-08-12T13:30:04.707930885Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"720\",\"timestamp\":\"2022-08-12T13:30:04.70794376Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"721\",\"timestamp\":\"2022-08-12T13:30:04.707966601Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"722\",\"timestamp\":\"2022-08-12T13:30:04.708034262Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"723\",\"timestamp\":\"2022-08-12T13:30:04.708035589Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"724\",\"timestamp\":\"2022-08-12T13:30:04.708045668Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"725\",\"timestamp\":\"2022-08-12T13:30:04.708068205Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"726\",\"timestamp\":\"2022-08-12T13:30:04.708178754Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"m
essage\":\"727\",\"timestamp\":\"2022-08-12T13:30:04.708181443Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"728\",\"timestamp\":\"2022-08-12T13:30:04.708232434Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"729\",\"timestamp\":\"2022-08-12T13:30:04.708234571Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"730\",\"timestamp\":\"2022-08-12T13:30:04.708369392Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"731\",\"timestamp\":\"2022-08-12T13:30:04.708372512Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"732\",\"timestamp\":\"2022-08-12T13:30:04.708395368Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"733\",\"timestamp\":\"2022-08-12T13:30:04.708445942Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"734\",\"timestamp\":\"2022-08-12T13:30:04.708553658Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"735\",\"timestamp\":\"2022-08-12T13:30:04.708556006Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"736\",\"timestamp\":\"2022-08-12T13:30:04.708575383Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}
\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"737\",\"timestamp\":\"2022-08-12T13:30:04.708613754Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"738\",\"timestamp\":\"2022-08-12T13:30:04.708726841Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"739\",\"timestamp\":\"2022-08-12T13:30:04.708729311Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"740\",\"timestamp\":\"2022-08-12T13:30:04.708745908Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"741\",\"timestamp\":\"2022-08-12T13:30:04.708777182Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"742\",\"timestamp\":\"2022-08-12T13:30:04.708935272Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"743\",\"timestamp\":\"2022-08-12T13:30:04.708937344Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"744\",\"timestamp\":\"2022-08-12T13:30:04.708954836Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"745\",\"timestamp\":\"2022-08-12T13:30:04.709000071Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"i
nstance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"746\",\"timestamp\":\"2022-08-12T13:30:04.70912482Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"747\",\"timestamp\":\"2022-08-12T13:30:04.709128254Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"748\",\"timestamp\":\"2022-08-12T13:30:04.709150148Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"749\",\"timestamp\":\"2022-08-12T13:30:04.709210662Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"750\",\"timestamp\":\"2022-08-12T13:30:04.709309652Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"751\",\"timestamp\":\"2022-08-12T13:30:04.709312103Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"752\",\"timestamp\":\"2022-08-12T13:30:04.709331811Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"753\",\"timestamp\":\"2022-08-12T13:30:04.709382885Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"754\",\"timestamp\":\"2022-08-12T13:30:04.709431308Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"755\",\"timestamp\":\"2022-08-12T13:30:04.709433057Z\",\"pod_name\":\"compute-infra-test-service.te
st--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"756\",\"timestamp\":\"2022-08-12T13:30:04.709451889Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"757\",\"timestamp\":\"2022-08-12T13:30:04.709491313Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"758\",\"timestamp\":\"2022-08-12T13:30:04.709590523Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"759\",\"timestamp\":\"2022-08-12T13:30:04.709592009Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"760\",\"timestamp\":\"2022-08-12T13:30:04.709605244Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"761\",\"timestamp\":\"2022-08-12T13:30:04.709636937Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"762\",\"timestamp\":\"2022-08-12T13:30:04.709706196Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"763\",\"timestamp\":\"2022-08-12T13:30:04.709708061Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"764\",\"timestamp\":\"2022-08-12T13:30:04.709727055Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"
main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"765\",\"timestamp\":\"2022-08-12T13:30:04.709759537Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"766\",\"timestamp\":\"2022-08-12T13:30:04.709821448Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"767\",\"timestamp\":\"2022-08-12T13:30:04.709823104Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"768\",\"timestamp\":\"2022-08-12T13:30:04.709833983Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"769\",\"timestamp\":\"2022-08-12T13:30:04.709863828Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"770\",\"timestamp\":\"2022-08-12T13:30:04.709974918Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"771\",\"timestamp\":\"2022-08-12T13:30:04.709977426Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"772\",\"timestamp\":\"2022-08-12T13:30:04.709991337Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"773\",\"timestamp\":\"2022-08-12T13:30:04.710025274Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"774\",\"timestamp\":\"2022-08-12T13:30:04.7101
0215Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"775\",\"timestamp\":\"2022-08-12T13:30:04.710103771Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"776\",\"timestamp\":\"2022-08-12T13:30:04.710114715Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"777\",\"timestamp\":\"2022-08-12T13:30:04.71014817Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"778\",\"timestamp\":\"2022-08-12T13:30:04.710215554Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"779\",\"timestamp\":\"2022-08-12T13:30:04.710216994Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"780\",\"timestamp\":\"2022-08-12T13:30:04.710229487Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"781\",\"timestamp\":\"2022-08-12T13:30:04.710253338Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"782\",\"timestamp\":\"2022-08-12T13:30:04.710318654Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"783\",\"timestamp\":\"2022-08-12T13:30:04.710320067Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"co
ntainer_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"784\",\"timestamp\":\"2022-08-12T13:30:04.710330402Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"785\",\"timestamp\":\"2022-08-12T13:30:04.710354084Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"786\",\"timestamp\":\"2022-08-12T13:30:04.710418256Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"787\",\"timestamp\":\"2022-08-12T13:30:04.710419756Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"788\",\"timestamp\":\"2022-08-12T13:30:04.710430434Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"789\",\"timestamp\":\"2022-08-12T13:30:04.710454041Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"790\",\"timestamp\":\"2022-08-12T13:30:04.710512365Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"791\",\"timestamp\":\"2022-08-12T13:30:04.710513768Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"792\",\"timestamp\":\"2022-08-12T13:30:04.710525787Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message
\":\"793\",\"timestamp\":\"2022-08-12T13:30:04.710549626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"794\",\"timestamp\":\"2022-08-12T13:30:04.710615114Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"795\",\"timestamp\":\"2022-08-12T13:30:04.710616478Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"796\",\"timestamp\":\"2022-08-12T13:30:04.710627446Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"797\",\"timestamp\":\"2022-08-12T13:30:04.710652112Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"798\",\"timestamp\":\"2022-08-12T13:30:04.710716468Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"799\",\"timestamp\":\"2022-08-12T13:30:04.710717858Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"800\",\"timestamp\":\"2022-08-12T13:30:04.710728396Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"801\",\"timestamp\":\"2022-08-12T13:30:04.710752871Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"802\",\"timestamp\":\"2022-08-12T13:30:04.710837134Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"c
luster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"803\",\"timestamp\":\"2022-08-12T13:30:04.710838506Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"804\",\"timestamp\":\"2022-08-12T13:30:04.710848881Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"805\",\"timestamp\":\"2022-08-12T13:30:04.7108824Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"806\",\"timestamp\":\"2022-08-12T13:30:04.710962594Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"807\",\"timestamp\":\"2022-08-12T13:30:04.710964116Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"808\",\"timestamp\":\"2022-08-12T13:30:04.710974224Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"809\",\"timestamp\":\"2022-08-12T13:30:04.710997471Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"810\",\"timestamp\":\"2022-08-12T13:30:04.711072504Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"811\",\"timestamp\":\"2022-08-12T13:30:04.711073919Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\
":\"test_output.foo\",\"level\":\"debug\",\"message\":\"812\",\"timestamp\":\"2022-08-12T13:30:04.71108442Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"813\",\"timestamp\":\"2022-08-12T13:30:04.71110896Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"814\",\"timestamp\":\"2022-08-12T13:30:04.711168002Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"815\",\"timestamp\":\"2022-08-12T13:30:04.7111694Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"816\",\"timestamp\":\"2022-08-12T13:30:04.711200056Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"817\",\"timestamp\":\"2022-08-12T13:30:04.711201407Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"818\",\"timestamp\":\"2022-08-12T13:30:04.711284343Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"819\",\"timestamp\":\"2022-08-12T13:30:04.711286559Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"820\",\"timestamp\":\"2022-08-12T13:30:04.711302987Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"821\",\"timestamp\":\"2022-08-12T13:30:04.711333257Z\",\"pod_name\":\"compute-infra-test-service.test--output.
228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"822\",\"timestamp\":\"2022-08-12T13:30:04.711461684Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"823\",\"timestamp\":\"2022-08-12T13:30:04.711463275Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"824\",\"timestamp\":\"2022-08-12T13:30:04.711473831Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"825\",\"timestamp\":\"2022-08-12T13:30:04.711510722Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"826\",\"timestamp\":\"2022-08-12T13:30:04.711776112Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"827\",\"timestamp\":\"2022-08-12T13:30:04.711778088Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"828\",\"timestamp\":\"2022-08-12T13:30:04.711790555Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"829\",\"timestamp\":\"2022-08-12T13:30:04.711850892Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"830\",\"timestamp\":\"2022-08-12T13:30:04.711955921Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"ho
stname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"831\",\"timestamp\":\"2022-08-12T13:30:04.711959057Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"832\",\"timestamp\":\"2022-08-12T13:30:04.711979285Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"833\",\"timestamp\":\"2022-08-12T13:30:04.712012517Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"834\",\"timestamp\":\"2022-08-12T13:30:04.712141797Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"835\",\"timestamp\":\"2022-08-12T13:30:04.712144941Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"836\",\"timestamp\":\"2022-08-12T13:30:04.712167389Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"837\",\"timestamp\":\"2022-08-12T13:30:04.71222609Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"838\",\"timestamp\":\"2022-08-12T13:30:04.712275903Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"839\",\"timestamp\":\"2022-08-12T13:30:04.71227873Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"840\",\"timestamp\":\"2022-08-12T13:30:04.712298541Z\",\"po
d_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"841\",\"timestamp\":\"2022-08-12T13:30:04.712346086Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"844\",\"timestamp\":\"2022-08-12T13:30:04.712432623Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"842\",\"timestamp\":\"2022-08-12T13:30:04.712417798Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"845\",\"timestamp\":\"2022-08-12T13:30:04.712475176Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"843\",\"timestamp\":\"2022-08-12T13:30:04.712419615Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"846\",\"timestamp\":\"2022-08-12T13:30:04.712541331Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"847\",\"timestamp\":\"2022-08-12T13:30:04.712544343Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"848\",\"timestamp\":\"2022-08-12T13:30:04.712565343Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"849\",\"timestamp\":\"2022-08-12T13:30:04.71260141Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\"
:\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"850\",\"timestamp\":\"2022-08-12T13:30:04.712679533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"851\",\"timestamp\":\"2022-08-12T13:30:04.712682047Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"852\",\"timestamp\":\"2022-08-12T13:30:04.71269993Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"853\",\"timestamp\":\"2022-08-12T13:30:04.712744578Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"854\",\"timestamp\":\"2022-08-12T13:30:04.712832526Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"855\",\"timestamp\":\"2022-08-12T13:30:04.712835083Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"856\",\"timestamp\":\"2022-08-12T13:30:04.712855748Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"857\",\"timestamp\":\"2022-08-12T13:30:04.712901117Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"858\",\"timestamp\":\"2022-08-12T13:30:04.712948278Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"859\",\"
timestamp\":\"2022-08-12T13:30:04.712950652Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"860\",\"timestamp\":\"2022-08-12T13:30:04.712972108Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"861\",\"timestamp\":\"2022-08-12T13:30:04.713013007Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"862\",\"timestamp\":\"2022-08-12T13:30:04.713086755Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"863\",\"timestamp\":\"2022-08-12T13:30:04.713095643Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"864\",\"timestamp\":\"2022-08-12T13:30:04.713124847Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"865\",\"timestamp\":\"2022-08-12T13:30:04.713193224Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"868\",\"timestamp\":\"2022-08-12T13:30:04.713242747Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"866\",\"timestamp\":\"2022-08-12T13:30:04.713230415Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"869\",\"timestamp\":\"2022-08-12T13:30:04.713273546Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"in
frastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"867\",\"timestamp\":\"2022-08-12T13:30:04.713231837Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"870\",\"timestamp\":\"2022-08-12T13:30:04.713374227Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"871\",\"timestamp\":\"2022-08-12T13:30:04.713376026Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"876\",\"timestamp\":\"2022-08-12T13:30:04.713494103Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"877\",\"timestamp\":\"2022-08-12T13:30:04.713560486Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"878\",\"timestamp\":\"2022-08-12T13:30:04.713617852Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"872\",\"timestamp\":\"2022-08-12T13:30:04.71338764Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"879\",\"timestamp\":\"2022-08-12T13:30:04.713619264Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"873\",\"timestamp\":\"2022-08-12T13:30:04.71343859Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_outp
ut.foo\",\"level\":\"debug\",\"message\":\"874\",\"timestamp\":\"2022-08-12T13:30:04.713480681Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"875\",\"timestamp\":\"2022-08-12T13:30:04.713482158Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"880\",\"timestamp\":\"2022-08-12T13:30:04.713635908Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"881\",\"timestamp\":\"2022-08-12T13:30:04.713666749Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"882\",\"timestamp\":\"2022-08-12T13:30:04.713737208Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"883\",\"timestamp\":\"2022-08-12T13:30:04.713738675Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"884\",\"timestamp\":\"2022-08-12T13:30:04.713749906Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"885\",\"timestamp\":\"2022-08-12T13:30:04.713776348Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"886\",\"timestamp\":\"2022-08-12T13:30:04.713842556Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"887\",\"timestamp\":\"2022-08-12T13:30:04.713844333Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.j
awpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"888\",\"timestamp\":\"2022-08-12T13:30:04.713856017Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"889\",\"timestamp\":\"2022-08-12T13:30:04.713918858Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"890\",\"timestamp\":\"2022-08-12T13:30:04.713957075Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"891\",\"timestamp\":\"2022-08-12T13:30:04.713958725Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"892\",\"timestamp\":\"2022-08-12T13:30:04.71397766Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"893\",\"timestamp\":\"2022-08-12T13:30:04.713997861Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"894\",\"timestamp\":\"2022-08-12T13:30:04.714068363Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"895\",\"timestamp\":\"2022-08-12T13:30:04.714069854Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"904\",\"timestamp\":\"2022-08-12T13:30:04.71441155Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"
10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"905\",\"timestamp\":\"2022-08-12T13:30:04.714443146Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"906\",\"timestamp\":\"2022-08-12T13:30:04.714526487Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"907\",\"timestamp\":\"2022-08-12T13:30:04.71452801Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"900\",\"timestamp\":\"2022-08-12T13:30:04.714259517Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"901\",\"timestamp\":\"2022-08-12T13:30:04.71431771Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"896\",\"timestamp\":\"2022-08-12T13:30:04.714081946Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"902\",\"timestamp\":\"2022-08-12T13:30:04.714398763Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"897\",\"timestamp\":\"2022-08-12T13:30:04.714131856Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"903\",\"timestamp\":\"2022-08-12T13:30:04.714400278Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"898\",\"timestamp\":\"2022-08-12T13:30:04.714221145Z\",\"pod_name\":\"
compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"899\",\"timestamp\":\"2022-08-12T13:30:04.714222816Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"908\",\"timestamp\":\"2022-08-12T13:30:04.714539876Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"909\",\"timestamp\":\"2022-08-12T13:30:04.714579616Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"910\",\"timestamp\":\"2022-08-12T13:30:04.714648781Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"911\",\"timestamp\":\"2022-08-12T13:30:04.714650352Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"912\",\"timestamp\":\"2022-08-12T13:30:04.714661169Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"913\",\"timestamp\":\"2022-08-12T13:30:04.714687341Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"914\",\"timestamp\":\"2022-08-12T13:30:04.714752976Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"915\",\"timestamp\":\"2022-08-12T13:30:04.71475437Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a65
3f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"916\",\"timestamp\":\"2022-08-12T13:30:04.714766934Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"917\",\"timestamp\":\"2022-08-12T13:30:04.714791959Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"918\",\"timestamp\":\"2022-08-12T13:30:04.714855719Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"919\",\"timestamp\":\"2022-08-12T13:30:04.714857136Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"924\",\"timestamp\":\"2022-08-12T13:30:04.714966515Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"920\",\"timestamp\":\"2022-08-12T13:30:04.71486945Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"925\",\"timestamp\":\"2022-08-12T13:30:04.714989623Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"921\",\"timestamp\":\"2022-08-12T13:30:04.714897783Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"926\",\"timestamp\":\"2022-08-12T13:30:04.715051119Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"922\",\"timestamp\"
:\"2022-08-12T13:30:04.714953405Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"927\",\"timestamp\":\"2022-08-12T13:30:04.715052519Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"923\",\"timestamp\":\"2022-08-12T13:30:04.714954731Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"928\",\"timestamp\":\"2022-08-12T13:30:04.715063486Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"929\",\"timestamp\":\"2022-08-12T13:30:04.715086245Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"930\",\"timestamp\":\"2022-08-12T13:30:04.71514682Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"931\",\"timestamp\":\"2022-08-12T13:30:04.715208913Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"932\",\"timestamp\":\"2022-08-12T13:30:04.715309406Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"933\",\"timestamp\":\"2022-08-12T13:30:04.715319793Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"934\",\"timestamp\":\"2022-08-12T13:30:04.715330232Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\
"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"935\",\"timestamp\":\"2022-08-12T13:30:04.715355303Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"936\",\"timestamp\":\"2022-08-12T13:30:04.715367929Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"937\",\"timestamp\":\"2022-08-12T13:30:04.715388934Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"938\",\"timestamp\":\"2022-08-12T13:30:04.715536443Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"939\",\"timestamp\":\"2022-08-12T13:30:04.715537919Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"940\",\"timestamp\":\"2022-08-12T13:30:04.715548611Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"941\",\"timestamp\":\"2022-08-12T13:30:04.715578812Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"942\",\"timestamp\":\"2022-08-12T13:30:04.715644144Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"943\",\"timestamp\":\"2022-08-12T13:30:04.71564598Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"
level\":\"debug\",\"message\":\"944\",\"timestamp\":\"2022-08-12T13:30:04.715657848Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"945\",\"timestamp\":\"2022-08-12T13:30:04.715697271Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"946\",\"timestamp\":\"2022-08-12T13:30:04.715762211Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"947\",\"timestamp\":\"2022-08-12T13:30:04.715764594Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"952\",\"timestamp\":\"2022-08-12T13:30:04.715951386Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"953\",\"timestamp\":\"2022-08-12T13:30:04.715985421Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"954\",\"timestamp\":\"2022-08-12T13:30:04.71606346Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"955\",\"timestamp\":\"2022-08-12T13:30:04.716065057Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"948\",\"timestamp\":\"2022-08-12T13:30:04.715784626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"949\",\"timestamp\":\"2022-08-12T13:30:04.715819265Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tr
on_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"950\",\"timestamp\":\"2022-08-12T13:30:04.715926101Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"951\",\"timestamp\":\"2022-08-12T13:30:04.715928999Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"956\",\"timestamp\":\"2022-08-12T13:30:04.716078469Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"957\",\"timestamp\":\"2022-08-12T13:30:04.716105203Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"958\",\"timestamp\":\"2022-08-12T13:30:04.716186805Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"959\",\"timestamp\":\"2022-08-12T13:30:04.716189249Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"960\",\"timestamp\":\"2022-08-12T13:30:04.716209061Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"961\",\"timestamp\":\"2022-08-12T13:30:04.716261576Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"962\",\"timestamp\":\"2022-08-12T13:30:04.716300495Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-1
8-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"963\",\"timestamp\":\"2022-08-12T13:30:04.716301971Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"964\",\"timestamp\":\"2022-08-12T13:30:04.716312989Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"965\",\"timestamp\":\"2022-08-12T13:30:04.716348322Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"966\",\"timestamp\":\"2022-08-12T13:30:04.716423956Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"967\",\"timestamp\":\"2022-08-12T13:30:04.716426315Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"968\",\"timestamp\":\"2022-08-12T13:30:04.716447423Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"969\",\"timestamp\":\"2022-08-12T13:30:04.716509994Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"970\",\"timestamp\":\"2022-08-12T13:30:04.71658427Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"971\",\"timestamp\":\"2022-08-12T13:30:04.716585802Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"972\",\"timestamp\":\"2022-08-12T13:30:04.716597559Z\",\"pod_name\":\"compute-i
nfra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"973\",\"timestamp\":\"2022-08-12T13:30:04.716640621Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"974\",\"timestamp\":\"2022-08-12T13:30:04.716699467Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"975\",\"timestamp\":\"2022-08-12T13:30:04.716702214Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"976\",\"timestamp\":\"2022-08-12T13:30:04.716731172Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"977\",\"timestamp\":\"2022-08-12T13:30:04.716795598Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"978\",\"timestamp\":\"2022-08-12T13:30:04.716888316Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"979\",\"timestamp\":\"2022-08-12T13:30:04.716891115Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"980\",\"timestamp\":\"2022-08-12T13:30:04.716912043Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"981\",\"timestamp\":\"2022-08-12T13:30:04.716949806Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\
"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"982\",\"timestamp\":\"2022-08-12T13:30:04.717044262Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"983\",\"timestamp\":\"2022-08-12T13:30:04.717046304Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"984\",\"timestamp\":\"2022-08-12T13:30:04.717058535Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"985\",\"timestamp\":\"2022-08-12T13:30:04.717089878Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"988\",\"timestamp\":\"2022-08-12T13:30:04.717224621Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"986\",\"timestamp\":\"2022-08-12T13:30:04.717201882Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"989\",\"timestamp\":\"2022-08-12T13:30:04.717255035Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"987\",\"timestamp\":\"2022-08-12T13:30:04.71720484Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"990\",\"timestamp\":\"2022-08-12T13:30:04.717361332Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"991\",\"timestamp\":\"2022-
08-12T13:30:04.717363063Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"992\",\"timestamp\":\"2022-08-12T13:30:04.717374202Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"993\",\"timestamp\":\"2022-08-12T13:30:04.717422883Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"994\",\"timestamp\":\"2022-08-12T13:30:04.717540795Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"995\",\"timestamp\":\"2022-08-12T13:30:04.717542899Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"996\",\"timestamp\":\"2022-08-12T13:30:04.717559179Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1000\",\"timestamp\":\"2022-08-12T13:30:04.71770325Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"997\",\"timestamp\":\"2022-08-12T13:30:04.717602673Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1001\",\"timestamp\":\"2022-08-12T13:30:04.717748364Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"998\",\"timestamp\":\"2022-08-12T13:30:04.717689994Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"compo
nent\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1002\",\"timestamp\":\"2022-08-12T13:30:04.717810876Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"999\",\"timestamp\":\"2022-08-12T13:30:04.717691639Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1003\",\"timestamp\":\"2022-08-12T13:30:04.717813824Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1004\",\"timestamp\":\"2022-08-12T13:30:04.717825579Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1005\",\"timestamp\":\"2022-08-12T13:30:04.717850867Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1006\",\"timestamp\":\"2022-08-12T13:30:04.717913556Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1007\",\"timestamp\":\"2022-08-12T13:30:04.717915015Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1008\",\"timestamp\":\"2022-08-12T13:30:04.717925497Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1009\",\"timestamp\":\"2022-08-12T13:30:04.717947864Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\"
,\"level\":\"debug\",\"message\":\"1010\",\"timestamp\":\"2022-08-12T13:30:04.718040632Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1011\",\"timestamp\":\"2022-08-12T13:30:04.718042702Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1012\",\"timestamp\":\"2022-08-12T13:30:04.718098444Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1013\",\"timestamp\":\"2022-08-12T13:30:04.718151358Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1014\",\"timestamp\":\"2022-08-12T13:30:04.718234563Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1015\",\"timestamp\":\"2022-08-12T13:30:04.718236201Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1016\",\"timestamp\":\"2022-08-12T13:30:04.718251774Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1017\",\"timestamp\":\"2022-08-12T13:30:04.718279106Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1018\",\"timestamp\":\"2022-08-12T13:30:04.71836407Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1019\",\"timestamp\":\"2022-08-12T13:30:04.71836553Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.j
awpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1020\",\"timestamp\":\"2022-08-12T13:30:04.718378678Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1021\",\"timestamp\":\"2022-08-12T13:30:04.718403592Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1022\",\"timestamp\":\"2022-08-12T13:30:04.718492334Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1023\",\"timestamp\":\"2022-08-12T13:30:04.718494063Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1024\",\"timestamp\":\"2022-08-12T13:30:04.718505552Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1025\",\"timestamp\":\"2022-08-12T13:30:04.718558491Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1026\",\"timestamp\":\"2022-08-12T13:30:04.718634674Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1027\",\"timestamp\":\"2022-08-12T13:30:04.71863619Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1028\",\"timestamp\":\"2022-08-12T13:30:04.718647076Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hos
tname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1029\",\"timestamp\":\"2022-08-12T13:30:04.718672497Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1030\",\"timestamp\":\"2022-08-12T13:30:04.718735921Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1031\",\"timestamp\":\"2022-08-12T13:30:04.718737498Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1032\",\"timestamp\":\"2022-08-12T13:30:04.718757782Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1033\",\"timestamp\":\"2022-08-12T13:30:04.71878533Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1034\",\"timestamp\":\"2022-08-12T13:30:04.718850074Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1035\",\"timestamp\":\"2022-08-12T13:30:04.718851545Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1036\",\"timestamp\":\"2022-08-12T13:30:04.718862618Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1037\",\"timestamp\":\"2022-08-12T13:30:04.718888289Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1038\",\"timestamp\":\"2022-08-12T13:30:04.7189926
6Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1039\",\"timestamp\":\"2022-08-12T13:30:04.718994119Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1040\",\"timestamp\":\"2022-08-12T13:30:04.719006465Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1041\",\"timestamp\":\"2022-08-12T13:30:04.719034067Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1042\",\"timestamp\":\"2022-08-12T13:30:04.719117434Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1043\",\"timestamp\":\"2022-08-12T13:30:04.719118934Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1044\",\"timestamp\":\"2022-08-12T13:30:04.719129992Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1045\",\"timestamp\":\"2022-08-12T13:30:04.719154181Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1046\",\"timestamp\":\"2022-08-12T13:30:04.719220595Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1047\",\"timestamp\":\"2022-08-12T13:30:04.719222004Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout
\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1048\",\"timestamp\":\"2022-08-12T13:30:04.71923287Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1049\",\"timestamp\":\"2022-08-12T13:30:04.719257141Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1050\",\"timestamp\":\"2022-08-12T13:30:04.719305938Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1051\",\"timestamp\":\"2022-08-12T13:30:04.719307428Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1052\",\"timestamp\":\"2022-08-12T13:30:04.719320222Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1053\",\"timestamp\":\"2022-08-12T13:30:04.719344568Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1054\",\"timestamp\":\"2022-08-12T13:30:04.719394472Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1055\",\"timestamp\":\"2022-08-12T13:30:04.719395892Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1056\",\"timestamp\":\"2022-08-12T13:30:04.719406265Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"de
bug\",\"message\":\"1057\",\"timestamp\":\"2022-08-12T13:30:04.719435252Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1058\",\"timestamp\":\"2022-08-12T13:30:04.719482663Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1059\",\"timestamp\":\"2022-08-12T13:30:04.719484978Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1060\",\"timestamp\":\"2022-08-12T13:30:04.719500193Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1061\",\"timestamp\":\"2022-08-12T13:30:04.719544044Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1062\",\"timestamp\":\"2022-08-12T13:30:04.719601622Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1063\",\"timestamp\":\"2022-08-12T13:30:04.719603628Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1064\",\"timestamp\":\"2022-08-12T13:30:04.719620626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1065\",\"timestamp\":\"2022-08-12T13:30:04.71966462Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1066\",\"timestamp\":\"2022-08-12T13:30:04.719718699Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron
_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1067\",\"timestamp\":\"2022-08-12T13:30:04.719720137Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1068\",\"timestamp\":\"2022-08-12T13:30:04.719730749Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1069\",\"timestamp\":\"2022-08-12T13:30:04.719755893Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1070\",\"timestamp\":\"2022-08-12T13:30:04.719820477Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1071\",\"timestamp\":\"2022-08-12T13:30:04.719823913Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1072\",\"timestamp\":\"2022-08-12T13:30:04.719851384Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1073\",\"timestamp\":\"2022-08-12T13:30:04.719900268Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1074\",\"timestamp\":\"2022-08-12T13:30:04.719989798Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1075\",\"timestamp\":\"2022-08-12T13:30:04.719991293Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-
40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1076\",\"timestamp\":\"2022-08-12T13:30:04.720002148Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1080\",\"timestamp\":\"2022-08-12T13:30:04.720109938Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1077\",\"timestamp\":\"2022-08-12T13:30:04.720028646Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1081\",\"timestamp\":\"2022-08-12T13:30:04.720135214Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1078\",\"timestamp\":\"2022-08-12T13:30:04.72009678Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1082\",\"timestamp\":\"2022-08-12T13:30:04.720233666Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1079\",\"timestamp\":\"2022-08-12T13:30:04.720098245Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1083\",\"timestamp\":\"2022-08-12T13:30:04.720235717Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1084\",\"timestamp\":\"2022-08-12T13:30:04.720250266Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1085\",\"timestamp\":\"2022-08-12T13:30:04.72029262Z\",\"pod_na
me\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1086\",\"timestamp\":\"2022-08-12T13:30:04.720355963Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1087\",\"timestamp\":\"2022-08-12T13:30:04.720357702Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1092\",\"timestamp\":\"2022-08-12T13:30:04.720491846Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1093\",\"timestamp\":\"2022-08-12T13:30:04.720522913Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1088\",\"timestamp\":\"2022-08-12T13:30:04.720372571Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1094\",\"timestamp\":\"2022-08-12T13:30:04.720582715Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1095\",\"timestamp\":\"2022-08-12T13:30:04.720584485Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1089\",\"timestamp\":\"2022-08-12T13:30:04.720410812Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1090\",\"timestamp\":\"2022-08-12T13:30:04.720478491Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"containe
r_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1091\",\"timestamp\":\"2022-08-12T13:30:04.720480218Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1096\",\"timestamp\":\"2022-08-12T13:30:04.720598203Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1097\",\"timestamp\":\"2022-08-12T13:30:04.720626808Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1098\",\"timestamp\":\"2022-08-12T13:30:04.72067591Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1099\",\"timestamp\":\"2022-08-12T13:30:04.720677401Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1100\",\"timestamp\":\"2022-08-12T13:30:04.720690184Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1101\",\"timestamp\":\"2022-08-12T13:30:04.720715219Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1102\",\"timestamp\":\"2022-08-12T13:30:04.720769041Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1103\",\"timestamp\":\"2022-08-12T13:30:04.720770757Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"messa
ge\":\"1104\",\"timestamp\":\"2022-08-12T13:30:04.720805757Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1105\",\"timestamp\":\"2022-08-12T13:30:04.72083091Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1106\",\"timestamp\":\"2022-08-12T13:30:04.720910654Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1107\",\"timestamp\":\"2022-08-12T13:30:04.720912234Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1108\",\"timestamp\":\"2022-08-12T13:30:04.720922603Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1109\",\"timestamp\":\"2022-08-12T13:30:04.720961357Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1110\",\"timestamp\":\"2022-08-12T13:30:04.721056935Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1112\",\"timestamp\":\"2022-08-12T13:30:04.721082107Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1111\",\"timestamp\":\"2022-08-12T13:30:04.721060116Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1113\",\"timestamp\":\"2022-08-12T13:30:04.721109374Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\"
:228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1114\",\"timestamp\":\"2022-08-12T13:30:04.721185212Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1115\",\"timestamp\":\"2022-08-12T13:30:04.721186698Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1116\",\"timestamp\":\"2022-08-12T13:30:04.721197567Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1117\",\"timestamp\":\"2022-08-12T13:30:04.721230527Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1118\",\"timestamp\":\"2022-08-12T13:30:04.72128482Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1119\",\"timestamp\":\"2022-08-12T13:30:04.721286311Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1120\",\"timestamp\":\"2022-08-12T13:30:04.721296783Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1121\",\"timestamp\":\"2022-08-12T13:30:04.721330626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1122\",\"timestamp\":\"2022-08-12T13:30:04.721374349Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswes
t1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1123\",\"timestamp\":\"2022-08-12T13:30:04.721375723Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1124\",\"timestamp\":\"2022-08-12T13:30:04.721386016Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1125\",\"timestamp\":\"2022-08-12T13:30:04.721417245Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1126\",\"timestamp\":\"2022-08-12T13:30:04.721465258Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1127\",\"timestamp\":\"2022-08-12T13:30:04.721466618Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1128\",\"timestamp\":\"2022-08-12T13:30:04.721476998Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1129\",\"timestamp\":\"2022-08-12T13:30:04.721507429Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1130\",\"timestamp\":\"2022-08-12T13:30:04.721567582Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1131\",\"timestamp\":\"2022-08-12T13:30:04.721571813Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1132\",\"timestamp\":\"2022-08-12T13:30:04.721595411Z\",\"pod_name\":\"compu
te-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1133\",\"timestamp\":\"2022-08-12T13:30:04.721642744Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1134\",\"timestamp\":\"2022-08-12T13:30:04.72168199Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1135\",\"timestamp\":\"2022-08-12T13:30:04.721683526Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1136\",\"timestamp\":\"2022-08-12T13:30:04.721699658Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1137\",\"timestamp\":\"2022-08-12T13:30:04.721757707Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1138\",\"timestamp\":\"2022-08-12T13:30:04.7218142Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1139\",\"timestamp\":\"2022-08-12T13:30:04.721815936Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1140\",\"timestamp\":\"2022-08-12T13:30:04.721827629Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1141\",\"timestamp\":\"2022-08-12T13:30:04.721857375Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a
653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1142\",\"timestamp\":\"2022-08-12T13:30:04.72193039Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1143\",\"timestamp\":\"2022-08-12T13:30:04.721932146Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1144\",\"timestamp\":\"2022-08-12T13:30:04.721943453Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1148\",\"timestamp\":\"2022-08-12T13:30:04.722041659Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1145\",\"timestamp\":\"2022-08-12T13:30:04.721970231Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1149\",\"timestamp\":\"2022-08-12T13:30:04.722070899Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1146\",\"timestamp\":\"2022-08-12T13:30:04.722029146Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1150\",\"timestamp\":\"2022-08-12T13:30:04.722121361Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1147\",\"timestamp\":\"2022-08-12T13:30:04.72203073Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1151\",\"
timestamp\":\"2022-08-12T13:30:04.722123262Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1152\",\"timestamp\":\"2022-08-12T13:30:04.722137804Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1153\",\"timestamp\":\"2022-08-12T13:30:04.722165053Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1154\",\"timestamp\":\"2022-08-12T13:30:04.722202637Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1155\",\"timestamp\":\"2022-08-12T13:30:04.722204357Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1156\",\"timestamp\":\"2022-08-12T13:30:04.722218697Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1157\",\"timestamp\":\"2022-08-12T13:30:04.722248043Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1158\",\"timestamp\":\"2022-08-12T13:30:04.722323697Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1159\",\"timestamp\":\"2022-08-12T13:30:04.722326559Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1160\",\"timestamp\":\"2022-08-12T13:30:04.722346125Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"clust
er\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1161\",\"timestamp\":\"2022-08-12T13:30:04.72239747Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1162\",\"timestamp\":\"2022-08-12T13:30:04.722428438Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1163\",\"timestamp\":\"2022-08-12T13:30:04.722430372Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1164\",\"timestamp\":\"2022-08-12T13:30:04.722444225Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1165\",\"timestamp\":\"2022-08-12T13:30:04.722477559Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1166\",\"timestamp\":\"2022-08-12T13:30:04.722545503Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1167\",\"timestamp\":\"2022-08-12T13:30:04.722548106Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1168\",\"timestamp\":\"2022-08-12T13:30:04.722567527Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1169\",\"timestamp\":\"2022-08-12T13:30:04.722603043Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"ins
tance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1170\",\"timestamp\":\"2022-08-12T13:30:04.722658737Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1171\",\"timestamp\":\"2022-08-12T13:30:04.72266028Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1172\",\"timestamp\":\"2022-08-12T13:30:04.722671179Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1173\",\"timestamp\":\"2022-08-12T13:30:04.72270491Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1174\",\"timestamp\":\"2022-08-12T13:30:04.722761636Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1175\",\"timestamp\":\"2022-08-12T13:30:04.722841361Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1176\",\"timestamp\":\"2022-08-12T13:30:04.722842974Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1177\",\"timestamp\":\"2022-08-12T13:30:04.722854654Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1178\",\"timestamp\":\"2022-08-12T13:30:04.722880331Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1181\",\"timestamp\":\"2022-08-12T13:30:04.722937624Z\",\"pod_name\":\"compute-infra-test-ser
vice.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1179\",\"timestamp\":\"2022-08-12T13:30:04.722925764Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1182\",\"timestamp\":\"2022-08-12T13:30:04.722961927Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1180\",\"timestamp\":\"2022-08-12T13:30:04.722927287Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1183\",\"timestamp\":\"2022-08-12T13:30:04.723009558Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1184\",\"timestamp\":\"2022-08-12T13:30:04.723010906Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1185\",\"timestamp\":\"2022-08-12T13:30:04.72302468Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1186\",\"timestamp\":\"2022-08-12T13:30:04.7230504Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1187\",\"timestamp\":\"2022-08-12T13:30:04.72309843Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1188\",\"timestamp\":\"2022-08-12T13:30:04.723099875Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"contain
er_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1189\",\"timestamp\":\"2022-08-12T13:30:04.723110268Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1190\",\"timestamp\":\"2022-08-12T13:30:04.723134788Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1191\",\"timestamp\":\"2022-08-12T13:30:04.72318872Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1192\",\"timestamp\":\"2022-08-12T13:30:04.723190113Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1193\",\"timestamp\":\"2022-08-12T13:30:04.72320054Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1194\",\"timestamp\":\"2022-08-12T13:30:04.72322426Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1195\",\"timestamp\":\"2022-08-12T13:30:04.72327904Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1196\",\"timestamp\":\"2022-08-12T13:30:04.723280423Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1197\",\"timestamp\":\"2022-08-12T13:30:04.723290803Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1198\",\"timestamp\":\"2022-0
8-12T13:30:04.723314474Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1199\",\"timestamp\":\"2022-08-12T13:30:04.723369582Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1200\",\"timestamp\":\"2022-08-12T13:30:04.723370982Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1201\",\"timestamp\":\"2022-08-12T13:30:04.723381446Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1202\",\"timestamp\":\"2022-08-12T13:30:04.723405137Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1203\",\"timestamp\":\"2022-08-12T13:30:04.723461813Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1204\",\"timestamp\":\"2022-08-12T13:30:04.723463173Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1205\",\"timestamp\":\"2022-08-12T13:30:04.723473901Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1206\",\"timestamp\":\"2022-08-12T13:30:04.723497556Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1207\",\"timestamp\":\"2022-08-12T13:30:04.723552536Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",
\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1208\",\"timestamp\":\"2022-08-12T13:30:04.723554122Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1209\",\"timestamp\":\"2022-08-12T13:30:04.723565649Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1210\",\"timestamp\":\"2022-08-12T13:30:04.723589159Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1211\",\"timestamp\":\"2022-08-12T13:30:04.723642396Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1212\",\"timestamp\":\"2022-08-12T13:30:04.723643789Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1213\",\"timestamp\":\"2022-08-12T13:30:04.723707718Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1214\",\"timestamp\":\"2022-08-12T13:30:04.723784035Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1215\",\"timestamp\":\"2022-08-12T13:30:04.723812348Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1216\",\"timestamp\":\"2022-08-12T13:30:04.72381381Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_outpu
t.foo\",\"level\":\"debug\",\"message\":\"1217\",\"timestamp\":\"2022-08-12T13:30:04.723825329Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1218\",\"timestamp\":\"2022-08-12T13:30:04.72385155Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1219\",\"timestamp\":\"2022-08-12T13:30:04.723901202Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1220\",\"timestamp\":\"2022-08-12T13:30:04.723902573Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1221\",\"timestamp\":\"2022-08-12T13:30:04.723918403Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1222\",\"timestamp\":\"2022-08-12T13:30:04.723941755Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1223\",\"timestamp\":\"2022-08-12T13:30:04.723991497Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1224\",\"timestamp\":\"2022-08-12T13:30:04.723993912Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1225\",\"timestamp\":\"2022-08-12T13:30:04.724008957Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1226\",\"timestamp\":\"2022-08-12T13:30:04.724053334Z\",\"pod_name\":\"compute-infra-test-service.test--output.2
28.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1227\",\"timestamp\":\"2022-08-12T13:30:04.724109193Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1228\",\"timestamp\":\"2022-08-12T13:30:04.724110657Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1229\",\"timestamp\":\"2022-08-12T13:30:04.72412113Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1230\",\"timestamp\":\"2022-08-12T13:30:04.724146191Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1231\",\"timestamp\":\"2022-08-12T13:30:04.724213719Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1232\",\"timestamp\":\"2022-08-12T13:30:04.724215244Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1233\",\"timestamp\":\"2022-08-12T13:30:04.724226742Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1234\",\"timestamp\":\"2022-08-12T13:30:04.724254342Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1235\",\"timestamp\":\"2022-08-12T13:30:04.724348093Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main
\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1236\",\"timestamp\":\"2022-08-12T13:30:04.724349915Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1237\",\"timestamp\":\"2022-08-12T13:30:04.724361177Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1238\",\"timestamp\":\"2022-08-12T13:30:04.724397888Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1239\",\"timestamp\":\"2022-08-12T13:30:04.72445394Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1240\",\"timestamp\":\"2022-08-12T13:30:04.724455777Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1241\",\"timestamp\":\"2022-08-12T13:30:04.724466161Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1242\",\"timestamp\":\"2022-08-12T13:30:04.724517422Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1243\",\"timestamp\":\"2022-08-12T13:30:04.724553559Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1244\",\"timestamp\":\"2022-08-12T13:30:04.724556403Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1245\",\"timestamp\":\"2022-08-12T13:30:04
.724567528Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1246\",\"timestamp\":\"2022-08-12T13:30:04.724598314Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1247\",\"timestamp\":\"2022-08-12T13:30:04.724649254Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1248\",\"timestamp\":\"2022-08-12T13:30:04.724650763Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1249\",\"timestamp\":\"2022-08-12T13:30:04.724663703Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1250\",\"timestamp\":\"2022-08-12T13:30:04.724694939Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1251\",\"timestamp\":\"2022-08-12T13:30:04.724744175Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1252\",\"timestamp\":\"2022-08-12T13:30:04.724745563Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1253\",\"timestamp\":\"2022-08-12T13:30:04.724756505Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1254\",\"timestamp\":\"2022-08-12T13:30:04.724808177Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\"
:\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1255\",\"timestamp\":\"2022-08-12T13:30:04.724851439Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1256\",\"timestamp\":\"2022-08-12T13:30:04.724852945Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1257\",\"timestamp\":\"2022-08-12T13:30:04.724863212Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1258\",\"timestamp\":\"2022-08-12T13:30:04.72489549Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1259\",\"timestamp\":\"2022-08-12T13:30:04.72494261Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1260\",\"timestamp\":\"2022-08-12T13:30:04.724944021Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1261\",\"timestamp\":\"2022-08-12T13:30:04.724954912Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1262\",\"timestamp\":\"2022-08-12T13:30:04.724985556Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1263\",\"timestamp\":\"2022-08-12T13:30:04.725040078Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"leve
l\":\"debug\",\"message\":\"1264\",\"timestamp\":\"2022-08-12T13:30:04.725041609Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1265\",\"timestamp\":\"2022-08-12T13:30:04.725052849Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1266\",\"timestamp\":\"2022-08-12T13:30:04.725082432Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1267\",\"timestamp\":\"2022-08-12T13:30:04.725138728Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1268\",\"timestamp\":\"2022-08-12T13:30:04.725140187Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1269\",\"timestamp\":\"2022-08-12T13:30:04.725151372Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1270\",\"timestamp\":\"2022-08-12T13:30:04.725175776Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1271\",\"timestamp\":\"2022-08-12T13:30:04.725225587Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1272\",\"timestamp\":\"2022-08-12T13:30:04.725226994Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1273\",\"timestamp\":\"2022-08-12T13:30:04.725237458Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt
\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1274\",\"timestamp\":\"2022-08-12T13:30:04.725261546Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1275\",\"timestamp\":\"2022-08-12T13:30:04.725323126Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1276\",\"timestamp\":\"2022-08-12T13:30:04.725324729Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1277\",\"timestamp\":\"2022-08-12T13:30:04.725337708Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1278\",\"timestamp\":\"2022-08-12T13:30:04.725361718Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1279\",\"timestamp\":\"2022-08-12T13:30:04.725421275Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1280\",\"timestamp\":\"2022-08-12T13:30:04.725422632Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1281\",\"timestamp\":\"2022-08-12T13:30:04.725432696Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1282\",\"timestamp\":\"2022-08-12T13:30:04.725457678Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostnam
e\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1283\",\"timestamp\":\"2022-08-12T13:30:04.72551114Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1284\",\"timestamp\":\"2022-08-12T13:30:04.725512574Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1285\",\"timestamp\":\"2022-08-12T13:30:04.725523899Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1286\",\"timestamp\":\"2022-08-12T13:30:04.725547565Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1287\",\"timestamp\":\"2022-08-12T13:30:04.725594163Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1288\",\"timestamp\":\"2022-08-12T13:30:04.725595469Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1289\",\"timestamp\":\"2022-08-12T13:30:04.725605873Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1290\",\"timestamp\":\"2022-08-12T13:30:04.725632579Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1291\",\"timestamp\":\"2022-08-12T13:30:04.725677381Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1292\",\"timestamp\":\"2022-08-12T13:30:04.725678797Z\
",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1293\",\"timestamp\":\"2022-08-12T13:30:04.725689297Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1294\",\"timestamp\":\"2022-08-12T13:30:04.725713233Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1295\",\"timestamp\":\"2022-08-12T13:30:04.725772885Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1296\",\"timestamp\":\"2022-08-12T13:30:04.725774249Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1297\",\"timestamp\":\"2022-08-12T13:30:04.725784116Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1298\",\"timestamp\":\"2022-08-12T13:30:04.725808942Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1299\",\"timestamp\":\"2022-08-12T13:30:04.725856476Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1300\",\"timestamp\":\"2022-08-12T13:30:04.725857842Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1301\",\"timestamp\":\"2022-08-12T13:30:04.72586863Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\
"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1302\",\"timestamp\":\"2022-08-12T13:30:04.725892592Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1303\",\"timestamp\":\"2022-08-12T13:30:04.725949233Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1304\",\"timestamp\":\"2022-08-12T13:30:04.7259519Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1305\",\"timestamp\":\"2022-08-12T13:30:04.72597082Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1306\",\"timestamp\":\"2022-08-12T13:30:04.726001502Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1307\",\"timestamp\":\"2022-08-12T13:30:04.726068619Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1308\",\"timestamp\":\"2022-08-12T13:30:04.726070651Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1309\",\"timestamp\":\"2022-08-12T13:30:04.7260833Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1310\",\"timestamp\":\"2022-08-12T13:30:04.726108317Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"
message\":\"1311\",\"timestamp\":\"2022-08-12T13:30:04.726159542Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1312\",\"timestamp\":\"2022-08-12T13:30:04.726160946Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1313\",\"timestamp\":\"2022-08-12T13:30:04.726174374Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1314\",\"timestamp\":\"2022-08-12T13:30:04.726198185Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1315\",\"timestamp\":\"2022-08-12T13:30:04.726243046Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1316\",\"timestamp\":\"2022-08-12T13:30:04.726244437Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1317\",\"timestamp\":\"2022-08-12T13:30:04.72625513Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1318\",\"timestamp\":\"2022-08-12T13:30:04.726279139Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1319\",\"timestamp\":\"2022-08-12T13:30:04.726328104Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1320\",\"timestamp\":\"2022-08-12T13:30:04.726329551Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_num
ber\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1321\",\"timestamp\":\"2022-08-12T13:30:04.726340628Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1322\",\"timestamp\":\"2022-08-12T13:30:04.726363983Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1325\",\"timestamp\":\"2022-08-12T13:30:04.726426695Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1323\",\"timestamp\":\"2022-08-12T13:30:04.726414779Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1326\",\"timestamp\":\"2022-08-12T13:30:04.726450041Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1324\",\"timestamp\":\"2022-08-12T13:30:04.726416144Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1327\",\"timestamp\":\"2022-08-12T13:30:04.726502988Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1328\",\"timestamp\":\"2022-08-12T13:30:04.726504422Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1329\",\"timestamp\":\"2022-08-12T13:30:04.726515286Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18
-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1330\",\"timestamp\":\"2022-08-12T13:30:04.726538294Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1331\",\"timestamp\":\"2022-08-12T13:30:04.726589573Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1332\",\"timestamp\":\"2022-08-12T13:30:04.726590938Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1333\",\"timestamp\":\"2022-08-12T13:30:04.726604194Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1334\",\"timestamp\":\"2022-08-12T13:30:04.726629202Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1335\",\"timestamp\":\"2022-08-12T13:30:04.726677631Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1337\",\"timestamp\":\"2022-08-12T13:30:04.726691686Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1336\",\"timestamp\":\"2022-08-12T13:30:04.72667899Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1338\",\"timestamp\":\"2022-08-12T13:30:04.7267156Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1339\",\"timestamp\":\"2022-08-12T13:30:04.726766152Z\",\"pod_name\":\"co
mpute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1340\",\"timestamp\":\"2022-08-12T13:30:04.726767545Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1341\",\"timestamp\":\"2022-08-12T13:30:04.726779868Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1342\",\"timestamp\":\"2022-08-12T13:30:04.726803769Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1343\",\"timestamp\":\"2022-08-12T13:30:04.726866848Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1344\",\"timestamp\":\"2022-08-12T13:30:04.726868706Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1345\",\"timestamp\":\"2022-08-12T13:30:04.72688061Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1346\",\"timestamp\":\"2022-08-12T13:30:04.726914339Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1347\",\"timestamp\":\"2022-08-12T13:30:04.726970228Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1348\",\"timestamp\":\"2022-08-12T13:30:04.726971719Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d
4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1349\",\"timestamp\":\"2022-08-12T13:30:04.726986218Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1357\",\"timestamp\":\"2022-08-12T13:30:04.727151561Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1350\",\"timestamp\":\"2022-08-12T13:30:04.727011351Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1358\",\"timestamp\":\"2022-08-12T13:30:04.727185826Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1351\",\"timestamp\":\"2022-08-12T13:30:04.727058523Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1359\",\"timestamp\":\"2022-08-12T13:30:04.727233736Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1352\",\"timestamp\":\"2022-08-12T13:30:04.727059973Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1360\",\"timestamp\":\"2022-08-12T13:30:04.727235228Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1353\",\"timestamp\":\"2022-08-12T13:30:04.727071812Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"13
54\",\"timestamp\":\"2022-08-12T13:30:04.727096108Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1355\",\"timestamp\":\"2022-08-12T13:30:04.727138971Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1356\",\"timestamp\":\"2022-08-12T13:30:04.727140372Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1361\",\"timestamp\":\"2022-08-12T13:30:04.727247023Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1362\",\"timestamp\":\"2022-08-12T13:30:04.727270416Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1363\",\"timestamp\":\"2022-08-12T13:30:04.727314473Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1364\",\"timestamp\":\"2022-08-12T13:30:04.72731596Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1365\",\"timestamp\":\"2022-08-12T13:30:04.727327822Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1366\",\"timestamp\":\"2022-08-12T13:30:04.727350628Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1369\",\"timestamp\":\"2022-08-12T13:30:04.727410743Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\
"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1367\",\"timestamp\":\"2022-08-12T13:30:04.727395569Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1370\",\"timestamp\":\"2022-08-12T13:30:04.727434352Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1368\",\"timestamp\":\"2022-08-12T13:30:04.727397165Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1371\",\"timestamp\":\"2022-08-12T13:30:04.727474503Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1372\",\"timestamp\":\"2022-08-12T13:30:04.727475918Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1373\",\"timestamp\":\"2022-08-12T13:30:04.727488454Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1374\",\"timestamp\":\"2022-08-12T13:30:04.727512885Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1375\",\"timestamp\":\"2022-08-12T13:30:04.727554338Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1376\",\"timestamp\":\"2022-08-12T13:30:04.727555746Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\
",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1377\",\"timestamp\":\"2022-08-12T13:30:04.727566931Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1378\",\"timestamp\":\"2022-08-12T13:30:04.727590862Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1379\",\"timestamp\":\"2022-08-12T13:30:04.727651378Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1380\",\"timestamp\":\"2022-08-12T13:30:04.727653445Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1381\",\"timestamp\":\"2022-08-12T13:30:04.72766453Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1382\",\"timestamp\":\"2022-08-12T13:30:04.727693605Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1383\",\"timestamp\":\"2022-08-12T13:30:04.727757231Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1384\",\"timestamp\":\"2022-08-12T13:30:04.727758725Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1385\",\"timestamp\":\"2022-08-12T13:30:04.727768967Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1386\",\"timestamp\":\"2022-08-12T13:30:04.727792182Z\",\"pod_name\":\"compute-infra-
test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1387\",\"timestamp\":\"2022-08-12T13:30:04.727855435Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1388\",\"timestamp\":\"2022-08-12T13:30:04.727856854Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1389\",\"timestamp\":\"2022-08-12T13:30:04.72786886Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1390\",\"timestamp\":\"2022-08-12T13:30:04.7278914Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1391\",\"timestamp\":\"2022-08-12T13:30:04.727956863Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1392\",\"timestamp\":\"2022-08-12T13:30:04.72795815Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1393\",\"timestamp\":\"2022-08-12T13:30:04.727971965Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1394\",\"timestamp\":\"2022-08-12T13:30:04.728004329Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1395\",\"timestamp\":\"2022-08-12T13:30:04.728119497Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\
"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1401\",\"timestamp\":\"2022-08-12T13:30:04.728394825Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1396\",\"timestamp\":\"2022-08-12T13:30:04.728121353Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1402\",\"timestamp\":\"2022-08-12T13:30:04.728444298Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1403\",\"timestamp\":\"2022-08-12T13:30:04.728538214Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1397\",\"timestamp\":\"2022-08-12T13:30:04.728134505Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1404\",\"timestamp\":\"2022-08-12T13:30:04.728539818Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1398\",\"timestamp\":\"2022-08-12T13:30:04.728185821Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1399\",\"timestamp\":\"2022-08-12T13:30:04.72837971Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1400\",\"timestamp\":\"2022-08-12T13:30:04.728381238Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1405\",\"timestamp
\":\"2022-08-12T13:30:04.72855015Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1406\",\"timestamp\":\"2022-08-12T13:30:04.728581422Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1407\",\"timestamp\":\"2022-08-12T13:30:04.728676286Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1408\",\"timestamp\":\"2022-08-12T13:30:04.72867775Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1409\",\"timestamp\":\"2022-08-12T13:30:04.72869072Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1410\",\"timestamp\":\"2022-08-12T13:30:04.728714949Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1411\",\"timestamp\":\"2022-08-12T13:30:04.728826928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1412\",\"timestamp\":\"2022-08-12T13:30:04.728828315Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1413\",\"timestamp\":\"2022-08-12T13:30:04.728839654Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1414\",\"timestamp\":\"2022-08-12T13:30:04.728873478Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infra
stage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1415\",\"timestamp\":\"2022-08-12T13:30:04.728953805Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1416\",\"timestamp\":\"2022-08-12T13:30:04.728955354Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1417\",\"timestamp\":\"2022-08-12T13:30:04.728966312Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1418\",\"timestamp\":\"2022-08-12T13:30:04.72900211Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1419\",\"timestamp\":\"2022-08-12T13:30:04.729084668Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1421\",\"timestamp\":\"2022-08-12T13:30:04.729100937Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1420\",\"timestamp\":\"2022-08-12T13:30:04.729086148Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1422\",\"timestamp\":\"2022-08-12T13:30:04.72912988Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1423\",\"timestamp\":\"2022-08-12T13:30:04.729222323Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"tes
t_output.foo\",\"level\":\"debug\",\"message\":\"1424\",\"timestamp\":\"2022-08-12T13:30:04.729223759Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1425\",\"timestamp\":\"2022-08-12T13:30:04.72923462Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1426\",\"timestamp\":\"2022-08-12T13:30:04.729260529Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1427\",\"timestamp\":\"2022-08-12T13:30:04.729324011Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1428\",\"timestamp\":\"2022-08-12T13:30:04.729327645Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1429\",\"timestamp\":\"2022-08-12T13:30:04.729357329Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1430\",\"timestamp\":\"2022-08-12T13:30:04.729400301Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1433\",\"timestamp\":\"2022-08-12T13:30:04.729499517Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1431\",\"timestamp\":\"2022-08-12T13:30:04.729480967Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1434\",\"timestamp\":\"2022-08-12T13:30:04.729542358Z\",\"pod_name\":\"compute-infra-test-service.test--o
utput.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1432\",\"timestamp\":\"2022-08-12T13:30:04.729483589Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1435\",\"timestamp\":\"2022-08-12T13:30:04.729591928Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1436\",\"timestamp\":\"2022-08-12T13:30:04.72959343Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1437\",\"timestamp\":\"2022-08-12T13:30:04.729610379Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1438\",\"timestamp\":\"2022-08-12T13:30:04.729637655Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1441\",\"timestamp\":\"2022-08-12T13:30:04.729739369Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1439\",\"timestamp\":\"2022-08-12T13:30:04.729715463Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1445\",\"timestamp\":\"2022-08-12T13:30:04.729873393Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1440\",\"timestamp\":\"2022-08-12T13:30:04.729716956Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\"
:\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1446\",\"timestamp\":\"2022-08-12T13:30:04.729911713Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1447\",\"timestamp\":\"2022-08-12T13:30:04.730000177Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1448\",\"timestamp\":\"2022-08-12T13:30:04.730002293Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1442\",\"timestamp\":\"2022-08-12T13:30:04.729771376Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1443\",\"timestamp\":\"2022-08-12T13:30:04.729850571Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1444\",\"timestamp\":\"2022-08-12T13:30:04.729853238Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1449\",\"timestamp\":\"2022-08-12T13:30:04.730014324Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1450\",\"timestamp\":\"2022-08-12T13:30:04.730040765Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1451\",\"timestamp\":\"2022-08-12T13:30:04.730101644Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1452\",\"timestamp\":\"2022-08-12T
13:30:04.730104439Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1453\",\"timestamp\":\"2022-08-12T13:30:04.730127053Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1454\",\"timestamp\":\"2022-08-12T13:30:04.730164471Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1455\",\"timestamp\":\"2022-08-12T13:30:04.730231814Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1456\",\"timestamp\":\"2022-08-12T13:30:04.730233141Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1461\",\"timestamp\":\"2022-08-12T13:30:04.730397999Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1457\",\"timestamp\":\"2022-08-12T13:30:04.730245355Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1462\",\"timestamp\":\"2022-08-12T13:30:04.730423389Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1458\",\"timestamp\":\"2022-08-12T13:30:04.730280749Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1463\",\"timestamp\":\"2022-08-12T13:30:04.730515122Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"com
ponent\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1459\",\"timestamp\":\"2022-08-12T13:30:04.730385717Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1464\",\"timestamp\":\"2022-08-12T13:30:04.730516913Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1465\",\"timestamp\":\"2022-08-12T13:30:04.730529857Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1466\",\"timestamp\":\"2022-08-12T13:30:04.730562975Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1467\",\"timestamp\":\"2022-08-12T13:30:04.730634337Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1468\",\"timestamp\":\"2022-08-12T13:30:04.730635776Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1460\",\"timestamp\":\"2022-08-12T13:30:04.730387184Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1469\",\"timestamp\":\"2022-08-12T13:30:04.730648601Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1470\",\"timestamp\":\"2022-08-12T13:30:04.730673802Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.fo
o\",\"level\":\"debug\",\"message\":\"1471\",\"timestamp\":\"2022-08-12T13:30:04.730745535Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1472\",\"timestamp\":\"2022-08-12T13:30:04.73074692Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1473\",\"timestamp\":\"2022-08-12T13:30:04.730757894Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1474\",\"timestamp\":\"2022-08-12T13:30:04.730805758Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1475\",\"timestamp\":\"2022-08-12T13:30:04.730872523Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1476\",\"timestamp\":\"2022-08-12T13:30:04.73087409Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1477\",\"timestamp\":\"2022-08-12T13:30:04.730896539Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1478\",\"timestamp\":\"2022-08-12T13:30:04.730943956Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1479\",\"timestamp\":\"2022-08-12T13:30:04.731014997Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1480\",\"timestamp\":\"2022-08-12T13:30:04.731017251Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.fo
o.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1481\",\"timestamp\":\"2022-08-12T13:30:04.73107221Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1482\",\"timestamp\":\"2022-08-12T13:30:04.731116021Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1483\",\"timestamp\":\"2022-08-12T13:30:04.731185458Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1484\",\"timestamp\":\"2022-08-12T13:30:04.731187002Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1485\",\"timestamp\":\"2022-08-12T13:30:04.731197755Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1486\",\"timestamp\":\"2022-08-12T13:30:04.731238979Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1493\",\"timestamp\":\"2022-08-12T13:30:04.731428694Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1487\",\"timestamp\":\"2022-08-12T13:30:04.73131753Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1494\",\"timestamp\":\"2022-08-12T13:30:04.731468104Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"h
ostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1488\",\"timestamp\":\"2022-08-12T13:30:04.73131909Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1495\",\"timestamp\":\"2022-08-12T13:30:04.731529004Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1496\",\"timestamp\":\"2022-08-12T13:30:04.731530374Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1489\",\"timestamp\":\"2022-08-12T13:30:04.731329445Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1490\",\"timestamp\":\"2022-08-12T13:30:04.731354836Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1491\",\"timestamp\":\"2022-08-12T13:30:04.731416234Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1492\",\"timestamp\":\"2022-08-12T13:30:04.731417833Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1497\",\"timestamp\":\"2022-08-12T13:30:04.731540925Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1498\",\"timestamp\":\"2022-08-12T13:30:04.731566333Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1499\",\"timestamp\":\"2022-08-12T13:30:04.73162
2626Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n{\"cluster\":\"infrastage\",\"component\":\"stdout\",\"container_id\":\"d4a92a653f5f\",\"container_name\":\"main\",\"hostname\":\"10-40-22-18-uswest1cdevc\",\"instance\":\"test_output.foo\",\"level\":\"debug\",\"message\":\"1500\",\"timestamp\":\"2022-08-12T13:30:04.731687734Z\",\"pod_name\":\"compute-infra-test-service.test--output.228.foo.jawpvt\",\"tron_run_number\":228}\n"
  },
  {
    "path": "tests/utils/state_test.py",
    "content": "from testifycompat import assert_equal\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tron.utils import state\n\n\nclass TestStateMachineSimple(TestCase):\n    @setup\n    def build_machine(self):\n        self.state_green = \"green\"\n        self.state_red = \"red\"\n\n        self.machine = state.Machine(self.state_red, red=dict(true=\"green\"))\n\n    def test_transition_many(self):\n        # Stay the same\n        assert not self.machine.transition(\"missing\")\n        assert_equal(self.machine.state, self.state_red)\n\n        # Traffic has arrived\n        self.machine.transition(\"true\")\n        assert_equal(self.machine.state, self.state_green)\n\n        # Still traffic\n        self.machine.transition(\"true\")\n        assert_equal(self.machine.state, self.state_green)\n\n    def test_check(self):\n        assert not self.machine.check(False)\n        assert_equal(self.machine.check(\"true\"), self.state_green)\n        assert_equal(self.machine.state, self.state_red)\n\n\nclass TestStateMachineMultiOption(TestCase):\n    @setup\n    def build_machine(self):\n        # Generalized rules of a conversation\n        # If they are talking, we should listen\n        # If they are listening, we should talk\n        # If they are ignoring us we should get angry\n        self.machine = state.Machine(\n            \"listening\",\n            listening=dict(listening=\"talking\"),\n            talking=dict(ignoring=\"angry\", talking=\"listening\"),\n        )\n\n    def test_transition_many(self):\n        # Talking, we should listen\n        self.machine.transition(\"talking\")\n        assert_equal(self.machine.state, \"listening\")\n\n        # Now be polite\n        self.machine.transition(\"listening\")\n        assert_equal(self.machine.state, \"talking\")\n\n        self.machine.transition(\"listening\")\n        assert_equal(self.machine.state, \"talking\")\n\n        # But they are tired of us...\n        self.machine.transition(\"ignoring\")\n        assert_equal(self.machine.state, \"angry\")\n\n    def test_transition_set(self):\n        expected = {\"listening\", \"talking\", \"ignoring\"}\n        assert_equal(set(self.machine.transition_names), expected)\n"
  },
  {
    "path": "tests/utils/timeutils_test.py",
    "content": "import datetime\n\nimport pytz\n\nfrom testifycompat import assert_equal\nfrom testifycompat import setup\nfrom testifycompat import TestCase\nfrom tests import testingutils\nfrom tron.utils import timeutils\nfrom tron.utils.timeutils import DateArithmetic\nfrom tron.utils.timeutils import duration\nfrom tron.utils.timeutils import macro_timedelta\n\n\nclass TestTimeDelta(TestCase):\n    @setup\n    def make_dates(self):\n        self.start_nonleap = datetime.datetime(year=2011, month=1, day=1)\n        self.end_nonleap = datetime.datetime(year=2011, month=12, day=31)\n        self.begin_feb_nonleap = datetime.datetime(year=2011, month=2, day=1)\n        self.start_leap = datetime.datetime(year=2012, month=1, day=1)\n        self.end_leap = datetime.datetime(year=2012, month=12, day=31)\n        self.begin_feb_leap = datetime.datetime(year=2012, month=2, day=1)\n\n    def check_delta(self, start, target, years=0, months=0, days=0):\n        assert_equal(\n            start\n            + macro_timedelta(\n                start,\n                years=years,\n                months=months,\n                days=days,\n            ),\n            target,\n        )\n\n    def test_days(self):\n        self.check_delta(\n            self.start_nonleap,\n            datetime.datetime(year=2011, month=1, day=11),\n            days=10,\n        )\n        self.check_delta(\n            self.end_nonleap,\n            datetime.datetime(year=2012, month=1, day=10),\n            days=10,\n        )\n        self.check_delta(\n            self.start_leap,\n            datetime.datetime(year=2012, month=1, day=11),\n            days=10,\n        )\n        self.check_delta(\n            self.end_leap,\n            datetime.datetime(year=2013, month=1, day=10),\n            days=10,\n        )\n        self.check_delta(\n            self.begin_feb_nonleap,\n            datetime.datetime(year=2011, month=3, day=1),\n            days=28,\n        )\n        self.check_delta(\n            self.begin_feb_leap,\n            datetime.datetime(year=2012, month=3, day=1),\n            days=29,\n        )\n\n    def test_months(self):\n        self.check_delta(\n            self.start_nonleap,\n            datetime.datetime(year=2011, month=11, day=1),\n            months=10,\n        )\n        self.check_delta(\n            self.end_nonleap,\n            datetime.datetime(year=2012, month=10, day=31),\n            months=10,\n        )\n        self.check_delta(\n            self.start_leap,\n            datetime.datetime(year=2012, month=11, day=1),\n            months=10,\n        )\n        self.check_delta(\n            self.end_leap,\n            datetime.datetime(year=2013, month=10, day=31),\n            months=10,\n        )\n        self.check_delta(\n            self.begin_feb_nonleap,\n            datetime.datetime(year=2011, month=12, day=1),\n            months=10,\n        )\n        self.check_delta(\n            self.begin_feb_leap,\n            datetime.datetime(year=2012, month=12, day=1),\n            months=10,\n        )\n\n    def test_years(self):\n        self.check_delta(\n            self.start_nonleap,\n            datetime.datetime(year=2015, month=1, day=1),\n            years=4,\n        )\n        self.check_delta(\n            self.end_nonleap,\n            datetime.datetime(year=2015, month=12, day=31),\n            years=4,\n        )\n        self.check_delta(\n            self.start_leap,\n            datetime.datetime(year=2016, month=1, day=1),\n           
 years=4,\n        )\n        self.check_delta(\n            self.end_leap,\n            datetime.datetime(year=2016, month=12, day=31),\n            years=4,\n        )\n        self.check_delta(\n            self.begin_feb_nonleap,\n            datetime.datetime(year=2015, month=2, day=1),\n            years=4,\n        )\n        self.check_delta(\n            self.begin_feb_leap,\n            datetime.datetime(year=2016, month=2, day=1),\n            years=4,\n        )\n\n    def test_start_date_with_timezone(self):\n        pacific_tz = pytz.timezone(\"US/Pacific\")\n        start_date = pacific_tz.localize(\n            datetime.datetime(year=2018, month=1, day=3, hour=13),\n        )\n        expected_end = pacific_tz.localize(\n            datetime.datetime(year=2018, month=1, day=1, hour=13),\n        )\n        self.check_delta(\n            start_date,\n            expected_end,\n            days=-2,\n        )\n\n\nclass TestDuration(TestCase):\n    @setup\n    def setup_times(self):\n        self.earliest = datetime.datetime(2012, 2, 1, 3, 0, 0)\n        self.latest = datetime.datetime(2012, 2, 1, 3, 20, 0)\n\n    def test_duration(self):\n        assert_equal(\n            duration(self.earliest, self.latest),\n            datetime.timedelta(0, 60 * 20),\n        )\n\n    def test_duration_no_end(self):\n        delta = duration(self.earliest)\n        assert delta.days >= 40\n\n    def test_duration_no_start(self):\n        assert_equal(duration(None), None)\n\n\nclass TestDeltaTotalSeconds(TestCase):\n    def test(self):\n        expected = 86702.004002999995\n        delta = datetime.timedelta(*range(1, 6))\n        delta_seconds = timeutils.delta_total_seconds(delta)\n        assert_equal(delta_seconds, expected)\n\n\nclass DateArithmeticTestCase(testingutils.MockTimeTestCase):\n\n    # Set a date with days less then 28, otherwise some tests will fail\n    # when run on days > 28.\n    now = datetime.datetime(2012, 3, 20)\n\n    def _cmp_date(self, item, dt):\n        assert_equal(DateArithmetic.parse(item), dt.strftime(\"%Y-%m-%d\"))\n\n    def _cmp_day(self, item, dt):\n        assert_equal(DateArithmetic.parse(item), dt.strftime(\"%d\"))\n\n    def _cmp_month(self, item, dt):\n        assert_equal(DateArithmetic.parse(item), dt.strftime(\"%m\"))\n\n    def _cmp_year(self, item, dt):\n        assert_equal(DateArithmetic.parse(item), dt.strftime(\"%Y\"))\n\n    def test_shortdate(self):\n        self._cmp_date(\"shortdate\", self.now)\n\n    def test_shortdate_plus(self):\n        for i in range(50):\n            dt = self.now + datetime.timedelta(days=i)\n            self._cmp_date(\"shortdate+%s\" % i, dt)\n\n    def test_shortdate_minus(self):\n        for i in range(50):\n            dt = self.now - datetime.timedelta(days=i)\n            self._cmp_date(\"shortdate-%s\" % i, dt)\n\n    def test_day(self):\n        self._cmp_day(\"day\", self.now)\n\n    def test_day_minus(self):\n        for i in range(50):\n            dt = self.now - datetime.timedelta(days=i)\n            self._cmp_day(\"day-%s\" % i, dt)\n\n    def test_day_plus(self):\n        for i in range(50):\n            dt = self.now + datetime.timedelta(days=i)\n            self._cmp_day(\"day+%s\" % i, dt)\n\n    def test_month(self):\n        self._cmp_month(\"month\", self.now)\n\n    def test_month_plus(self):\n        for i in range(50):\n            dt = self.now + timeutils.macro_timedelta(self.now, months=i)\n            self._cmp_month(\"month+%s\" % i, dt)\n\n    def test_month_minus(self):\n   
     for i in range(50):\n            dt = self.now - timeutils.macro_timedelta(self.now, months=i)\n            self._cmp_month(\"month-%s\" % i, dt)\n\n    def test_year(self):\n        self._cmp_year(\"year\", self.now)\n\n    def test_year_plus(self):\n        for i in range(50):\n            dt = self.now + timeutils.macro_timedelta(self.now, years=i)\n            self._cmp_year(\"year+%s\" % i, dt)\n\n    def test_year_minus(self):\n        for i in range(50):\n            dt = self.now - timeutils.macro_timedelta(self.now, years=i)\n            self._cmp_year(\"year-%s\" % i, dt)\n\n    def test_unixtime(self):\n        timestamp = int(self.now.timestamp())\n        assert_equal(DateArithmetic.parse(\"unixtime\"), timestamp)\n\n    def test_unixtime_plus(self):\n        timestamp = int(self.now.timestamp()) + 100\n        assert_equal(DateArithmetic.parse(\"unixtime+100\"), timestamp)\n\n    def test_unixtime_minus(self):\n        timestamp = int(self.now.timestamp()) - 99\n        assert_equal(DateArithmetic.parse(\"unixtime-99\"), timestamp)\n\n    def test_daynumber(self):\n        daynum = self.now.toordinal()\n        assert_equal(DateArithmetic.parse(\"daynumber\"), daynum)\n\n    def test_daynumber_plus(self):\n        daynum = self.now.toordinal() + 1\n        assert_equal(DateArithmetic.parse(\"daynumber+1\"), daynum)\n\n    def test_daynumber_minus(self):\n        daynum = self.now.toordinal() - 1\n        assert_equal(DateArithmetic.parse(\"daynumber-1\"), daynum)\n\n    def test_hour(self):\n        hour = self.now.strftime(\"%H\")\n        assert_equal(DateArithmetic.parse(\"hour\"), hour)\n\n    def test_hour_plus(self):\n        hour = \"%02d\" % ((int(self.now.strftime(\"%H\")) + 1) % 24)\n        assert_equal(DateArithmetic.parse(\"hour+1\"), hour)\n\n    def test_hour_minus(self):\n        hour = \"%02d\" % ((int(self.now.strftime(\"%H\")) - 1) % 24)\n        assert_equal(DateArithmetic.parse(\"hour-1\"), hour)\n\n    def test_bad_date_format(self):\n        assert DateArithmetic.parse(\"~~\") is None\n\n    def test_round_day(self):\n        start = datetime.datetime(2019, 3, 30)\n        delta = timeutils.macro_timedelta(start, months=-1)\n        assert (start + delta).day == 28\n\n\nclass DateArithmeticYMDHTest(TestCase):\n    def test_ym_plus(self):\n        def parse(*ym):\n            return DateArithmetic.parse(\"ym+1\", datetime.datetime(*ym))\n\n        assert_equal(parse(2018, 1, 1), \"2018-02\")\n        assert_equal(parse(2017, 12, 1), \"2018-01\")\n\n    def test_ym_minus(self):\n        def parse(*ym):\n            return DateArithmetic.parse(\"ym-1\", datetime.datetime(*ym))\n\n        assert_equal(parse(2018, 1, 1), \"2017-12\")\n        assert_equal(parse(2018, 2, 1), \"2018-01\")\n\n    def test_ymd_plus(self):\n        def parse(*ymd):\n            return DateArithmetic.parse(\"ymd+1\", datetime.datetime(*ymd))\n\n        assert_equal(parse(2018, 1, 1), \"2018-01-02\")\n        assert_equal(parse(2018, 1, 31), \"2018-02-01\")\n\n    def test_ymd_minus(self):\n        def parse(*ymd):\n            return DateArithmetic.parse(\"ymd-1\", datetime.datetime(*ymd))\n\n        assert_equal(parse(2018, 1, 1), \"2017-12-31\")\n        assert_equal(parse(2018, 1, 2), \"2018-01-01\")\n\n    def test_ymdh_plus(self):\n        def parse(*ymdh):\n            return DateArithmetic.parse(\"ymdh+1\", datetime.datetime(*ymdh))\n\n        assert_equal(parse(2018, 1, 1, 1), \"2018-01-01T02\")\n        assert_equal(parse(2018, 1, 31, 23), \"2018-02-01T00\")\n\n    
def test_ymdh_minus(self):\n        def parse(*ymdh):\n            return DateArithmetic.parse(\"ymdh-1\", datetime.datetime(*ymdh))\n\n        assert_equal(parse(2018, 1, 1, 1), \"2018-01-01T00\")\n        assert_equal(parse(2018, 1, 1, 0), \"2017-12-31T23\")\n\n    def test_ymdhm_plus(self):\n        def parse(*ymdhm):\n            return DateArithmetic.parse(\"ymdhm+1\", datetime.datetime(*ymdhm))\n\n        assert_equal(parse(2018, 1, 1, 1, 1), \"2018-01-01T01:02\")\n        assert_equal(parse(2018, 1, 31, 23, 59), \"2018-02-01T00:00\")\n\n    def test_ymdhm_minus(self):\n        def parse(*ymdhm):\n            return DateArithmetic.parse(\"ymdhm-1\", datetime.datetime(*ymdhm))\n\n        assert_equal(parse(2018, 1, 1, 1, 2), \"2018-01-01T01:01\")\n        assert_equal(parse(2018, 1, 1, 0, 0), \"2017-12-31T23:59\")\n\n    def test_ym_minus_round(self):\n        dt = datetime.datetime(2019, 3, 30)\n        s = timeutils.DateArithmetic.parse(\"ym-1\", dt=dt)\n        assert s == \"2019-02\"\n\n    def test_ymd_plus_whitespace(self):\n        def parse(*ymd):\n            return DateArithmetic.parse(\"ymd + 1\", datetime.datetime(*ymd))\n\n        assert_equal(parse(2018, 1, 1), \"2018-01-02\")\n        assert_equal(parse(2018, 1, 31), \"2018-02-01\")\n\n    def test_ymd_minus_whitespace(self):\n        def parse(*ymd):\n            return DateArithmetic.parse(\"ymd- 1\", datetime.datetime(*ymd))\n\n        assert_equal(parse(2018, 1, 1), \"2017-12-31\")\n        assert_equal(parse(2018, 1, 2), \"2018-01-01\")\n\n\nclass TestDateArithmeticWithTimezone(DateArithmeticTestCase):\n\n    now = pytz.timezone(\"US/Pacific\").localize(datetime.datetime(2012, 3, 20))\n"
  },
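  {
    "path": "tests/utils/timeutils_usage_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of Tron: renders a few DateArithmetic macros\nagainst a fixed datetime, mirroring the behaviour exercised in\ntests/utils/timeutils_test.py. The file name, the reference date, and the\nprinted patterns are arbitrary examples chosen for the demo, not values used\nanywhere else in the codebase.\n\"\"\"\nimport datetime\n\nfrom tron.utils.timeutils import DateArithmetic\n\n\nif __name__ == \"__main__\":\n    # Fixed reference date so the output is deterministic.\n    now = datetime.datetime(2012, 3, 20)\n\n    # Per the tests above: \"shortdate\"/\"ymd\" render %Y-%m-%d, \"ym\" renders %Y-%m,\n    # \"daynumber\" is an ordinal day count, and \"unixtime\" is an integer timestamp.\n    for pattern in (\"shortdate\", \"shortdate-1\", \"ym+1\", \"ymd-1\", \"daynumber\", \"unixtime+100\"):\n        print(f\"{pattern:<12} -> {DateArithmetic.parse(pattern, now)}\")\n"
  },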
  {
    "path": "tests/utils/trontimespec_test.py",
    "content": "import datetime\n\nimport pytz\n\nfrom testifycompat import assert_equal\nfrom testifycompat import run\nfrom testifycompat import TestCase\nfrom tron.utils import trontimespec\n\n\nclass TestGetTime(TestCase):\n    def test_get_time(self):\n        assert_equal(datetime.time(4, 15), trontimespec.get_time(\"4:15\"))\n        assert_equal(datetime.time(22, 59), trontimespec.get_time(\"22:59\"))\n\n    def test_get_time_invalid_time(self):\n        assert not trontimespec.get_time(\"25:00\")\n        assert not trontimespec.get_time(\"22:61\")\n\n\nclass TestTimeSpecification(TestCase):\n    def _cmp(self, start_time, expected):\n        start_time = datetime.datetime(*start_time)\n        expected = datetime.datetime(*expected)\n        assert_equal(self.time_spec.get_match(start_time), expected)\n\n    def test_get_match_months(self):\n        self.time_spec = trontimespec.TimeSpecification(months=[1, 5])\n        self._cmp((2012, 3, 14), (2012, 5, 1))\n        self._cmp((2012, 5, 22), (2012, 5, 23))\n        self._cmp((2012, 12, 22), (2013, 1, 1))\n\n    def test_get_match_monthdays(self):\n        self.time_spec = trontimespec.TimeSpecification(\n            monthdays=[10, 3, 3, 10],\n        )\n        self._cmp((2012, 3, 14), (2012, 4, 3))\n        self._cmp((2012, 3, 1), (2012, 3, 3))\n\n    def test_get_match_weekdays(self):\n        self.time_spec = trontimespec.TimeSpecification(weekdays=[2, 3])\n        self._cmp((2012, 3, 14), (2012, 3, 20))\n        self._cmp((2012, 3, 20), (2012, 3, 21))\n\n    def test_next_month_generator(self):\n        time_spec = trontimespec.TimeSpecification(months=[2, 5])\n        gen = time_spec.next_month(datetime.datetime(2012, 3, 14))\n        expected = [(5, 2012), (2, 2013), (5, 2013), (2, 2014)]\n        assert_equal([next(gen) for _ in range(4)], expected)\n\n    def test_next_day_monthdays(self):\n        time_spec = trontimespec.TimeSpecification(monthdays=[5, 10, 15])\n        gen = time_spec.next_day(14, 2012, 3)\n        assert_equal(list(gen), [15])\n\n        gen = time_spec.next_day(1, 2012, 3)\n        assert_equal(list(gen), [5, 10, 15])\n\n    def test_next_day_monthdays_with_last(self):\n        time_spec = trontimespec.TimeSpecification(monthdays=[5, \"LAST\"])\n        gen = time_spec.next_day(14, 2012, 3)\n        assert_equal(list(gen), [31])\n\n    def test_next_day_weekdays(self):\n        time_spec = trontimespec.TimeSpecification(weekdays=[1, 5])\n        gen = time_spec.next_day(14, 2012, 3)\n        assert_equal(list(gen), [16, 19, 23, 26, 30])\n\n        gen = time_spec.next_day(1, 2012, 3)\n        assert_equal(list(gen), [2, 5, 9, 12, 16, 19, 23, 26, 30])\n\n    def test_next_day_weekdays_with_ordinals(self):\n        time_spec = trontimespec.TimeSpecification(\n            weekdays=[1, 5],\n            ordinals=[1, 3],\n        )\n        gen = time_spec.next_day(14, 2012, 3)\n        assert_equal(list(gen), [16, 19])\n\n        gen = time_spec.next_day(1, 2012, 3)\n        assert_equal(list(gen), [2, 5, 16, 19])\n\n    def test_next_time_timestr(self):\n        time_spec = trontimespec.TimeSpecification(timestr=\"13:13\")\n        start_date = datetime.datetime(2012, 3, 14, 0, 15)\n        time = time_spec.next_time(start_date, True)\n        assert_equal(time, datetime.time(13, 13))\n\n        start_date = datetime.datetime(2012, 3, 14, 13, 13)\n        assert time_spec.next_time(start_date, True) is None\n        time = time_spec.next_time(start_date, False)\n        assert_equal(time, 
datetime.time(13, 13))\n\n    def test_next_time_hours(self):\n        time_spec = trontimespec.TimeSpecification(hours=[4, 10])\n        start_date = datetime.datetime(2012, 3, 14, 0, 15)\n        time = time_spec.next_time(start_date, True)\n        assert_equal(time, datetime.time(4, 0))\n\n        start_date = datetime.datetime(2012, 3, 14, 13, 13)\n        assert time_spec.next_time(start_date, True) is None\n        time = time_spec.next_time(start_date, False)\n        assert_equal(time, datetime.time(4, 0))\n\n    def test_next_time_minutes(self):\n        time_spec = trontimespec.TimeSpecification(\n            minutes=[30, 20, 30],\n            seconds=[0],\n        )\n        start_date = datetime.datetime(2012, 3, 14, 0, 25)\n        time = time_spec.next_time(start_date, True)\n        assert_equal(time, datetime.time(0, 30))\n\n        start_date = datetime.datetime(2012, 3, 14, 23, 30)\n        assert time_spec.next_time(start_date, True) is None\n        time = time_spec.next_time(start_date, False)\n        assert_equal(time, datetime.time(0, 20))\n\n    def test_next_time_hours_and_minutes_and_seconds(self):\n        time_spec = trontimespec.TimeSpecification(\n            minutes=[20, 30],\n            hours=[1, 5],\n            seconds=[4, 5],\n        )\n        start_date = datetime.datetime(2012, 3, 14, 1, 25)\n        time = time_spec.next_time(start_date, True)\n        assert_equal(time, datetime.time(1, 30, 4))\n\n        start_date = datetime.datetime(2012, 3, 14, 5, 30, 6)\n        assert time_spec.next_time(start_date, True) is None\n        time = time_spec.next_time(start_date, False)\n        assert_equal(time, datetime.time(1, 20, 4))\n\n    def test_get_match_dst_spring_forward(self):\n        tz = pytz.timezone(\"US/Pacific\")\n        time_spec = trontimespec.TimeSpecification(\n            hours=[0, 1, 2, 3, 4],\n            minutes=[0],\n            seconds=[0],\n            timezone=\"US/Pacific\",\n        )\n        start = trontimespec.naive_as_timezone(datetime.datetime(2020, 3, 8, 1), tz)\n        # Springing forward, the next hour after 1AM should be 3AM\n        next_time = time_spec.get_match(start)\n        assert next_time.hour == 3\n\n    def test_get_match_dst_fall_back(self):\n        tz = pytz.timezone(\"US/Pacific\")\n        time_spec = trontimespec.TimeSpecification(\n            hours=[0, 1, 2, 3, 4],\n            minutes=[0],\n            seconds=[0],\n            timezone=\"US/Pacific\",\n        )\n        start = trontimespec.naive_as_timezone(datetime.datetime(2020, 11, 1, 1), tz)\n        # Falling back, the next hour after 1AM is 1AM again. But we only run on the first 1AM\n        # Next run time should be 2AM\n        next_time = time_spec.get_match(start)\n        assert next_time.hour == 2\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "tools/action_dag_diagram.py",
    "content": "\"\"\"\n Create a graphviz diagram from a Tron Job configuration.\n\n Usage:\n    python tools/action_dag_diagram.py -c <config> -n <job_name>\n\n This will create a file named <job_name>.dot\n You can create a diagram using:\n    dot -Tpng -o <job_name>.png <job_name>.dot\n\"\"\"\nimport optparse\n\nfrom tron.config import manager\nfrom tron.config import schema\n\n\ndef parse_args():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-c\", \"--config\", help=\"Tron configuration path.\")\n    parser.add_option(\n        \"-n\",\n        \"--name\",\n        help=\"Job name to graph. Also used as output filename.\",\n    )\n    parser.add_option(\n        \"--namespace\",\n        default=schema.MASTER_NAMESPACE,\n        help=\"Configuration namespace which contains the job.\",\n    )\n    opts, _ = parser.parse_args()\n\n    if not opts.config:\n        parser.error(\"A config filename is required.\")\n    if not opts.name:\n        parser.error(\"A Job name is required.\")\n    return opts\n\n\ndef build_diagram(job_config):\n    edges, nodes = [], []\n\n    for action in job_config.actions.values():\n        shape = \"invhouse\" if not action.requires else \"rect\"\n        nodes.append(f\"node [shape = {shape}]; {action.name}\")\n        for required_action in action.requires:\n            edges.append(f\"{required_action} -> {action.name}\")\n\n    return \"digraph g{{{}\\n{}}}\".format(\"\\n\".join(nodes), \"\\n\".join(edges))\n\n\ndef get_job(config_container, namespace, job_name):\n    if namespace not in config_container:\n        raise ValueError(\"Unknown namespace: %s\" % namespace)\n\n    config = config_container[opts.namespace]\n    if job_name not in config.jobs:\n        raise ValueError(\"Could not find Job %s\" % job_name)\n\n    return config.jobs[job_name]\n\n\nif __name__ == \"__main__\":\n    opts = parse_args()\n\n    config_manager = manager.ConfigManager(opts.config)\n    container = config_manager.load()\n    job_config = get_job(container, opts.namespace, opts.name)\n    graph = build_diagram(job_config)\n\n    with open(\"%s.dot\" % opts.name, \"w\") as fh:\n        fh.write(graph)\n"
  },
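  {
    "path": "tools/compress_chunking_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of Tron: demonstrates the gzip-then-partition\nscheme that the adjacent tools/compress_json.py applies to DynamoDB state,\nwithout making any AWS calls. A JSON payload is compressed, sliced into\nfixed-size chunks (compress_json.py uses OBJECT_SIZE bytes per partition),\nthen reassembled and decompressed to verify the round trip. CHUNK_SIZE and the\nsample payload are arbitrary values chosen for the demo.\n\"\"\"\nimport gzip\nimport json\nimport math\n\n# Deliberately tiny so the sample payload spans several chunks.\nCHUNK_SIZE = 64\n\n\ndef partition(payload: str, chunk_size: int = CHUNK_SIZE) -> list[bytes]:\n    \"\"\"Compress a JSON string and slice the result into chunk_size-byte pieces.\"\"\"\n    compressed = gzip.compress(payload.encode(\"utf-8\"))\n    num_chunks = math.ceil(len(compressed) / chunk_size)\n    return [compressed[i * chunk_size : (i + 1) * chunk_size] for i in range(num_chunks)]\n\n\ndef reassemble(chunks: list[bytes]) -> str:\n    \"\"\"Concatenate the chunks and gunzip back to the original JSON string.\"\"\"\n    return gzip.decompress(b\"\".join(chunks)).decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n    original = json.dumps({\"job_name\": \"example\", \"runs\": list(range(200))})\n    chunks = partition(original)\n    print(f\"{len(original)} bytes of JSON -> {sum(len(c) for c in chunks)} compressed bytes in {len(chunks)} chunks\")\n    assert json.loads(reassemble(chunks)) == json.loads(original)\n    print(\"round trip OK\")\n"
  },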
  {
    "path": "tools/compress_json.py",
    "content": "import argparse\nimport gzip\nimport math\nimport os\nimport sys\nimport threading\nimport time\nfrom concurrent.futures import as_completed\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport boto3\nfrom boto3.resources.base import ServiceResource\n\nfrom tron.core.job import Job\nfrom tron.core.jobrun import JobRun\nfrom tron.serialize import runstate\n\n# Max DynamoDB object size is 400KB. Since we save two copies of the object (pickled and JSON),\n# we need to consider this max size applies to the entire item, so we use a max size of 200KB\n# for each version.\nOBJECT_SIZE = 150_000\n\n# DynamoDB TransactWriteItems supports up to 100 items per call. We use 25\n# which comfortably fits under the 4MB request size limit (25 × 150KB = 3.75MB\n# worst case). This is sufficient to handle our largest items (~20 partitions\n# uncompressed) in a single atomic transaction after compression.\nMAX_TRANSACT_WRITE_ITEMS = 25\n\n\ndef get_dynamodb_table(\n    aws_profile: str | None = None, table: str = \"infrastage-tron-state\", region: str = \"us-west-1\"\n) -> ServiceResource:\n    session = boto3.Session(profile_name=aws_profile) if aws_profile else boto3.Session()\n    return session.resource(\"dynamodb\", region_name=region).Table(table)\n\n\ndef get_dynamodb_client(\n    aws_profile: str | None = None,\n    region: str = \"us-west-1\",\n):\n    session = boto3.Session(profile_name=aws_profile) if aws_profile else boto3.Session()\n    return session.client(\"dynamodb\", region_name=region)\n\n\ndef scan_keys(source_table: ServiceResource) -> set[str]:\n    \"\"\"Streaming scan that only projects the key attribute and collects unique partition keys.\n\n    Never stores full items in memory — only the 'key' strings.\n    \"\"\"\n    unique_keys: set[str] = set()\n    scan_kwargs = {\n        \"ProjectionExpression\": \"#k\",\n        \"ExpressionAttributeNames\": {\"#k\": \"key\"},\n    }\n    response = source_table.scan(**scan_kwargs)\n    for item in response.get(\"Items\", []):\n        unique_keys.add(item.get(\"key\", \"Unknown Key\"))\n    while \"LastEvaluatedKey\" in response:\n        response = source_table.scan(ExclusiveStartKey=response[\"LastEvaluatedKey\"], **scan_kwargs)\n        for item in response.get(\"Items\", []):\n            unique_keys.add(item.get(\"key\", \"Unknown Key\"))\n    return unique_keys\n\n\ndef resolve_keys(args, parser, source_table: ServiceResource) -> list[str]:\n    if not args.keys and not args.keys_file and not args.all:\n        parser.error(\"You must provide either --keys, --keys-file, or --all.\")\n\n    if args.all:\n        print(\"Scanning table for all keys (keys-only projection)...\")\n        keys_set = scan_keys(source_table)\n        print(f\"Found {len(keys_set)} unique keys.\")\n        return list(keys_set)\n\n    keys = []\n    if args.keys:\n        keys.extend(args.keys)\n    if args.keys_file:\n        try:\n            with open(args.keys_file) as f:\n                keys_from_file = [line.strip() for line in f if line.strip()]\n                keys.extend(keys_from_file)\n        except Exception as e:\n            parser.error(f\"Error reading keys from file {args.keys_file}: {e}\")\n    if not keys:\n        parser.error(\"No keys provided. 
Please provide keys via --keys or --keys-file.\")\n    return list(set(keys))\n\n\ndef is_compressed(json_val) -> bool:\n    \"\"\"Check if a json_val from DynamoDB is compressed (Binary type) vs uncompressed (String type).\n\n    boto3 high-level resource API returns:\n    - \"S\" type as Python str\n    - \"B\" type as boto3.dynamodb.types.Binary (which wraps bytes and has a .value attribute)\n    \"\"\"\n    if hasattr(json_val, \"value\"):\n        # boto3.dynamodb.types.Binary\n        return True\n    if isinstance(json_val, bytes):\n        # I don't think this is possible in the high-level API, but it's harmless to check\n        return True\n    return False\n\n\ndef get_json_val_bytes(json_val) -> bytes:\n    \"\"\"Extract raw bytes from a json_val, whether it's a Binary wrapper or raw bytes.\"\"\"\n    if hasattr(json_val, \"value\"):\n        return bytes(json_val.value)\n    if isinstance(json_val, bytes):\n        return bytes(json_val)\n    raise TypeError(f\"Unexpected json_val type: {type(json_val)}\")\n\n\ndef classify_item(item: dict) -> str:\n    \"\"\"Classify a partition-0 item into one of: compressed, uncompressed, pickle_only, no_data.\"\"\"\n    has_json = \"json_val\" in item\n    has_pickle = \"val\" in item\n\n    if has_json:\n        if is_compressed(item[\"json_val\"]):\n            return \"compressed\"\n        else:\n            return \"uncompressed\"\n    elif has_pickle:\n        # Shouldn't exist. If we have items with only pickle data we want to know\n        return \"pickle_only\"\n    else:\n        # Shouldn't exist. If we have funky items with no json_val/val we want to know\n        return \"no_data\"\n\n\ndef compress_json_for_key(\n    source_table: ServiceResource, client, table_name: str, key: str, dry_run: bool = True\n) -> str:\n    \"\"\"Compress uncompressed JSON for a single key.\n\n    Reads all json_val partitions via get_item (ConsistentRead), gzip-compresses the combined JSON,\n    and writes the compressed data back using TransactWriteItems with ConditionExpressions to guard\n    against concurrent trond writes.\n\n    Returns a status string: \"compressed\", \"already_compressed\", \"no_json\", \"skipped\",\n    \"concurrent_update\", or raises on error (including throttle exhaustion).\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        print(f\"  SKIP (no item found): {key}\")\n        return \"skipped\"\n\n    item_0 = response[\"Item\"]\n\n    if \"json_val\" not in item_0:\n        print(f\"  SKIP (no json_val — needs pickles_to_json.py first): {key}\")\n        return \"no_json\"\n\n    json_val = item_0[\"json_val\"]\n    if is_compressed(json_val):\n        print(f\"  SKIP (already compressed): {key}\")\n        return \"already_compressed\"\n\n    # It's an uncompressed string — collect all partitions via get_item\n    num_json_partitions = int(item_0.get(\"num_json_val_partitions\", 1))\n    combined_json = \"\"\n    for index in range(num_json_partitions):\n        if index == 0:\n            partition_item = item_0\n        else:\n            resp = source_table.get_item(Key={\"key\": key, \"index\": index}, ConsistentRead=True)\n            if \"Item\" not in resp:\n                raise Exception(f\"Missing JSON partition {index} for key {key}\")\n            partition_item = resp[\"Item\"]\n\n        if \"json_val\" not in partition_item:\n            raise Exception(f\"No 'json_val' in partition {index} for key {key}\")\n  
      combined_json += partition_item[\"json_val\"]\n\n    # Validate JSON round-trips before compressing\n    state_type = key.split()[0]\n    if state_type == runstate.JOB_STATE:\n        Job.from_json(combined_json)\n    elif state_type == runstate.JOB_RUN_STATE:\n        JobRun.from_json(combined_json)\n    else:\n        print(f\"  SKIP (unknown state type '{state_type}'): {key}\")\n        return \"skipped\"\n\n    # Compress\n    compressed = gzip.compress(combined_json.encode(\"utf-8\"))\n    num_compressed_partitions = math.ceil(len(compressed) / OBJECT_SIZE)\n\n    original_size = len(combined_json.encode(\"utf-8\"))\n    compressed_size = len(compressed)\n    ratio = (1 - compressed_size / original_size) * 100 if original_size > 0 else 0\n\n    if dry_run:\n        print(\n            f\"  DRY RUN (would compress): {key} \"\n            f\"({original_size:,} bytes -> {compressed_size:,} bytes, {ratio:.1f}% reduction, \"\n            f\"{num_json_partitions} partitions -> {num_compressed_partitions} partitions)\"\n        )\n    else:\n        # Build TransactWriteItems with conditional expressions.\n        # Conditions on partition 0 ensure the item is still uncompressed and has the same\n        # number of partitions we read — if trond wrote concurrently the condition fails.\n        transact_items = []\n        for i in range(num_compressed_partitions):\n            chunk = compressed[i * OBJECT_SIZE : (i + 1) * OBJECT_SIZE]\n            update = {\n                \"Update\": {\n                    \"TableName\": table_name,\n                    \"Key\": {\n                        \"key\": {\"S\": key},\n                        \"index\": {\"N\": str(i)},\n                    },\n                    \"UpdateExpression\": \"SET json_val = :json, num_json_val_partitions = :n\",\n                    \"ExpressionAttributeValues\": {\n                        \":json\": {\"B\": chunk},\n                        \":n\": {\"N\": str(num_compressed_partitions)},\n                    },\n                },\n            }\n            if i == 0:\n                # Condition on partition 0: item must still be uncompressed with expected partition count\n                update[\"Update\"][\"ConditionExpression\"] = (\n                    \"attribute_exists(json_val) \"\n                    \"AND attribute_type(json_val, :string_type) \"\n                    \"AND num_json_val_partitions = :expected_partitions\"\n                )\n                update[\"Update\"][\"ExpressionAttributeValues\"][\":string_type\"] = {\"S\": \"S\"}\n                update[\"Update\"][\"ExpressionAttributeValues\"][\":expected_partitions\"] = {\"N\": str(num_json_partitions)}\n            transact_items.append(update)\n\n        # Clean up excess partitions (REMOVE json_val where compressed needs fewer partitions)\n        for i in range(num_compressed_partitions, num_json_partitions):\n            transact_items.append(\n                {\n                    \"Update\": {\n                        \"TableName\": table_name,\n                        \"Key\": {\n                            \"key\": {\"S\": key},\n                            \"index\": {\"N\": str(i)},\n                        },\n                        \"UpdateExpression\": \"REMOVE json_val\",\n                    },\n                }\n            )\n\n        if len(transact_items) > MAX_TRANSACT_WRITE_ITEMS:\n            raise Exception(\n                f\"Compression requires {len(transact_items)} transaction items for key {key}, \"\n  
              f\"exceeding single-transaction limit of {MAX_TRANSACT_WRITE_ITEMS}. \"\n                f\"({num_json_partitions} uncompressed partitions, \"\n                f\"{num_compressed_partitions} compressed partitions)\"\n            )\n        if compressed_size > 3_500_000:\n            raise Exception(\n                f\"Compressed data too large ({compressed_size:,} bytes) for key {key} — \"\n                f\"risks exceeding 4MB TransactWriteItems request limit.\"\n            )\n\n        # Single atomic transaction — all updates and removes in one call. We retry\n        # on ThrottlingException with exponential backoff on top of boto3's built-in\n        # retries. During testing we are getting throttled almost immediately (on hot\n        # keys) and quickly exhausting the default retry budget. This slows things\n        # down a lot but allows the process to complete without manual intervention\n        # or re-runs.\n        max_retries = 5\n        for attempt in range(max_retries + 1):\n            try:\n                client.transact_write_items(TransactItems=transact_items)\n                break\n            except client.exceptions.TransactionCanceledException as e:\n                reasons = e.response.get(\"CancellationReasons\", [])\n                if any(r.get(\"Code\") in (\"ConditionalCheckFailed\", \"TransactionConflict\") for r in reasons):\n                    print(f\"  SKIPPED (concurrent update detected): {key}\")\n                    return \"concurrent_update\"\n                # Check if cancellation was due to throttling\n                if any(r.get(\"Code\") == \"ThrottlingError\" for r in reasons):\n                    if attempt < max_retries:\n                        wait = 2**attempt\n                        print(f\"  THROTTLED (attempt {attempt + 1}/{max_retries + 1}, retrying in {wait}s): {key}\")\n                        time.sleep(wait)\n                    continue\n                raise\n            except Exception as e:\n                if \"ThrottlingException\" in str(e) or \"Throughput exceeds\" in str(e):\n                    if attempt < max_retries:\n                        wait = 2**attempt\n                        print(f\"  THROTTLED (attempt {attempt + 1}/{max_retries + 1}, retrying in {wait}s): {key}\")\n                        time.sleep(wait)\n                    continue\n                raise\n        else:\n            raise Exception(f\"Throttled after {max_retries + 1} attempts\")\n\n        print(\n            f\"  COMPRESSED: {key} \"\n            f\"({original_size:,} bytes -> {compressed_size:,} bytes, {ratio:.1f}% reduction, \"\n            f\"{num_json_partitions} partitions -> {num_compressed_partitions} partitions)\"\n        )\n\n    return \"compressed\"\n\n\ndef verify_compressed_json_for_key(source_table: ServiceResource, key: str) -> bool:\n    \"\"\"Re-read all compressed JSON partitions, gunzip, and validate via from_json.\n\n    Returns True if verification succeeds, False otherwise. 
Prints the reason on failure.\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        print(f\"  VERIFY FAIL (no item found): {key}\")\n        return False\n\n    item_0 = response[\"Item\"]\n\n    if \"json_val\" not in item_0:\n        print(f\"  VERIFY FAIL (no json_val): {key}\")\n        return False\n\n    if not is_compressed(item_0[\"json_val\"]):\n        print(f\"  VERIFY FAIL (json_val is not compressed): {key}\")\n        return False\n\n    num_json_partitions = int(item_0.get(\"num_json_val_partitions\", 1))\n\n    # Reassemble compressed bytes from all partitions\n    compressed_data = bytearray()\n    for index in range(num_json_partitions):\n        if index == 0:\n            partition_item = item_0\n        else:\n            resp = source_table.get_item(Key={\"key\": key, \"index\": index}, ConsistentRead=True)\n            if \"Item\" not in resp:\n                print(f\"  VERIFY FAIL (missing partition {index}): {key}\")\n                return False\n            partition_item = resp[\"Item\"]\n\n        if \"json_val\" not in partition_item:\n            print(f\"  VERIFY FAIL (no json_val in partition {index}): {key}\")\n            return False\n\n        compressed_data += get_json_val_bytes(partition_item[\"json_val\"])\n\n    # Decompress\n    try:\n        json_str = gzip.decompress(bytes(compressed_data)).decode(\"utf-8\")\n    except Exception as e:\n        print(f\"  VERIFY FAIL (gunzip failed: {e}): {key}\")\n        return False\n\n    # Validate via from_json\n    state_type = key.split()[0]\n    try:\n        if state_type == runstate.JOB_STATE:\n            Job.from_json(json_str)\n        elif state_type == runstate.JOB_RUN_STATE:\n            JobRun.from_json(json_str)\n        else:\n            print(f\"  VERIFY FAIL (unknown state type '{state_type}'): {key}\")\n            return False\n    except Exception as e:\n        print(f\"  VERIFY FAIL (from_json failed: {e}): {key}\")\n        return False\n\n    return True\n\n\ndef delete_pickle_for_key(source_table: ServiceResource, key: str, dry_run: bool = True) -> str:\n    \"\"\"Delete pickle data (val, num_partitions) for a single key.\n\n    Verifies compressed JSON can be fully decoded and parsed before deleting.\n\n    Returns a status string: \"deleted\", \"refused\", \"no_pickle\", \"skipped\", or raises on error.\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        print(f\"  SKIP (no item found): {key}\")\n        return \"skipped\"\n\n    item_0 = response[\"Item\"]\n\n    # Safety checks\n    if \"json_val\" not in item_0:\n        print(f\"  REFUSE (no json_val at all): {key}\")\n        return \"refused\"\n\n    if not is_compressed(item_0[\"json_val\"]):\n        print(f\"  REFUSE (json_val is uncompressed — run 'compress' first): {key}\")\n        return \"refused\"\n\n    if \"val\" not in item_0:\n        print(f\"  SKIP (no pickle data to delete): {key}\")\n        return \"no_pickle\"\n\n    # Verify compressed JSON is valid before deleting pickle\n    if not verify_compressed_json_for_key(source_table, key):\n        print(f\"  REFUSE (compressed JSON verification failed): {key}\")\n        return \"refused\"\n\n    num_partitions = int(item_0.get(\"num_partitions\", 1))\n    num_json_partitions = int(item_0.get(\"num_json_val_partitions\", 1))\n    max_partitions = max(num_partitions, 
num_json_partitions)\n\n    if dry_run:\n        print(\n            f\"  DRY RUN (would delete pickle): {key} ({num_partitions} pickle partitions across {max_partitions} items)\"\n        )\n    else:\n        for i in range(max_partitions):\n            source_table.update_item(\n                Key={\"key\": key, \"index\": i},\n                UpdateExpression=\"REMOVE val, num_partitions\",\n            )\n        print(f\"  DELETED pickle: {key} ({num_partitions} pickle partitions removed across {max_partitions} items)\")\n\n    return \"deleted\"\n\n\ndef cmd_compress(args, source_table: ServiceResource, client, table_name: str, keys: list[str]) -> None:\n    dry_run = not args.execute\n    workers = args.workers\n    total = len(keys)\n    counts = {\n        \"compressed\": 0,\n        \"already_compressed\": 0,\n        \"no_json\": 0,\n        \"skipped\": 0,\n        \"concurrent_update\": 0,\n        \"failed\": 0,\n    }\n    failed_keys = []\n    lock = threading.Lock()\n    completed = [0]  # mutable counter for progress\n\n    mode = \"DRY RUN\" if dry_run else \"EXECUTING\"\n    print(f\"\\n=== Compress JSON ({mode}, {workers} workers) ===\")\n    print(f\"Processing {total} keys...\\n\")\n\n    # Pre-create one Table resource per worker thread. The high-level\n    # resource is not thread-safe, so each worker gets its own, but we\n    # create them once upfront instead of per-key.\n    thread_tables = [get_dynamodb_table(args.aws_profile, args.table_name, args.table_region) for _ in range(workers)]\n    # Map thread IDs to table resources as workers claim them.\n    thread_local = threading.local()\n\n    def get_thread_table() -> ServiceResource:\n        if not hasattr(thread_local, \"table\"):\n            with lock:\n                thread_local.table = thread_tables.pop()\n        return thread_local.table\n\n    def process_key(key: str) -> None:\n        thread_table = get_thread_table()\n        try:\n            result = compress_json_for_key(thread_table, client, table_name, key, dry_run=dry_run)\n        except Exception as e:\n            result = \"failed\"\n            with lock:\n                failed_keys.append(key)\n            print(f\"  FAILED ({key}): {e}\")\n\n        with lock:\n            counts[result] += 1\n            completed[0] += 1\n            if completed[0] % 500 == 0 or completed[0] == total:\n                print(f\"  Progress: {completed[0]}/{total} keys processed\")\n\n    sorted_keys = sorted(keys)\n    with ThreadPoolExecutor(max_workers=workers) as pool:\n        futures = {pool.submit(process_key, key): key for key in sorted_keys}\n        for future in as_completed(futures):\n            # Exceptions are already handled inside process_key, but catch\n            # anything truly unexpected so one bad future doesn't kill the pool.\n            try:\n                future.result()\n            except Exception as e:\n                key = futures[future]\n                print(f\"  UNEXPECTED ERROR ({key}): {e}\")\n                with lock:\n                    counts[\"failed\"] += 1\n                    failed_keys.append(key)\n\n    print(\"\\n=== Summary ===\")\n    print(f\"Total keys:           {total}\")\n    print(f\"Compressed:           {counts['compressed']}\")\n    print(f\"Already compressed:   {counts['already_compressed']}\")\n    print(f\"No JSON (pickle-only):{counts['no_json']}\")\n    print(f\"Skipped:              {counts['skipped']}\")\n    print(f\"Concurrent updates:   {counts['concurrent_update']}\")\n 
   print(f\"Failed:               {counts['failed']}\")\n\n    if dry_run:\n        print(\"\\nDry run complete. No changes were made.\")\n\n    if args.failed_keys_output and failed_keys:\n        with open(args.failed_keys_output, \"w\") as f:\n            for key in failed_keys:\n                f.write(f\"{key}\\n\")\n        print(f\"Failed keys written to {args.failed_keys_output}\")\n\n\ndef cmd_delete_pickles(args, source_table: ServiceResource, keys: list[str]) -> None:\n    dry_run = not args.execute\n\n    if args.execute and not args.i_hereby_declare_we_no_longer_need_pickles:\n        print(\n            \"ERROR: --execute for delete-pickles requires the safety flag:\\n\"\n            \"  --i-hereby-declare-we-no-longer-need-pickles\\n\\n\"\n            \"This operation is DESTRUCTIVE and IRREVERSIBLE. It removes all pickle data\\n\"\n            \"from DynamoDB items. Only proceed if you are certain that:\\n\"\n            \"  1. All items have valid compressed JSON (run 'status' to verify)\\n\"\n            \"  2. Tron is configured to read from JSON (read_json=True)\\n\"\n            \"  3. You have verified restores work from JSON on replica tables\"\n        )\n        sys.exit(1)\n\n    total = len(keys)\n    counts = {\"deleted\": 0, \"refused\": 0, \"no_pickle\": 0, \"skipped\": 0, \"failed\": 0}\n    failed_keys = []\n\n    mode = \"DRY RUN\" if dry_run else \"EXECUTING\"\n    print(f\"\\n=== Delete Pickles ({mode}) ===\")\n    print(f\"Processing {total} keys...\\n\")\n\n    for i, key in enumerate(sorted(keys), 1):\n        print(f\"[{i}/{total}] {key}\")\n        try:\n            result = delete_pickle_for_key(source_table, key, dry_run=dry_run)\n            counts[result] += 1\n        except Exception as e:\n            print(f\"  FAILED: {e}\")\n            counts[\"failed\"] += 1\n            failed_keys.append(key)\n\n    print(\"\\n=== Summary ===\")\n    print(f\"Total keys:           {total}\")\n    print(f\"Deleted:              {counts['deleted']}\")\n    print(f\"Refused (no comp. JSON): {counts['refused']}\")\n    print(f\"No pickle to delete:  {counts['no_pickle']}\")\n    print(f\"Skipped:              {counts['skipped']}\")\n    print(f\"Failed:               {counts['failed']}\")\n\n    if dry_run:\n        print(\"\\nDry run complete. No changes were made.\")\n\n    if args.failed_keys_output and failed_keys:\n        with open(args.failed_keys_output, \"w\") as f:\n            for key in failed_keys:\n                f.write(f\"{key}\\n\")\n        print(f\"Failed keys written to {args.failed_keys_output}\")\n\n\ndef cmd_status(args, source_table: ServiceResource) -> None:\n    print(f\"\\n=== Status: {args.table_name} ({args.table_region}) ===\")\n    print(\"Scanning partition-0 items...\")\n\n    counts = {\"compressed\": 0, \"uncompressed\": 0, \"pickle_only\": 0, \"no_data\": 0}\n    total = 0\n\n    # Streaming scan filtered to partition 0 only, projecting just the attributes\n    # needed for classification. 
This avoids per-key get_item calls.\n    scan_kwargs = {\n        \"FilterExpression\": \"#idx = :zero\",\n        \"ProjectionExpression\": \"#k, #idx, json_val, val\",\n        \"ExpressionAttributeNames\": {\"#k\": \"key\", \"#idx\": \"index\"},\n        \"ExpressionAttributeValues\": {\":zero\": 0},\n    }\n    response = source_table.scan(**scan_kwargs)\n    for item in response.get(\"Items\", []):\n        total += 1\n        counts[classify_item(item)] += 1\n    while \"LastEvaluatedKey\" in response:\n        response = source_table.scan(ExclusiveStartKey=response[\"LastEvaluatedKey\"], **scan_kwargs)\n        for item in response.get(\"Items\", []):\n            total += 1\n            counts[classify_item(item)] += 1\n\n    print(f\"\\nTotal unique keys: {total:,}\\n\")\n    for label, count_key in [\n        (\"Compressed JSON (ready for pickle deletion)\", \"compressed\"),\n        (\"Uncompressed JSON (needs compression)\", \"uncompressed\"),\n        (\"Pickle only (anomalous, needs pickles_to_json.py)\", \"pickle_only\"),\n        (\"No data (anomalous, needs investigation)\", \"no_data\"),\n    ]:\n        count = counts[count_key]\n        pct = (count / total * 100) if total > 0 else 0\n        print(f\"  {label + ':':<50} {count:>8,} ({pct:.1f}%)\")\n\n\ndef add_key_arguments(subparser: argparse.ArgumentParser) -> None:\n    subparser.add_argument(\n        \"--keys\",\n        nargs=\"+\",\n        required=False,\n        help=\"Specific key(s) to process.\",\n    )\n    subparser.add_argument(\n        \"--keys-file\",\n        required=False,\n        help=\"Input file containing keys to process. One key per line.\",\n    )\n    subparser.add_argument(\n        \"--all\",\n        action=\"store_true\",\n        help=\"Process all keys in the table.\",\n    )\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Compress JSON and delete pickle data in Tron's DynamoDB state store.\",\n        epilog=\"\"\"\nSub-commands:\n  compress              Compress uncompressed JSON (\"S\" type) to gzip-compressed binary (\"B\" type).\n  delete-pickles        Remove pickle data (val, num_partitions) from items that have compressed JSON.\n  status                Report the state of all keys in the table.\n\nExamples:\n  Check status of a table:\n    compress_json.py --table-name infrastage-tron-state --table-region us-west-1 status\n\n  Dry-run compression for all keys:\n    compress_json.py --table-name infrastage-tron-state --table-region us-west-1 compress --all\n\n  Execute compression for specific keys:\n    compress_json.py --table-name infrastage-tron-state --table-region us-west-1 compress --keys \"job_state myjob\" --execute\n\n  Dry-run pickle deletion:\n    compress_json.py --table-name infrastage-tron-state --table-region us-west-1 delete-pickles --all\n\n  Execute pickle deletion (requires safety flag):\n    compress_json.py --table-name infrastage-tron-state --table-region us-west-1 delete-pickles --all --execute --i-hereby-declare-we-no-longer-need-pickles\n\"\"\",\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n    )\n    parser.add_argument(\n        \"--aws-profile\",\n        default=os.environ.get(\"AWS_PROFILE\", None),\n        help=\"AWS profile to use (default: taken from AWS_PROFILE environment variable)\",\n    )\n    parser.add_argument(\"--table-name\", required=True, help=\"Name of the DynamoDB table\")\n    parser.add_argument(\"--table-region\", required=True, help=\"AWS region of the DynamoDB table\")\n\n    
subparsers = parser.add_subparsers(dest=\"action\", required=True, help=\"Action to perform\")\n\n    # compress sub-command\n    compress_parser = subparsers.add_parser(\n        \"compress\",\n        help=\"Compress uncompressed JSON to gzip-compressed binary.\",\n    )\n    add_key_arguments(compress_parser)\n    compress_parser.add_argument(\n        \"--execute\",\n        action=\"store_true\",\n        default=False,\n        help=\"Actually perform the compression. Dry-run by default.\",\n    )\n    compress_parser.add_argument(\n        \"--workers\",\n        type=int,\n        default=8,\n        help=\"Number of concurrent workers (default: 8). Increase for faster throughput; decrease if throttling is excessive.\",\n    )\n    compress_parser.add_argument(\n        \"--failed-keys-output\",\n        required=False,\n        help=\"Output file to write keys that failed compression. One key per line.\",\n    )\n\n    # delete-pickles sub-command\n    delete_parser = subparsers.add_parser(\n        \"delete-pickles\",\n        help=\"Remove pickle data from items that have compressed JSON.\",\n    )\n    add_key_arguments(delete_parser)\n    delete_parser.add_argument(\n        \"--execute\",\n        action=\"store_true\",\n        default=False,\n        help=\"Actually perform the deletion. Dry-run by default.\",\n    )\n    delete_parser.add_argument(\n        \"--i-hereby-declare-we-no-longer-need-pickles\",\n        action=\"store_true\",\n        default=False,\n        help=\"Required safety flag when using --execute. Confirms you understand this is destructive and irreversible.\",\n    )\n    delete_parser.add_argument(\n        \"--failed-keys-output\",\n        required=False,\n        help=\"Output file to write keys that failed deletion. One key per line.\",\n    )\n\n    # status sub-command\n    subparsers.add_parser(\n        \"status\",\n        help=\"Report the state of all keys in the table.\",\n    )\n\n    args = parser.parse_args()\n    source_table = get_dynamodb_table(args.aws_profile, args.table_name, args.table_region)\n    client = get_dynamodb_client(args.aws_profile, args.table_region)\n\n    if args.action == \"status\":\n        cmd_status(args, source_table)\n    elif args.action == \"compress\":\n        keys = resolve_keys(args, parser, source_table)\n        cmd_compress(args, source_table, client, args.table_name, keys)\n    elif args.action == \"delete-pickles\":\n        keys = resolve_keys(args, parser, source_table)\n        cmd_delete_pickles(args, source_table, keys)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/inspect_serialized_state.py",
    "content": "\"\"\"Read a state file or db and create a report which summarizes it's contents.\n\nDisplays:\nState configuration\nCount of jobs\n\nTable of Jobs with start date of last run\n\n\"\"\"\nimport optparse\n\nfrom tron.config import manager\nfrom tron.serialize.runstate import statemanager\nfrom tron.utils import chdir\n\n\ndef parse_options():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-c\", \"--config-path\", help=\"Path to the configuration.\")\n    parser.add_option(\n        \"-w\",\n        \"--working-dir\",\n        default=\".\",\n        help=\"Working directory to resolve relative paths.\",\n    )\n    opts, _ = parser.parse_args()\n\n    if not opts.config_path:\n        parser.error(\"A --config-path is required.\")\n    return opts\n\n\ndef get_container(config_path):\n    config_manager = manager.ConfigManager(config_path)\n    return config_manager.load()\n\n\ndef get_state(container):\n    config = container.get_master().state_persistence\n    state_manager = statemanager.PersistenceManagerFactory.from_config(config)\n    names = container.get_job_names()\n    return state_manager.restore(*names)\n\n\ndef format_date(date_string):\n    return date_string.strftime(\"%Y-%m-%d %H:%M:%S\") if date_string else None\n\n\ndef format_jobs(job_states):\n    format = \"%-30s %-8s %-5s %s\\n\"\n    header = format % (\"Name\", \"Enabled\", \"Runs\", \"Last Update\")\n\n    def max_run(item):\n        start_time = filter(None, (run[\"start_time\"] for run in item))\n        return max(start_time) if start_time else None\n\n    def build(name, job):\n        start_times = (max_run(job_run[\"runs\"]) for job_run in job[\"runs\"])\n        start_times = filter(None, start_times)\n        last_run = format_date(max(start_times)) if start_times else None\n        return format % (name, job[\"enabled\"], len(job[\"runs\"]), last_run)\n\n    seq = sorted(build(*item) for item in job_states.items())\n    return header + \"\".join(seq)\n\n\ndef display_report(state_config, job_states):\n    print(\"State Config: %s\" % str(state_config))\n    print(\"Total Jobs: %s\" % len(job_states))\n\n    print(\"\\n%s\" % format_jobs(job_states))\n\n\ndef main(config_path, working_dir):\n    container = get_container(config_path)\n    config = container.get_master().state_persistence\n    with chdir(working_dir):\n        display_report(config, *get_state(container))\n\n\nif __name__ == \"__main__\":\n    opts = parse_options()\n    main(opts.config_path, opts.working_dir)\n"
  },
  {
    "path": "tools/migration/migrate_config_0.2_to_0.3.py",
    "content": "\"\"\"\n Convert a 0.2.x Tron configuration file to the 0.3 format.\n\n Removes YAML anchors, references, and tags.\n Display warnings for NodePools under the nodes section.\n Display warnings for action requires sections that are not lists.\n\n\"\"\"\nimport optparse\nimport re\nimport sys\n\nfrom tron import yaml\n\nYAML_TAG_RE = re.compile(r\"!\\w+\\b\")\n\n\nclass Loader(yaml.Loader):\n    \"\"\"A YAML loader that does not clear its anchor mapping.\"\"\"\n\n    def compose_document(self):\n        self.get_event()\n        node = self.compose_node(None, None)\n        self.get_event()\n        return node\n\n\ndef strip_tags(source):\n    \"\"\"Remove YAML tags.\"\"\"\n    return YAML_TAG_RE.sub(\"\", source)\n\n\ndef name_from_doc(doc):\n    \"\"\"Find the string identifier for a doc.\"\"\"\n    if \"name\" in doc:\n        return doc[\"name\"]\n\n    # Special case for node without a name, their name defaults to their hostname\n    if set(doc.keys()) == {\"hostname\"}:\n        return doc[\"hostname\"]\n\n    if set(doc.keys()) == {\"nodes\"}:\n        raise ValueError(\"Please create a name for NodePool %s\" % doc)\n\n    raise ValueError(\"Could not find a name for %s\" % doc)\n\n\ndef warn_node_pools(content):\n    doc = yaml.safe_load(content)\n\n    node_pools = [node_doc for node_doc in doc[\"nodes\"] if \"nodes\" in node_doc]\n\n    if not node_pools:\n        return\n\n    print(\n        \"\\n\\nNode Pools should be moved into a node_pools section.\"\n        + \" The following node pools were found:\\n\"\n        + \"\\n\".join(str(n) for n in node_pools),\n        file=sys.stderr,\n    )\n\n\ndef warn_requires_list(content):\n    action_names = []\n    doc = yaml.safe_load(content)\n\n    for job in doc[\"jobs\"]:\n        for action in job[\"actions\"]:\n            if \"requires\" not in action:\n                continue\n\n            if isinstance(action[\"requires\"], list):\n                continue\n\n            action_names.append(\"{}.{}\".format(job[\"name\"], action[\"name\"]))\n\n    if not action_names:\n        return\n\n    print(\n        \"\\n\\nAction requires should be a list.\"\n        + \" The following actions have requires that are not lists:\\n\"\n        + \"\\n\".join(action_names),\n        file=sys.stderr,\n    )\n\n\ndef create_loader(content):\n    \"\"\"Create a loader, and have it create the document from content.\"\"\"\n    loader = Loader(content)\n    loader.get_single_node()\n    return loader\n\n\ndef build_anchor_mapping(content):\n    \"\"\"Return a map of anchors to the new name to use.\"\"\"\n    loader = create_loader(content)\n\n    return {\n        anchor_name: name_from_doc(loader.construct_document(yaml_node))\n        for anchor_name, yaml_node in loader.anchors.items()\n    }\n\n\ndef update_references(content):\n    anchor_mapping = build_anchor_mapping(content)\n\n    def key_length_func(kv):\n        return len(kv[0])\n\n    anchors_by_length = sorted(\n        anchor_mapping.items(),\n        key=key_length_func,\n        reverse=True,\n    )\n    for anchor_name, string_name in anchors_by_length:\n        # Remove the anchors\n        content = re.sub(r\"\\s*&%s ?\" % anchor_name, \"\", content)\n        # Update the reference to use the string identifier\n        content = re.sub(\n            r\"\\*%s\\b\" % anchor_name,\n            '\"%s\"' % string_name,\n            content,\n        )\n\n    return content\n\n\ndef convert(source, dest):\n    with open(source) as fh:\n        content = 
fh.read()\n\n    try:\n        content = strip_tags(content)\n        content = update_references(content)\n        warn_node_pools(content)\n        warn_requires_list(content)\n    except yaml.scanner.ScannerError as e:\n        print(f\"Bad content: {e}\\n{content}\")\n\n    with open(dest, \"w\") as fh:\n        fh.write(content)\n\n\nif __name__ == \"__main__\":\n    opt_parser = optparse.OptionParser()\n    opt_parser.add_option(\"-s\", dest=\"source\", help=\"Source config filename.\")\n    opt_parser.add_option(\"-d\", dest=\"dest\", help=\"Destination filename.\")\n    opts, args = opt_parser.parse_args()\n    if not opts.source or not opts.dest:\n        print(\"Source and destination filenames required.\", file=sys.stderr)\n        sys.exit(1)\n\n    convert(opts.source, opts.dest)\n"
  },
  {
    "path": "tools/migration/migrate_config_0.5.1_to_0.5.2.py",
    "content": "\"\"\"Migrate a single configuration file (tron 0.5.1) to the new 0.5.2\nmulti-file format.\n\nUsage:\n\npython tools/migration/migrate_config_0.5.1_to_0.5.2.py \\\n    --source old_config_filename \\\n    --dest new_config_dir\n\"\"\"\nimport optparse\nimport os\n\nfrom tron.config import manager\n\n\ndef parse_options():\n    parser = optparse.OptionParser()\n    parser.add_option(\"-s\", \"--source\", help=\"Path to old configuration file.\")\n    parser.add_option(\n        \"-d\",\n        \"--dest\",\n        help=\"Path to new configuration directory.\",\n    )\n    opts, _ = parser.parse_args()\n\n    if not opts.source:\n        parser.error(\"--source is required\")\n    if not opts.dest:\n        parser.error(\"--dest is required\")\n    return opts\n\n\ndef main(source, dest):\n    dest = os.path.abspath(dest)\n    if not os.path.isfile(source):\n        raise SystemExit(\"Error: Source (%s) is not a file\" % source)\n    if os.path.exists(dest):\n        raise SystemExit(\"Error: Destination path (%s) already exists\" % dest)\n    old_config = manager.read_raw(source)\n    manager.create_new_config(dest, old_config)\n\n\nif __name__ == \"__main__\":\n    opts = parse_options()\n    main(opts.source, opts.dest)\n"
  },
  {
    "path": "tools/migration/migrate_state.py",
    "content": "\"\"\"\n Migrate a state file/database from one StateStore implementation to another. It\n may also be used to add namespace names to jobs when upgrading\n from pre-0.5.2 to version 0.5.2.\n\n Usage:\n    python tools/migration/migrate_state.py \\\n        -s <old_config_dir> -d <new_config_dir> [ --namespace ]\n\n old_config.yaml and new_config.yaml should be configuration files with valid\n state_persistence sections. The state_persistence section configures the\n StateStore.\n\n Pre 0.5 state files can be read by the YamlStateStore. See the configuration\n documentation for more details on how to create state_persistence sections.\n\"\"\"\nimport optparse\n\nfrom tron.config import manager\nfrom tron.config import schema\nfrom tron.serialize import runstate\nfrom tron.serialize.runstate.statemanager import PersistenceManagerFactory\nfrom tron.utils import chdir\n\n\ndef parse_options():\n    parser = optparse.OptionParser()\n    parser.add_option(\n        \"-s\",\n        \"--source\",\n        help=\"The source configuration path which contains a state_persistence \"\n        \"section configured for the state file/database.\",\n    )\n    parser.add_option(\n        \"-d\",\n        \"--dest\",\n        help=\"The destination configuration path which contains a \"\n        \"state_persistence section configured for the state file/database.\",\n    )\n    parser.add_option(\n        \"--source-working-dir\",\n        help=\"The working directory for source dir to resolve relative paths.\",\n    )\n    parser.add_option(\n        \"--dest-working-dir\",\n        help=\"The working directory for dest dir to resolve relative paths.\",\n    )\n    parser.add_option(\n        \"--namespace\",\n        action=\"store_true\",\n        help=\"Move jobs which are missing a namespace to the MASTER\",\n    )\n\n    opts, args = parser.parse_args()\n\n    if not opts.source:\n        parser.error(\"--source is required\")\n    if not opts.dest:\n        parser.error(\"--dest is required.\")\n\n    return opts, args\n\n\ndef get_state_manager_from_config(config_path, working_dir):\n    \"\"\"Return a state manager from the configuration.\"\"\"\n    config_manager = manager.ConfigManager(config_path)\n    config_container = config_manager.load()\n    state_config = config_container.get_master().state_persistence\n    with chdir(working_dir):\n        return PersistenceManagerFactory.from_config(state_config)\n\n\ndef get_current_config(config_path):\n    config_manager = manager.ConfigManager(config_path)\n    return config_manager.load()\n\n\ndef add_namespaces(state_data):\n    return {f\"{schema.MASTER_NAMESPACE}.{name}\": data for (name, data) in state_data.items()}\n\n\ndef strip_namespace(names):\n    return [name.split(\".\", 1)[1] for name in names]\n\n\ndef convert_state(opts):\n    source_manager = get_state_manager_from_config(\n        opts.source,\n        opts.source_working_dir,\n    )\n    dest_manager = get_state_manager_from_config(\n        opts.dest,\n        opts.dest_working_dir,\n    )\n    container = get_current_config(opts.source)\n\n    msg = \"Migrating state from %s to %s\"\n    print(msg % (source_manager._impl, dest_manager._impl))\n\n    job_names = container.get_job_names()\n    if opts.namespace:\n        job_names = strip_namespace(job_names)\n\n    job_states = source_manager.restore(\n        job_names,\n    )\n    source_manager.cleanup()\n\n    if opts.namespace:\n        job_states = add_namespaces(job_states)\n\n    for name, job in 
job_states.items():\n        dest_manager.save(runstate.JOB_STATE, name, job)\n    print(\"Migrated %s jobs.\" % len(job_states))\n\n    dest_manager.cleanup()\n\n\nif __name__ == \"__main__\":\n    opts, _args = parse_options()\n    convert_state(opts)\n"
  },
  {
    "path": "tools/migration/migrate_state_1.3.15_to_1.4.0.py",
    "content": "import argparse\nimport logging\n\nfrom tron.config import manager\nfrom tron.serialize import runstate\nfrom tron.serialize.runstate.statemanager import PersistenceManagerFactory\nfrom tron.utils import chdir\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--back\",\n        help=\"Flag to migrate back from new state back to old state\",\n        action=\"store_true\",\n        default=False,\n    )\n    parser.add_argument(\n        \"--working-dir\",\n        help=\"Working directory for the Tron daemon\",\n        required=True,\n    )\n    parser.add_argument(\n        \"--config-path\",\n        help=\"Path in working dir with configs\",\n        required=True,\n    )\n    return parser.parse_args()\n\n\ndef create_job_runs_for_job(state_manager, job_name, job_state):\n    for run in job_state[\"runs\"]:\n        run_num = run[\"run_num\"]\n        state_manager.save(runstate.JOB_RUN_STATE, f\"{job_name}.{run_num}\", run)\n    run_nums = [run[\"run_num\"] for run in job_state[\"runs\"]]\n    job_state[\"run_nums\"] = run_nums\n    # Note: not removing 'runs' from job_state for safety.\n    # If Tron starts up correctly after the state migration, it will update the job state\n    # and remove 'runs'.\n    state_manager.save(runstate.JOB_STATE, job_name, job_state)\n\n\ndef move_job_runs_to_job(state_manager, job_name, job_state):\n    runs = state_manager._restore_runs_for_job(job_name, job_state)\n    job_state[\"runs\"] = runs\n    state_manager.save(runstate.JOB_STATE, job_name, job_state)\n    for run in runs:\n        state_manager.delete(runstate.JOB_RUN_STATE, f'{job_name}.{run[\"run_num\"]}')\n\n\ndef update_state(state_manager, job_names, back):\n    jobs = state_manager._restore_dicts(runstate.JOB_STATE, job_names)\n    for job_name, job_state in jobs.items():\n        if back:\n            move_job_runs_to_job(state_manager, job_name, job_state)\n        else:\n            create_job_runs_for_job(state_manager, job_name, job_state)\n\n\ndef migrate_state(config_path, working_dir, back):\n    with chdir(working_dir):\n        config_manager = manager.ConfigManager(config_path)\n        config_container = config_manager.load()\n    job_names = config_container.get_job_names()\n    state_config = config_container.get_master().state_persistence\n    state_manager = PersistenceManagerFactory.from_config(state_config)\n    update_state(state_manager, job_names, back)\n    state_manager.cleanup()\n\n\nif __name__ == \"__main__\":\n    # INFO for boto, DEBUG for all tron-related state logs\n    logging.basicConfig(level=logging.INFO)\n    logging.getLogger(\"tron\").setLevel(logging.DEBUG)\n\n    args = parse_args()\n    migrate_state(args.config_path, args.working_dir, args.back)\n"
  },
  {
    "path": "tools/pickles_to_json.py",
    "content": "import argparse\nimport math\nimport os\nimport pickle\n\nimport boto3\nimport requests\nfrom boto3.resources.base import ServiceResource\n\nfrom tron.core.job import Job\nfrom tron.core.jobrun import JobRun\nfrom tron.serialize import runstate\n\n# Max DynamoDB object size is 400KB. Since we save two copies of the object (pickled and JSON),\n# we need to consider this max size applies to the entire item, so we use a max size of 200KB\n# for each version.\nOBJECT_SIZE = 200_000\n\n\ndef get_dynamodb_table(\n    aws_profile: str | None = None, table: str = \"infrastage-tron-state\", region: str = \"us-west-1\"\n) -> ServiceResource:\n    \"\"\"\n    Get the DynamoDB table resource.\n    :param aws_profile: The name of the AWS profile to use.\n    :param table: The name of the table to get.\n    :param region: The region of the table.\n    :return: The DynamoDB table resource.\n    \"\"\"\n    session = boto3.Session(profile_name=aws_profile) if aws_profile else boto3.Session()\n    return session.resource(\"dynamodb\", region_name=region).Table(table)\n\n\ndef get_all_jobs(source_table: ServiceResource) -> list[str]:\n    \"\"\"\n    Scan the DynamoDB table and return a list of unique job keys.\n    :param source_table: The DynamoDB table resource to scan.\n    :return: A list of all job keys.\n    \"\"\"\n    items = scan_table(source_table)\n    unique_keys = {item.get(\"key\", \"Unknown Key\") for item in items}\n    return list(unique_keys)\n\n\ndef get_job_names(base_url: str) -> list[str]:\n    \"\"\"\n    Get the list of job names from the Tron API.\n    :param base_url: The base URL of the Tron API.\n    :return: A list of job names.\n    \"\"\"\n    try:\n        full_url = f\"http://{base_url}.yelpcorp.com:8089/api/jobs?include_job_runs=0\"\n        response = requests.get(full_url)\n        response.raise_for_status()\n        data = response.json()\n        job_names = [job[\"name\"] for job in data.get(\"jobs\", [])]\n        return job_names\n    except requests.exceptions.RequestException as e:\n        print(f\"An error occurred: {e}\")\n        return []\n\n\ndef combine_pickle_partitions(source_table: ServiceResource, key: str) -> bytes:\n    \"\"\"\n    Load and combine all partitions of a pickled item from DynamoDB.\n    :param source_table: The DynamoDB table resource.\n    :param key: The primary key of the item to retrieve.\n    :return: The combined pickled data as bytes.\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        raise Exception(f\"No item found for key {key} at index 0\")\n    item = response[\"Item\"]\n    num_partitions = int(item.get(\"num_partitions\", 1))\n    combined_data = bytearray()\n    for index in range(num_partitions):\n        response = source_table.get_item(Key={\"key\": key, \"index\": index}, ConsistentRead=True)\n        if \"Item\" not in response:\n            raise Exception(f\"Missing partition {index} for key {key}\")\n        item = response[\"Item\"]\n        combined_data.extend(item[\"val\"].value)\n    return bytes(combined_data)\n\n\ndef dump_pickle_key(source_table: ServiceResource, key: str) -> None:\n    \"\"\"\n    Load the pickled data from DynamoDB for a given key, handling partitioned\n    items, and print the full pickle data.\n    :param source_table: The DynamoDB table resource.\n    :param key: The primary key of the item to retrieve.\n    \"\"\"\n    try:\n        pickled_data = 
combine_pickle_partitions(source_table, key)\n        loaded_pickle = pickle.loads(pickled_data)\n        print(f\"Key: {key} - Pickle data:\")\n        print(loaded_pickle)\n    except Exception as e:\n        print(f\"Key: {key} - Failed to load pickle: {e}\")\n        raise\n\n\ndef dump_pickle_keys(source_table: ServiceResource, keys: list[str]) -> None:\n    \"\"\"\n    Load and print pickles for the given list of keys.\n    :param source_table: The DynamoDB table resource.\n    :param keys: A list of keys for which to load and print pickles.\n    \"\"\"\n    for key in keys:\n        dump_pickle_key(source_table, key)\n\n\ndef dump_json_key(source_table: ServiceResource, key: str) -> None:\n    \"\"\"\n    Load the JSON data from DynamoDB for a given key and print it.\n    :param source_table: The DynamoDB table resource.\n    :param key: The primary key of the item to retrieve.\n    \"\"\"\n    try:\n        json_data = combine_json_partitions(source_table, key)\n        if json_data is not None:\n            print(f\"Key: {key} - JSON data:\")\n            print(json_data)\n        else:\n            print(f\"Key: {key} - No JSON value found\")\n    except Exception as e:\n        print(f\"Key: {key} - Failed to load JSON: {e}\")\n\n\ndef dump_json_keys(source_table: ServiceResource, keys: list[str]) -> None:\n    \"\"\"\n    Load and print JSON data for the given list of keys.\n    :param source_table: The DynamoDB table resource.\n    :param keys: A list of keys for which to load and print JSON data.\n    \"\"\"\n    for key in keys:\n        dump_json_key(source_table, key)\n\n\n# TODO: clean up old run history for valid jobs? something something look at job_state, then whitelist those runs instead of whitelisting entire jobs\ndef delete_keys(source_table: ServiceResource, keys: list[str]) -> None:\n    \"\"\"\n    Delete items with the given list of keys from the DynamoDB table.\n    :param source_table: The DynamoDB table resource.\n    :param keys: A list of keys to delete.\n    \"\"\"\n    total_keys = len(keys)\n    deleted_count = 0\n    failure_count = 0\n    for key in keys:\n        try:\n            num_partitions = get_num_partitions(source_table, key)\n            for index in range(num_partitions):\n                source_table.delete_item(Key={\"key\": key, \"index\": index})\n            print(f\"Key: {key} - Successfully deleted\")\n            deleted_count += 1\n        except Exception as e:\n            print(f\"Key: {key} - Failed to delete: {e}\")\n            failure_count += 1\n    print(f\"Total keys: {total_keys}\")\n    print(f\"Successfully deleted: {deleted_count}\")\n    print(f\"Failures: {failure_count}\")\n\n\ndef get_num_partitions(source_table: ServiceResource, key: str) -> int:\n    \"\"\"\n    Get the number of partitions for a given key in the DynamoDB table.\n    :param source_table: The DynamoDB table resource.\n    :param key: The primary key of the item to retrieve.\n    :return: The number of partitions for the key.\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        return 0\n    item = response[\"Item\"]\n    num_partitions = int(item.get(\"num_partitions\", 1))\n    num_json_val_partitions = int(item.get(\"num_json_val_partitions\", 0))\n    return max(num_partitions, num_json_val_partitions)\n\n\ndef combine_json_partitions(source_table: ServiceResource, key: str) -> str | None:\n    \"\"\"\n    Combine all partitions of a JSON item from 
DynamoDB.\n    :param source_table: The DynamoDB table resource.\n    :param key: The primary key of the item to retrieve.\n    :return: The combined JSON data as a string, or None if not found.\n    \"\"\"\n    response = source_table.get_item(Key={\"key\": key, \"index\": 0}, ConsistentRead=True)\n    if \"Item\" not in response:\n        return None\n    item = response[\"Item\"]\n    num_json_partitions = int(item.get(\"num_json_val_partitions\", 0))\n    if num_json_partitions == 0:\n        return None\n    combined_json = \"\"\n    for index in range(num_json_partitions):\n        response = source_table.get_item(Key={\"key\": key, \"index\": index}, ConsistentRead=True)\n        if \"Item\" not in response:\n            raise Exception(f\"Missing JSON partition {index} for key {key}\")\n        item = response[\"Item\"]\n        if \"json_val\" in item:\n            combined_json += item[\"json_val\"]\n        else:\n            raise Exception(f\"No 'json_val' in partition {index} for key {key}\")\n    return combined_json\n\n\ndef convert_pickle_to_json_and_update_table(source_table: ServiceResource, key: str, dry_run: bool = True) -> bool:\n    \"\"\"\n    Convert a single pickled item to JSON and update the DynamoDB entry.\n    Returns True if the conversion was successful, False if skipped.\n    Raises an exception if conversion fails.\n    \"\"\"\n    try:\n        # Skip conversion for job_state MASTER and job_run_state MASTER jobs that are from infrastage testing (i.e., not real jobs)\n        if key.startswith(\"job_state MASTER\") or key.startswith(\"job_run_state MASTER\"):\n            print(f\"Skipping conversion for key: {key}\")\n            return False\n        pickled_data = combine_pickle_partitions(source_table, key)\n        state_data = pickle.loads(pickled_data)\n        state_type = key.split()[0]\n        if state_type == runstate.JOB_STATE:\n            json_data = Job.to_json(state_data)\n        elif state_type == runstate.JOB_RUN_STATE:\n            json_data = JobRun.to_json(state_data)\n        else:\n            # This will skip the state metadata and any other non-standard keys we have in the table\n            print(f\"Key: {key} - Unknown state type: {state_type}. 
Skipping.\")\n            return False\n        num_json_partitions = math.ceil(len(json_data) / OBJECT_SIZE)\n        for partition_index in range(num_json_partitions):\n            json_partition = json_data[\n                partition_index * OBJECT_SIZE : min((partition_index + 1) * OBJECT_SIZE, len(json_data))\n            ]\n            if not dry_run:\n                source_table.update_item(\n                    Key={\"key\": key, \"index\": partition_index},\n                    UpdateExpression=\"SET json_val = :json, num_json_val_partitions = :num_partitions\",\n                    ExpressionAttributeValues={\n                        \":json\": json_partition,\n                        \":num_partitions\": num_json_partitions,\n                    },\n                )\n        if dry_run:\n            print(f\"DRY RUN: Key: {key} - Pickle would have been converted to JSON and updated\")\n        else:\n            print(f\"Key: {key} - Pickle converted to JSON and updated\")\n        return True\n    except Exception as e:\n        print(f\"Key: {key} - Failed to convert pickle to JSON: {e}\")\n        raise\n\n\ndef convert_pickles_to_json_and_update_table(\n    source_table: ServiceResource,\n    keys: list[str],\n    dry_run: bool = True,\n    deprecated_keys_output: str | None = None,\n    failed_keys_output: str | None = None,\n    job_names: list[str] = [],\n) -> None:\n    \"\"\"\n    Convert pickled items in the DynamoDB table to JSON and update the entries.\n    :param source_table: The DynamoDB table resource.\n    :param keys: List of keys to convert.\n    :param dry_run: If True, simulate the conversion without updating the table.\n    :param deprecated_keys_output: Output file to write deprecated keys to.\n    :param failed_keys_output: Output file to write keys that failed to convert to.\n    :param job_names: List of job names to use for filtering keys.\n    \"\"\"\n    total_keys = len(keys)\n    converted_keys = 0\n    skipped_keys = 0\n    failed_keys = []\n    delete_keys = []\n\n    for key in keys:\n        # Extract the job name from the key\n        parts = key.split()\n        if len(parts) < 2:\n            continue\n\n        state_type, job_info = parts[0], parts[1]\n\n        # Ignore run_num for job_run_state keys\n        if state_type == \"job_run_state\":\n            job_name = \".\".join(job_info.split(\".\")[:-1])\n        else:\n            job_name = job_info\n\n        if job_name not in job_names:\n            delete_keys.append(key)\n            continue\n\n        try:\n            result = convert_pickle_to_json_and_update_table(source_table, key, dry_run)\n            if result:\n                converted_keys += 1\n            else:\n                skipped_keys += 1\n        except Exception as e:\n            print(f\"Key: {key} - Failed to convert pickle to JSON: {e}\")\n            failed_keys.append(key)\n\n    print(f\"Total keys processed: {total_keys}\")\n    print(f\"Conversions attempted: {total_keys - skipped_keys}\")\n    print(f\"Conversions succeeded: {converted_keys}\")\n    print(f\"Conversions skipped: {skipped_keys}\")\n    print(f\"Conversions failed: {len(failed_keys)}\")\n    print(f\"Keys to be deleted: {len(delete_keys)}\")\n\n    if deprecated_keys_output:\n        with open(deprecated_keys_output, \"w\") as f:\n            for key in delete_keys:\n                f.write(f\"{key}\\n\")\n        print(f\"Deprecated keys have been written to {deprecated_keys_output}\")\n    if failed_keys_output:\n        with 
open(failed_keys_output, \"w\") as f:\n            for key in failed_keys:\n                f.write(f\"{key}\\n\")\n        print(f\"Failed have been written to {failed_keys_output}\")\n    if dry_run:\n        print(\"Dry run complete. No changes were made to the DynamoDB table.\")\n\n\ndef scan_table(source_table: ServiceResource) -> list[dict]:\n    \"\"\"\n    Scan the DynamoDB table and return all items, handling pagination.\n    :param source_table: The DynamoDB table resource to scan.\n    :return: A list of all items in the table.\n    \"\"\"\n    items = []\n    response = source_table.scan()\n    items.extend(response.get(\"Items\", []))\n    while \"LastEvaluatedKey\" in response:\n        response = source_table.scan(ExclusiveStartKey=response[\"LastEvaluatedKey\"])\n        items.extend(response.get(\"Items\", []))\n    return items\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Utilities for working with pickles and JSON items in Tron's DynamoDB state store.\",\n        epilog=\"\"\"\nActions:\n  convert           Convert pickled state data to JSON format and update the DynamoDB table.\n  dump-pickle       Load and print the pickles for specified keys.\n  dump-json         Load and print JSON data for specified keys.\n  delete       Delete the specified keys from the DynamoDB table.\nExamples:\n  Validate pickles (write deprecated keys to deprecated_keys.txt, dry run by default):\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 convert --all --deprecated-keys-output deprecated_keys.txt --tron-api tron-infrastage\n  Convert all pickles to JSON (actually execute the update):\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 convert --all --execute --tron-api tron-infrastage\n  Convert specific pickles to JSON using keys from an input file:\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 convert --keys-file input_keys.txt --execute --tron-api tron-infrastage\n  Load and print specific JSON keys using keys from an input file:\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 dump-json --keys-file input_keys.txt\n  Delete specific keys (dry run by default):\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 delete --keys \"key1\" \"key2\"\n  Delete keys from an input file (actually execute the deletion):\n    pickles_to_json.py --table-name infrastage-tron-state --table-region us-west-1 delete --keys-file input_keys.txt --execute\n\"\"\",\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n    )\n    parser.add_argument(\n        \"--aws-profile\",\n        default=os.environ.get(\"AWS_PROFILE\", None),\n        help=\"AWS profile to use (default: taken from AWS_PROFILE environment variable)\",\n    )\n    parser.add_argument(\"--table-name\", required=True, help=\"Name of the DynamoDB table\")\n    parser.add_argument(\"--table-region\", required=True, help=\"AWS region of the DynamoDB table\")\n\n    subparsers = parser.add_subparsers(dest=\"action\", required=True, help=\"Action to perform\")\n\n    convert_parser = subparsers.add_parser(\n        \"convert\", help=\"Convert pickled state data to JSON format and update the DynamoDB table.\"\n    )\n    convert_parser.add_argument(\n        \"--tron-api\",\n        required=True,\n        help=\"Base URL of the Tron API to fetch job names from.\",\n    )\n    convert_parser.add_argument(\n        \"--keys\",\n        
nargs=\"+\",\n        required=False,\n        help=\"Specific key(s) to perform the action on.\",\n    )\n    convert_parser.add_argument(\n        \"--keys-file\",\n        required=False,\n        help=\"Input file containing keys to perform the action on. One key per line.\",\n    )\n    convert_parser.add_argument(\n        \"--deprecated-keys-output\",\n        required=False,\n        help=\"Output file to write deprecated keys to. These are keys associated with jobs not present in Tron. One key per line.\",\n    )\n    convert_parser.add_argument(\n        \"--failed-keys-output\",\n        required=False,\n        help=\"Output file to write keys that failed to convert. One key per line.\",\n    )\n    convert_parser.add_argument(\n        \"--all\",\n        action=\"store_true\",\n        help=\"Apply the action to all keys in the table.\",\n    )\n    convert_parser.add_argument(\n        \"--execute\",\n        action=\"store_true\",\n        default=False,\n        help=\"Actually perform the conversion and update the DynamoDB table.\",\n    )\n\n    dump_pickle_parser = subparsers.add_parser(\"dump-pickle\", help=\"Load and print the pickles for specified keys.\")\n    dump_pickle_parser.add_argument(\n        \"--keys\",\n        nargs=\"+\",\n        required=False,\n        help=\"Specific key(s) to perform the action on.\",\n    )\n    dump_pickle_parser.add_argument(\n        \"--keys-file\",\n        required=False,\n        help=\"Input file containing keys to perform the action on. One key per line.\",\n    )\n    dump_pickle_parser.add_argument(\n        \"--all\",\n        action=\"store_true\",\n        help=\"Apply the action to all keys in the table.\",\n    )\n\n    dump_json_parser = subparsers.add_parser(\"dump-json\", help=\"Load and print JSON data for specified keys.\")\n    dump_json_parser.add_argument(\n        \"--keys\",\n        nargs=\"+\",\n        required=False,\n        help=\"Specific key(s) to perform the action on.\",\n    )\n    dump_json_parser.add_argument(\n        \"--keys-file\",\n        required=False,\n        help=\"Input file containing keys to perform the action on. One key per line.\",\n    )\n    dump_json_parser.add_argument(\n        \"--all\",\n        action=\"store_true\",\n        help=\"Apply the action to all keys in the table.\",\n    )\n\n    delete_keys_parser = subparsers.add_parser(\"delete\", help=\"Delete the specified keys from the DynamoDB table.\")\n    delete_keys_parser.add_argument(\n        \"--keys\",\n        nargs=\"+\",\n        required=False,\n        help=\"Specific key(s) to perform the action on.\",\n    )\n    delete_keys_parser.add_argument(\n        \"--keys-file\",\n        required=False,\n        help=\"Input file containing keys to perform the action on. 
One key per line.\",\n    )\n    delete_keys_parser.add_argument(\n        \"--all\",\n        action=\"store_true\",\n        help=\"Apply the action to all keys in the table.\",\n    )\n    delete_keys_parser.add_argument(\n        \"--execute\",\n        action=\"store_true\",\n        default=False,\n        help=\"Actually perform the deletion on the DynamoDB table.\",\n    )\n\n    args = parser.parse_args()\n    source_table = get_dynamodb_table(args.aws_profile, args.table_name, args.table_region)\n\n    if not args.keys and not args.keys_file and not args.all:\n        parser.error(\"You must provide either --keys, --keys-file, or --all.\")\n\n    if args.all:\n        print(\"Processing all keys in the table...\")\n        keys = get_all_jobs(source_table)\n    else:\n        keys = []\n        if args.keys:\n            keys.extend(args.keys)\n        if args.keys_file:\n            try:\n                with open(args.keys_file) as f:\n                    keys_from_file = [line.strip() for line in f if line.strip()]\n                    keys.extend(keys_from_file)\n            except Exception as e:\n                parser.error(f\"Error reading keys from file {args.keys_file}: {e}\")\n        if not keys:\n            parser.error(\"No keys provided. Please provide keys via --keys or --keys-file.\")\n        keys = list(set(keys))\n\n    if args.action == \"convert\":\n        job_names = get_job_names(args.tron_api)\n        convert_pickles_to_json_and_update_table(\n            source_table,\n            keys=keys,\n            dry_run=not args.execute,\n            deprecated_keys_output=args.deprecated_keys_output,\n            failed_keys_output=args.failed_keys_output,\n            job_names=job_names,\n        )\n    elif args.action == \"dump-pickle\":\n        dump_pickle_keys(source_table, keys)\n    elif args.action == \"dump-json\":\n        dump_json_keys(source_table, keys)\n    elif args.action == \"delete\":\n        if not args.execute:\n            print(f\"DRY RUN: Would delete {len(keys)} keys from the table '{args.table_name}'.\")\n            for key in keys:\n                print(f\"Would delete key: {key}\")\n            print(\"Dry run complete. No changes were made to the DynamoDB table.\")\n        else:\n            confirm = (\n                input(f\"Are you sure you want to delete {len(keys)} keys from the table '{args.table_name}'? [y/N]: \")\n                .strip()\n                .lower()\n            )\n            if confirm in (\"y\", \"yes\"):\n                delete_keys(source_table, keys)\n            else:\n                print(\"Deletion cancelled.\")\n    else:\n        print(f\"Unknown action: {args.action}\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/sync_tron_state_from_k8s.py",
    "content": "\"\"\"\nUpdate tron state from k8s api if tron has not yet updated correctly\n\n Usage:\n    python tools/sync_tron_state_from_k8s.py -c <kubeconfig_path> (--do-work|--num-runs N|--tronctl-wrapper tronctl-pnw-devc)\n\nThis will search for completed pods in the cluster specified in the kubeconfig in the `tron` namespace and use tronctl to transition any whose states do not match.\n\"\"\"\nimport argparse\nimport base64\nimport hashlib\nimport logging\nimport os\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom kubernetes.client import V1Pod\nfrom task_processing.plugins.kubernetes.kube_client import KubeClient\n\nfrom tron.commands.client import Client\nfrom tron.commands.cmd_utils import get_client_config\n\nPOD_STATUS_TO_TRON_STATE = {\n    \"Succeeded\": \"success\",\n    \"Failed\": \"fail\",\n    \"Unknown\": \"Unknown\",  # This should never really happen\n}\n\nTRON_MODIFIABLE_STATES = [\n    \"starting\",  # stuck jobs\n    \"running\",  # stuck jobs\n    \"unknown\",\n    \"lost\",\n]\n\nlog = logging.getLogger(\"sync_tron_from_k8s\")\n\n\n# NOTE: Copied from paasta_tools.kubernetes_tools, if it changes there it must be updated here\ndef limit_size_with_hash(name: str, limit: int = 63, suffix: int = 4) -> str:\n    \"\"\"Returns `name` unchanged if it's length does not exceed the `limit`.\n    Otherwise, returns truncated `name` with it's hash of size `suffix`\n    appended.\n\n    base32 encoding is chosen as it satisfies the common requirement in\n    various k8s names to be alphanumeric.\n    \"\"\"\n    if len(name) > limit:\n        digest = hashlib.md5(name.encode()).digest()\n        hash = base64.b32encode(digest).decode().replace(\"=\", \"\").lower()\n        return f\"{name[:(limit-suffix-1)]}-{hash[:suffix]}\"\n    else:\n        return name\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--kubeconfig-path\",\n        dest=\"kubeconfig_path\",\n        help=\"KUBECONFIG path; multiple can be specified to find pods in multiple clusters\",\n        nargs=\"+\",\n    )\n    parser.add_argument(\n        \"--kubecontext\",\n        dest=\"kubecontext\",\n        help=\"kubecontext to use from specified kubeconfig. 
multiple can be specified to find pods in multiple clusters, ONLY if a single kubeconfig-path is provided\",\n        nargs=\"*\",\n    )\n    parser.add_argument(\n        \"--do-work\",\n        dest=\"do_work\",\n        action=\"store_true\",\n        default=False,\n        help=\"Actually modify tron actions that need updating; without this flag we will only print those that would be updated\",\n    )\n    parser.add_argument(\"--tron-url\", default=None, help=\"Tron url (default will read from paasta tron config)\")\n    parser.add_argument(\n        \"--tronctl-wrapper\",\n        default=\"tronctl\",\n        dest=\"tronctl_wrapper\",\n        help=\"Tronctl wrapper to use (will not use wrapper by default)\",\n    )\n    parser.add_argument(\"-n\", \"--num-runs\", dest=\"num_runs\", type=int, default=100, help=\"Maximum number of job runs to retrieve\")\n    parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\", default=False, help=\"Verbose logging\")\n    args = parser.parse_args()\n\n    # We can only have multiple kubeconfigs, or multiple contexts with a single config\n    if len(args.kubeconfig_path) > 1 and args.kubecontext:\n        parser.error(\"You can only specify a single --kubeconfig-path if specifying --kubecontext arguments.\")\n\n    # tron's base level is critical, not info, adjust accordingly\n    if args.verbose:\n        level = logging.DEBUG\n        tron_level = logging.WARN\n    else:\n        level = logging.INFO\n        tron_level = logging.CRITICAL\n\n    logging.basicConfig(level=level, stream=sys.stdout)\n\n    tron_client_logger = logging.getLogger(\"tron.commands.client\")\n    tron_client_logger.setLevel(tron_level)\n\n    # We also don't want kube_client debug logs\n    kube_logger = logging.getLogger(\"kubernetes.client.rest\")\n    kube_logger.setLevel(logging.INFO)\n\n    return args\n\n\ndef fetch_pods(kubeconfig_path: str, kubecontext: str | None) -> dict[str, V1Pod]:\n    if kubecontext:\n        # KubeClient only uses the environment variable\n        os.environ[\"KUBECONTEXT\"] = kubecontext\n    kube_client = KubeClient(kubeconfig_path=kubeconfig_path, user_agent=\"sync_tron_state_from_k8s\")\n\n    # Bit of a hack, no helper to fetch pods so reach into core api\n    completed_pod_list = kube_client.core.list_namespaced_pod(\n        namespace=\"tron\",\n    )\n\n    return {pod.metadata.name: pod for pod in completed_pod_list.items}\n\n\ndef get_tron_state_from_api(tron_server: str, num_runs: int = 100) -> list[dict[str, dict[Any, Any]]]:\n    if not tron_server:\n        client_config = get_client_config()\n        tron_server = client_config.get(\"server\", \"http://localhost:8089\")\n    client = Client(tron_server)\n    # /jobs returns only the latest 5 runs, we'll need to request all runs instead ourselves\n    jobs = client.jobs(\n        include_job_runs=False,\n        include_action_runs=False,\n        include_action_graph=False,\n        include_node_pool=False,\n    )\n\n    for job in jobs:\n        # Update job URL to be used with API instead of web\n        url = f'/api{job[\"url\"]}'\n        log.debug(f'Fetching job {job[\"name\"]} at {url}')\n        job_runs = client.job(\n            url,\n            include_action_runs=True,\n            count=num_runs,  # TODO: fetch job run_limit and use that for count ?\n        )\n        job[\"runs\"] = job_runs[\"runs\"]\n    return jobs\n\n\ndef get_matching_pod(action_run: dict[str, Any], pods: dict[str, V1Pod]) -> V1Pod | None:\n    \"\"\"Given a tron 
action_run, try to find the right pod that matches.\"\"\"\n    action_name = action_run[\"action_name\"]\n    job_name = action_run[\"job_name\"]\n    run_num = action_run[\"run_num\"]\n    service, job = job_name.split(\".\")\n    instance_name = f\"{job}.{action_name}\"\n    sanitized_instance_name = limit_size_with_hash(instance_name)\n    matching_pods = sorted(\n        [\n            pod\n            for pod in pods.values()\n            if pod.metadata.labels[\"paasta.yelp.com/service\"] == service\n            and pod.metadata.labels[\"paasta.yelp.com/instance\"] == sanitized_instance_name\n            and pod.metadata.labels[\"tron.yelp.com/run_num\"] == run_num\n        ],\n        # If action has retries, there will be multiple pods w/ same job_run; we only want the latest\n        key=lambda pod: pod.metadata.creation_timestamp,\n        reverse=True,\n    )\n    return (\n        matching_pods[0] if matching_pods and matching_pods[0].status.phase in POD_STATUS_TO_TRON_STATE.keys() else None\n    )\n\n\ndef get_desired_state_from_pod(pod: V1Pod) -> str:\n    k8s_state = pod.status.phase\n    return POD_STATUS_TO_TRON_STATE.get(k8s_state, \"NoMatch\")\n\n\ndef update_tron_from_pods(\n    jobs: list[dict[str, Any]], pods: dict[str, V1Pod], tronctl_wrapper: str = \"tronctl\", do_work: bool = False\n):\n    updated = []\n    error = []\n    for job in jobs:\n        if job[\"runs\"]:\n            # job_runs\n            for job_run in job[\"runs\"]:\n                # actions for this job_run\n                for action in job_run.get(\"runs\", []):\n                    action_run_id = action[\"id\"]\n                    if action[\"state\"] in TRON_MODIFIABLE_STATES:\n                        pod = get_matching_pod(action, pods)\n                        if pod:\n                            desired_state = get_desired_state_from_pod(pod)\n                            if action[\"state\"] != desired_state:\n                                log.debug(f'{action_run_id} state {action[\"state\"]} needs updating to {desired_state}')\n                                cmd = [tronctl_wrapper, desired_state, action_run_id]\n                                if do_work:\n                                    # tronctl-$cluster success/fail svc.job.run.action\n                                    try:\n                                        log.info(f\"Running {cmd}\")\n                                        proc = subprocess.run(cmd, capture_output=True, text=True)\n                                        if proc.returncode != 0:\n                                            log.error(f\"Got non-zero exit code: {proc.returncode}\")\n                                            log.error(f\"\\t{proc.stderr}\")\n                                            error.append(action_run_id)\n                                        else:\n                                            # only count the action as updated when tronctl exits cleanly\n                                            updated.append(action_run_id)\n                                    except Exception:\n                                        log.exception(\"ERROR: Hit exception:\")\n                                        error.append(action_run_id)\n                                else:\n                                    log.info(f\"Dry-Run: Would run {cmd}\")\n                                    updated.append(action_run_id)\n                        else:\n                            log.debug(f\"action run {action_run_id} not found in list of finished pods, no action taken\")\n                    else:\n                        log.debug(f'Action state {action[\"state\"]} for {action_run_id} 
not modifiable, no action taken')\n    log.info(f\"Updated {len(updated)} actions: {','.join(updated)}\")\n    log.info(f\"Hit {len(error)} errors on actions: {','.join(error)}\")\n    return {\"updated\": updated, \"error\": error}\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n\n    jobs = get_tron_state_from_api(args.tron_url, args.num_runs)\n    log.debug(f\"Found {len(jobs)} jobs.\")\n\n    pods = {}\n    kube_client_args = (\n        [(args.kubeconfig_path[0], kubecontext) for kubecontext in args.kubecontext]\n        if args.kubecontext\n        else [(kubeconfig_path, None) for kubeconfig_path in args.kubeconfig_path]\n    )\n\n    for kubeconfig_path, kubecontext in kube_client_args:\n        pods.update(fetch_pods(kubeconfig_path, kubecontext))\n\n    log.debug(f\"Found {len(pods.keys())} pods.\")\n\n    update_tron_from_pods(jobs, pods, args.tronctl_wrapper, args.do_work)\n"
  },
  {
    "path": "tox.ini",
    "content": "[tox]\nenvlist = py310\n\n[testenv]\nbasepython = python3.10\ndeps =\n    --requirement={toxinidir}/requirements.txt\n    --requirement={toxinidir}/requirements-dev.txt\nusedevelop = true\npassenv = USER PIP_INDEX_URL\ncommands =\n    pre-commit install -f --install-hooks\n    pre-commit run --all-files\n    # tron has been around for a while, so we'll need to slowly add types or make an effort\n    # to get it mypy-clean in one shot - until then, let's only check files that we've added types to\n    mypy --package tron\n    check-requirements\n    # optionally install yelpy requirements - this is after check-requirements since\n    # check-requirements doesn't understand these extra requirements\n    -pip install -r yelp_package/extra_requirements_yelp.txt\n    # we then run tests at the very end so that we can run tests with yelpy requirements\n    py.test -s {posargs:tests}\n\n[flake8]\nignore = E501,E265,E241,E704,E251,W504,E231,W503,E203\n\n[testenv:docs]\ndeps =\n    --requirement={toxinidir}/requirements-docs.txt\n    --requirement={toxinidir}/requirements.txt\nwhitelist_externals=\n    mkdir\ncommands=\n    /bin/rm -rf docs/source/generated/\n    # The last arg to apidoc is a list of excluded paths\n    sphinx-apidoc -f -e -o docs/source/generated/ tron\n    mkdir -p docs\n    sphinx-build -b html -d docs/_build docs/source docs/_build/html\n\n[testenv:itest]\ncommands =\n    make deb_jammy\n    make _itest_jammy\n"
  },
  {
    "path": "tron/__init__.py",
    "content": "# Copyright 2015-2016 Yelp Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# It is imperative that this file not contain any imports from our\n# dependencies. Since this file is imported from setup.py in the\n# setup phase, the dependencies may not exist on disk yet.\n#\n# Don't bump version manually. See `make release` docs in ./Makefile\n__version__ = \"3.10.0\"\n"
  },
  {
    "path": "tron/actioncommand.py",
    "content": "import json\nimport logging\nimport os\nfrom io import StringIO\nfrom shlex import quote\nfrom typing import Any\n\nfrom tron.config import schema\nfrom tron.serialize import filehandler\nfrom tron.utils import timeutils\nfrom tron.utils.observer import Observable\nfrom tron.utils.persistable import Persistable\nfrom tron.utils.state import Machine\n\nlog = logging.getLogger(__name__)\n\n\nclass ActionCommand(Observable):\n    \"\"\"An ActionCommand encapsulates a runnable task that is passed to a node\n    for execution.\n\n    A Node calls:\n      started   (when the command starts)\n      exited    (when the command exits)\n      write_<channel> (when output is received)\n      done      (when the command is finished)\n    \"\"\"\n\n    PENDING = \"pending\"\n    RUNNING = \"running\"\n    EXITING = \"exiting\"\n    COMPLETE = \"complete\"\n    FAILSTART = \"failstart\"\n\n    STATE_MACHINE = Machine(\n        PENDING,\n        **{\n            PENDING: {\n                \"start\": RUNNING,\n                \"exit\": FAILSTART,\n            },\n            RUNNING: {\n                \"exit\": EXITING,\n            },\n            EXITING: {\n                \"close\": COMPLETE,\n            },\n        },\n    )\n\n    STDOUT = \".stdout\"\n    STDERR = \".stderr\"\n\n    def __init__(self, id, command, serializer=None):\n        super().__init__()\n        self.id = id\n        self.command = command\n        self.machine = Machine.from_machine(ActionCommand.STATE_MACHINE)\n        self.exit_status = None\n        self.start_time = None\n        self.end_time = None\n        if serializer:\n            self.stdout = serializer.open(self.STDOUT)\n            self.stderr = serializer.open(self.STDERR)\n        else:\n            self.stdout = filehandler.NullFileHandle\n            self.stderr = filehandler.NullFileHandle\n\n    @property\n    def state(self):\n        return self.machine.state\n\n    def transition_and_notify(self, target):\n        if self.machine.transition(target):\n            self.notify(self.state)\n            return True\n\n    def started(self):\n        if self.machine.check(\"start\"):\n            self.start_time = timeutils.current_timestamp()\n            return self.transition_and_notify(\"start\")\n\n    def exited(self, exit_status):\n        if self.machine.check(\"exit\"):\n            self.end_time = timeutils.current_timestamp()\n            self.exit_status = exit_status\n            return self.transition_and_notify(\"exit\")\n\n    def write_stderr(self, value):\n        self.stderr.write(value)\n\n    def write_stdout(self, value):\n        self.stdout.write(value)\n\n    def done(self):\n        if self.machine.check(\"close\"):\n            self.stdout.close()\n            self.stderr.close()\n            return self.transition_and_notify(\"close\")\n\n    def handle_errback(self, result):\n        \"\"\"Handle an unexpected error while being run. This will likely be\n        an interval error. 
Cleanup the state of this ActionCommand and log\n        something useful for debugging.\n        \"\"\"\n        log.error(f\"Unknown failure for {self}, {str(result)}\")\n        self.exited(result)\n        self.done()\n\n    @property\n    def is_unknown(self):\n        return self.exit_status is None\n\n    @property\n    def is_failed(self):\n        return bool(self.exit_status)\n\n    @property\n    def is_complete(self):\n        \"\"\"Complete implies done and success.\"\"\"\n        return self.machine.state == ActionCommand.COMPLETE\n\n    @property\n    def is_done(self):\n        \"\"\"Done implies no more work will be done, but might not be success.\"\"\"\n        return self.machine.state in (\n            ActionCommand.COMPLETE,\n            ActionCommand.FAILSTART,\n        )\n\n    def __repr__(self):\n        return f\"ActionCommand {self.id} {self.command}: {self.state}\"\n\n\nclass StringBufferStore:\n    \"\"\"A serializer object which can be passed to ActionCommand as a\n    serializer, but stores streams in memory.\n    \"\"\"\n\n    def __init__(self):\n        self.buffers = {}\n\n    def open(self, name):\n        return self.buffers.setdefault(name, StringIO())\n\n    def clear(self):\n        self.buffers.clear()\n\n\n# TODO: TRON-2304 - Cleanup NoActionRunnerFactory\nclass NoActionRunnerFactory:\n    \"\"\"Action runner factory that does not wrap the action run command.\"\"\"\n\n    @classmethod\n    def create(cls, id, command, serializer):\n        return ActionCommand(id, command, serializer)\n\n    @classmethod\n    def build_stop_action_command(cls, _id, _command):\n        \"\"\"It is not possible to stop action commands without a runner.\"\"\"\n        raise NotImplementedError(\"An action_runner is required to stop.\")\n\n    @staticmethod\n    def from_json():\n        return None\n\n    @staticmethod\n    def to_json():\n        return None\n\n\nclass SubprocessActionRunnerFactory(Persistable):\n    \"\"\"Run actions by wrapping them in `action_runner.py`.\"\"\"\n\n    runner_exec_name = \"action_runner.py\"\n    status_exec_name = \"action_status.py\"\n\n    def __init__(self, status_path, exec_path):\n        self.status_path = status_path\n        self.exec_path = exec_path\n\n    @classmethod\n    def from_config(cls, config):\n        return cls(config.remote_status_path, config.remote_exec_path)\n\n    def create(self, id, command, serializer):\n        command = self.build_command(id, command, self.runner_exec_name)\n        return ActionCommand(id, command, serializer)\n\n    def build_command(self, id, command, exec_name):\n        status_path = os.path.join(self.status_path, id)\n        runner_path = os.path.join(self.exec_path, exec_name)\n        return f\"{quote(runner_path)} {quote(status_path)} {quote(command)} {quote(id)}\"\n\n    def build_stop_action_command(self, id, command):\n        command = self.build_command(id, command, self.status_exec_name)\n        run_id = f\"{id}.{command}\"\n        return ActionCommand(run_id, command, StringBufferStore())\n\n    def __eq__(self, other):\n        return (\n            self.__class__ == other.__class__\n            and self.status_path == other.status_path\n            and self.exec_path == other.exec_path\n        )\n\n    def __ne__(self, other):\n        return not self == other\n\n    @staticmethod\n    def from_json(state_data: str) -> dict[str, Any]:\n        try:\n            json_data = json.loads(state_data)\n            deserialized_data = {\n                
\"status_path\": json_data[\"status_path\"],\n                \"exec_path\": json_data[\"exec_path\"],\n            }\n            return deserialized_data\n        except Exception:\n            log.exception(\"Error deserializing SubprocessActionRunnerFactory from JSON\")\n            raise\n\n    @staticmethod\n    def to_json(state_data: dict) -> str:\n        try:\n            return json.dumps(\n                {\n                    \"status_path\": state_data[\"status_path\"],\n                    \"exec_path\": state_data[\"exec_path\"],\n                }\n            )\n        except KeyError:\n            log.exception(\"Missing key in state_data:\")\n            raise\n        except Exception:\n            log.exception(\"Error serializing SubprocessActionRunnerFactory to JSON:\")\n            raise\n\n\ndef create_action_runner_factory_from_config(config):\n    \"\"\"A factory-factory method which returns a callable that can be used to\n    create ActionCommand objects. The factory definition should match the\n    constructor for ActionCommand.\n    \"\"\"\n    if not config or config.runner_type == schema.ActionRunnerTypes.none.value:\n        return NoActionRunnerFactory()\n    elif config.runner_type == schema.ActionRunnerTypes.subprocess.value:\n        return SubprocessActionRunnerFactory.from_config(config)\n    else:\n        raise ValueError(\"Unknown runner type: %s\", config.runner_type)\n"
  },
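A minimal usage sketch (not part of the repo) of the ActionCommand lifecycle above, assuming Observable.notify and NullFileHandle.close are effectively no-ops when nothing is attached; the state machine moves pending -> running -> exiting -> complete:

from tron.actioncommand import ActionCommand

# hypothetical action run id and command, for illustration only
cmd = ActionCommand(id="MASTER.example.1.echo", command="echo hello")
assert cmd.state == ActionCommand.PENDING

cmd.started()   # records start_time and transitions to "running"
cmd.exited(0)   # records exit_status and transitions to "exiting"
cmd.done()      # closes the output handles and transitions to "complete"

assert cmd.is_complete and not cmd.is_failed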
  {
    "path": "tron/api/__init__.py",
    "content": ""
  },
  {
    "path": "tron/api/adapter.py",
    "content": "\"\"\"\n Classes which create external representations of core objects. This allows\n the core objects to remain decoupled from the API and clients. These classes\n act as an adapter between the data format api clients expect, and the internal\n data of an object.\n\"\"\"\nimport functools\nimport os.path\nimport time\nfrom collections.abc import Callable\nfrom typing import Any\nfrom typing import TypeVar\nfrom urllib.parse import quote\n\nfrom tron import actioncommand\nfrom tron import scheduler\nfrom tron.core.actionrun import KubernetesActionRun\nfrom tron.serialize import filehandler\nfrom tron.utils import timeutils\nfrom tron.utils.logreader import read_log_stream_for_action_run\nfrom tron.utils.timeutils import delta_total_seconds\n\nR = TypeVar(\"R\")\n\n\nclass ReprAdapter:\n    \"\"\"Creates a dictionary from the given object for a set of rules.\"\"\"\n\n    field_names: list[str] = []\n    translated_field_names: list[str] = []\n\n    def __init__(self, internal_obj):\n        self._obj = internal_obj\n        self.fields = self._get_field_names()\n        self.translators = self._get_translation_mapping()\n\n    def _get_field_names(self):\n        return self.field_names\n\n    def _get_translation_mapping(self):\n        return {field_name: getattr(self, \"get_%s\" % field_name) for field_name in self.translated_field_names}\n\n    def get_repr(self):\n        repr_data = {field: getattr(self._obj, field) for field in self.fields}\n        translated = {field: func() for field, func in self.translators.items()}\n        repr_data.update(translated)\n        return repr_data\n\n\ndef adapt_many(adapter_class, seq, *args, **kwargs):\n    return [adapter_class(item, *args, **kwargs).get_repr() for item in seq if item is not None]\n\n\ndef toggle_flag(\n    flag_name: str,\n) -> Callable[  # the typing here is funky, this is a decorator factory that returns a decorator\n    # which takes a function with \"one\" argument (really, that argument is just self) and returns some type R\n    # where R is the return type of the decorated function\n    [Callable[[Any], R]],\n    # and that decorator returns another callable (since this is a decorator factory) that takes self (again, the Any)\n    # and returns None (if the flag is False) or R (if True)\n    Callable[[Any], R | None],\n]:\n    \"\"\"Create a decorator which checks if flag_name is true before running\n    the wrapped function. 
If False returns None.\n    \"\"\"\n\n    def wrap(f: Callable[[Any], R]) -> Callable[[Any], R | None]:\n        @functools.wraps(f)\n        def wrapper(self: Any) -> R | None:\n            if getattr(self, flag_name):\n                return f(self)\n            return None\n\n        return wrapper\n\n    return wrap\n\n\nclass RunAdapter(ReprAdapter):\n    \"\"\"Base class for JobRun and ActionRun adapters.\"\"\"\n\n    def get_state(self):\n        return self._obj.state\n\n    def get_node(self):\n        return NodeAdapter(self._obj.node).get_repr()\n\n    def get_duration(self):\n        duration = timeutils.duration(self._obj.start_time, self._obj.end_time)\n        return str(duration or \"\")\n\n\nclass ActionRunAdapter(RunAdapter):\n    \"\"\"Adapt a JobRun and an Action name to an external representation of an\n    ActionRun.\n    \"\"\"\n\n    field_names = [\n        \"id\",\n        \"start_time\",\n        \"end_time\",\n        \"exit_status\",\n        \"action_name\",\n        \"exit_statuses\",\n        \"retries_remaining\",\n        \"original_command\",\n    ]\n\n    translated_field_names = [\n        \"state\",\n        \"node\",\n        \"command\",\n        \"raw_command\",\n        \"requirements\",\n        \"meta\",\n        \"stdout\",\n        \"stderr\",\n        \"duration\",\n        \"job_name\",\n        \"run_num\",\n        \"retries_delay\",\n        \"in_delay\",\n        \"triggered_by\",\n        \"trigger_downstreams\",\n    ]\n\n    def __init__(\n        self,\n        action_run,\n        job_run=None,\n        max_lines=10,\n        include_stdout=False,\n        include_stderr=False,\n        include_meta=False,\n    ):\n        super().__init__(action_run)\n        self.job_run = job_run\n        self.max_lines = max_lines or None\n        self.include_stdout = include_stdout\n        self.include_stderr = include_stderr\n        self.include_meta = include_meta\n\n    def get_raw_command(self):\n        return self._obj.command_config.command\n\n    def get_command(self):\n        return self._obj.rendered_command\n\n    @toggle_flag(\"job_run\")\n    def get_requirements(self):\n        action_name = self._obj.action_name\n        required = self.job_run.action_graph.get_dependencies(action_name)\n        return [act.name for act in required]\n\n    def _get_serializer(self, path: str | None = None) -> filehandler.OutputStreamSerializer:\n        base_path = filehandler.OutputPath(path) if path else self._obj.output_path\n        return filehandler.OutputStreamSerializer(base_path)\n\n    def _get_alternate_output_paths(self):\n        try:\n            namespace, jobname, run_num, action = self._obj.id.split(\".\")\n        except Exception:\n            return None\n\n        # Check to see if the output might have ended up in any alternate locations.\n        for alt_path in self._obj.STDOUT_PATHS:\n            formatted_alt_path = os.path.join(\n                # This ugliness is getting the \"root output directory\"\n                self._obj.context.next.next.base.job.output_path.base,\n                alt_path.format(\n                    namespace=namespace,\n                    jobname=jobname,\n                    run_num=run_num,\n                    action=action,\n                ),\n            )\n            if os.path.exists(formatted_alt_path):\n                yield formatted_alt_path\n\n    @toggle_flag(\"include_meta\")\n    def get_meta(self) -> list[str]:\n        if not isinstance(self._obj, 
KubernetesActionRun):\n            return [\"When this action is migrated to Kubernetes, this will contain Tron/task_processing output.\"]\n\n        # We're reusing the \"old\" (i.e., SSH/Mesos) logging files for task_processing output since\n        # that won't make it into anything but Splunk\n        filename = actioncommand.ActionCommand.STDERR\n        output: list[str] = self._get_serializer().tail(filename, self.max_lines)\n        if not output:\n            for alt_path in self._get_alternate_output_paths():\n                output = self._get_serializer(alt_path).tail(filename, self.max_lines)\n                if output:\n                    return output\n        return output\n\n    @toggle_flag(\"include_stdout\")\n    def get_stdout(self) -> list[str]:\n        if isinstance(self._obj, KubernetesActionRun):\n            # it's possible that we have a job that logs to the samestream as another job on a\n            # different master (e.g., 1 job in pnw-devc and another in norcal-devc), so we\n            # additionally filter by the cluster in each log message.\n            # we get this information from the last attempt for this ActionRun, but\n            # all of the attempts should always have the same value. This value is guaranteed\n            # to be here as it's part of the PaaSTA Contract, but there's also a fallback in\n            # read_log_stream_for_action_run() to use the current superregion for the tron\n            # master should something go horribly wrong\n            paasta_cluster = None\n            if self._obj.attempts:\n                paasta_cluster = self._obj.attempts[-1].command_config.env.get(\"PAASTA_CLUSTER\")\n\n            return read_log_stream_for_action_run(\n                action_run_id=self._obj.id,\n                component=\"stdout\",\n                # we update the start time of an ActionRun on a retry so we can't just use\n                # that start time to figure out when we should start displaying logs for.\n                # instead, we use the first attempt's start time as the date from which to\n                # start getting logs from and the last attempt's end time as the date at\n                # which we stop getting logs from.\n                # in the case of an action that completed on its initial run, there will\n                # only be one attempt, but that's fine as these single attempts will still\n                # have the correct information.\n                # XXX: this is suboptimal if there's many days between retries\n                min_date=self._obj.attempts[0].start_time if self._obj.attempts else None,\n                max_date=self._obj.attempts[-1].end_time if self._obj.attempts else None,\n                paasta_cluster=paasta_cluster,\n                max_lines=self.max_lines,\n            )\n\n        filename = actioncommand.ActionCommand.STDOUT\n        output = self._get_serializer().tail(filename, self.max_lines)\n        if not output:\n            for alt_path in self._get_alternate_output_paths():\n                output = self._get_serializer(alt_path).tail(filename, self.max_lines)\n                if output:\n                    break\n        return output\n\n    @toggle_flag(\"include_stderr\")\n    def get_stderr(self) -> list[str]:\n        if isinstance(self._obj, KubernetesActionRun):\n            # it's possible that we have a job that logs to the samestream as another job on a\n            # different master (e.g., 1 job in pnw-devc and another in norcal-devc), so we\n   
         # additionally filter by the cluster in each log message.\n            # we get this information from the last attempt for this ActionRun, but\n            # all of the attempts should always have the same value. This value is guaranteed\n            # to be here as it's part of the PaaSTA Contract, but there's also a fallback in\n            # read_log_stream_for_action_run() to use the current superregion for the tron\n            # master should something go horribly wrong\n            paasta_cluster = None\n            if self._obj.attempts:\n                paasta_cluster = self._obj.attempts[-1].command_config.env.get(\"PAASTA_CLUSTER\")\n\n            return read_log_stream_for_action_run(\n                action_run_id=self._obj.id,\n                component=\"stderr\",\n                # we update the start time of an ActionRun on a retry so we can't just use\n                # that start time to figure out when we should start displaying logs for.\n                # instead, we use the first attempt's start time as the date from which to\n                # start getting logs from and the last attempt's end time as the date at\n                # which we stop getting logs from.\n                # in the case of an action that completed on its initial run, there will\n                # only be one attempt, but that's fine as these single attempts will still\n                # have the correct information.\n                # XXX: this is suboptimal if there's many days between retries\n                min_date=self._obj.attempts[0].start_time if self._obj.attempts else None,\n                max_date=self._obj.attempts[-1].end_time if self._obj.attempts else None,\n                paasta_cluster=paasta_cluster,\n                max_lines=self.max_lines,\n            )\n\n        filename = actioncommand.ActionCommand.STDERR\n        output = self._get_serializer().tail(filename, self.max_lines)\n        if not output:\n            for alt_path in self._get_alternate_output_paths():\n                output = self._get_serializer(alt_path).tail(filename, self.max_lines)\n                if output:\n                    break\n        return output\n\n    def get_job_name(self):\n        return self._obj.job_run_id.rsplit(\".\", 1)[-2]\n\n    def get_run_num(self):\n        return self._obj.job_run_id.split(\".\")[-1]\n\n    def get_retries_delay(self):\n        if self._obj.retries_delay:\n            return str(self._obj.retries_delay)\n\n    def get_in_delay(self):\n        if self._obj.in_delay is not None:\n            return self._obj.in_delay.getTime() - time.time()\n\n    def get_triggered_by(self) -> str:\n        remaining = set(self._obj.remaining_triggers)\n        all_triggers = sorted(self._obj.rendered_triggers)\n        return \", \".join(f\"{trig}{' (done)' if trig not in remaining else ''}\" for trig in all_triggers)\n\n    def get_trigger_downstreams(self) -> str:\n        triggers_to_emit = self._obj.triggers_to_emit()\n        return \", \".join(sorted(triggers_to_emit))\n\n\nclass ActionGraphAdapter:\n    def __init__(self, action_graph):\n        self.action_graph = action_graph\n\n    def get_repr(self):\n        def build(action_name):\n            action = self.action_graph[action_name]\n            dependencies = self.action_graph.get_dependencies(action_name, include_triggers=True)\n            return {\n                \"name\": action.name,\n                \"command\": action.command,\n                \"dependencies\": [d.name for d in 
dependencies],\n            }\n\n        return [build(action) for action in self.action_graph.names(include_triggers=True)]\n\n\nclass ActionRunGraphAdapter:\n    def __init__(self, action_run_collection):\n        self.action_runs = action_run_collection\n\n    def get_repr(self):\n        def build(action_run):\n            graph = self.action_runs.action_graph\n            dependencies = graph.get_dependencies(action_run.action_name, include_triggers=True)\n            return {\n                \"id\": action_run.id,\n                \"name\": action_run.action_name,\n                \"command\": action_run.rendered_command,\n                \"raw_command\": action_run.command_config.command,\n                \"state\": action_run.state,\n                \"start_time\": action_run.start_time,\n                \"end_time\": action_run.end_time,\n                \"dependencies\": [d.name for d in dependencies],\n            }\n\n        def build_trigger(trigger_name):\n            graph = self.action_runs.action_graph\n            trigger = graph[trigger_name]\n            dependencies = graph.get_dependencies(trigger_name, include_triggers=True)\n            return {\n                \"name\": trigger.name,\n                \"command\": trigger.command,\n                \"dependencies\": [d.name for d in dependencies],\n                \"state\": \"unknown\",  # TODO: TRON-2382: why is this hardcoded and never updated? Can we update this after improving our API timings?\n            }\n\n        return [build(action_run) for action_run in self.action_runs] + [\n            build_trigger(trigger_name) for trigger_name in self.action_runs.action_graph.all_triggers\n        ]\n\n\nclass JobRunAdapter(RunAdapter):\n\n    field_names = [\n        \"id\",\n        \"run_num\",\n        \"run_time\",\n        \"start_time\",\n        \"end_time\",\n        \"manual\",\n        \"job_name\",\n    ]\n    translated_field_names = [\n        \"state\",\n        \"node\",\n        \"duration\",\n        \"url\",\n        \"runs\",\n        \"action_graph\",\n    ]\n\n    def __init__(\n        self,\n        job_run,\n        include_action_runs=False,\n        include_action_graph=False,\n    ):\n        super().__init__(job_run)\n        self.include_action_runs = include_action_runs\n        self.include_action_graph = include_action_graph\n\n    def get_url(self):\n        return f\"/jobs/{self._obj.job_name}/{self._obj.run_num}\"\n\n    @toggle_flag(\"include_action_runs\")\n    def get_runs(self):\n        return adapt_many(ActionRunAdapter, self._obj.action_runs, self._obj)\n\n    @toggle_flag(\"include_action_graph\")\n    def get_action_graph(self):\n        return ActionRunGraphAdapter(self._obj.action_runs).get_repr()\n\n\nclass JobAdapter(ReprAdapter):\n\n    field_names = [\"status\", \"all_nodes\", \"allow_overlap\", \"queueing\"]\n    translated_field_names = [\n        \"name\",\n        \"scheduler\",\n        \"action_names\",\n        \"node_pool\",\n        \"last_success\",\n        \"next_run\",\n        \"url\",\n        \"runs\",\n        \"max_runtime\",\n        \"action_graph\",\n        \"monitoring\",\n        \"expected_runtime\",\n        \"actions_expected_runtime\",\n    ]\n\n    def __init__(\n        self,\n        job,\n        include_job_runs=False,\n        include_action_runs=False,\n        include_action_graph=True,\n        include_node_pool=True,\n        num_runs=None,\n    ):\n        super().__init__(job)\n        self.include_job_runs = 
include_job_runs\n        self.include_action_runs = include_action_runs\n        self.include_action_graph = include_action_graph\n        self.include_node_pool = include_node_pool\n        self.num_runs = num_runs\n\n    def get_name(self):\n        return self._obj.get_name()\n\n    def get_monitoring(self):\n        return self._obj.get_monitoring()\n\n    def get_scheduler(self):\n        return SchedulerAdapter(self._obj.scheduler).get_repr()\n\n    def get_action_names(self):\n        return list(self._obj.action_graph.names())\n\n    @toggle_flag(\"include_node_pool\")\n    def get_node_pool(self):\n        return NodePoolAdapter(self._obj.node_pool).get_repr()\n\n    def get_last_success(self):\n        last_success = self._obj.runs.last_success\n        return last_success.end_time if last_success else None\n\n    def get_next_run(self):\n        next_run = self._obj.runs.next_run\n        return next_run.run_time if next_run else None\n\n    def get_url(self):\n        return f\"/jobs/{quote(self._obj.get_name())}\"\n\n    @toggle_flag(\"include_job_runs\")\n    def get_runs(self):\n        runs = adapt_many(\n            JobRunAdapter,\n            list(self._obj.runs)[: self.num_runs or None],\n            self.include_action_runs,\n        )\n        return runs\n\n    def get_max_runtime(self):\n        return str(self._obj.max_runtime)\n\n    def get_expected_runtime(self):\n        return delta_total_seconds(self._obj.expected_runtime)\n\n    def get_actions_expected_runtime(self):\n        return self._obj.action_graph.expected_runtime\n\n    @toggle_flag(\"include_action_graph\")\n    def get_action_graph(self):\n        return ActionGraphAdapter(self._obj.action_graph).get_repr()\n\n\nclass JobIndexAdapter(ReprAdapter):\n\n    translated_field_names = [\"name\", \"actions\"]\n\n    def get_name(self):\n        return self._obj.get_name()\n\n    def get_actions(self):\n        def adapt_run(run):\n            return {\"name\": run.action_name, \"command\": run.command_config.command}\n\n        job_run = self._obj.get_runs().get_newest()\n        if not job_run:\n            return []\n        return [adapt_run(action_run) for action_run in job_run.action_runs]\n\n\nclass SchedulerAdapter(ReprAdapter):\n\n    translated_field_names = [\"value\", \"type\", \"jitter\"]\n\n    def get_value(self):\n        return self._obj.get_value()\n\n    def get_type(self):\n        return self._obj.get_name()\n\n    def get_jitter(self):\n        return scheduler.get_jitter_str(self._obj.get_jitter())\n\n\nclass EventAdapter(ReprAdapter):\n\n    field_names = [\"name\", \"entity\", \"time\"]\n    translated_field_names = [\"level\"]\n\n    def get_level(self):\n        return self._obj.level.label\n\n\nclass NodeAdapter(ReprAdapter):\n    field_names = [\"name\", \"hostname\", \"username\", \"port\"]\n\n\nclass NodePoolAdapter(ReprAdapter):\n    translated_field_names = [\"name\", \"nodes\"]\n\n    def get_name(self):\n        return self._obj.get_name()\n\n    def get_nodes(self):\n        return adapt_many(NodeAdapter, self._obj.get_nodes())\n"
  },
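A standalone sketch of how the toggle_flag decorator above behaves; ExampleAdapter is illustrative and not part of the repo:

from tron.api.adapter import toggle_flag


class ExampleAdapter:
    def __init__(self, include_stdout: bool):
        self.include_stdout = include_stdout

    @toggle_flag("include_stdout")
    def get_stdout(self):
        return ["line one", "line two"]


# the wrapped method only runs when the named instance flag is truthy,
# otherwise the wrapper short-circuits and returns None
assert ExampleAdapter(include_stdout=True).get_stdout() == ["line one", "line two"]
assert ExampleAdapter(include_stdout=False).get_stdout() is None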
  {
    "path": "tron/api/async_resource.py",
    "content": "import threading\nimport time\n\nfrom twisted.internet import threads\nfrom twisted.web import server\n\nfrom tron.metrics import timer\n\n\ndef report_resource_request(resource, request, duration_ms):\n    timer(\n        name=f\"tron.api.{resource.__class__.__name__}\",\n        delta=duration_ms,\n        dimensions={\"method\": request.method.decode()},\n    )\n\n\nclass AsyncResource:\n    capacity = 10\n    semaphore = threading.Semaphore(value=capacity)\n    lock = threading.Lock()\n\n    @staticmethod\n    def finish(result, request, resource):\n        result, duration_ms = result\n        request.write(result)\n        request.finish()\n        report_resource_request(resource, request, duration_ms)\n\n    @staticmethod\n    def process(fn, resource, request):\n        start = time.time()\n        with AsyncResource.semaphore:\n            result = fn(resource, request)\n        duration_ms = 1000 * (time.time() - start)\n        return result, duration_ms\n\n    @staticmethod\n    def bounded(fn):\n        def wrapper(resource, request):\n            d = threads.deferToThread(\n                AsyncResource.process,\n                fn,\n                resource,\n                request,\n            )\n            d.addCallback(AsyncResource.finish, request, resource)\n            d.addErrback(request.processingFailed)\n            return server.NOT_DONE_YET\n\n        return wrapper\n\n    @staticmethod\n    def exclusive(fn):\n        def wrapper(resource, request):\n            # ensures only one exclusive request starts consuming the semaphore\n            start = time.time()\n            with AsyncResource.lock:\n                # this will wait until all bounded requests finished processing\n                for _ in range(AsyncResource.capacity):\n                    AsyncResource.semaphore.acquire()\n                try:\n                    return fn(resource, request)\n                finally:\n                    for _ in range(AsyncResource.capacity):\n                        AsyncResource.semaphore.release()\n                    duration_ms = 1000 * (time.time() - start)\n                    report_resource_request(resource, request, duration_ms)\n\n        return wrapper\n"
  },
  {
    "path": "tron/api/auth.py",
    "content": "import logging\nimport os\nimport re\nfrom functools import lru_cache\nfrom typing import NamedTuple\n\nimport cachetools.func\nimport requests\nfrom twisted.web.server import Request\n\n\nlogger = logging.getLogger(__name__)\nAUTH_CACHE_SIZE = 50000\nAUTH_CACHE_TTL = 30 * 60\nSERVICE_NAME_PATH_PATTERN = re.compile(r\"^/api/jobs/([^/.]+)\")\n\n\nclass AuthorizationOutcome(NamedTuple):\n    authorized: bool\n    reason: str\n\n\nclass AuthorizationFilter:\n    \"\"\"API request authorization via external system\"\"\"\n\n    def __init__(self, endpoint: str, enforce: bool):\n        \"\"\"Constructor\n\n        :param str endpoint: HTTP endpoint of external authorization system\n        :param bool enforce: whether to enforce authorization decisions\n        \"\"\"\n        self.endpoint = endpoint\n        self.enforce = enforce\n        self.session = requests.Session()\n\n    @classmethod\n    @lru_cache(maxsize=1)\n    def get_from_env(cls) -> \"AuthorizationFilter\":\n        return cls(\n            endpoint=os.getenv(\"API_AUTH_ENDPOINT\", \"\"),\n            enforce=bool(os.getenv(\"API_AUTH_ENFORCE\", \"\")),\n        )\n\n    def is_request_authorized(self, request: Request) -> AuthorizationOutcome:\n        \"\"\"Check if API request is authorized\n\n        :param Request request: API request object\n        :return: auth outcome\n        \"\"\"\n        if not self.endpoint:\n            return AuthorizationOutcome(True, \"Auth not enabled\")\n        token = (request.getHeader(\"Authorization\") or \"\").strip()\n        token = token.split()[-1] if token else \"\"  # removes \"Bearer\" prefix\n        url_path = request.path.decode() if request.path is not None else \"\"  # type: ignore[attr-defined] # mypy does not like what twisted is doing here\n        auth_outcome = self._is_request_authorized_impl(\n            # path and method are byte arrays in twisted\n            path=url_path,\n            token=token,\n            method=request.method.decode(),\n            service=self._extract_service_from_path(url_path),\n        )\n        return auth_outcome if self.enforce else AuthorizationOutcome(True, \"Auth dry-run\")\n\n    @cachetools.func.ttl_cache(maxsize=AUTH_CACHE_SIZE, ttl=AUTH_CACHE_TTL)\n    def _is_request_authorized_impl(\n        self,\n        path: str,\n        token: str,\n        method: str,\n        service: str | None,\n    ) -> AuthorizationOutcome:\n        \"\"\"Check if API request is authorized\n\n        :param str path: API path\n        :param str token: authentication token\n        :param str method: http method\n        :return: auth outcome\n        \"\"\"\n        try:\n            response = self.session.post(\n                url=self.endpoint,\n                json={\n                    \"input\": {\n                        \"path\": path,\n                        \"backend\": \"tron\",\n                        \"token\": token,\n                        \"method\": method.lower(),\n                        \"service\": service,\n                    },\n                },\n                timeout=2,\n            ).json()\n        except Exception as e:\n            logger.exception(f\"Issue communicating with auth endpoint: {e}\")\n            return AuthorizationOutcome(False, \"Auth backend error\")\n\n        auth_result_allowed = response.get(\"result\", {}).get(\"allowed\")\n        if auth_result_allowed is None:\n            return AuthorizationOutcome(False, \"Malformed auth response\")\n\n        if not 
auth_result_allowed:\n            reason = response[\"result\"].get(\"reason\", \"Denied\")\n            return AuthorizationOutcome(False, reason)\n\n        reason = response[\"result\"].get(\"reason\", \"Ok\")\n        return AuthorizationOutcome(True, reason)\n\n    @staticmethod\n    def _extract_service_from_path(path: str) -> str | None:\n        \"\"\"If a request path contains a service name, extract it.\n\n        Example:\n        /api/jobs/someservice.instance/110/run -> someservice\n\n        :param str path: request path\n        :return: service name, or None if not found\n        \"\"\"\n        match = SERVICE_NAME_PATH_PATTERN.search(path)\n        return match.group(1) if match else None\n"
  },
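A quick illustration (not part of the repo) of the service-extraction helper above; only the job namespace before the first '.' or '/' is captured:

from tron.api.auth import AuthorizationFilter

# matches the example given in the docstring
assert AuthorizationFilter._extract_service_from_path("/api/jobs/someservice.instance/110/run") == "someservice"
# paths without a job component yield None
assert AuthorizationFilter._extract_service_from_path("/api/config") is None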
  {
    "path": "tron/api/controller.py",
    "content": "\"\"\"\nWeb Controllers for the API.\n\"\"\"\nimport logging\nfrom typing import TYPE_CHECKING\nfrom typing import TypedDict\n\nfrom tron import yaml\nfrom tron.config.manager import ConfigManager\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.jobrun import JobRun\nfrom tron.eventbus import EventBus\n\nif TYPE_CHECKING:\n    from tron.mcp import MasterControlProgram\n\nlog = logging.getLogger(__name__)\n\n\nclass UnknownCommandError(Exception):\n    \"\"\"Exception raised when a controller received an unknown command.\"\"\"\n\n\nclass InvalidCommandForActionState(Exception):\n    \"\"\"\n    Exception raised when a controller attempts a command on an action in a state\n    that does not support that command (e.g., skipping a successful run).\n    \"\"\"\n\n    def __init__(self, command: str, action_name: str, action_state: str) -> None:\n        self.command = command\n        self.action_name = action_name\n        self.action_state = action_state\n        self.message = f\"Failed to {command} on {action_name}. State is {action_state}.\"\n        super().__init__()\n\n\nclass JobCollectionController:\n    def __init__(self, job_collection):\n        self.job_collection = job_collection\n\n    def handle_command(self, command, old_name=None, new_name=None):\n        if command == \"move\":\n            if old_name not in self.job_collection.get_names():\n                return f\"Error: {old_name} doesn't exist\"\n            if new_name in self.job_collection.get_names():\n                return f\"Error: {new_name} exists already\"\n            return self.job_collection.move(old_name, new_name)\n\n        raise UnknownCommandError(f\"Unknown command {command}. Try running this on an individual job or action run id\")\n\n\nclass ActionRunController:\n\n    mapped_commands = {\n        \"start\",\n        \"success\",\n        \"cancel\",\n        \"fail\",\n        \"skip\",\n        \"stop\",\n        \"kill\",\n        \"retry\",\n        \"recover\",\n    }\n\n    def __init__(self, action_run: ActionRun, job_run: JobRun) -> None:\n        self.action_run = action_run\n        self.job_run = job_run\n\n    def handle_command(self, command, **kwargs):\n        if command not in self.mapped_commands:\n            raise UnknownCommandError(\n                f\"Unknown command {command}. 
You can only do one of the following to Action runs: {self.mapped_commands}\"\n            )\n\n        if command == \"start\" and self.job_run.is_scheduled:\n            return \"Action run cannot be started if its job run is still \" \"scheduled.\"\n\n        if command == \"recover\" and not self.action_run.is_unknown:\n            return \"Action run cannot be recovered if its state is not unknown.\"\n\n        if command in (\"stop\", \"kill\"):\n            return self.handle_termination(command)\n\n        if command == \"retry\":\n            original_command = not kwargs.get(\"use_latest_command\", False)\n            return self.handle_retry(original_command)\n\n        if getattr(self.action_run, command)():\n            msg = \"%s now in state %s\"\n            return msg % (self.action_run, self.action_run.state)\n\n        raise InvalidCommandForActionState(\n            command=command,\n            action_name=self.action_run.name,\n            action_state=self.action_run.state,\n        )\n\n    def handle_termination(self, command):\n        try:\n            # Extra message is only used for killing mesos action as warning so far.\n            extra_msg = getattr(self.action_run, command)()\n            msg = \"Attempting to %s %s\"\n            if extra_msg is not None:\n                msg = msg + \"\\n\" + extra_msg\n            return msg % (command, self.action_run)\n        except NotImplementedError as e:\n            msg = \"Failed to %s: %s\"\n            return msg % (command, e)\n\n    def handle_retry(self, original_command):\n        cleanup_run = self.job_run.action_runs.cleanup_action_run\n        if cleanup_run and cleanup_run.is_done:\n            return \"JobRun has run a cleanup action, use rerun instead\"\n\n        if self.action_run.retry(original_command=original_command):\n            return \"Retrying %s\" % self.action_run\n        else:\n            return \"Failed to schedule retry for %s\" % self.action_run\n\n\nclass JobRunController:\n\n    mapped_commands = {\"start\", \"success\", \"cancel\", \"fail\", \"stop\"}\n\n    def __init__(self, job_run, job_scheduler):\n        self.job_run = job_run\n        self.job_scheduler = job_scheduler\n\n    def handle_command(self, command):\n        # as of TRON-1340, `tronctl backfill` depends on this response value\n        # (i.e. \"Created JobRun:<job_run_name>\"). be careful when changing this!\n        if command == \"restart\" or command == \"rerun\":\n            runs = self.job_scheduler.manual_start(self.job_run.run_time)\n            return \"Created %s\" % \",\".join(str(run) for run in runs)\n\n        if command in self.mapped_commands:\n            if getattr(self.job_run, command)():\n                return f\"{self.job_run} now in state {self.job_run.state}\"\n\n            msg = \"Failed to %s, %s in state %s\"\n            return msg % (command, self.job_run, self.job_run.state)\n\n        if command == \"retry\":\n            raise UnknownCommandError(\n                \"Error: Job runs cannot be retried, only individual actions can. Did you mean 'rerun'?\"\n            )\n        else:\n            raise UnknownCommandError(\n                f\"Unknown command {command}. 
Only one of the following applies to a Job run: {self.mapped_commands}\"\n            )\n\n\nclass JobController:\n    def __init__(self, job_scheduler):\n        self.job_scheduler = job_scheduler\n\n    def handle_command(self, command, run_time=None):\n        if command == \"enable\":\n            self.job_scheduler.enable()\n            return \"%s is enabled\" % self.job_scheduler.get_job()\n\n        elif command == \"disable\":\n            self.job_scheduler.disable()\n            return \"%s is disabled\" % self.job_scheduler.get_job()\n\n        elif command == \"start\":\n            runs = self.job_scheduler.manual_start(run_time=run_time)\n            return \"Created %s\" % \",\".join(str(run) for run in runs)\n\n        if command == \"retry\":\n            raise UnknownCommandError(\n                \"Error: A whole Job cannot be retried, only individual actions for a specific job run id can.\"\n            )\n        elif command in [\"stop\", \"success\", \"cancel\", \"fail\", \"stop\"]:\n            raise UnknownCommandError(\n                f\"Error: {command} doesn't apply to a whole Job. Please run this on an individual job run id. Hint: try '{self.job_scheduler.get_job()}.-1' for the latest job id\"\n            )\n        else:\n            raise UnknownCommandError(\n                f\"Unknown command {command}. Does it apply to a whole job? Try a specific Job id or individual action\"\n            )\n\n\nclass ConfigResponse(TypedDict):\n    config: str\n    hash: str\n\n\nclass ConfigController:\n    \"\"\"Control config. Return config contents and accept updated configuration\n    from the API.\n    \"\"\"\n\n    DEFAULT_NAMED_CONFIG = \"\\njobs:\\n\"\n\n    def __init__(self, mcp: \"MasterControlProgram\") -> None:\n        self.mcp = mcp\n        self.config_manager: ConfigManager = mcp.get_config_manager()\n\n    def _get_config_content(self, name: str) -> str:\n        if name not in self.config_manager:\n            return self.DEFAULT_NAMED_CONFIG\n        return self.config_manager.read_raw_config(name)\n\n    def read_config(self, name: str) -> ConfigResponse:\n        config_content = self._get_config_content(name)\n        config_hash = self.config_manager.get_hash(name)\n        return {\"config\": config_content, \"hash\": config_hash}\n\n    def read_all_configs(self) -> dict[str, ConfigResponse]:\n        configs = {}\n\n        for service in self.config_manager.get_namespaces():\n            config: ConfigResponse = {\n                \"config\": self._get_config_content(service),\n                \"hash\": self.config_manager.get_hash(service),\n            }\n            configs[service] = config\n\n        return configs\n\n    def check_config(self, name, content, config_hash):\n        \"\"\"Update a configuration fragment and reload the MCP.\"\"\"\n        if self.config_manager.get_hash(name) != config_hash:\n            return \"Configuration update will fail: config is stale, try again\"\n\n        try:\n            content = yaml.load(content)\n            self.config_manager.validate_with_fragment(name, content)\n        except Exception as e:\n            return \"Configuration update will fail: %s\" % str(e)\n\n    def update_config(self, name, content, config_hash):\n        \"\"\"Update a configuration fragment and reload the MCP.\"\"\"\n        if self.config_manager.get_hash(name) != config_hash:\n            return \"Configuration has changed. 
Please try again.\"\n\n        old_config = self.read_config(name)[\"config\"]\n        try:\n            log.info(f\"Reconfiguring namespace {name}\")\n            self.config_manager.write_config(name, content)\n            self.mcp.reconfigure(namespace=name)\n        except Exception as e:\n            log.error(f\"Configuration for {name} update failed: {e}\")\n            log.error(\"Reconfiguring with the previous good configuration\")\n            try:\n                self.config_manager.write_config(name, old_config)\n                self.mcp.reconfigure(namespace=name)\n            except Exception as e:\n                log.error(\"Could not restore old config: %s\" % e)\n                return str(e)\n            return str(e)\n\n    def delete_config(self, name, content, config_hash):\n        \"\"\"Delete a configuration fragment and reload the MCP.\"\"\"\n        if self.config_manager.get_hash(name) != config_hash:\n            return \"Configuration has changed. Please try again.\"\n\n        if content != \"\":\n            return \"Configuration content is not empty, will not delete.\"\n\n        try:\n            log.info(f\"Deleting namespace {name}\")\n            self.config_manager.delete_config(name)\n            self.mcp.reconfigure(namespace=name)\n        except Exception as e:\n            log.error(f\"Deleting configuration for {name} failed: {e}\")\n            return str(e)\n\n    def get_namespaces(self):\n        return self.config_manager.get_namespaces()\n\n\nclass EventsController:\n    COMMANDS = {\"publish\", \"discard\"}\n\n    def publish(self, event):\n        if not EventBus.instance:\n            return dict(error=\"EventBus disabled\")\n\n        if EventBus.has_event(event):\n            msg = f\"event {event} already published\"\n            log.warning(msg)\n            return dict(response=msg)\n\n        if not EventBus.publish(event):\n            msg = f\"could not publish {event}\"\n            log.error(msg)\n            return dict(error=msg)\n\n        return dict(response=\"OK\")\n\n    def discard(self, event):\n        if not EventBus.instance:\n            return dict(error=\"EventBus disabled\")\n\n        if not EventBus.discard(event):\n            msg = f\"could not discard {event}\"\n            log.error(msg)\n            return dict(error=msg)\n\n        return dict(response=\"OK\")\n\n    def info(self):\n        if not EventBus.instance:\n            return dict(error=\"EventBus disabled\")\n\n        return dict(response=EventBus.instance.event_log)\n"
  },
  {
    "path": "tron/api/requestargs.py",
    "content": "\"\"\"Functions for returning validated values from a twisted.web.Request object.\n\"\"\"\nimport datetime\n\nDATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n\ndef get_integer(request, key):\n    \"\"\"Returns the first value in the request args for the given key, if that\n    value is an integer. Otherwise returns None.\n    \"\"\"\n    value = get_string(request, key)\n    if value is None or not value.isdigit():\n        return None\n\n    return int(value)\n\n\ndef get_string(request, key):\n    \"\"\"Returns the first value in the request args for a given key.\"\"\"\n    if not request.args:\n        return None\n\n    if type(key) is not bytes:\n        key = key.encode()\n\n    if key not in request.args:\n        return None\n\n    val = request.args[key][0]\n    if val is not None and type(val) is bytes:\n        val = val.decode()\n\n    return val\n\n\ndef get_bool(request, key, default=None):\n    \"\"\"Returns True if the key exists and is truthy in the request args.\"\"\"\n    int_value = get_integer(request, key)\n    if int_value is None:\n        return default\n\n    return bool(int_value)\n\n\ndef get_datetime(request, key):\n    \"\"\"Returns the first value in the request args for a given key. Casts to\n    a datetime. Returns None if the value cannot be converted to datetime.\n    \"\"\"\n    val = get_string(request, key)\n    if not val:\n        return None\n\n    try:\n        return datetime.datetime.strptime(val, DATE_FORMAT)\n    except ValueError:\n        return None\n"
  },
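A small sketch (not part of the repo) of the request-arg helpers above, using a stand-in for the twisted Request object with byte-keyed args:

from types import SimpleNamespace

from tron.api import requestargs

# twisted stores query args as bytes -> list-of-bytes
request = SimpleNamespace(args={b"num_runs": [b"5"], b"include_stdout": [b"1"], b"name": [b"MASTER"]})

assert requestargs.get_integer(request, "num_runs") == 5
assert requestargs.get_bool(request, "include_stdout") is True
assert requestargs.get_string(request, "name") == "MASTER"
assert requestargs.get_bool(request, "missing", default=False) is False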
  {
    "path": "tron/api/resource.py",
    "content": "\"\"\"\nWeb Services Interface used by command-line clients and web frontend to\nview current state, event history and send commands to trond.\n\"\"\"\nimport collections\nimport datetime\nimport json\nimport logging\nimport traceback\n\nimport staticconf\nfrom prometheus_client.twisted import MetricsResource as MetricsResourceProm\nfrom twisted.web import http\nfrom twisted.web import resource\nfrom twisted.web import server\nfrom twisted.web import static\n\nfrom tron import __version__\nfrom tron.api import adapter\nfrom tron.api import controller\nfrom tron.api import requestargs\nfrom tron.api.async_resource import AsyncResource\nfrom tron.api.auth import AuthorizationFilter\nfrom tron.config.static_config import get_config_watcher\nfrom tron.config.static_config import NAMESPACE\nfrom tron.metrics import meter\nfrom tron.metrics import view_all_metrics\nfrom tron.utils import maybe_decode\n\nlog = logging.getLogger(__name__)\n\n\nclass JSONEncoder(json.JSONEncoder):\n    \"\"\"Custom JSON for certain objects\"\"\"\n\n    def default(self, o):\n        if isinstance(o, datetime.datetime):\n            return o.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n        if isinstance(o, datetime.date):\n            return o.isoformat()\n\n        if isinstance(o, collections.abc.KeysView):\n            return list(o)\n\n        return super().default(o)\n\n\ndef respond(request, response, code=None, headers=None):\n    \"\"\"Helper to generate a json response\"\"\"\n    if code is None:\n        if type(response) is dict and response.get(\"error\"):\n            code = http.INTERNAL_SERVER_ERROR\n        else:\n            code = http.OK\n    request.setResponseCode(code)\n    request.setHeader(b\"content-type\", b\"application/json; charset=utf-8\")\n    request.setHeader(b\"Access-Control-Allow-Origin\", b\"*\")\n    for key, val in (headers or {}).items():\n        request.setHeader(str(key), str(val))\n\n    result = (\n        json.dumps(\n            response,\n            cls=JSONEncoder,\n        )\n        if response\n        else \"\"\n    )\n\n    if type(result) is not bytes:\n        result = result.encode(\"utf8\")\n\n    return result\n\n\ndef handle_command(request, api_controller, obj, **kwargs):\n    \"\"\"Handle a request to perform a command.\"\"\"\n    command = requestargs.get_string(request, \"command\")\n    log.info(\"Handling '%s' request on %s\", command, obj)\n    try:\n        response = api_controller.handle_command(command, **kwargs)\n        return respond(request=request, response={\"result\": response})\n    except controller.UnknownCommandError:\n        error_msg = f\"Unknown command '{command}' for '{obj}'\"\n        log.warning(error_msg)\n        return respond(\n            request=request,\n            response={\"error\": error_msg},\n            code=http.NOT_IMPLEMENTED,\n        )\n    except controller.InvalidCommandForActionState as e:\n        log.warning(e.message)\n        return respond(\n            request=request,\n            response={\"error\": e.message},\n            code=http.CONFLICT,\n        )\n    except Exception as e:\n        log.exception(\"%r while executing command %s for %s\", e, command, obj)\n        trace = traceback.format_exc()\n        return respond(request=request, response={\"error\": trace})\n\n\nclass AuthenticatedResource(resource.Resource):\n    def render(self, request):\n        \"\"\"Overriding base `render` method to support authentication\"\"\"\n        auth_outcome = 
AuthorizationFilter.get_from_env().is_request_authorized(request)\n        if not auth_outcome.authorized:\n            return respond(\n                request=request,\n                response={\"error\": f\"Auth failed (reason: {auth_outcome.reason})\"},\n                code=http.FORBIDDEN,\n                headers={\"X-Auth-Failure-Reason\": auth_outcome.reason},\n            )\n        return super().render(request)\n\n\nclass ErrorResource(resource.Resource):\n    \"\"\"Equivalent to resource.NoResource, except error message is returned\n    as JSON, not HTML\"\"\"\n\n    def __init__(self, error=\"No Such Resource\", code=http.NOT_FOUND):\n        resource.Resource.__init__(self)\n        self.code = code\n        self.error = error\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        return respond(request=request, response={\"error\": self.error}, code=self.code)\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        return respond(request=request, response={\"error\": self.error}, code=self.code)\n\n    def getChild(self, chnam, request):\n        \"\"\"Overrided getChild to ensure a NoResource is not returned\"\"\"\n        return self\n\n\ndef resource_from_collection(collection, name, child_resource):\n    \"\"\"Return a child resource from a collection by name.  If no item is found,\n    return ErrorResource.\n    \"\"\"\n    item = collection.get_by_name(name)\n    if item is None:\n        return ErrorResource(\"Cannot find child '%s'\" % name)\n    return child_resource(item)\n\n\nclass ActionRunResource(AuthenticatedResource):\n\n    isLeaf = True\n\n    def __init__(self, action_run, job_run):\n        resource.Resource.__init__(self)\n        self.action_run = action_run\n        self.job_run = job_run\n        self.controller = controller.ActionRunController(action_run, job_run)\n        self.config_watcher = get_config_watcher()\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        run_adapter = adapter.ActionRunAdapter(\n            self.action_run,\n            self.job_run,\n            requestargs.get_integer(request, \"num_lines\")\n            or staticconf.read(\"logging.max_lines_to_display\", namespace=NAMESPACE),\n            include_stdout=requestargs.get_bool(request, \"include_stdout\"),\n            include_stderr=requestargs.get_bool(request, \"include_stderr\"),\n            include_meta=requestargs.get_bool(request, \"include_meta\"),\n        )\n        return respond(request=request, response=run_adapter.get_repr())\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        use_latest_command = requestargs.get_bool(request, \"use_latest_command\", False)\n        return handle_command(\n            request,\n            self.controller,\n            self.action_run,\n            use_latest_command=use_latest_command,\n        )\n\n\nclass JobRunResource(AuthenticatedResource):\n    def __init__(self, job_run, job_scheduler):\n        resource.Resource.__init__(self)\n        self.job_run = job_run\n        self.job_scheduler = job_scheduler\n        self.controller = controller.JobRunController(job_run, job_scheduler)\n\n    def getChild(self, action_name, _):\n        if not action_name:\n            return self\n\n        action_name = maybe_decode(\n            action_name\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        if action_name in self.job_run.action_runs:\n            action_run = self.job_run.action_runs[action_name]\n            return ActionRunResource(action_run, self.job_run)\n\n        return ErrorResource(\n            f\"Cannot find action '{action_name}' for \" f\"'{self.job_run}'\",\n        )\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        include_runs = requestargs.get_bool(request, \"include_action_runs\")\n        include_graph = requestargs.get_bool(request, \"include_action_graph\")\n        run_adapter = adapter.JobRunAdapter(\n            self.job_run,\n            include_action_runs=include_runs,\n            include_action_graph=include_graph,\n        )\n        return respond(request=request, response=run_adapter.get_repr())\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        return handle_command(request, self.controller, self.job_run)\n\n\ndef is_negative_int(string):\n    return string.startswith(\"-\") and string[1:].isdigit()\n\n\nclass JobResource(AuthenticatedResource):\n    def __init__(self, job_scheduler):\n        resource.Resource.__init__(self)\n        self.job_scheduler = job_scheduler\n        self.controller = controller.JobController(job_scheduler)\n\n    def get_run_from_identifier(self, run_id):\n        job_runs = self.job_scheduler.get_job_runs()\n        if run_id.upper() == \"HEAD\":\n            return job_runs.get_newest()\n        if run_id.isdigit():\n            return job_runs.get_run_by_num(int(run_id))\n        if is_negative_int(run_id):\n            return job_runs.get_run_by_index(int(run_id))\n\n    def getChild(self, run_id, _):\n        if not run_id:\n            return self\n\n        run_id = maybe_decode(\n            run_id\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        run = self.get_run_from_identifier(run_id)\n        if run:\n            return JobRunResource(run, self.job_scheduler)\n\n        job = self.job_scheduler.get_job()\n        if run_id in job.action_graph.names():\n            action_runs = job.runs.get_action_runs(run_id)\n            return ActionRunHistoryResource(action_runs)\n\n        return ErrorResource(f\"Cannot find job run '{run_id}' for '{job}'\")\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        include_action_runs = requestargs.get_bool(\n            request,\n            \"include_action_runs\",\n        )\n        include_graph = requestargs.get_bool(request, \"include_action_graph\")\n        num_runs = requestargs.get_integer(request, \"num_runs\")\n        job_adapter = adapter.JobAdapter(\n            self.job_scheduler.get_job(),\n            include_job_runs=True,\n            include_action_runs=include_action_runs,\n            include_action_graph=include_graph,\n            num_runs=num_runs,\n        )\n        return respond(request=request, response=job_adapter.get_repr())\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        run_time = requestargs.get_datetime(request, \"run_time\")\n        return handle_command(\n            request,\n            self.controller,\n            self.job_scheduler,\n            run_time=run_time,\n        )\n\n\nclass ActionRunHistoryResource(AuthenticatedResource):\n\n    isLeaf = True\n\n    def __init__(self, action_runs):\n        resource.Resource.__init__(self)\n        self.action_runs = action_runs\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        return respond(\n            request=request,\n            response=adapter.adapt_many(adapter.ActionRunAdapter, self.action_runs),\n        )\n\n\nclass JobCollectionResource(AuthenticatedResource):\n    def __init__(self, job_collection):\n        self.job_collection = job_collection\n        self.controller = controller.JobCollectionController(job_collection)\n        resource.Resource.__init__(self)\n\n    def getChild(self, name, request):\n        if not name:\n            return self\n\n        name = maybe_decode(name)  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        return resource_from_collection(self.job_collection, name, JobResource)\n\n    def get_data(\n        self,\n        include_job_run=False,\n        include_action_runs=False,\n        include_action_graph=True,\n        include_node_pool=True,\n    ):\n        return adapter.adapt_many(\n            adapter.JobAdapter,\n            self.job_collection.get_jobs(),\n            include_job_run,\n            include_action_runs,\n            include_action_graph,\n            include_node_pool,\n            num_runs=5,\n        )\n\n    def get_job_index(self):\n        jobs = adapter.adapt_many(\n            adapter.JobIndexAdapter,\n            self.job_collection.get_jobs(),\n        )\n        return {job[\"name\"]: job[\"actions\"] for job in jobs}\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        include_job_runs = requestargs.get_bool(\n            request,\n            \"include_job_runs\",\n            default=False,\n        )\n        include_action_runs = requestargs.get_bool(\n            request,\n            \"include_action_runs\",\n            default=False,\n        )\n        include_action_graph = requestargs.get_bool(\n            request,\n            \"include_action_graph\",\n            default=True,\n        )\n        include_node_pool = requestargs.get_bool(\n            request,\n            \"include_node_pool\",\n            default=True,\n        )\n        response = dict(\n            jobs=self.get_data(\n                include_job_runs,\n                include_action_runs,\n                include_action_graph,\n                include_node_pool,\n            ),\n        )\n        return respond(request=request, response=response)\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        old_name = requestargs.get_string(request, \"old_name\")\n        new_name = requestargs.get_string(request, \"new_name\")\n        return handle_command(\n            request=request,\n            api_controller=self.controller,\n            obj=self.job_collection,\n            old_name=old_name,\n            new_name=new_name,\n        )\n\n\nclass ConfigResource(AuthenticatedResource):\n    \"\"\"Resource for configuration changes\"\"\"\n\n    isLeaf = True\n\n    def __init__(self, master_control):\n        self.controller = controller.ConfigController(master_control)\n        resource.Resource.__init__(self)\n\n    def get_config_index(self):\n        return self.controller.get_namespaces()\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        config_name = requestargs.get_string(request, \"name\")\n        if not config_name:\n            response = self.controller.read_all_configs()\n        else:\n            response = self.controller.read_config(config_name)\n        return respond(request=request, response=response)\n\n    @AsyncResource.exclusive\n    def render_POST(self, request):\n        config_content = requestargs.get_string(request, \"config\")\n        name = requestargs.get_string(request, \"name\")\n        config_hash = requestargs.get_string(request, \"hash\")\n        check = requestargs.get_bool(request, \"check\")\n\n        if not name:\n            return respond(\n                request=request,\n                response={\"error\": \"'name' for config is required.\"},\n                code=http.BAD_REQUEST,\n            )\n\n        response = {\"status\": \"Active\"}\n\n        if check:\n            fn = self.controller.check_config\n            req = 
\"configure check\"\n        elif config_content == \"\":\n            fn = self.controller.delete_config\n            req = \"configuration delete\"\n        else:\n            fn = self.controller.update_config\n            req = \"reconfigure\"\n\n        log.info(f\"Handling {req} request: {name}, {config_hash}\")\n        error = fn(name, config_content, config_hash)\n\n        if error:\n            response[\"error\"] = error\n        return respond(request=request, response=response)\n\n\nclass StatusResource(resource.Resource):\n\n    isLeaf = True\n\n    def __init__(self, master_control):\n        self._master_control = master_control\n        resource.Resource.__init__(self)\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        return respond(\n            request=request,\n            response={\n                \"status\": \"I'm alive.\",\n                \"version\": __version__,\n                \"boot_time\": int(self._master_control.boot_time),\n            },\n        )\n\n\nclass MetricsResource(resource.Resource):\n\n    isLeaf = True\n\n    def __init__(self):\n        resource.Resource.__init__(self)\n\n    @AsyncResource.exclusive\n    def render_GET(self, request):\n        return respond(request=request, response=view_all_metrics())\n\n\nclass EventsResource(AuthenticatedResource):\n    isLeaf = True\n\n    def __init__(self):\n        super().__init__()\n        self.controller = controller.EventsController()\n\n    @AsyncResource.exclusive\n    def render_GET(self, request):\n        response = self.controller.info()\n        return respond(request=request, response=response)\n\n    @AsyncResource.bounded\n    def render_POST(self, request):\n        command = requestargs.get_string(request, \"command\")\n        if command not in self.controller.COMMANDS:\n            return respond(\n                request=request,\n                response=dict(error=f\"Unknown command: {command}\"),\n                code=http.BAD_REQUEST,\n            )\n        event = requestargs.get_string(request, \"event\")\n        fn = getattr(self.controller, command)\n        response = fn(event)\n        return respond(request=request, response=response)\n\n\nclass ApiRootResource(AuthenticatedResource):\n    def __init__(self, mcp):\n        self._master_control = mcp\n        resource.Resource.__init__(self)\n\n        # Setup children\n        self.putChild(\n            b\"jobs\",\n            JobCollectionResource(mcp.get_job_collection()),\n        )\n\n        self.putChild(b\"config\", ConfigResource(mcp))\n        self.putChild(b\"status\", StatusResource(mcp))\n        self.putChild(b\"events\", EventsResource())\n        self.putChild(b\"metrics\", MetricsResource())\n        self.putChild(b\"prom-metrics\", MetricsResourceProm())\n        self.putChild(b\"\", self)\n\n    @AsyncResource.bounded\n    def render_GET(self, request):\n        \"\"\"Return an index of urls for resources.\"\"\"\n        response = {\n            \"jobs\": self.children[b\"jobs\"].get_job_index(),\n            \"namespaces\": self.children[b\"config\"].get_config_index(),\n        }\n        return respond(request=request, response=response)\n\n\nclass RootResource(resource.Resource):\n    def __init__(self, mcp, web_path):\n        resource.Resource.__init__(self)\n        self.web_path = web_path\n        self.mcp = mcp\n        self.putChild(b\"api\", ApiRootResource(self.mcp))\n        self.putChild(b\"web\", static.File(web_path))\n        self.putChild(b\"\", 
self)\n\n    def render_GET(self, request):\n        request.redirect(b\"/web\")\n        request.finish()\n        return server.NOT_DONE_YET\n\n    def __str__(self):\n        return f\"{type(self).__name__}({self.mcp}, {self.web_path})\"\n\n\nclass LogAdapter:\n    def __init__(self, logger):\n        self.logger = logger\n\n    def write(self, line):\n        self.logger.info(line.rstrip(b\"\\n\"))\n\n    def close(self):\n        pass\n\n\nclass TronSite(server.Site):\n    \"\"\"Subclass of a twisted Site to customize logging.\"\"\"\n\n    access_log = logging.getLogger(\"tron.api.www.access\")\n\n    @classmethod\n    def create(cls, mcp, web_path):\n        return cls(RootResource(mcp, web_path))\n\n    def startFactory(self):\n        server.Site.startFactory(self)\n        self.logFile = LogAdapter(self.access_log)\n\n    def log(self, request):\n        super().log(request)\n        if 200 <= request.code < 300:\n            meter(\"tron.site.2xx\")\n        if 300 <= request.code < 400:\n            meter(\"tron.site.3xx\")\n        if 400 <= request.code < 500:\n            meter(\"tron.site.4xx\")\n        if 500 <= request.code < 600:\n            meter(\"tron.site.5xx\")\n\n    def __repr__(self):\n        return f\"{self.__class__.__name__}({self.resource})\"\n"
  },
  {
    "path": "tron/bin/action_runner.py",
    "content": "#!/usr/bin/env python3.10\n\"\"\"\nWrite pid and stdout/stderr to a standard location before execing a command.\n\"\"\"\nimport argparse\nimport contextlib\nimport logging\nimport os\nimport subprocess\nimport sys\nimport threading\nimport time\n\nfrom tron import yaml\n\nSTATUS_FILE = \"status\"\n\n\nclass StatusFile:\n    \"\"\"Manage a status file.\"\"\"\n\n    def __init__(self, filename):\n        self.filename = filename\n\n    def get_content(self, run_id, command, proc):\n        return {\n            \"run_id\": run_id,\n            \"command\": command,\n            \"pid\": proc.pid,\n            \"return_code\": proc.returncode,\n            \"runner_pid\": os.getpid(),\n            \"timestamp\": time.time(),\n        }\n\n    @contextlib.contextmanager\n    def wrap(self, command, run_id, proc):\n        with open(self.filename, \"w\") as fh:\n            yaml.safe_dump(\n                self.get_content(\n                    run_id=run_id,\n                    command=command,\n                    proc=proc,\n                ),\n                fh,\n                explicit_start=True,\n                width=1000000,\n            )\n        try:\n            yield\n        finally:\n            with open(self.filename, \"a\") as fh:\n                yaml.safe_dump(\n                    self.get_content(\n                        run_id=run_id,\n                        command=command,\n                        proc=proc,\n                    ),\n                    fh,\n                    explicit_start=True,\n                    width=1000000,\n                )\n\n\ndef validate_output_dir(path):\n    if os.path.isdir(path):\n        if not os.access(path, os.W_OK):\n            raise OSError(\"Output dir %s not writable\" % path)\n        return\n    else:\n        try:\n            os.makedirs(path)\n        except OSError:\n            raise OSError(\"Could not create output dir %s\" % path)\n\n\ndef build_environment(run_id, original_env=None):\n    if original_env is None:\n        original_env = dict(os.environ)\n    try:\n        namespace, job, run_num, action = run_id.split(\".\", maxsplit=3)\n    except ValueError:\n        # if we can't parse the run_id, we don't want to abort, so just\n        # set these semi-arbitrarily\n        namespace, job, run_num, action = [\"UNKNOWN\"] * 4\n\n    new_env = dict(original_env)\n    new_env[\"TRON_JOB_NAMESPACE\"] = namespace\n    new_env[\"TRON_JOB_NAME\"] = job\n    new_env[\"TRON_RUN_NUM\"] = run_num\n    new_env[\"TRON_ACTION\"] = action\n\n    logging.debug(new_env)\n    return new_env\n\n\ndef build_labels(\n    run_id: str,\n    original_labels: dict[str, str] | None = None,\n    attempt_number: int | None = None,\n) -> dict[str, str]:\n    if original_labels is None:\n        original_labels = {}\n\n    try:\n        # reminder: the format here is \"namespace.job.run_num.action\"\n        _, _, run_num, _ = run_id.split(\".\", maxsplit=3)\n    except ValueError:\n        # if we can't parse the run_id, we don't want to abort, so just\n        # set these semi-arbitrarily\n        run_num = \"UNKNOWN\"\n\n    new_labels = dict(original_labels)\n    new_labels[\"tron.yelp.com/run_num\"] = run_num\n    if attempt_number is not None:\n        new_labels[\"tron.yelp.com/attempt_number\"] = str(attempt_number)\n\n    return new_labels\n\n\ndef run_proc(output_path, command, run_id, proc):\n    logging.warning(f\"{run_id} running as pid {proc.pid}\")\n    status_file = StatusFile(os.path.join(output_path, 
STATUS_FILE))\n    with status_file.wrap(\n        command=command,\n        run_id=run_id,\n        proc=proc,\n    ):\n        returncode = proc.wait()\n        logging.warning(f\"pid {proc.pid} exited with returncode {returncode}\")\n        return returncode\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Action Runner for Tron\")\n    parser.add_argument(\n        \"output_dir\",\n        help=\"The directory to store the state of the action run\",\n    )\n    parser.add_argument(\n        \"command\",\n        help=\"the command to run\",\n    )\n    parser.add_argument(\n        \"run_id\",\n        help=\"run_id of the action\",\n    )\n    return parser.parse_args()\n\n\ndef run_command(command, run_id):\n    return subprocess.Popen(\n        command,\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n        env=build_environment(run_id=run_id),\n    )\n\n\ndef stream(source, dst):\n    is_connected = True\n    logging.warning(f\"streaming {source.name} to {dst.name}\")\n    for line in iter(source.readline, b\"\"):\n        if is_connected:\n            try:\n                dst.write(line.decode(\"utf-8\"))\n                dst.flush()\n                logging.warning(f\"{dst.name}: {line}\")\n            except Exception as e:\n                logging.warning(f\"failed writing to {dst}: {e}\")\n                logging.warning(f\"{dst.name}: {line}\")\n                is_connected = False\n        else:\n            logging.warning(f\"{dst.name}: {line}\")\n            is_connected = False\n\n\ndef configure_logging(run_id, output_dir):\n    output_file = os.path.join(output_dir, f\"{run_id}-{os.getpid()}.log\")\n    logging.basicConfig(\n        filename=output_file,\n        format=\"%(asctime)s %(levelname)s %(message)s\",\n        datefmt=\"%Y-%m-%dT%H:%M:%S%z\",\n    )\n\n\ndef main():\n    args = parse_args()\n    validate_output_dir(args.output_dir)\n    configure_logging(run_id=args.run_id, output_dir=args.output_dir)\n    proc = run_command(command=args.command, run_id=args.run_id)\n    threads = [\n        threading.Thread(target=stream, args=p, daemon=True)\n        for p in [(proc.stdout, sys.stdout), (proc.stderr, sys.stderr)]\n    ]\n    for t in threads:\n        t.start()\n    returncode = run_proc(\n        output_path=args.output_dir,\n        run_id=args.run_id,\n        command=args.command,\n        proc=proc,\n    )\n\n    for t in threads:\n        t.join()\n\n    return returncode\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
  },
  {
    "path": "tron/bin/action_status.py",
    "content": "#!/usr/bin/env python3.10\nimport argparse\nimport logging\nimport os\nimport signal\n\nfrom tron import yaml\n\nlog = logging.getLogger(\"tron.action_status\")\n\nSTATUS_FILE = \"status\"\n\n\ndef get_field(field, status_file):\n    docs = yaml.load_all(status_file.read())\n    content = list(docs)[-1]\n    return content.get(field)\n\n\ndef print_status_file(status_file):\n    for line in status_file.readlines():\n        print(yaml.load(line))\n\n\ndef send_signal(signal_num, status_file):\n    pid = get_field(\"pid\", status_file)\n    if pid:\n        try:\n            os.killpg(os.getpgid(pid), signal_num)\n        except OSError as e:\n            msg = \"Failed to signal %s with %s: %s\"\n            raise SystemExit(msg % (pid, signal_num, e))\n\n\ncommands = {\n    \"print\": print_status_file,\n    \"pid\": lambda statusfile: print(get_field(\"pid\", statusfile)),\n    \"return_code\": lambda statusfile: print(get_field(\"return_code\", statusfile)),\n    \"terminate\": lambda statusfile: send_signal(signal.SIGTERM, statusfile),\n    \"kill\": lambda statusfile: send_signal(signal.SIGKILL, statusfile),\n}\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=\"Action Status for Tron\")\n    parser.add_argument(\n        \"output_dir\",\n        help=\"The directory where the state of the action run is\",\n    )\n    parser.add_argument(\n        \"command\",\n        help=\"the command to run\",\n    )\n    parser.add_argument(\n        \"run_id\",\n        help=\"run_id of the action\",\n    )\n    return parser.parse_args()\n\n\ndef run_command(command, status_file):\n    commands[command](status_file)\n\n\ndef main():\n    logging.basicConfig()\n    args = parse_args()\n    with open(os.path.join(args.output_dir, STATUS_FILE)) as f:\n        run_command(args.command, f)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tron/bin/check_tron_datastore_staleness.py",
    "content": "#!/usr/bin/env python3.10\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\n\nimport pytz\n\nfrom tron.config import manager\nfrom tron.config import schema\nfrom tron.serialize.runstate.statemanager import PersistenceManagerFactory\n\n# Default values for arguments\nDEFAULT_WORKING_DIR = \"/var/lib/tron/\"\nDEFAULT_CONF_PATH = \"config/\"\nDEFAULT_STALENESS_THRESHOLD = 1800\nlog = logging.getLogger(\"check_tron_datastore_staleness\")\n\n\ndef get_last_run_time(job):\n    \"\"\"\n    Get all sorted timestamps, and only count the actions that actually ran\n    \"\"\"\n    timestamps = []\n    job_runs = job[\"runs\"]\n    for run in job_runs:\n        for action in run[\"runs\"]:\n            if action.get(\"start_time\") and action.get(\"state\") != \"scheduled\":\n                timestamps.append(action.get(\"start_time\"))\n    return max(timestamps) if timestamps else None\n\n\ndef parse_cli():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-w\",\n        \"--working-dir\",\n        default=DEFAULT_WORKING_DIR,\n        help=\"Working directory for the Tron daemon, default %(default)s\",\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--config-path\",\n        default=DEFAULT_CONF_PATH,\n        help=\"File path to the Tron configuration file\",\n    )\n    parser.add_argument(\n        \"--job-name\",\n        required=True,\n        help=\"The job name to read timestamp from\",\n    )\n    parser.add_argument(\n        \"--staleness-threshold\",\n        default=DEFAULT_STALENESS_THRESHOLD,\n        help=\"how long (in seconds) to wait to alert after the last timestamp of the job\",\n    )\n    args = parser.parse_args()\n    args.working_dir = os.path.abspath(args.working_dir)\n    args.config_path = os.path.join(\n        args.working_dir,\n        args.config_path,\n    )\n    return args\n\n\ndef read_config(args):\n    return manager.ConfigManager(args.config_path).load().get_master().state_persistence\n\n\ndef main():\n    # Fetch configs. You can find the arguments in puppet.\n    args = parse_cli()\n    persistence_config = read_config(args)\n    store_type = schema.StatePersistenceTypes(persistence_config.store_type)\n    job_name = args.job_name\n\n    # Alert for DynamoDB\n    if store_type == schema.StatePersistenceTypes.dynamodb:\n        # Fetch job state from dynamodb\n        state_manager = PersistenceManagerFactory.from_config(persistence_config)\n        try:\n            job = state_manager.restore(job_names=[job_name])[\"job_state\"][job_name]\n        except Exception as e:\n            logging.exception(f\"UNKN: Failed to retreive status for job {job_name} due to {e}\")\n            sys.exit(3)\n\n        # Exit if the job never runs.\n        last_run_time = get_last_run_time(job)\n        if not last_run_time:\n            logging.error(\n                f\"WARN: No last run for {job_name} found. If the job was just added, it might take some time for it to run\"\n            )\n            sys.exit(1)\n\n        # Alert if timestamp is not updated after staleness_threshold\n        stateless_for_secs = time.time() - last_run_time.astimezone(pytz.utc).timestamp()\n        if stateless_for_secs > args.staleness_threshold:\n            logging.error(f\"CRIT: {job_name} has not been updated in DynamoDB for {stateless_for_secs} seconds\")\n            sys.exit(2)\n        else:\n            logging.info(f\"OK: DynamoDB is up to date. 
It was last updated at {last_run_time}\")\n            sys.exit(0)\n    # Alert for BerkeleyDB\n    elif store_type == schema.StatePersistenceTypes.shelve:\n        os.execl(\n            \"/usr/lib/nagios/plugins/check_file_age\",\n            \"/nail/tron/tron_state\",\n            \"-w\",\n            str(args.staleness_threshold),\n            \"-c\",\n            str(args.staleness_threshold),\n        )\n    else:\n        logging.exception(f\"UNKN: Not designed to check this type of datastore: {store_type}\")\n        sys.exit(3)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tron/bin/check_tron_jobs.py",
    "content": "#!/usr/bin/env python3.10\nimport datetime\nimport logging\nimport pprint\nimport sys\nimport time\nfrom collections import defaultdict\nfrom enum import Enum\n\nimport pytimeparse  # type: ignore[import-untyped] # no stubs or py.typed marker; likely want to move off of this\nfrom pyrsistent import m\nfrom pyrsistent import pmap\nfrom pysensu_yelp import send_event\n\nfrom tron.commands import cmd_utils\nfrom tron.commands import display\nfrom tron.commands.client import Client\nfrom tron.commands.client import get_object_type_from_identifier\nfrom tron.utils.logreader import get_superregion\n\nPRECIOUS_JOB_ATTR = \"check_that_every_day_has_a_successful_run\"\nNUM_PRECIOUS = 7\n\nlog = logging.getLogger(\"check_tron_jobs\")\n\n_run_interval = None\n\n\nclass State(Enum):\n    SUCCEEDED = \"succeeded\"\n    FAILED = \"failed\"\n    STUCK = \"stuck\"\n    NO_RUN_YET = \"no_run_yet\"\n    NO_RUNS_TO_CHECK = \"no_runs_to_check\"\n    UNKNOWN = \"unknown\"\n    SKIPPED = \"skipped\"\n\n\ndef parse_cli():\n    parser = cmd_utils.build_option_parser()\n    parser.add_argument(\n        \"--dry-run\",\n        action=\"store_true\",\n        default=False,\n        help=\"Don't actually send alerts out. Defaults to %(default)s\",\n    )\n    parser.add_argument(\n        \"--job\",\n        default=None,\n        help=\"Check a particular job. If unset checks all jobs\",\n    )\n    parser.add_argument(\n        \"--run-interval\",\n        help=\"Run interval for this monitoring script. This is used to \"\n        \"calculate realert and alert_after setting. \"\n        \"Default to %(default)s (seconds)\",\n        type=int,\n        dest=\"run_interval\",\n        default=300,\n    )\n    parser.add_argument(\n        \"--skip-sensu-failure-logging\",\n        help=\"Skip including stdout/stderr logs in alerts for failed jobs\",\n        action=\"store_true\",\n        dest=\"skip_sensu_failure_logging\",\n        default=False,\n    )\n    args = parser.parse_args()\n    return args\n\n\ndef _timestamp_to_timeobj(timestamp):\n    return time.strptime(timestamp, \"%Y-%m-%d %H:%M:%S\")\n\n\ndef _timestamp_to_shortdate(timestamp, separator=\".\"):\n    return time.strftime(\n        \"%Y{0}%m{0}%d\".format(separator),\n        _timestamp_to_timeobj(timestamp),\n    )\n\n\ndef compute_check_result_for_job_runs(client, job, job_content, url_index, hide_stderr=False):\n    cluster = client.cluster_name\n    kwargs = {}\n    if job_content is None:\n        kwargs[\"output\"] = f\"OK: {job['name']} was just added and hasn't run yet on {cluster}.\"\n        kwargs[\"status\"] = 0\n        return kwargs\n\n    relevant_job_run, last_state = get_relevant_run_and_state(job_content)\n    if relevant_job_run is None:\n        kwargs[\"output\"] = (\n            f\"CRIT: {job['name']} hasn't had a successful \" f\"run yet on {cluster}.\\n{pretty_print_job(job_content)}\"\n        )\n        kwargs[\"status\"] = 2\n        return kwargs\n    else:  # if no run scheduled, no run_time available\n        relevant_job_run_date = _timestamp_to_shortdate(\n            relevant_job_run[\"run_time\"],\n        )\n\n    # A job_run is like MASTER.foo.1\n    job_run_id = relevant_job_run[\"id\"]\n\n    # A job action is like MASTER.foo.1.step1\n    actions_expected_runtime = job_content.get(\"actions_expected_runtime\", {})\n    relevant_action = get_relevant_action(\n        action_runs=relevant_job_run[\"runs\"],\n        last_state=last_state,\n        
actions_expected_runtime=actions_expected_runtime,\n    )\n    action_run_id = get_object_type_from_identifier(\n        url_index,\n        relevant_action[\"id\"],\n    )\n\n    if last_state in (State.STUCK, State.FAILED, State.UNKNOWN):\n        if _skip_sensu_failure_logging:\n            job_run_url = \"/\".join(job_run_id.rsplit(\".\", 1))\n            tronweb_url = f\"http://y/tron-{get_superregion()}/#job/{job_run_url}\"\n            stderr_default = f\"Please visit {tronweb_url} for stderr details.\"\n            action_run_details = {}\n        else:\n            stderr_default = \"(No stderr available)\"\n            action_run_details = client.action_runs(action_run_id.url, num_lines=10)\n    else:\n        action_run_details = {}\n\n    if last_state == State.SUCCEEDED:\n        prefix = f\"OK: The last job ({job_run_id}) run succeeded on {cluster}. Will watch future or in progress runs for the next failure\"\n        status = 0\n        stderr = \"\"\n    elif last_state == State.NO_RUNS_TO_CHECK:\n        prefix = f\"OK: The job {job['name']} is new and/or has no runs to check on {cluster}\"\n        status = 0\n        stderr = \"\"\n    elif last_state == State.SKIPPED:\n        prefix = f\"OK: The last job ({job_run_id}) run was skipped on {cluster}. Will watch future or in progress runs for the next failure\"\n        status = 0\n        stderr = \"\"\n    elif last_state == State.STUCK:\n        if job[\"monitoring\"].get(\"page_for_expected_runtime\", False):\n            level = \"CRIT\"\n            status = 2\n        else:\n            level = \"WARN\"\n            status = 1\n        prefix = f\"{level}: Job {job_run_id} exceeded expected runtime or still running when next job is scheduled on {cluster}\"\n        stderr = \"\\n\".join(action_run_details.get(\"stderr\", [stderr_default]))\n    elif last_state == State.FAILED:\n        prefix = f\"CRIT: The last job run ({job_run_id}) failed on {cluster}!\"\n        status = 2\n        stderr = \"\\n\".join(action_run_details.get(\"stderr\", [stderr_default]))\n    elif last_state == State.UNKNOWN:\n        prefix = f\"CRIT: Job {job_run_id} has gone 'unknown' and might need manual intervention on {cluster}\"\n        status = 2\n        stderr = \"\"\n    else:\n        prefix = f\"UNKNOWN: Job {job_run_id} is in a state that check_tron_jobs doesn't understand\"\n        status = 3\n        stderr = \"\"\n\n    if hide_stderr:\n        stderr = \"\"\n\n    precious_runs_note = \"\"\n    if job[\"monitoring\"].get(PRECIOUS_JOB_ATTR, False) and status != 0:\n        precious_runs_note = f\"Note: This alert is the run for {relevant_job_run_date}. 
A resolve event will not occur until a job run for this date succeeds.\\n\"\n\n    kwargs[\"output\"] = (\n        f\"{prefix}\\n\"\n        f\"{stderr}\\n\"\n        f\"The latest run, {relevant_job_run['id']} {relevant_job_run['state']}\\n\"\n        f\"{precious_runs_note}\"\n    )\n    if action_run_details:\n        kwargs[\"output\"] += \"\\nHere is the last action:\\n\" f\"{pretty_print_actions(action_run_details)}\\n\\n\"\n    kwargs[\"output\"] += (\n        \"And the job run view:\\n\"\n        f\"{pretty_print_job_run(relevant_job_run)}\\n\\n\"\n        \"Here is the whole job view for context:\\n\"\n        f\"{pretty_print_job(job_content)}\"\n    )\n    kwargs[\"status\"] = status\n    return kwargs\n\n\ndef pretty_print_job(job_content):\n    return display.format_job_details(job_content)\n\n\ndef pretty_print_job_run(job_run):\n    display_action = display.DisplayActionRuns()\n    return display_action.format(job_run)\n\n\ndef pretty_print_actions(action_run):\n    return display.format_action_run_details(action_run)\n\n\ndef get_relevant_run_and_state(job_content):\n    # The order of job run to check is as follows:\n    #   1. The scheduled but hasn't run one checked first\n    #   2. Then currently running ones are always checked (in case an action is failed/unknown)\n    #   3. If there are multiple running ones, then most recent run_time wins\n    #   4. If nothing is currently running, then most recent end_time wins\n    job_runs = sorted(\n        job_content.get(\"runs\", []),\n        key=lambda k: (k[\"end_time\"] is None, k[\"end_time\"], k[\"run_time\"]),\n        reverse=True,\n    )\n    if len(job_runs) == 0:\n        return None, State.NO_RUN_YET\n\n    job_expected_runtime = job_content.get(\"expected_runtime\", None)\n    actions_expected_runtime = job_content.get(\"actions_expected_runtime\", {})\n    stuck_run = is_job_stuck(\n        job_runs=job_runs,\n        job_expected_runtime=job_expected_runtime,\n        actions_expected_runtime=actions_expected_runtime,\n        allow_overlap=job_content.get(\"allow_overlap\", False),\n        queueing=job_content.get(\"queueing\", True),\n    )\n    for run in job_runs:\n        state = run.get(\"state\", \"unknown\")\n        if state in [\"failed\", \"succeeded\", \"unknown\", \"skipped\"]:\n            return run, State(state)\n        elif state in [\"running\", \"waiting\", \"starting\"]:\n            action_state = is_action_failed_or_unknown(run)\n            if action_state != State.SUCCEEDED:\n                return run, action_state\n            elif stuck_run is not None:\n                return stuck_run, State.STUCK\n    return job_runs[0], State.NO_RUNS_TO_CHECK\n\n\ndef is_action_failed_or_unknown(job_run):\n    for run in job_run.get(\"runs\", []):\n        if run.get(\"state\", None) in [\"failed\", \"unknown\"]:\n            return State(run.get(\"state\"))\n    return State.SUCCEEDED\n\n\ndef is_job_stuck(\n    job_runs,\n    job_expected_runtime,\n    actions_expected_runtime,\n    allow_overlap,\n    queueing,\n):\n    next_run_time = None\n    for job_run in job_runs:\n        states_to_check = {\"running\", \"waiting\", \"starting\"}\n        if job_run.get(\"state\", \"unknown\") in states_to_check:\n            if is_job_run_exceeding_expected_runtime(\n                job_run,\n                job_expected_runtime,\n            ):\n                return job_run\n            # check if it is still running at next scheduled job run time\n            if not allow_overlap and queueing 
and next_run_time:\n                difftime = _timestamp_to_timeobj(next_run_time)\n                if time.time() > time.mktime(difftime):\n                    return job_run\n            for action_run in job_run.get(\"runs\", []):\n                if is_action_run_exceeding_expected_runtime(\n                    action_run,\n                    actions_expected_runtime,\n                ):\n                    return job_run\n\n        next_run_time = job_run.get(\"run_time\", None)\n    return None\n\n\ndef is_job_run_exceeding_expected_runtime(job_run, job_expected_runtime):\n    states_to_check = {\"running\", \"waiting\", \"starting\"}\n    if (\n        job_expected_runtime is not None\n        and job_run.get(\n            \"state\",\n            \"unknown\",\n        )\n        in states_to_check\n    ):\n        duration_seconds = pytimeparse.parse(job_run.get(\"duration\", \"\"))\n        if duration_seconds and duration_seconds > job_expected_runtime:\n            return True\n    return False\n\n\ndef is_action_run_exceeding_expected_runtime(\n    action_run,\n    actions_expected_runtime,\n):\n    states_to_check = [\"running\", \"starting\"]\n    if action_run.get(\"state\", \"unknown\") in states_to_check:\n        action_name = action_run.get(\"action_name\", None)\n        if action_name in actions_expected_runtime and actions_expected_runtime[action_name] is not None:\n            duration_seconds = pytimeparse.parse(\n                action_run.get(\"duration\", \"\"),\n            )\n            if duration_seconds > actions_expected_runtime[action_name]:\n                return True\n    return False\n\n\ndef get_relevant_action(*, action_runs, last_state, actions_expected_runtime):\n    stuck_action_run_candidate = None\n    for action_run in reversed(action_runs):\n        action_state = action_run.get(\"state\", \"unknown\")\n        try:\n            if State(action_state) == last_state:\n                return action_run\n        except ValueError:\n            if last_state == State.STUCK:\n                if is_action_run_exceeding_expected_runtime(\n                    action_run,\n                    actions_expected_runtime,\n                ):\n                    return action_run\n                if action_state in {\"running\", \"starting\"}:\n                    stuck_action_run_candidate = action_run\n    return stuck_action_run_candidate or action_runs[-1]\n\n\ndef guess_realert_every(job):\n    try:\n        job_next_run = job.get(\"next_run\", None)\n        if job_next_run is None:\n            return -1\n        job_runs = job.get(\"runs\", [])\n        job_runs_started = [\n            run.get(\"start_time\") or run.get(\"run_time\")\n            for run in job_runs\n            if run.get(\"start_time\") or run.get(\"run_time\") and run.get(\"run_time\") != job_next_run\n        ]\n        if len(job_runs_started) == 0:\n            return -1\n        job_previous_run = max(\n            job_runs_started,\n        )\n        time_diff = time.mktime(_timestamp_to_timeobj(job_next_run)) - time.mktime(\n            _timestamp_to_timeobj(job_previous_run)\n        )\n        realert_every = max(int(time_diff / _run_interval), 1)\n    except Exception as e:\n        log.warning(f\"guess_realert_every failed: {e}\")\n        return -1\n    return realert_every\n\n\ndef get_earliest_run_time_to_check(job_content, interval):\n    if not job_content[\"runs\"]:\n        return None\n\n    earliest_run_time = 
min(time.mktime(_timestamp_to_timeobj(run[\"run_time\"])) for run in job_content[\"runs\"])\n    return max(\n        earliest_run_time,\n        time.time() - datetime.timedelta(**{f\"{interval}s\": NUM_PRECIOUS - 1}).total_seconds(),\n    )\n\n\ndef sort_runs_by_interval(job_content, interval=\"day\", until=None):\n    \"\"\"Sorts a job's runs by a time interval (day, hour, minute, or second),\n    according to a job run's run time.\n    \"\"\"\n    interval_formats = {\n        \"day\": \"%Y.%m.%d\",\n        \"hour\": \"%Y.%m.%d-%H\",\n        \"minute\": \"%Y.%m.%d-%H.%M\",\n        \"second\": \"%Y.%m.%d-%H.%M.%S\",\n    }\n    fmt = interval_formats[interval]\n\n    run_buckets = defaultdict(list)\n    if job_content is not None:\n        if not until:\n            until = time.time()  # can't set in default arg\n        earliest_run_time = get_earliest_run_time_to_check(job_content, interval) or until\n\n        # We add all dates by interval between our earliest run_time and now,\n        # allowing functions downstream to see if some dates had no runs\n        start = datetime.datetime.fromtimestamp(earliest_run_time)\n        end = datetime.datetime.fromtimestamp(until)\n        step = datetime.timedelta(**{f\"{interval}s\": 1})\n\n        # We compare the strings _after_ we've converted to the final format to make\n        # sure we don't miss something due to off-by-one/weird DST bugs, etc\n        while start.strftime(fmt) <= end.strftime(fmt):\n            run_buckets[start.strftime(fmt)] = []\n            start += step\n\n        # Bucket runs by interval\n        for run in job_content[\"runs\"]:\n            run_time = time.strftime(\n                interval_formats[interval],\n                _timestamp_to_timeobj(run[\"run_time\"]),\n            )\n            if run_time not in run_buckets:\n                continue\n            run_buckets[run_time].append(run)\n    return dict(run_buckets)\n\n\ndef compute_check_result_for_job(client, job, url_index):\n    kwargs = m(\n        name=f\"check_tron_job.{job['name']}\",\n        source=client.cluster_name,\n    )\n    if \"realert_every\" not in kwargs:\n        kwargs = kwargs.set(\"realert_every\", guess_realert_every(job))\n    kwargs = kwargs.set(\"check_every\", f\"{_run_interval}s\")\n\n    # We want to prevent a monitoring config from setting the check_every\n    # attribute, since one config should not dictate how often this script runs\n    sensu_kwargs = (\n        pmap(job[\"monitoring\"])\n        .discard(PRECIOUS_JOB_ATTR)\n        .discard(\"check_every\")\n        .discard(\"page_for_expected_runtime\")\n        .discard(\"check_oom_events\")\n    )\n    kwargs = kwargs.update(sensu_kwargs)\n    hide_stderr = kwargs.get(\"hide_stderr\", False)\n    kwargs_list = []\n    if job[\"status\"] == \"disabled\":\n        kwargs = kwargs.set(\n            \"output\",\n            f\"OK: {job['name']} is disabled and won't be checked.\",\n        )\n        kwargs = kwargs.set(\"status\", 0)\n        kwargs_list.append(kwargs)\n    else:\n        # The job is not disabled, therefore we have to look at its run history\n        tron_id = get_object_type_from_identifier(url_index, job[\"name\"])\n        job_content = pmap(\n            client.job(\n                tron_id.url,\n                include_action_runs=True,\n            ),\n        )\n\n        if job[\"monitoring\"].get(PRECIOUS_JOB_ATTR, False):\n            dated_runs = sort_runs_by_interval(job_content, interval=\"day\")\n        else:\n            
dated_runs = {\"\": job_content[\"runs\"]}\n\n        for date, runs in dated_runs.items():\n            results = compute_check_result_for_job_runs(\n                job=job,\n                job_content=job_content.set(\"runs\", runs),\n                client=client,\n                url_index=url_index,\n                hide_stderr=hide_stderr,\n            )\n            dated_kwargs = kwargs.update(results)\n            if date:  # if empty date, leave job name alone\n                dated_kwargs = dated_kwargs.set(\n                    \"name\",\n                    f\"{kwargs['name']}-{date}\",\n                )\n            kwargs_list.append(dated_kwargs)\n\n    return [dict(kws) for kws in kwargs_list]\n\n\ndef check_job(job, client, url_index):\n    if job.get(\"monitoring\", {}) == {}:\n        log.debug(f\"Not checking {job['name']}, no monitoring metadata setup.\")\n        return\n    if job.get(\"monitoring\").get(\"team\", None) is None:\n        log.debug(f\"Not checking {job['name']}, no team specified\")\n        return\n    log.info(f\"Checking {job['name']}\")\n    return compute_check_result_for_job(job=job, client=client, url_index=url_index)\n\n\ndef check_job_result(job, client, url_index, dry_run):\n    results = check_job(job, client, url_index)\n    if not results:\n        return\n\n    for result in results:\n        if dry_run:\n            log.info(\"Would have sent this event to sensu: \")\n            log.info(pprint.pformat(result))\n        else:\n            log.debug(f\"Sending event: {pprint.pformat(result)}\")\n            if \"runbook\" not in result:\n                result[\"runbook\"] = (\n                    \"No runbook specified. Please specify a runbook in the monitoring section of the job definition.\",\n                )\n            send_event(**result)\n\n\ndef main():\n    args = parse_cli()\n    cmd_utils.setup_logging(args)\n    cmd_utils.load_config(args)\n    client = Client(args.server, args.cluster_name)\n\n    error_code = 0\n    global _run_interval\n    _run_interval = args.run_interval\n\n    global _skip_sensu_failure_logging\n    _skip_sensu_failure_logging = args.skip_sensu_failure_logging\n\n    url_index = client.index()\n    if args.job is None:\n        jobs = client.jobs(include_job_runs=True)\n        for job in jobs:\n            try:\n                check_job_result(job=job, client=client, url_index=url_index, dry_run=args.dry_run)\n            except Exception as e:\n                log.warning(f\"check job result fails for job {job.get('name', '')}: {e}\")\n                error_code = 1\n    else:\n        job_url = client.get_url(args.job)\n        job = client.job_runs(job_url)\n        check_job_result(job=job, client=client, url_index=url_index, dry_run=args.dry_run)\n\n    return error_code\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
  },
  {
    "path": "tron/bin/get_tron_metrics.py",
    "content": "#!/usr/bin/env python3.10\n#\n# get_tron_metrics.py\n#   This script is designed to retrieve metrics from Tron via its API and send\n#   send them to meteorite.\nimport logging\nimport pprint\nimport subprocess\nimport sys\nimport textwrap\n\nfrom tron.commands import cmd_utils\nfrom tron.commands.client import Client\n\nlog = logging.getLogger(\"get_tron_metrics\")\n\n\ndef parse_cli():\n    parser = cmd_utils.build_option_parser()\n    parser.description = \"Collects metrics from Tron via its API and forwards them to \" \"meteorite.\"\n    parser.add_argument(\n        \"--dry-run\",\n        action=\"store_true\",\n        default=False,\n        help=\"Don't actually send metrics out. Defaults: %(default)s\",\n    )\n    args = parser.parse_args()\n    return args\n\n\ndef check_bin_exists(bin):\n    \"\"\"\n    Checks if an executable binary exists\n\n    :param bin: (str) Name of the executable; could be a path to one\n    \"\"\"\n    return (\n        subprocess.call(\n            [\"which\", bin],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n        )\n        == 0\n    )\n\n\ndef send_data_metric(name, metric_type, value, dimensions={}, dry_run=False):\n    \"\"\"\n    Sends a single data point to meteorite via bash command\n\n    :param name: (str) Name of the metric\n    :param metric_type: (str) Type of the meteorite metric. Must be in\n        METEORITE_TYPES\n    :param value: (float) Value of the metric\n    :param dimensions: (dict) Metric dimensions as key-value pairs\n    :param dry_run: (bool) Whether or not to send metrics to meteorite\n    \"\"\"\n    if dry_run:\n        metric_args = dict(\n            name=name,\n            metric_type=metric_type,\n            value=value,\n            dimensions=dimensions,\n        )\n        log.info(\n            f\"Would have sent this to meteorite:\\n\" f\"{pprint.pformat(metric_args)}\",\n        )\n        return\n\n    cmd = [\"meteorite\", \"data\", \"-v\", name, metric_type, str(value)]\n    for k, v in dimensions.items():\n        cmd.extend([\"-d\", f\"{k}:{v}\"])\n\n    process = subprocess.Popen(\n        cmd,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    output, error = process.communicate()\n    output = output.decode(\"utf-8\").rstrip()\n    error = error.decode(\"utf-8\").rstrip()\n\n    if process.returncode != 0:\n        log.error(\n            \"Meteorite failed with:\\n\" f\"{textwrap.indent(error, '    ')}\",\n        )\n    else:\n        log.debug(f\"From meteorite: {output}\")\n\n\ndef send_counter(name, **kwargs):\n    send_data_metric(\n        name=name,\n        metric_type=\"counter\",\n        value=kwargs.pop(\"count\"),\n        dimensions=kwargs.pop(\"dimensions\", {}),\n        dry_run=kwargs.pop(\"dry_run\", False),\n    )\n\n\ndef send_gauge(name, **kwargs):\n    send_data_metric(\n        name=name,\n        metric_type=\"gauge\",\n        value=kwargs.pop(\"value\"),\n        dimensions=kwargs.pop(\"dimensions\", {}),\n        dry_run=kwargs.pop(\"dry_run\", False),\n    )\n\n\ndef send_meter(name, **kwargs):\n    send_counter(name, **kwargs)  # We ignore mX_rate args\n\n\ndef send_histogram(name, **kwargs):\n    for k in [\"p50\", \"p75\", \"p95\", \"p99\"]:  # Only send p50-99\n        gauge_name = f\"{name}.{k}\"\n        kwargs[\"value\"] = kwargs[k]  # set for gauge\n        send_gauge(gauge_name, **kwargs)\n\n\ndef send_timer(name, **kwargs):\n    # We mirror the metrics implementation in Tron by splitting 
timer into a\n    # meter and a histogram\n    send_meter(name, **kwargs)\n    send_histogram(name, **kwargs)\n\n\n_METRIC_SENDERS = {\n    \"counter\": send_counter,\n    \"gauge\": send_gauge,\n    \"meter\": send_meter,\n    \"histogram\": send_histogram,\n    \"timer\": send_timer,\n}\n\n\ndef send_metrics(metrics, cluster=None, dry_run=False):\n    \"\"\"\n    Send metrics via meteorite\n\n    :param metrics: Dictionary of metrics types and their data\n    \"\"\"\n    for metric_type, data in metrics.items():\n        for kwargs in data:\n            name = kwargs.pop(\"name\")\n            kwargs[\"dry_run\"] = dry_run\n\n            if cluster:\n                dimensions = kwargs.get(\"dimensions\", {})\n                dimensions[\"tron_cluster\"] = cluster\n                kwargs[\"dimensions\"] = dimensions\n\n            _METRIC_SENDERS[metric_type](name, **kwargs)\n\n\ndef main():\n    args = parse_cli()\n    cmd_utils.setup_logging(args)\n    cmd_utils.load_config(args)\n    client = Client(args.server, args.cluster_name)\n\n    if check_bin_exists(\"meteorite\"):\n        metrics = client.metrics()\n        send_metrics(metrics, cluster=client.cluster_name, dry_run=args.dry_run)\n    else:\n        log.error(\"'meteorite' was not found\")\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
  },
  {
    "path": "tron/bin/recover_batch.py",
    "content": "#!/usr/bin/env python3.10\nimport argparse\nimport logging\nimport signal\nimport sys\nfrom queue import Queue\n\nimport psutil\nfrom twisted.internet import inotify\nfrom twisted.internet import reactor\nfrom twisted.python import filepath\n\nfrom tron import yaml\n\nlog = logging.getLogger(\"tron.recover_batch\")\n\n\nclass StatusFileWatcher:\n    \"\"\"\n    Watches the status file produced by action runners\n    \"\"\"\n\n    def __init__(self, to_watch, callback):\n        notifier = inotify.INotify()\n        notifier.startReading()\n        notifier.watch(filepath.FilePath(to_watch), callbacks=[callback])\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description=\"Check if a action runner has exited; wait otherwise\",\n    )\n    parser.add_argument(\"filepath\")\n    return parser.parse_args()\n\n\ndef read_last_yaml_entries(filename):\n    with open(filename) as f:\n        lines = list(yaml.load_all(f))\n        if not lines:\n            entries = {}\n        else:\n            entries = lines[-1]\n    return entries\n\n\ndef notify(notify_queue, ignored, filepath, mask):\n    exit_code, error_message = get_exit_code(filepath.path)\n    if exit_code is not None:\n        reactor.stop()\n        notify_queue.put((exit_code, error_message))\n\n\ndef get_exit_code(filepath):\n    entries = read_last_yaml_entries(filepath)\n    pid = entries.get(\"runner_pid\")\n    return_code = entries.get(\"return_code\")\n    exit_code, error_message = None, None\n\n    if return_code is not None:\n        if return_code < 0:\n            # from the subprocess docs on the return code of a process:\n            # \"A negative value -N indicates that the child was terminated by signal N (POSIX only).\"\n            # We should always exit with a positive code, so we take the absolute value of the return code\n            exit_code = abs(return_code)\n            error_message = f\"Action run killed by signal {signal.Signals(exit_code).name}\"\n        else:\n            exit_code = return_code\n    elif pid is None:\n        log.warning(f\"Status file {filepath} didn't have a PID. Will watch the file for updates.\")\n    elif not psutil.pid_exists(pid):\n        exit_code = 1\n        error_message = f\"Action runner pid {pid} no longer running. Assuming an exit of 1.\"\n\n    return exit_code, error_message\n\n\ndef run(fpath):\n    # Check if the process has already completed.\n    # If it has, we don't expect any more updates.\n    return_code, error_message = get_exit_code(fpath)\n    if return_code is not None:\n        if error_message is not None:\n            log.warning(error_message)\n        sys.exit(return_code)\n\n    # If not, wait for updates to the file.\n    notify_queue = Queue()\n    StatusFileWatcher(\n        fpath,\n        lambda *args, **kwargs: notify(notify_queue, *args, **kwargs),\n    )\n    reactor.run()\n    exit_code, error_message = notify_queue.get()\n    if error_message is not None:\n        log.warning(error_message)\n    sys.exit(exit_code)\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    run(args.filepath)\n"
  },
  {
    "path": "tron/command_context.py",
    "content": "\"\"\"Command Context is how we construct the command line for a command which may\nhave variables that need to be rendered.\n\"\"\"\nimport operator\nimport re\nfrom functools import reduce\n\nfrom tron.utils import timeutils\n\n\ndef build_context(object, parent):\n    \"\"\"Construct a CommandContext for object. object must have a property\n    'context_class'.\n    \"\"\"\n    return CommandContext(object.context_class(object), parent)\n\n\ndef build_filled_context(*context_objects):\n    \"\"\"Create a CommandContext chain from context_objects, using a Filler\n    object to pass to each CommandContext. Can be used to validate a format\n    string.\n    \"\"\"\n    if not context_objects:\n        return CommandContext()\n\n    filler = Filler()\n\n    def build(current, next):\n        return CommandContext(next(filler), current)\n\n    return reduce(build, context_objects, None)\n\n\nclass CommandContext:\n    \"\"\"A CommandContext object is a wrapper around any object which has values\n    to be used to render a command for execution.  It looks up values by name.\n\n    It's lookup order is:\n        base[name],\n        base.__getattr__(name),\n        next[name],\n        next.__getattr__(name)\n    \"\"\"\n\n    def __init__(self, base=None, next=None):\n        \"\"\"\n        base - Object to look for attributes in\n        next - Next place to look for more pieces of context\n               Generally this will be another instance of CommandContext\n        \"\"\"\n        self.base = base or {}\n        self.next = next or {}\n\n    def get(self, name, default=None):\n        try:\n            return self.__getitem__(name)\n        except KeyError:\n            return default\n\n    def __getitem__(self, name):\n        getters = [operator.itemgetter(name), operator.attrgetter(name)]\n        for target in [self.base, self.next]:\n            for getter in getters:\n                try:\n                    return getter(target)\n                except (KeyError, TypeError, AttributeError):\n                    pass\n\n        raise KeyError(name)\n\n    def __eq__(self, other):\n        return self.base == other.base and self.next == other.next\n\n    def __ne__(self, other):\n        return not self == other\n\n\nclass JobContext:\n    \"\"\"A class which exposes properties for rendering commands.\"\"\"\n\n    def __init__(self, job):\n        self.job = job\n\n    @property\n    def name(self):\n        return self.job.name\n\n    def __getitem__(self, item):\n        date_name, date_spec = self._get_date_spec_parts(item)\n        if not date_spec:\n            raise KeyError(item)\n\n        if date_name == \"last_success\":\n            last_success = self.job.runs.last_success\n            last_success = last_success.run_time if last_success else None\n\n            time_value = timeutils.DateArithmetic.parse(\n                date_spec,\n                last_success,\n            )\n            if time_value:\n                return time_value\n\n        raise KeyError(item)\n\n    def _get_date_spec_parts(self, name):\n        parts = name.rsplit(\"#\", 1)\n        if len(parts) != 2:\n            return name, None\n        return parts\n\n    @property\n    def namespace(self):\n        return self.job.name.split(\".\")[0]\n\n\nclass JobRunContext:\n    def __init__(self, job_run):\n        self.job_run = job_run\n\n    @property\n    def runid(self):\n        return self.job_run.id\n\n    @property\n    def manual(self):\n        return 
str(self.job_run.manual).lower()\n\n    @property\n    def cleanup_job_status(self):\n        \"\"\"Provide 'SUCCESS' or 'FAILURE' to a cleanup action context based on\n        the status of the other steps\n        \"\"\"\n        if self.job_run.action_runs.is_failed:\n            return \"FAILURE\"\n        elif self.job_run.action_runs.is_complete_without_cleanup:\n            return \"SUCCESS\"\n        return \"UNKNOWN\"\n\n    def __getitem__(self, name):\n        \"\"\"\n        This function attempts to parse any command context variable expressions\n        that use shortdate or runid in the following order:\n        1) Attempt to parse date arithmetic syntax and apply to run_time unconditionally\n           and, if unsuccessful, falls through to the next case\n        2) Attempts to parse a delta to apply to the current job runid - this is mostly\n           meant to be used for jobs that rely on the output of the previous run, but\n           this is not enforced in case someone can dream up another scenario where they\n           want to do arbitrary deltas here.\n        \"\"\"\n        run_time = self.job_run.run_time\n        time_value = timeutils.DateArithmetic.parse(name, run_time)\n        if time_value:\n            return time_value\n\n        # this is a little weird, but enumerating the cases that should be parsed by timeutils is hard,\n        # so we just unconditionally attempt to parse the name and then fall back to the runid special cases\n        # rather than attempt to enumerate the timeutils cases\n        elif name == \"runid\":\n            # we could expand the logic below to handle this with the regex, but that\n            # would make the code a little more complex for not much gain\n            return self.runid\n        elif \"runid\" in name:\n            # we're really only expecting runid-1 for now but, as described in the docstring,\n            # we're allowing arbitrary addition/subtraction in case someone dreams up a use for\n            # them\n            match = re.match(r\"^runid([+-]\\d+)$\", name)\n            if match:\n                # self.runid here will be the job runid (e.g., NAMESPACE.SERVICE.RUN_NUMBER) - it will not\n                # include an action name.\n                # that said - all we need math-wise here is the run number, so we split on . and store the job name\n                # so that we can re-constitute the runid after doing math on the run number\n                job_name, run_num = self.runid.rsplit(\".\", maxsplit=1)\n                # NOTE: this will potentially return a runid for a job that will never exist - e.g., if you set up an\n                # action that should only run after the previous jobrun's action has run for a job that has never run\n                # before - normally this will only be a problem for the very first run and users can easily tronctl start\n                # the action to bootstrap things so we don't do any checking to see if the returned runid is valid\n                return f\"{job_name}.{int(run_num) + int(match.groups()[0])}\"\n\n        raise KeyError(name)\n\n\nclass ActionRunContext:\n    \"\"\"Context object that gives us access to data about the action run.\"\"\"\n\n    def __init__(self, action_run):\n        self.action_run = action_run\n\n    @property\n    def actionname(self):\n        return self.action_run.action_name\n\n    @property\n    def node(self):\n        return self.action_run.node.hostname\n\n\nclass Filler:\n    \"\"\"Filler object for using CommandContext during config parsing. This class\n    is used as a substitute for objects that would be passed to Context objects.\n    This allows the Context objects to be used directly for config validation.\n    \"\"\"\n\n    def __getattr__(self, _):\n        return self\n\n    def __str__(self):\n        return \"%(...)s\"\n\n    def __mod__(self, _):\n        return self\n\n    def __nonzero__(self):\n        return False\n\n    def __bool__(self):\n        return False\n"
  },
  {
    "path": "tron/commands/__init__.py",
    "content": ""
  },
  {
    "path": "tron/commands/authentication.py",
    "content": "import os\nfrom typing import cast\n\nfrom tron.commands.cmd_utils import get_client_config\n\ntry:\n    from vault_tools.oidc import get_instance_oidc_identity_token  # type: ignore # library lacks py.typed marker\n    from okta_auth import get_and_cache_jwt_default  # type: ignore # library lacks py.typed marker\nexcept ImportError:\n\n    def get_instance_oidc_identity_token(role: str, ecosystem: str | None = None) -> str:\n        return \"\"\n\n    def get_and_cache_jwt_default(client_id: str, refreshable: bool = False, force: bool = False) -> str:\n        return \"\"\n\n\ndef get_sso_auth_token(no_cache: bool = False) -> str:\n    \"\"\"Generate an authentication token for the calling user from the Single Sign On provider, if configured\"\"\"\n    client_id = get_client_config().get(\"auth_sso_oidc_client_id\")\n    return cast(str, get_and_cache_jwt_default(client_id, refreshable=True, force=no_cache)) if client_id else \"\"\n\n\ndef get_vault_auth_token() -> str:\n    \"\"\"Generate an authentication token for the underlying instance via Vault\"\"\"\n    vault_role = get_client_config().get(\"vault_api_auth_role\", \"service_authz\")\n    return cast(str, get_instance_oidc_identity_token(vault_role))\n\n\ndef get_auth_token(no_cache: bool = False) -> str:\n    \"\"\"Generate authentication token via Vault or Okta\"\"\"\n    return get_vault_auth_token() if os.getenv(\"TRONCTL_VAULT_AUTH\") else get_sso_auth_token(no_cache)\n"
  },
  {
    "path": "tron/commands/backfill.py",
    "content": "import asyncio\nimport datetime\nimport functools\nimport os\nimport pprint\nimport re\nimport signal\nimport sys\nfrom urllib.parse import urljoin\n\nfrom tron.commands import client\nfrom tron.commands import display\nfrom tron.commands.authentication import get_auth_token\nfrom tron.core.actionrun import ActionRun\n\nDEFAULT_MAX_PARALLEL_RUNS = 3\nLIMIT_MAX_PARALLEL_RUNS = 10\nDEFAULT_POLLING_INTERVAL_S = 10\n\n\ndef get_date_range(\n    start_date: datetime.datetime,\n    end_date: datetime.datetime,\n    descending: bool = False,\n) -> list[datetime.datetime]:\n    dates = []\n    delta = end_date - start_date\n    for days_to_add in range(delta.days + 1):\n        dates.append(start_date + datetime.timedelta(days=days_to_add))\n    if descending:\n        dates.reverse()\n    return dates\n\n\ndef print_backfill_cmds(job: str, date_strs: list[str]) -> None:\n    print(f\"Please run the following {len(date_strs)} commands:\")\n    print(\"\")\n    for date in date_strs:\n        print(f\"tronctl start {job} --run-date {date}\")\n    print(\"\")\n    print(\"Note that many jobs operate on the previous day's data.\")\n\n\ndef confirm_backfill(job: str, date_strs: list[str]) -> bool:\n    print(\n        f\"To backfill for the job '{job}', a job run will be created for each \"\n        f\"of the following {len(date_strs)} dates:\"\n    )\n    pprint.pprint(date_strs)\n    print(\"\")\n    user_resp = input(\"Confirm? [y/n] \")\n\n    if user_resp.lower() != \"y\":\n        print(\"Aborted.\")\n        return False\n    else:\n        print(\"\")  # just for clean separation\n        return True\n\n\nclass BackfillRun:\n    NOT_STARTED_STATE = \"not started\"\n    SUCCESS_STATES = {ActionRun.SUCCEEDED, ActionRun.CANCELLED, ActionRun.SKIPPED}\n    MAX_SYNC_FAILURES = 5\n\n    def __init__(self, tron_client: client.Client, job_id: client.TronObjectIdentifier, run_time: datetime.datetime):\n        self.tron_client = tron_client\n        self.job_id = job_id\n        self.run_time = run_time\n        self.run_name: str | None = None\n        self.run_id: client.TronObjectIdentifier | None = None\n        self.run_state = BackfillRun.NOT_STARTED_STATE\n\n    @property\n    def run_time_str(self) -> str:\n        return self.run_time.date().isoformat()\n\n    async def run_until_completion(self) -> str:\n        \"\"\"Runs this job run until it finishes (i.e. 
reaches a terminal state).\"\"\"\n        try:\n            if await self.create():\n                await self.sync_state()\n                await self.watch_until_completion()\n        except asyncio.CancelledError:\n            await self.cancel()\n        return self.run_state\n\n    async def create(self) -> str | None:\n        \"\"\"Creates job run for a specific date.\n\n        Returns the name of the run, if it was created with no issues.\n        \"\"\"\n        # create the job run\n        loop = asyncio.get_event_loop()\n        response = await loop.run_in_executor(\n            None,\n            functools.partial(\n                client.request,\n                urljoin(self.tron_client.url_base, self.job_id.url),\n                data=dict(command=\"start\", run_time=self.run_time),\n                user_attribution=True,\n            ),\n        )\n\n        # figure out its name\n        if response.error:\n            print(f\"Error: couldn't start job run for {self.run_time_str}: {response.content}\")\n        else:\n            # determine name of job run so that we can watch it\n            # from tron.api.controller.JobController and tron.core.jobrun, the format\n            # of the response result will be: \"Created JobRun:<job_run_name>\"\n            result = response.content.get(\"result\")\n            match = re.match(r\"^Created JobRun:([-.\\w]+)$\", result)\n\n            if match:\n                self.run_name = match.groups(0)[0]  # type: ignore[assignment] # mypy wrongly identifies self.run_name type as \"Union[str, int]\"\n                self.run_state = ActionRun.STARTING\n                print(f\"Job run '{self.run_name}' for {self.run_time_str} created\")\n            else:\n                print(\n                    f\"Warning: Job run for {self.run_time_str} created, but couldn't determine \"\n                    \"its name, so its state is considered to be unknown.\"\n                )\n                self.run_state = ActionRun.UNKNOWN\n\n        return self.run_name\n\n    async def get_run_id(self) -> client.TronObjectIdentifier | None:\n        if not self.run_id:\n            loop = asyncio.get_event_loop()\n            try:\n                self.run_id = await loop.run_in_executor(\n                    None,\n                    client.get_object_type_from_identifier,\n                    self.tron_client.index(),\n                    self.run_name,\n                )\n\n            except client.RequestError as e:\n                print(f\"Error: couldn't get resource URL for job run '{self.run_name}': {e}\")\n\n        return self.run_id\n\n    async def sync_state(self) -> str:\n        \"\"\"Syncs the local run state with that of the Tron server's.\n\n        Returns the updated state.\n        \"\"\"\n        if not self.run_id:\n            self.run_id = await self.get_run_id()\n\n        if self.run_id:\n            loop = asyncio.get_event_loop()\n            try:\n                # get the state of the run using the resource url\n                resp_content = await loop.run_in_executor(\n                    None,\n                    functools.partial(\n                        self.tron_client.job_runs,\n                        urljoin(self.tron_client.url_base, self.run_id.url),\n                        include_runs=False,\n                        include_graph=False,\n                    ),\n                )\n                self.run_state = resp_content.get(\"state\", ActionRun.UNKNOWN)\n\n            except 
(client.RequestError, AttributeError, ValueError) as e:\n                print(f\"Error: couldn't get state for job run '{self.run_name}': {e}\")\n                self.run_state = ActionRun.UNKNOWN\n\n        return self.run_state\n\n    async def watch_until_completion(self, poll_intv_s: int = DEFAULT_POLLING_INTERVAL_S) -> str:\n        \"\"\"Watches this job run until it finishes.\n\n        Returns the end state of the run.\n        \"\"\"\n        sync_failures = 0\n        while self.run_state not in ActionRun.END_STATES:\n            await asyncio.sleep(poll_intv_s)\n            try:\n                await self.sync_state()\n                sync_failures = 0\n            except Exception as e:\n                print(f\"Issue syncing state for '{self.run_name}': {e}\", file=sys.stderr)\n                sync_failures += 1\n                if sync_failures > self.MAX_SYNC_FAILURES:\n                    raise\n\n        print(f\"Job run '{self.run_name}' for {self.run_time_str} finished with state: {self.run_state}\")\n        return self.run_state\n\n    async def cancel(self) -> bool:\n        \"\"\"Cancel this run if it is running.\n\n        Returns whether or not the run was successfully cancelled\n        \"\"\"\n        if self.run_id:\n            loop = asyncio.get_event_loop()\n            response = await loop.run_in_executor(\n                None,\n                functools.partial(\n                    client.request,\n                    urljoin(self.tron_client.url_base, self.run_id.url),\n                    data=dict(command=\"cancel\"),\n                    user_attribution=True,\n                ),\n            )\n            if response.error:\n                print(\n                    f\"Error: couldn't cancel '{self.run_name}' for {self.run_time_str}. \"\n                    \"You should use tronview to check on it.\"\n                )\n            else:\n                print(f\"Backfill job run '{self.run_name}' for {self.run_time_str} cancelled\")\n                self.run_state = ActionRun.CANCELLED\n                return True\n        else:\n            # accounts for the case where the job was created, but this coroutine\n            # is cancelled before the name (and id) is returned to us\n            print(\n                f\"Warning: attempted to cancel backfill for {self.run_time_str}, but we \"\n                \"don't know if it was created initially. You should use tronview \"\n                \"to check.\"\n            )\n        return False\n\n\nasync def run_backfill_for_date_range(\n    server: str,\n    job_name: str,\n    dates: list[datetime.datetime],\n    max_parallel: int = DEFAULT_MAX_PARALLEL_RUNS,\n    ignore_errors: bool = True,\n) -> list[BackfillRun]:\n    \"\"\"Creates and watches job runs over a range of dates for a given job. 
At\n    most, max_parallel runs can run in parallel to prevent resource exhaustion.\n    \"\"\"\n    loop = asyncio.get_event_loop()\n\n    # Trigger authentication before submitting all async jobs, so auth tokens\n    # are cached and won't prompt the user in the individual API calls.\n    # We pass `no_cache` to ensure a new refresh token is generated and stored in memory.\n    if os.getenv(\"TRONCTL_API_AUTH\"):\n        get_auth_token(no_cache=True)\n\n    tron_client = client.Client(server, user_attribution=True)\n    url_index = tron_client.index()\n\n    # check job_name identifies a valid tron object\n    job_id = client.get_object_type_from_identifier(url_index, job_name)\n    # check job_name identifies a job\n    if job_id.type != client.TronObjectType.job:\n        raise ValueError(f\"'{job_name}' is a {job_id.type.lower()}, not a job\")\n\n    backfill_runs = [BackfillRun(tron_client, job_id, run_time) for run_time in dates]\n    running: set[asyncio.Future] = set()\n    finished_cnt = 0\n    all_successful = True\n\n    # `current_task()` will always return a task here, but we need to account\n    # for the None case for mypy\n    current_task = asyncio.current_task()\n    if current_task:\n        loop.add_signal_handler(signal.SIGINT, current_task.cancel)\n    try:\n        while finished_cnt < len(dates):\n            # start more runs if we still have some and the parallel limit is not yet reached\n            while finished_cnt + len(running) < len(dates) and len(running) < max_parallel:\n                next_run = backfill_runs[finished_cnt + len(running)]\n                running.add(asyncio.ensure_future(next_run.run_until_completion()))\n\n            just_finished, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)\n            for task in just_finished:\n                finished_cnt += 1\n                all_successful &= task.result() in BackfillRun.SUCCESS_STATES\n\n            if not ignore_errors and not all_successful:\n                print(\"Error: encountered failing job run; cancelling all in-progress runs and exiting.\")\n                for task in running:\n                    task.cancel()  # cancel running async tasks\n                    await task  # wait until it is done cancelling\n                break\n\n    except asyncio.CancelledError:  # caused by sigint handler\n        print(\"Error: SIGINT detected; aborting all in-progress runs and exiting\")\n        for task in running:\n            task.cancel()\n            await task\n    return backfill_runs\n\n\nclass DisplayBackfillRuns(display.TableDisplay):\n\n    columns = [\"Date\", \"Job Run Name\", \"Final State\"]\n    fields = [\"run_time\", \"run_name\", \"run_state\"]\n    widths = [15, 60, 15]\n    title = \"Backfill Job Runs\"\n    resize_fields = {\"run_time\", \"run_name\", \"run_state\"}\n    header_color = \"hgray\"\n\n\ndef print_backfill_runs_table(runs: list[BackfillRun]) -> None:\n    \"\"\"Prints backfill runs in a table\"\"\"\n    with display.Color.enable():\n        table = DisplayBackfillRuns().format(\n            [\n                dict(run_time=r.run_time.date().isoformat(), run_name=(r.run_name or \"n/a\"), run_state=r.run_state)\n                for r in runs\n            ]\n        )\n        print(table)\n"
  },
  {
    "path": "tron/commands/client.py",
    "content": "\"\"\"\nA command line http client used by tronview, tronctl, and tronfig\n\"\"\"\nimport json\nimport logging\nimport os\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nfrom collections import namedtuple\n\nimport tron\nfrom tron.commands.authentication import get_auth_token\nfrom tron.config.schema import MASTER_NAMESPACE\n\n\nlog = logging.getLogger(__name__)\n\nUSER_AGENT = f\"Tron Command/{tron.__version__} +http://github.com/Yelp/Tron\"\nDECODE_ERROR = \"DECODE_ERROR\"\nURL_ERROR = \"URL_ERROR\"\n\n\nclass RequestError(ValueError):\n    \"\"\"Raised when the request to tron API fails.\"\"\"\n\n\nResponse = namedtuple(\"Response\", \"error msg content\")\n\ndefault_headers = {\n    \"User-Agent\": USER_AGENT,\n}\n\n\ndef build_url_request(uri, data, headers=None, method=None):\n    headers = headers or default_headers\n    enc_data = urllib.parse.urlencode(data).encode() if data else None\n    # Currently implementing auth only for management actions (i.e. POST requests)\n    if os.getenv(\"TRONCTL_API_AUTH\") and (data or (method and method.upper() == \"POST\")):\n        token = get_auth_token()\n        if token:\n            headers[\"Authorization\"] = f\"Bearer {token}\"\n    return urllib.request.Request(uri, enc_data, headers=headers, method=method)\n\n\ndef load_response_content(http_response):\n    encoding = http_response.headers.get_content_charset()\n    if encoding is None:\n        encoding = \"utf8\"\n    content = http_response.read().decode(encoding)\n    try:\n        return Response(None, None, json.loads(content))\n    except ValueError as e:\n        log.error(\"Failed to decode response: %s, %s\", e, content)\n        return Response(DECODE_ERROR, str(e), content)\n\n\ndef build_http_error_response(exc):\n    content = exc.read() if hasattr(exc, \"read\") else None\n    if content:\n        encoding = exc.headers.get_content_charset()\n        if encoding is None:\n            encoding = \"utf8\"\n        content = content.decode(encoding)\n        try:\n            content = json.loads(content)\n            content = content[\"error\"]\n        except ValueError:\n            log.warning(\n                f\"Incorrectly formatted error response: {content}\",\n            )\n    return Response(exc.code, exc.msg, content)\n\n\ndef request(uri, data=None, headers=None, method=None, user_attribution=False):\n    log.info(\"Request to %s with %s\", uri, data)\n    headers = headers or default_headers\n    if user_attribution:\n        headers = ensure_user_attribution(headers)\n    request = build_url_request(uri, data, headers=headers, method=method)\n    try:\n        response = urllib.request.urlopen(request)\n    except urllib.error.HTTPError as e:\n        log.error(\"Received error response: %s\" % e)\n        return build_http_error_response(e)\n    except urllib.error.URLError as e:\n        log.error(\"Received error response: %s\" % e)\n        return Response(URL_ERROR, e.reason, None)\n\n    return load_response_content(response)\n\n\ndef build_get_url(url, data=None):\n    if data:\n        query_str = urllib.parse.urlencode(sorted(data.items()))\n        return f\"{url}?{query_str}\"\n    else:\n        return url\n\n\ndef ensure_user_attribution(headers: dict[str, str]) -> dict[str, str]:\n    headers = headers.copy()\n    if \"User-Agent\" not in headers:\n        headers[\"User-Agent\"] = USER_AGENT\n    headers[\"User-Agent\"] += f' ({os.environ.get(\"USER\", \"anonymous\")})'\n    return headers\n\n\nclass 
Client:\n    \"\"\"An HTTP client used to issue commands to the Tron API.\"\"\"\n\n    def __init__(self, url_base, cluster_name=None, user_attribution=False):\n        \"\"\"Create a new client.\n        url_base - A url with a schema, hostname and port\n        \"\"\"\n        self.url_base = url_base\n        self.cluster_name = cluster_name\n        self.headers = default_headers\n        if user_attribution:\n            self.headers = ensure_user_attribution(self.headers)\n\n    def status(self):\n        return self.http_get(\"/api/status\")\n\n    def metrics(self):\n        return self.http_get(\"/api/metrics\")\n\n    def config(\n        self,\n        config_name,\n        config_data=None,\n        config_hash=None,\n        check=False,\n    ):\n        \"\"\"Retrieve or update the configuration.\"\"\"\n        if config_data is not None:\n            data_check = 1 if check else 0\n            request_data = dict(\n                config=config_data,\n                name=config_name,\n                hash=config_hash,\n                check=data_check,\n            )\n            return self.request(\"/api/config\", request_data)\n        request_data = dict(name=config_name)\n        return self.http_get(\"/api/config\", request_data)\n\n    def home(self):\n        return self.http_get(\"/api/\")\n\n    index = home\n\n    def get_url(self, identifier):\n        return get_object_type_from_identifier(self.index(), identifier).url\n\n    def jobs(\n        self,\n        include_job_runs=False,\n        include_action_runs=False,\n        include_action_graph=True,\n        include_node_pool=True,\n    ):\n        params = {\n            \"include_job_runs\": int(include_job_runs),\n            \"include_action_runs\": int(include_action_runs),\n            \"include_action_graph\": int(include_action_graph),\n            \"include_node_pool\": int(include_node_pool),\n        }\n        return self.http_get(\"/api/jobs\", params).get(\"jobs\")\n\n    def job(self, job_url, include_action_runs=False, count=0):\n        params = {\n            \"include_action_runs\": int(include_action_runs),\n            \"num_runs\": count,\n        }\n        return self.http_get(job_url, params)\n\n    def job_runs(self, url, include_runs=True, include_graph=False):\n        params = {\n            \"include_action_runs\": int(include_runs),\n            \"include_action_graph\": int(include_graph),\n        }\n        return self.http_get(url, params)\n\n    def action_runs(self, action_run_url, num_lines=0):\n        params = {\n            \"num_lines\": num_lines,\n            \"include_stdout\": 1,\n            \"include_stderr\": 1,\n        }\n        return self.http_get(action_run_url, params)\n\n    def http_get(self, url, data=None):\n        return self.request(build_get_url(url, data))\n\n    def request(self, url, data=None):\n        uri = urllib.parse.urljoin(self.url_base, url)\n        response = request(uri, data, headers=self.headers)\n        if response.error:\n            if response.content:\n                raise RequestError(response.content)\n            else:\n                raise RequestError(f\"{response.error} {response.msg}\")\n        return response.content\n\n\ndef build_api_url(resource, identifier_parts):\n    return \"/api/{}/{}\".format(resource, \"/\".join(identifier_parts))\n\n\ndef split_identifier(identifier):\n    return identifier.rsplit(\".\", identifier.count(\".\") - 1)\n\n\ndef get_job_url(identifier):\n    return 
build_api_url(\"jobs\", split_identifier(identifier))\n\n\nclass TronObjectType:\n    \"\"\"Constants to identify a Tron object type.\"\"\"\n\n    job = \"JOB\"\n    job_run = \"JOB_RUN\"\n    action_run = \"ACTION_RUN\"\n\n    url_builders = {\n        \"jobs\": get_job_url,\n    }\n\n    groups = {\n        \"jobs\": [job, job_run, action_run],\n    }\n\n\nTronObjectIdentifier = namedtuple(\"TronObjectIdentifier\", \"type url\")\n\nIdentifierParts = namedtuple(\"IdentifierParts\", \"name full_id length\")\n\n\ndef first(seq):\n    for item in filter(None, seq):\n        return item\n\n\ndef get_object_type_from_identifier(url_index, identifier):\n    \"\"\"Given a string identifier, return a TronObjectIdentifier.\"\"\"\n    name_mapping = {\n        \"jobs\": set(url_index[\"jobs\"]),\n    }\n\n    def get_name_parts(identifier, namespace=None):\n        if namespace:\n            identifier = f\"{namespace}.{identifier}\"\n\n        name_elements = identifier.split(\".\")\n        name = \".\".join(name_elements[:2])\n        length = len(name_elements) - 2\n        return IdentifierParts(name, identifier, length)\n\n    def find_by_type(id_parts, index_name):\n        url_type_index = name_mapping[index_name]\n        if id_parts.name in url_type_index:\n            tron_type = TronObjectType.groups[index_name][id_parts.length]\n            url = TronObjectType.url_builders[index_name](id_parts.full_id)\n            return TronObjectIdentifier(tron_type, url)\n\n    def find_by_name(name, namespace=None):\n        id = get_name_parts(name, namespace)\n        return find_by_type(id, \"jobs\")\n\n    namespaces = [None, MASTER_NAMESPACE] + url_index[\"namespaces\"]\n    id_obj = first(find_by_name(identifier, name) for name in namespaces)\n    if id_obj:\n        return id_obj\n\n    raise ValueError(\"Unknown job identifier: %s\" % identifier)\n"
  },
  {
    "path": "tron/commands/cmd_utils.py",
    "content": "\"\"\"\nCommon code for command line utilities (see bin/)\n\"\"\"\nimport argparse\nimport difflib\nimport logging\nimport os\nimport sys\n\nimport tron\nfrom tron import yaml\n\nlog = logging.getLogger(\"tron.commands\")\n\n\nclass ExitCode:\n    \"\"\"Enumeration of exit status codes.\"\"\"\n\n    success = 0\n    fail = 1\n\n\nGLOBAL_CONFIG_FILE_NAME = (\n    os.environ.get(\n        \"TRON_CONFIG\",\n    )\n    or \"/etc/tron/tron.yaml\"\n)\nCONFIG_FILE_NAME = os.path.expanduser(\"~/.tron\")\n\nDEFAULT_HOST = \"localhost\"\nDEFAULT_PORT = 8089\n\nDEFAULT_CONFIG = {\n    \"server\": \"http://%s:%d\" % (DEFAULT_HOST, DEFAULT_PORT),\n    \"display_color\": False,\n    \"cluster_name\": \"Unnamed Cluster\",\n}\n\nTAB_COMPLETE_FILE = \"/var/cache/tron_tab_completions\"\n\nCOLOR_RED = \"\\033[31m\"\nCOLOR_YELLOW = \"\\033[33m\"\nCOLOR_DEFAULT = \"\\033[0m\"\n\nopener = open\n\n\ndef get_default_server():\n    return DEFAULT_CONFIG[\"server\"]\n\n\ndef filter_jobs_actions_runs(prefix, inputs):\n    dots = prefix.count(\".\")\n    if prefix == \"\":\n        # If the user hasn't begun to type anything, we need to get them started with all jobs\n        return [i for i in inputs if i.count(\".\") == 1]\n    elif dots == 0:\n        # If the user hasn't completed a job, we need to get them started with all jobs\n        # that start with what they have\n        return [i for i in inputs if i.count(\".\") == 1 and i.startswith(prefix)]\n    elif prefix in inputs:\n        # If what a user typed is exactly what is already in a suggestion, then we need to give them\n        # Even more suggestions (+1)\n        return [i for i in inputs if i.startswith(prefix) and (i.count(\".\") == dots or i.count(\".\") == dots + 1)]\n    else:\n        # Otherwise we only want to scope our suggestions to those that are on the same \"level\"\n        # which in string form means they have the same number of dots\n        return [i for i in inputs if i.startswith(prefix) and i.count(\".\") == dots]\n\n\ndef tron_jobs_completer(prefix, **kwargs):\n    if os.path.isfile(TAB_COMPLETE_FILE):\n        with opener(TAB_COMPLETE_FILE, \"r\") as f:\n            jobs = f.readlines()\n        return filter_jobs_actions_runs(\n            prefix=prefix,\n            inputs=[job.strip(\"\\n\\r\") for job in jobs],\n        )\n    else:\n        # this import is here to avoid an annoying circular dependency\n        from tron.commands.client import Client\n\n        if \"client\" not in kwargs:\n            client = Client(get_default_server())\n        else:\n            client = kwargs[\"client\"]\n        return filter_jobs_actions_runs(\n            prefix=prefix,\n            inputs=[job[\"name\"] for job in client.jobs()],\n        )\n\n\ndef build_option_parser(usage=None, epilog=None):\n    parser = argparse.ArgumentParser(\n        usage=usage,\n        epilog=epilog,\n        formatter_class=argparse.RawDescriptionHelpFormatter,\n    )\n    parser.add_argument(\n        \"--version\",\n        action=\"version\",\n        version=f\"{parser.prog} {tron.__version__}\",\n    )\n\n    parser.add_argument(\n        \"-v\",\n        \"--verbose\",\n        action=\"count\",\n        help=\"Verbose logging\",\n        default=None,\n    )\n    parser.add_argument(\n        \"--server\",\n        default=None,\n        help=\"Url including scheme, host and port, Default: %(default)s\",\n    )\n    parser.add_argument(\n        \"--cluster_name\",\n        default=None,\n        help=\"Human friendly tron 
cluster name\",\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--save\",\n        action=\"store_true\",\n        dest=\"save_config\",\n        help=\"Save options used on this job for next time.\",\n    )\n\n    return parser\n\n\ndef get_client_config():\n    config_file_list = [CONFIG_FILE_NAME, GLOBAL_CONFIG_FILE_NAME]\n    for config_file in config_file_list:\n        filename = os.path.expanduser(config_file)\n        if os.access(filename, os.R_OK):\n            config = read_config(filename)\n            if config:\n                return config\n\n    log.debug(\"Could not find a config in: %s.\" % \", \".join(config_file_list))\n    return {}\n\n\ndef load_config(options):\n    \"\"\"Attempt to load a user specific configuration or a global config file\n    and set any unset options based on values from the config. Finally fallback\n    to DEFAULT_CONFIG for those settings.\n\n    Also save back options to the config if options.save_config is True.\n    \"\"\"\n    config = get_client_config()\n\n    for opt_name in DEFAULT_CONFIG.keys():\n        if not hasattr(options, opt_name):\n            continue\n\n        if getattr(options, opt_name) is not None:\n            continue\n\n        default_value = DEFAULT_CONFIG[opt_name]\n        setattr(options, opt_name, config.get(opt_name, default_value))\n\n    if options.save_config:\n        save_config(options)\n\n\ndef read_config(filename=CONFIG_FILE_NAME):\n    try:\n        with opener(filename, \"r\") as config_file:\n            return yaml.load(config_file)\n    except OSError:\n        log.info(\"Failed to read config file: %s\" % CONFIG_FILE_NAME)\n    return {}\n\n\ndef write_config(config):\n    with open(CONFIG_FILE_NAME, \"w\") as config_file:\n        yaml.dump(config, config_file)\n\n\ndef save_config(options):\n    config = read_config()\n    for opt_name in DEFAULT_CONFIG.keys():\n        if not hasattr(options, opt_name):\n            continue\n        config[opt_name] = getattr(options, opt_name)\n    write_config(config)\n\n\ndef setup_logging(options: argparse.Namespace) -> int:\n    if options.verbose is None:\n        level = logging.ERROR\n    elif options.verbose == 1:\n        level = logging.WARNING\n    elif options.verbose == 2:\n        level = logging.INFO\n    else:\n        level = logging.NOTSET\n\n    logging.basicConfig(\n        level=level,\n        format=\"%(name)s %(levelname)s %(message)s\",\n        stream=sys.stdout,\n    )\n\n    return level\n\n\ndef suggest_possibilities(word, possibilities, max_suggestions=6):\n    suggestions = difflib.get_close_matches(\n        word=word,\n        possibilities=possibilities,\n        n=max_suggestions,\n    )\n    if len(suggestions) == 1:\n        return f\"\\nDid you mean: {suggestions[0]}?\"\n    elif len(suggestions) >= 1:\n        return f\"\\nDid you mean one of: {', '.join(suggestions)}?\"\n    else:\n        return \"\"\n\n\ndef warning_output(text: str, color: str = COLOR_RED) -> str:\n    \"\"\"Return the passed-in string colored in red (by default). Suitable for warning messages.\"\"\"\n    return f\"{color}{text}{COLOR_DEFAULT}\"\n"
  },
  {
    "path": "tron/commands/display.py",
    "content": "\"\"\"\nFormat and color output for tron commands.\n\"\"\"\nimport contextlib\nfrom collections.abc import Callable\nfrom collections.abc import Collection\nfrom functools import partial\nfrom operator import itemgetter\n\nfrom tron.core import actionrun\nfrom tron.core import job\nfrom tron.utils import exitcode\nfrom tron.utils import maybe_encode\n\n\nclass Color:\n\n    enabled = None\n    colors = {\n        \"gray\": \"\\033[90m\",\n        \"red\": \"\\033[91m\",\n        \"green\": \"\\033[92m\",\n        \"yellow\": \"\\033[93m\",\n        \"blue\": \"\\033[94m\",\n        \"purple\": \"\\033[95m\",\n        \"cyan\": \"\\033[96m\",\n        \"white\": \"\\033[99m\",\n        # h is for highlighted\n        \"hgray\": \"\\033[100m\",\n        \"hred\": \"\\033[101m\",\n        \"hgreen\": \"\\033[102m\",\n        \"hyellow\": \"\\033[103m\",\n        \"hblue\": \"\\033[104m\",\n        \"hcyan\": \"\\033[106m\",\n        \"end\": \"\\033[0m\",\n    }\n\n    @classmethod\n    @contextlib.contextmanager\n    def enable(cls):\n        old_val = cls.enabled\n        try:\n            cls.enabled = True\n            yield\n        finally:\n            cls.enabled = old_val\n\n    @classmethod\n    def set(cls, color_name, text):\n        if not cls.enabled or not color_name:\n            return text\n        return \"{}{}{}\".format(\n            cls.colors[color_name.lower()],\n            text,\n            cls.colors[\"end\"],\n        )\n\n    @classmethod\n    def toggle(cls, enable):\n        cls.enabled = enable\n\n\nclass TableDisplay:\n    \"\"\"Base class for displaying columns of data.  This class takes a list\n    of dict objects and formats it so that it displays properly in fixed width\n    columns.  Overlap is truncated.\n\n    This class provides many hooks for customizing the output, including:\n        - sorting of rows\n        - building composite values from more then one field\n        - custom formatting of a columns values\n        - adding additional data after each row\n        - coloring of header, columns, or rows\n\n    The default output is:\n\n        Banner\n        Header\n        Row\n        (optional post row)\n        Row\n        (optional post row)\n        ...\n        Footer\n    \"\"\"\n\n    columns: list[str] = []\n    fields: list[str] = []\n    widths: list[int] = []\n    colors: dict[str, Callable[[str], str]] = {}\n    title: str | None = None\n    resize_fields: Collection[str] = set()\n    reversed = False\n\n    header_color = \"hgray\"\n\n    def __init__(self, sort_index=0):\n        self.out = []\n        self.sort_index = sort_index\n\n    def banner(self):\n        if not self.title:\n            return\n        title = self.title.capitalize()\n        self.out.append(\"\\n%s:\" % title)\n        if not self.rows():\n            self.out.append(\"No %s\\n\" % title)\n\n    def header(self):\n        row = [label.ljust(self.get_field_width(i)) for i, label in enumerate(self.columns)]\n        self.out.append(Color.set(self.header_color, \"\".join(row)))\n\n    def footer(self):\n        pass\n\n    def color(self, col, field):\n        return None\n\n    def sorted_fields(self, values):\n        return [values[name] for name in self.fields]\n\n    def format_row(self, fields):\n        row = [\n            Color.set(self.color(i, value), self.trim_value(i, value))\n            for i, value in enumerate(self.sorted_fields(fields))\n        ]\n        return Color.set(self.row_color(fields), \"\".join(row))\n\n    
def get_field_width(self, field_idx):\n        return self.widths[field_idx]\n\n    def trim_value(self, field_idx, value):\n        length = self.get_field_width(field_idx)\n        value = self.format_value(field_idx, value)\n        if len(value) > length:\n            return (value[: length - 3] + \"...\").ljust(length)\n        return value.ljust(length)\n\n    def format_value(self, field_idx, value):\n        return str(value)\n\n    def output(self):\n        out = \"\\n\".join(self.out)\n        self.out = []\n        return out\n\n    def post_row(self, row):\n        pass\n\n    def row_color(self, row):\n        return None\n\n    def rows(self):\n        return sorted(\n            self.data,\n            key=itemgetter(self.fields[self.sort_index]),\n            reverse=self.reversed,\n        )\n\n    def store_data(self, data):\n        self.data = data\n\n    def update_column_widths(self):\n        \"\"\"Update column widths to fit the data.\"\"\"\n        for field_idx, field in enumerate(self.fields):\n            if field in self.resize_fields:\n                self.widths[field_idx] = self.calculate_width(field_idx)\n\n    def calculate_width(self, field_idx):\n        default_width = self.widths[field_idx]\n        column = [self.format_value(field_idx, row[self.fields[field_idx]]) for row in self.data]\n        if not column:\n            return default_width\n        max_value_width = max(len(value) for value in column)\n        return max(max_value_width + 1, default_width)\n\n    def format(self, data):\n        self.store_data(data)\n        self.update_column_widths()\n        self.banner()\n\n        if not self.rows():\n            return self.output()\n\n        self.header()\n        for row in self.rows():\n            self.out.append(self.format_row(row))\n            self.post_row(row)\n\n        self.footer()\n        return self.output()\n\n\ndef add_color_for_state(state):\n    if state == actionrun.ActionRun.FAILED:\n        return Color.set(\"red\", state)\n    if state in {\n        actionrun.ActionRun.RUNNING,\n        actionrun.ActionRun.SUCCEEDED,\n        job.Job.STATUS_ENABLED,\n    }:\n        return Color.set(\"green\", state)\n    if state in {job.Job.STATUS_DISABLED}:\n        return Color.set(\"blue\", state)\n    return state\n\n\ndef format_fields(display_obj, content):\n    \"\"\"Format fields with some color.\"\"\"\n\n    def add_color(field, field_value):\n        if field not in display_obj.colors:\n            return field_value\n        return display_obj.colors[field](field_value)\n\n    def format_field(field):\n        formatter = field_display_mapping.get(field, lambda f, _: f)\n        return formatter(content.get(field), content)\n\n    def build_field(label, field):\n        return f\"{label:<20}: {add_color(field, format_field(field))}\"\n\n    return \"\\n\".join(build_field(*item) for item in display_obj.detail_labels)\n\n\ndef format_job_details(job_content):\n    details = format_fields(DisplayJobs, job_content)\n    job_runs = DisplayJobRuns().format(job_content[\"runs\"])\n    actions = \"\\n\\nList of Actions:\\n%s\" % \"\\n\".join(\n        job_content[\"action_names\"],\n    )\n    return details + actions + \"\\n\" + job_runs\n\n\ndef format_action_run_details(content, stdout=True, stderr=True):\n    out = [\"Requirements:\"] + content[\"requirements\"] + [\"\"]\n    if stdout:\n        out.append(\"Stdout:\\n%s\\n\" % \"\\n\".join(content[\"stdout\"]))\n\n    if stderr:\n        out.append(\"Stderr:\\n%s\\n\" % 
\"\\n\".join(content[\"stderr\"]))\n\n    details = format_fields(DisplayActionRuns, content)\n    return details + \"\\n\" + \"\\n\".join(out)\n\n\nclass DisplayJobRuns(TableDisplay):\n    \"\"\"Format Job runs.\"\"\"\n\n    columns = [\"Run ID\", \"State\", \"Node\", \"Scheduled Time\"]\n    fields = [\"run_num\", \"state\", \"node\", \"run_time\"]\n    widths = [10, 12, 30, 25]\n    title = \"job runs\"\n    reversed = True\n\n    detail_labels = [\n        (\"Job Run\", \"id\"),\n        (\"State\", \"state\"),\n        (\"Node\", \"node\"),\n        (\"Scheduled time\", \"run_time\"),\n        (\"Start time\", \"start_time\"),\n        (\"End time\", \"end_time\"),\n        (\"Manual run\", \"manual\"),\n    ]\n\n    colors = {\n        \"id\": partial(Color.set, \"yellow\"),\n        \"state\": add_color_for_state,\n        \"manual\": lambda value: Color.set(\"cyan\" if value else None, value),\n    }\n\n    def format_value(self, field_idx, value):\n        if self.fields[field_idx] == \"run_num\":\n            value = \".\" + str(value)\n\n        if self.fields[field_idx] == \"scheduled_time\":\n            value = value or \"-\"\n\n        if self.fields[field_idx] == \"node\":\n            value = display_node(value)\n\n        return super().format_value(field_idx, value)\n\n    def row_color(self, fields):\n        return \"red\" if fields[\"state\"] == \"FAIL\" else \"white\"\n\n    def post_row(self, row):\n        start = row[\"start_time\"] or \"-\"\n        end = row[\"end_time\"] or \"-\"\n        duration = row[\"duration\"][:-7] if row[\"duration\"] else \"-\"\n\n        row_data = \"{}Start: {}  End: {}  ({})\".format(\n            \" \" * self.widths[0],\n            start,\n            end,\n            duration,\n        )\n        self.out.append(Color.set(\"gray\", row_data))\n\n\nclass DisplayJobs(TableDisplay):\n\n    columns = [\"Name\", \"State\", \"Scheduler\", \"Last Success\"]\n    fields = [\"name\", \"status\", \"scheduler\", \"last_success\"]\n    widths = [50, 10, 20, 22]\n    title = \"jobs\"\n    resize_fields = [\"name\"]\n\n    detail_labels = [\n        (\"Job\", \"name\"),\n        (\"State\", \"status\"),\n        (\"Scheduler\", \"scheduler\"),\n        (\"Max runtime\", \"max_runtime\"),\n        (\"Node Pool\", \"node_pool\"),\n        (\"Run on all nodes\", \"all_nodes\"),\n        (\"Allow overlapping\", \"allow_overlap\"),\n        (\"Queue overlapping\", \"queueing\"),\n    ]\n\n    colors = {\n        \"name\": partial(Color.set, \"yellow\"),\n        \"status\": add_color_for_state,\n    }\n\n    def format_value(self, field_idx, value):\n        if self.fields[field_idx] == \"scheduler\":\n            value = display_scheduler(value)\n\n        return super().format_value(field_idx, value)\n\n\nclass DisplayActionRuns(TableDisplay):\n\n    columns = [\"Action\", \"State\", \"Start Time\", \"End Time\", \"Duration\"]\n    fields = [\"id\", \"state\", \"start_time\", \"end_time\", \"duration\"]\n    widths = [40, 12, 22, 22, 10]\n    title = \"actions\"\n    resize_fields = [\"id\"]\n\n    detail_labels = [\n        (\"Action Run\", \"id\"),\n        (\"State\", \"state_delayed\"),\n        (\"Node\", \"node\"),\n        (\"Last run command\", \"command\"),\n        (\"Original raw command\", \"original_command\"),\n        (\"Config command\", \"raw_command\"),\n        (\"Start time\", \"start_time\"),\n        (\"End time\", \"end_time\"),\n        (\"Final exit status\", \"exit_status\"),\n        (\"Exit statuses\", 
\"exit_statuses\"),\n        (\"Waits for triggers\", \"triggered_by\"),\n        (\"Publishes triggers\", \"trigger_downstreams\"),\n    ]\n\n    colors = {\n        \"id\": partial(Color.set, \"yellow\"),\n        \"state\": add_color_for_state,\n        \"command\": partial(Color.set, \"gray\"),\n    }\n\n    def __init__(self):\n        # Action runs need to be sorted by start time, which is index 2\n        super().__init__(sort_index=2)\n\n    def banner(self):\n        self.out.append(format_fields(DisplayJobRuns, self.job_run))\n        super().banner()\n\n    def format_value(self, field_idx, value):\n        if self.fields[field_idx] == \"id\":\n            value = \".\" + value.rsplit(\".\", 1)[-1]\n        if self.fields[field_idx] in (\"start_time\", \"end_time\"):\n            value = value or \"-\"\n        if self.fields[field_idx] == \"duration\":\n            # Strip microseconds\n            value = value[:-7] if value else \"-\"\n\n        return super().format_value(field_idx, value)\n\n    def row_color(self, fields):\n        return \"red\" if fields[\"state\"] == \"FAIL\" else \"white\"\n\n    def store_data(self, data):\n        self.data = data[\"runs\"]\n        self.job_run = data\n\n    def rows(self):\n        # Action runs need a sort order that sorts by date\n        # and that can handle situations where it is None, or\n        # other weird things, so we str()\n        return sorted(\n            self.data,\n            key=lambda x: str(x[self.fields[self.sort_index]]),\n            reverse=self.reversed,\n        )\n\n\ndef display_node(source, _=None):\n    if not source:\n        return \"\"\n    return \"{}@{}\".format(source[\"username\"], source[\"hostname\"])\n\n\ndef display_node_pool(source, _=None):\n    if not source:\n        return \"\"\n    return \"%s (%d node(s))\" % (source[\"name\"], len(source[\"nodes\"]))\n\n\ndef display_scheduler(source, _=None):\n    if not source:\n        return \"\"\n    return \"{} {}{}\".format(source[\"type\"], source[\"value\"], source[\"jitter\"])\n\n\ndef display_state_delayed(_, obj):\n    state = obj[\"state\"]\n    in_delay = obj[\"in_delay\"]\n    if in_delay:\n        return f\"{state} (retry delayed for {int(in_delay)}s)\"\n    else:\n        return state\n\n\nfield_display_mapping = {\n    \"node\": display_node,\n    \"node_pool\": display_node_pool,\n    \"scheduler\": display_scheduler,\n    \"state_delayed\": display_state_delayed,\n    \"exit_status\": lambda v, _: exitcode.EXIT_REASONS.get(v, v),\n}\n\n\ndef view_with_less(content, color=True):\n    \"\"\"Send `content` through less.\"\"\"\n    import subprocess\n\n    cmd = [\"less\"]\n    if color:\n        cmd.append(\"-r\")\n\n    less_proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)\n    less_proc.stdin.write(\n        maybe_encode(content)\n    )  # TODO: TRON-2293 maybe_encode is a relic of Python2->Python3 migration. Remove it.\n    less_proc.stdin.close()\n    less_proc.wait()\n"
  },
  {
    "path": "tron/commands/retry.py",
    "content": "import argparse\nimport asyncio\nimport datetime\nimport functools\nimport random\nfrom urllib.parse import urljoin\n\nimport pytimeparse  # type:ignore\n\nfrom tron.commands import client\nfrom tron.commands import display\nfrom tron.commands.backfill import BackfillRun\n\n\nDEFAULT_POLLING_INTERVAL_S = 10\n\n\ndef parse_deps_timeout(duration: str) -> int:\n    if duration == \"infinity\":\n        return RetryAction.WAIT_FOREVER\n    elif duration.isnumeric():\n        seconds = int(duration)\n    else:\n        seconds = pytimeparse.parse(duration)\n        if seconds is None:\n            raise argparse.ArgumentTypeError(\n                f\"'{duration}' is not a valid duration. Must be either number of seconds or pytimeparse-parsable string.\"\n            )\n    if seconds < 0:\n        raise argparse.ArgumentTypeError(f\"'{duration}' must not be negative\")\n    return seconds\n\n\nclass RetryAction:\n    NO_TIMEOUT = 0\n    WAIT_FOREVER = -1\n\n    RETRY_NOT_ISSUED = None\n    RETRY_SUCCESS = True\n    RETRY_FAIL = False\n\n    def __init__(\n        self,\n        tron_client: client.Client,\n        full_action_name: str,\n        use_latest_command: bool = False,\n    ):\n        self.tron_client = tron_client\n        self.retry_params = dict(command=\"retry\", use_latest_command=int(use_latest_command))\n\n        self.full_action_name = full_action_name\n        self.action_run_id = self._validate_action_name(full_action_name)\n        self.job_run_id = client.get_object_type_from_identifier(self.tron_client.index(), self.job_run_name)\n\n        self._required_action_indices = self._get_required_action_indices()\n        self._elapsed = datetime.timedelta(seconds=0)\n        self._triggers_done = False\n        self._required_actions_done = False\n        self._retry_request_result: bool | None = RetryAction.RETRY_NOT_ISSUED\n\n    @property\n    def job_run_name(self) -> str:\n        return self.full_action_name.rsplit(\".\", 1)[0]\n\n    @property\n    def action_name(self) -> str:\n        return self.full_action_name.rsplit(\".\", 1)[1]\n\n    @property\n    def status(self) -> str:\n        if not self._triggers_done:\n            return \"Upstream triggers not all published\"\n        elif not self._required_actions_done:\n            return \"Required actions not all successfully completed\"\n        elif self._retry_request_result == RetryAction.RETRY_NOT_ISSUED:\n            return \"Retry request not issued, but dependencies done\"\n        elif self._retry_request_result == RetryAction.RETRY_SUCCESS:\n            return \"Retry request issued successfully\"\n        else:\n            return \"Failed to issue retry request\"\n\n    @property\n    def succeeded(self) -> bool:\n        return bool(self._retry_request_result)\n\n    def _validate_action_name(self, full_action_name: str) -> client.TronObjectIdentifier:\n        action_run_id: client.TronObjectIdentifier = client.get_object_type_from_identifier(\n            self.tron_client.index(), full_action_name\n        )\n        if action_run_id.type != client.TronObjectType.action_run:\n            raise ValueError(f\"'{full_action_name}' is a {action_run_id.type.lower()}, not an action\")\n        self.tron_client.action_runs(action_run_id.url, num_lines=0)  # verify action exists\n        return action_run_id\n\n    def _get_required_action_indices(self) -> dict[str, int]:\n        job_run = self.tron_client.job_runs(self.job_run_id.url)\n        required_actions = set()\n        
action_indices = {}\n\n        for i, action_run in enumerate(job_run[\"runs\"]):\n            if action_run[\"action_name\"] == self.action_name:\n                required_actions = set(action_run[\"requirements\"])\n            action_indices[action_run[\"action_name\"]] = i\n\n        return {action_name: i for action_name, i in action_indices.items() if action_name in required_actions}\n\n    def _log(self, msg: str) -> None:\n        print(f\"[{self._elapsed}] {self.full_action_name}: {msg}\")\n\n    async def can_retry(self) -> bool:\n        if not self._triggers_done:\n            triggers = await self.check_trigger_statuses()\n            self._triggers_done = all(triggers.values())\n            if self._triggers_done:\n                if len(triggers) > 0:\n                    self._log(\"All upstream triggers published\")\n            else:\n                remaining_triggers = [trigger for trigger, is_done in triggers.items() if not is_done]\n                self._log(f\"Upstream triggers not yet published: {remaining_triggers}\")\n        if not self._required_actions_done:\n            required_actions = await self.check_required_actions_statuses()\n            self._required_actions_done = all(required_actions.values())\n            if self._required_actions_done:\n                if len(required_actions) > 0:\n                    self._log(\"All required actions finished\")\n            else:\n                remaining_required_actions = [action for action, is_done in required_actions.items() if not is_done]\n                self._log(f\"Required actions not yet succeeded: {remaining_required_actions}\")\n        return self._triggers_done and self._required_actions_done\n\n    async def check_trigger_statuses(self) -> dict[str, bool]:\n        action_run = await asyncio.get_event_loop().run_in_executor(\n            None,\n            functools.partial(\n                self.tron_client.action_runs,\n                self.action_run_id.url,\n                num_lines=0,\n            ),\n        )\n        # from tron.api.adapter:ActionRunAdapter.get_triggered_by:\n        # triggered_by is a single string with this format:\n        #   {trigger_1} (done), {trigger_2}, etc.\n        # where trigger_1 has been published, and trigger_2 is still waiting\n        trigger_states = {}\n        for trigger_and_state in action_run[\"triggered_by\"].split(\", \"):\n            if trigger_and_state:\n                trigger, *maybe_state = trigger_and_state.split(\" \")\n                # if len(parts) == 2, then parts is [{trigger}, \"(done)\"]\n                # else, parts is [{trigger}]\n                trigger_states[trigger] = len(maybe_state) == 1\n        return trigger_states\n\n    async def check_required_actions_statuses(self) -> dict[str, bool]:\n        action_runs = (\n            await asyncio.get_event_loop().run_in_executor(\n                None,\n                self.tron_client.job_runs,\n                self.job_run_id.url,\n            )\n        )[\"runs\"]\n        return {\n            action_runs[i][\"action_name\"]: action_runs[i][\"state\"] in BackfillRun.SUCCESS_STATES\n            for i in self._required_action_indices.values()\n        }\n\n    async def wait_and_retry(\n        self,\n        deps_timeout_s: int = 0,\n        poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S,\n        jitter: bool = True,\n    ) -> bool:\n\n        if deps_timeout_s != RetryAction.NO_TIMEOUT and jitter:\n            init_delay_s = random.randint(1, min(deps_timeout_s, 
poll_interval_s)) - 1\n            self._elapsed += datetime.timedelta(seconds=init_delay_s)\n            await asyncio.sleep(init_delay_s)\n\n        if await self.wait_for_deps(deps_timeout_s=deps_timeout_s, poll_interval_s=poll_interval_s):\n            return await self.issue_retry()\n        else:\n            deps_timeout_td = datetime.timedelta(seconds=deps_timeout_s)\n            msg = \"Action will not be retried.\"\n            if deps_timeout_s != RetryAction.NO_TIMEOUT:\n                msg = f\"Not all dependencies completed after waiting for {deps_timeout_td}. \" + msg\n            self._log(msg)\n            return False\n\n    async def wait_for_deps(\n        self,\n        deps_timeout_s: int = 0,\n        poll_interval_s: int = DEFAULT_POLLING_INTERVAL_S,\n    ) -> bool:\n        \"\"\"Wait for all upstream dependencies to finish, up to a timeout. Once the\n        timeout has expired, one final check is always conducted.\n\n        Returns whether or not deps successfully finished.\n        \"\"\"\n        while deps_timeout_s == RetryAction.WAIT_FOREVER or self._elapsed.seconds < deps_timeout_s:\n            if await self.can_retry():\n                return True\n            wait_for = poll_interval_s\n            if deps_timeout_s != RetryAction.WAIT_FOREVER:\n                wait_for = min(wait_for, int(deps_timeout_s - self._elapsed.seconds))\n            await asyncio.sleep(wait_for)\n            self._elapsed += datetime.timedelta(seconds=wait_for)\n\n        return await self.can_retry()\n\n    async def issue_retry(self) -> bool:\n        self._log(\"Issuing retry request\")\n        response = await asyncio.get_event_loop().run_in_executor(\n            None,\n            functools.partial(\n                client.request,\n                urljoin(self.tron_client.url_base, self.action_run_id.url),\n                data=self.retry_params,\n                user_attribution=True,\n            ),\n        )\n        if response.error:\n            self._log(f\"Error: couldn't issue retry request: {response.content}\")\n            self._retry_request_result = RetryAction.RETRY_FAIL\n        else:\n            self._log(f\"Got result: {response.content.get('result')}\")\n            self._log(f\"Check the status of the retry run using: `tronview {self.full_action_name}`\")\n            self._retry_request_result = RetryAction.RETRY_SUCCESS\n        return self._retry_request_result\n\n\ndef retry_actions(\n    tron_server: str,\n    full_action_names: list[str],\n    use_latest_command: bool = False,\n    deps_timeout_s: int = RetryAction.NO_TIMEOUT,\n) -> list[RetryAction]:\n    tron_client = client.Client(tron_server, user_attribution=True)\n    r_actions = [RetryAction(tron_client, name, use_latest_command=use_latest_command) for name in full_action_names]\n\n    loop = asyncio.get_event_loop()\n    try:\n        # first action starts checking immediately, rest have a jitter\n        loop.run_until_complete(\n            asyncio.gather(\n                r_actions[0].wait_and_retry(deps_timeout_s=deps_timeout_s, jitter=False),\n                *[ra.wait_and_retry(deps_timeout_s=deps_timeout_s) for ra in r_actions[1:]],\n            )\n        )\n    finally:\n        loop.close()\n    return r_actions\n\n\nclass DisplayRetries(display.TableDisplay):\n\n    columns = [\"Action Name\", \"Final Status\"]\n    fields = [\"full_action_name\", \"status\"]\n    widths = [60, 60]\n    title = \"Retries\"\n    resize_fields = {\"full_action_name\", \"status\"}\n    
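# table layout consumed by print_retries_table below\n    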
header_color = \"hgray\"\n\n\ndef print_retries_table(retries: list[RetryAction]) -> None:\n    \"\"\"Prints retry runs in a table\"\"\"\n    with display.Color.enable():\n        table = DisplayRetries().format([dict(full_action_name=r.full_action_name, status=r.status) for r in retries])\n        print(table)\n"
  },
  {
    "path": "tron/config/__init__.py",
    "content": "class ConfigError(Exception):\n    \"\"\"Generic exception class for errors with config validation\"\"\"\n\n    pass\n"
  },
  {
    "path": "tron/config/config_parse.py",
    "content": "\"\"\"\nParse a dictionary structure and return an immutable structure that\ncontain a validated configuration.\n\n\nWARNING: it is *NOT* safe to delete classes that are being validated (or their attributes) if there are any references to them in DynamoDB until TRON-2200 is complete! (See DAR-2328)\nNOTE: this means that reverting a change that adds a new attribute is not safe :)\n\"\"\"\nimport datetime\nimport getpass\nimport itertools\nimport logging\nimport os\nfrom copy import deepcopy\nfrom typing import Any\nfrom urllib.parse import urlparse\n\nimport pytz\nfrom task_processing.plugins.mesos.constraints import OPERATORS\n\nfrom tron import command_context\nfrom tron.config import config_utils\nfrom tron.config import ConfigError\nfrom tron.config import schema\nfrom tron.config.config_utils import build_dict_name_validator\nfrom tron.config.config_utils import build_dict_value_validator\nfrom tron.config.config_utils import build_list_of_type_validator\nfrom tron.config.config_utils import ConfigContext\nfrom tron.config.config_utils import PartialConfigContext\nfrom tron.config.config_utils import StringFormatter\nfrom tron.config.config_utils import valid_bool\nfrom tron.config.config_utils import valid_dict\nfrom tron.config.config_utils import valid_exit_code\nfrom tron.config.config_utils import valid_float\nfrom tron.config.config_utils import valid_identifier\nfrom tron.config.config_utils import valid_int\nfrom tron.config.config_utils import valid_list\nfrom tron.config.config_utils import valid_name_identifier\nfrom tron.config.config_utils import valid_string\nfrom tron.config.config_utils import Validator\nfrom tron.config.schedule_parse import valid_schedule\nfrom tron.config.schema import CLEANUP_ACTION_NAME\nfrom tron.config.schema import ConfigAction\nfrom tron.config.schema import ConfigCleanupAction\nfrom tron.config.schema import ConfigConstraint\nfrom tron.config.schema import ConfigFieldSelectorSource\nfrom tron.config.schema import ConfigJob\nfrom tron.config.schema import ConfigKubernetes\nfrom tron.config.schema import ConfigMesos\nfrom tron.config.schema import ConfigNodeAffinity\nfrom tron.config.schema import ConfigParameter\nfrom tron.config.schema import ConfigProjectedSAVolume\nfrom tron.config.schema import ConfigSecretSource\nfrom tron.config.schema import ConfigSecretVolume\nfrom tron.config.schema import ConfigSecretVolumeItem\nfrom tron.config.schema import ConfigSSHOptions\nfrom tron.config.schema import ConfigState\nfrom tron.config.schema import ConfigTopologySpreadConstraints\nfrom tron.config.schema import ConfigVolume\nfrom tron.config.schema import MASTER_NAMESPACE\nfrom tron.config.schema import NamedTronConfig\nfrom tron.config.schema import TronConfig\n\nlog = logging.getLogger(__name__)\n\n\ndef build_format_string_validator(context_object):\n    \"\"\"Validate that a string does not contain any unexpected formatting keys.\n    valid_keys - a sequence of strings\n    \"\"\"\n\n    def validator(value, config_context):\n        if config_context.partial:\n            return valid_string(value, config_context)\n\n        context = command_context.CommandContext(\n            context_object,\n            config_context.command_context,\n        )\n\n        try:\n            StringFormatter(context).format(value)\n            return value\n        except (KeyError, ValueError) as e:\n            error_msg = \"Unknown context variable %s at %s: %s\"\n            raise ConfigError(error_msg % (e, config_context.path, value))\n 
       except TypeError as e:\n            error_msg = \"Wrong command format %s: %s at %s\"\n            raise ConfigError(error_msg % (value, e, config_context.path))\n\n    return validator\n\n\ndef valid_output_stream_dir(output_dir, config_context):\n    \"\"\"Returns a valid string for the output directory, or raises ConfigError\n    if the output_dir is not valid.\n    \"\"\"\n    if not output_dir:\n        return\n\n    if config_context.partial:\n        return output_dir\n\n    valid_string(output_dir, config_context)\n    if not os.path.isdir(output_dir):\n        msg = \"output_stream_dir '%s' is not a directory\"\n        raise ConfigError(msg % output_dir)\n\n    if not os.access(output_dir, os.W_OK):\n        raise ConfigError(\n            \"output_stream_dir '%s' is not writable\" % output_dir,\n        )\n\n    return output_dir\n\n\ndef valid_identity_file(file_path, config_context):\n    valid_string(file_path, config_context)\n\n    if config_context.partial:\n        return file_path\n\n    file_path = os.path.expanduser(file_path)\n    if not os.path.exists(file_path):\n        raise ConfigError(\"Private key file %s doesn't exist\" % file_path)\n\n    public_key_path = file_path + \".pub\"\n    if not os.path.exists(public_key_path):\n        raise ConfigError(\"Public key file %s doesn't exist\" % public_key_path)\n    return file_path\n\n\ndef valid_known_hosts_file(file_path, config_context):\n    valid_string(file_path, config_context)\n\n    if config_context.partial:\n        return file_path\n\n    file_path = os.path.expanduser(file_path)\n    if not os.path.exists(file_path):\n        raise ConfigError(\"Known hosts file %s doesn't exist\" % file_path)\n    return file_path\n\n\ndef valid_command_context(context, config_context):\n    # context can be any dict.\n    return valid_dict(context or {}, config_context)\n\n\ndef valid_time_zone(tz, config_context):\n    if tz is None:\n        return None\n    valid_string(tz, config_context)\n    try:\n        return pytz.timezone(tz)\n    except pytz.exceptions.UnknownTimeZoneError:\n        raise ConfigError(\"%s is not a valid time zone\" % tz)\n\n\ndef valid_node_name(value, config_context):\n    valid_identifier(value, config_context)\n    if not config_context.partial and value not in config_context.nodes:\n        msg = \"Unknown node name %s at %s\"\n        raise ConfigError(msg % (value, config_context.path))\n    return value\n\n\ndef valid_master_address(value, config_context):\n    \"\"\"Validates and normalizes Mesos master address.\n\n    Must be HTTP or not include a scheme, and only include\n    a host, without any path components.\n    \"\"\"\n    valid_string(value, config_context)\n\n    # Parse with HTTP as default, only HTTP allowed.\n    scheme, netloc, path, params, query, fragment = urlparse(value, \"http\")\n    if scheme != \"http\":\n        msg = f\"Only HTTP supported for Mesos master address, got {value}\"\n        raise ConfigError(msg)\n\n    if params or query or fragment:\n        msg = f\"Mesos master address may not contain path components, got {value}\"\n        raise ConfigError(msg)\n\n    # Only one of netloc or path allowed, and no / except trailing ones.\n    # netloc is empty if there's no scheme, then we try the path.\n    path = path.rstrip(\"/\")\n    if (netloc and path) or \"/\" in path:\n        msg = f\"Mesos master address may not contain path components, got {value}\"\n        raise ConfigError(msg)\n\n    if not netloc:\n        netloc = path\n\n    if not 
netloc:\n        msg = f\"Mesos master address is missing host, got {value}\"\n        raise ConfigError(msg)\n\n    return f\"{scheme}://{netloc}\"\n\n\ndef valid_k8s_master_address(value: str, config_context: ConfigContext) -> str:\n    \"\"\"Validates and normalizes Kubernetes master address.\n\n    Must be HTTPS or not include a scheme, and only include\n    a host, without any path components.\n    \"\"\"\n    valid_string(value, config_context)\n\n    # Parse with HTTPS as default, only HTTPS allowed.\n    scheme, netloc, path, params, query, fragment = urlparse(url=value, scheme=\"https\")\n    if scheme != \"https\":\n        msg = f\"Only HTTPS supported for Kubernetes master address, got {value}\"\n        raise ConfigError(msg)\n\n    if params or query or fragment:\n        msg = f\"Kubernetes master address may not contain path components, got {value}\"\n        raise ConfigError(msg)\n\n    # Only one of netloc or path allowed, and no / except trailing ones.\n    path = path.rstrip(\"/\")\n    if (netloc and path) or \"/\" in path:\n        msg = f\"Kubernetes master address may not contain path components, got {value}\"\n        raise ConfigError(msg)\n\n    # netloc is empty if there's no scheme, so we fallback to path.\n    if not netloc and path:\n        netloc = path\n\n    if not netloc:\n        msg = f\"Kubernetes master address is missing host, got {value}\"\n        raise ConfigError(msg)\n\n    return f\"{scheme}://{netloc}\"\n\n\nclass ValidateConstraint(Validator):\n    config_class = ConfigConstraint\n    validators = {\n        \"attribute\": valid_string,\n        \"operator\": config_utils.build_enum_validator(OPERATORS.keys()),\n        \"value\": valid_string,\n    }\n\n\nvalid_constraint = ValidateConstraint()\n\n\nclass ValidateDockerParameter(Validator):\n    config_class = ConfigParameter\n    validators = {\n        \"key\": valid_string,\n        \"value\": valid_string,\n    }\n\n\nvalid_docker_parameter = ValidateDockerParameter()\n\n\nclass ValidateVolume(Validator):\n    config_class = ConfigVolume\n    validators = {\n        \"container_path\": valid_string,\n        \"host_path\": valid_string,\n        \"mode\": config_utils.build_real_enum_validator(schema.VolumeModes),\n    }\n\n\nvalid_volume = ValidateVolume()\n\n\nclass ValidateSecretSource(Validator):\n    config_class = ConfigSecretSource\n    validators = {\n        \"secret_name\": valid_string,  # name of Kubernetes Secret\n        \"key\": valid_string,  # key name in Secret data\n    }\n\n\nvalid_secret_source = ValidateSecretSource()\n\n\ndef valid_permission_mode(value: str | int, config_context: ConfigContext) -> str:\n    try:\n        decimal_value = int(\n            str(value), base=8\n        )  # take in permission mode as string or int representation of an octal number. 
Goes from 0 to 4095 in decimal.\n    except ValueError:\n        error_msg = \"Could not parse {} as octal permission mode at {}\"\n        raise ConfigError(error_msg.format(value, config_context.path))\n    if decimal_value > 4095 or decimal_value < 0:\n        error_msg = \"Octal permission mode {} out of bound at {}\"\n        raise ConfigError(error_msg.format(value, config_context.path))\n    return str(value)\n\n\nclass ValidateSecretVolumeItem(Validator):\n    config_class = ConfigSecretVolumeItem\n\n    validators = {\n        \"key\": valid_string,  # name of current secret\n        \"path\": valid_string,  # New secret filename\n        \"mode\": valid_permission_mode,  # Octal permission mode\n    }\n\n\nvalid_secret_volume_item = ValidateSecretVolumeItem()\n\n\nclass ValidateSecretVolume(Validator):\n    config_class = ConfigSecretVolume\n\n    optional = True\n    defaults = {\n        \"default_mode\": \"0644\",\n        \"items\": None,\n    }\n\n    validators = {\n        \"container_path\": valid_string,\n        \"secret_volume_name\": valid_string,\n        \"secret_name\": valid_string,\n        \"default_mode\": valid_permission_mode,\n        \"items\": build_list_of_type_validator(valid_secret_volume_item, allow_empty=True),\n    }\n\n    def post_validation(self, valid_input, config_context):\n        \"\"\"Propagate default mode and enforce the secret-key match.\"\"\"\n        # Ensure 'items' is an iterable list, even if defaulted to None by set_defaults.\n        # The 'or []' handles the case where valid_input.get('items') returns None.\n        items = valid_input.get(\"items\") or []\n\n        # Our secrets will really only ever have one key, so weirdly we only care about a single\n        # item of this array AND it must have the same name as the secret (which is the single key).\n        if len(items) > 1:\n            raise ConfigError(\n                \"There is more than one item in the items array. 
This is unsupported as we don't support multi-key secrets.\"\n            )\n\n        processed_items = []\n        modified = False\n        default_mode = valid_input.get(\"default_mode\", self.defaults[\"default_mode\"])\n        secret_name = valid_input.get(\"secret_name\")\n\n        for item in items:\n            if item.key != secret_name:\n                raise ConfigError(f\"Item key '{item.key}' does not match the volume's secret name '{secret_name}'\")\n\n            final_item = item\n            if item.mode is None:\n                # Apply volume's default_mode to items without an explicit mode.\n                final_item = item._replace(mode=default_mode)\n                modified = True\n            processed_items.append(final_item)\n\n        if modified:\n            # Update valid_input with the (potentially) modified items tuple.\n            # This ensures the final object reflects applied defaults.\n            valid_input[\"items\"] = tuple(processed_items)\n\n\nvalid_secret_volume = ValidateSecretVolume()\n\n\nclass ValidateProjectedSAVolume(Validator):\n    config_class = ConfigProjectedSAVolume\n    optional = True\n    defaults = {\n        \"expiration_seconds\": 1800,\n    }\n    validators = {\n        \"container_path\": valid_string,\n        \"audience\": valid_string,\n        \"expiration_seconds\": valid_int,\n    }\n\n\nvalid_projected_sa_volume = ValidateProjectedSAVolume()\n\n\nclass ValidateFieldSelectorSource(Validator):\n    config_class = ConfigFieldSelectorSource\n    validators = {\n        \"field_path\": valid_string,  # k8s field path - e.g., `status.podIP`\n    }\n\n\nvalid_field_selector_source = ValidateFieldSelectorSource()\n\n\ndef _valid_node_affinity_operator(value: str, config_context: ConfigContext) -> str:\n    valid_operators = {\"In\", \"NotIn\", \"Exists\", \"NotExists\", \"Gt\", \"Lt\"}\n    if value not in valid_operators:\n        raise ConfigError(f\"Got {value} as a node affinity operator, expected one of {valid_operators}\")\n\n    return value\n\n\nclass ValidateNodeAffinity(Validator):\n    config_class = ConfigNodeAffinity\n    validators = {\n        \"key\": valid_string,\n        \"operator\": _valid_node_affinity_operator,\n        \"value\": build_list_of_type_validator(valid_string, allow_empty=True),\n    }\n\n\nvalid_node_affinity = ValidateNodeAffinity()\n\n\ndef _valid_when_unsatisfiable(value: str, config_context: ConfigContext) -> str:\n    valid_values = {\"DoNotSchedule\", \"ScheduleAnyway\"}\n    if value not in valid_values:\n        raise ConfigError(f\"Got {value} as a when_unsatisfiable value, expected one of {valid_values}\")\n\n    return value\n\n\ndef _valid_topology_spread_label_selector(value: dict[str, str], config_context: ConfigContext) -> dict[str, str]:\n    if not value:\n        raise ConfigError(\"TopologySpreadConstraints must have a label_selector\")\n\n    # XXX: we probably also want to enforce k8s limits for label lengths and whatnot\n    if not all(isinstance(k, str) for k in value.keys()):\n        raise ConfigError(\"TopologySpreadConstraints label_selector keys must be strings\")\n\n    if not all(isinstance(s, str) for s in value.values()):\n        raise ConfigError(\"TopologySpreadConstraints label_selector values must be strings\")\n\n    return value\n\n\nclass ValidateTopologySpreadConstraints(Validator):\n    config_class = ConfigTopologySpreadConstraints\n    validators = {\n        \"max_skew\": valid_int,\n        \"when_unsatisfiable\": 
_valid_when_unsatisfiable,\n        \"topology_key\": valid_string,\n        \"label_selector\": _valid_topology_spread_label_selector,\n    }\n\n\nvalid_topology_spread_constraints = ValidateTopologySpreadConstraints()\n\n\nclass ValidateSSHOptions(Validator):\n    \"\"\"Validate SSH options.\"\"\"\n\n    config_class = ConfigSSHOptions\n    optional = True\n    defaults = {\n        \"agent\": False,\n        \"identities\": (),\n        \"known_hosts_file\": None,\n        \"connect_timeout\": 30,\n        \"idle_connection_timeout\": 3600,\n        \"jitter_min_load\": 4,\n        \"jitter_max_delay\": 20,\n        \"jitter_load_factor\": 1,\n    }\n\n    validators = {\n        \"agent\": valid_bool,\n        # TODO: move this config and validations outside master namespace\n        # 'identities':               build_list_of_type_validator(\n        #                                 valid_identity_file, allow_empty=True),\n        \"identities\": build_list_of_type_validator(\n            valid_string,\n            allow_empty=True,\n        ),\n        # 'known_hosts_file':         valid_known_hosts_file,\n        \"known_hosts_file\": valid_string,\n        \"connect_timeout\": config_utils.valid_int,\n        \"idle_connection_timeout\": config_utils.valid_int,\n        \"jitter_min_load\": config_utils.valid_int,\n        \"jitter_max_delay\": config_utils.valid_int,\n        \"jitter_load_factor\": config_utils.valid_int,\n    }\n\n    def post_validation(self, valid_input, config_context):\n        if config_context.partial:\n            return\n\n        if valid_input[\"agent\"] and \"SSH_AUTH_SOCK\" not in os.environ:\n            raise ConfigError(\"No SSH Agent available ($SSH_AUTH_SOCK)\")\n\n\nvalid_ssh_options = ValidateSSHOptions()\n\n\nclass ValidateNode(Validator):\n    config_class = schema.ConfigNode\n    validators = {\n        \"name\": config_utils.valid_identifier,\n        \"username\": config_utils.valid_string,\n        \"hostname\": config_utils.valid_string,\n        \"port\": config_utils.valid_int,\n    }\n\n    defaults = {\n        \"port\": 22,\n        \"username\": getpass.getuser(),\n    }\n\n    def do_shortcut(self, node):\n        \"\"\"Nodes can be specified with just a hostname string.\"\"\"\n        if isinstance(node, str):\n            return schema.ConfigNode(hostname=node, name=node, **self.defaults)\n\n    def set_defaults(self, output_dict, config_context):\n        super().set_defaults(output_dict, config_context)\n        output_dict.setdefault(\"name\", output_dict[\"hostname\"])\n\n\nvalid_node = ValidateNode()\n\n\nclass ValidateNodePool(Validator):\n    config_class = schema.ConfigNodePool\n    validators = {\n        \"name\": valid_identifier,\n        \"nodes\": build_list_of_type_validator(valid_identifier),\n    }\n\n    def cast(self, node_pool, _context):\n        if isinstance(node_pool, list):\n            node_pool = dict(nodes=node_pool)\n        return node_pool\n\n    def set_defaults(self, node_pool, _):\n        node_pool.setdefault(\"name\", \"_\".join(node_pool[\"nodes\"]))\n\n\nvalid_node_pool = ValidateNodePool()\n\n\ndef valid_action_name(value, config_context):\n    valid_identifier(value, config_context)\n    if value == CLEANUP_ACTION_NAME:\n        error_msg = \"Invalid action name %s at %s\"\n        raise ConfigError(error_msg % (value, config_context.path))\n    return value\n\n\naction_context = command_context.build_filled_context(\n    command_context.JobContext,\n    command_context.JobRunContext,\n  
  command_context.ActionRunContext,\n)\n\n\ndef valid_mesos_action(action, config_context):\n    required_keys = {\"cpus\", \"mem\", \"docker_image\"}\n    if action.get(\"executor\") == schema.ExecutorTypes.mesos.value:\n        missing_keys = required_keys - set(action.keys())\n        if missing_keys:\n            raise ConfigError(\n                \"Mesos executor for action {id} is missing these required keys: {keys}\".format(\n                    id=action[\"name\"],\n                    keys=missing_keys,\n                ),\n            )\n\n\ndef valid_kubernetes_action(action, config_context):\n    required_keys = {\"cpus\", \"mem\", \"docker_image\"}\n    if action.get(\"executor\") == schema.ExecutorTypes.kubernetes.value:\n        missing_keys = required_keys - set(action.keys())\n        if missing_keys:\n            raise ConfigError(\n                \"Kubernetes executor for action {id} is missing these required keys: {keys}\".format(\n                    id=action[\"name\"],\n                    keys=missing_keys,\n                ),\n            )\n\n\ndef valid_trigger_downstreams(trigger_downstreams, config_context):\n    if isinstance(trigger_downstreams, (type(None), bool, dict)):\n        return trigger_downstreams\n    raise ConfigError(\"must be None, bool or dict\")\n\n\nclass ValidateAction(Validator):\n    \"\"\"Validate an action.\"\"\"\n\n    config_class = ConfigAction\n\n    defaults = {\n        \"node\": None,\n        \"requires\": (),\n        \"retries\": None,\n        \"retries_delay\": None,\n        \"expected_runtime\": datetime.timedelta(hours=24),\n        \"executor\": schema.ExecutorTypes.ssh.value,\n        \"cpus\": None,\n        \"mem\": None,\n        \"disk\": None,\n        \"cap_add\": None,\n        \"cap_drop\": None,\n        \"constraints\": None,\n        \"docker_image\": None,\n        \"docker_parameters\": None,\n        \"env\": None,\n        \"secret_env\": None,\n        \"secret_volumes\": None,\n        \"projected_sa_volumes\": None,\n        \"field_selector_env\": None,\n        \"extra_volumes\": None,\n        \"trigger_downstreams\": None,\n        \"triggered_by\": None,\n        \"on_upstream_rerun\": None,\n        \"trigger_timeout\": None,\n        \"node_selectors\": None,\n        \"node_affinities\": None,\n        \"topology_spread_constraints\": None,\n        \"labels\": None,\n        \"annotations\": None,\n        \"service_account_name\": None,\n        \"ports\": None,\n    }\n    requires = build_list_of_type_validator(\n        valid_action_name,\n        allow_empty=True,\n    )\n    validators = {\n        \"name\": valid_action_name,\n        \"command\": build_format_string_validator(action_context),\n        \"node\": valid_node_name,\n        \"requires\": requires,\n        \"retries\": valid_int,\n        \"retries_delay\": config_utils.valid_time_delta,\n        \"expected_runtime\": config_utils.valid_time_delta,\n        \"executor\": config_utils.build_real_enum_validator(schema.ExecutorTypes),\n        \"cpus\": valid_float,\n        \"mem\": valid_float,\n        \"disk\": valid_float,\n        \"cap_add\": valid_list,\n        \"cap_drop\": valid_list,\n        \"constraints\": build_list_of_type_validator(valid_constraint, allow_empty=True),\n        \"docker_image\": valid_string,\n        \"docker_parameters\": build_list_of_type_validator(\n            valid_docker_parameter,\n            allow_empty=True,\n        ),\n        \"env\": valid_dict,\n        \"secret_env\": 
build_dict_value_validator(valid_secret_source),\n        \"secret_volumes\": build_list_of_type_validator(valid_secret_volume, allow_empty=True),\n        \"projected_sa_volumes\": build_list_of_type_validator(valid_projected_sa_volume, allow_empty=True),\n        \"field_selector_env\": build_dict_value_validator(valid_field_selector_source),\n        \"extra_volumes\": build_list_of_type_validator(valid_volume, allow_empty=True),\n        \"trigger_downstreams\": valid_trigger_downstreams,\n        \"triggered_by\": build_list_of_type_validator(valid_string, allow_empty=True),\n        \"on_upstream_rerun\": config_utils.build_real_enum_validator(schema.ActionOnRerun),\n        \"trigger_timeout\": config_utils.valid_time_delta,\n        \"node_selectors\": valid_dict,\n        \"node_affinities\": build_list_of_type_validator(valid_node_affinity, allow_empty=True),\n        \"topology_spread_constraints\": build_list_of_type_validator(\n            valid_topology_spread_constraints, allow_empty=True\n        ),\n        \"labels\": valid_dict,\n        \"annotations\": valid_dict,\n        \"service_account_name\": valid_string,\n        \"ports\": build_list_of_type_validator(valid_int, allow_empty=True),\n    }\n\n    def post_validation(self, action, config_context):\n        valid_mesos_action(action, config_context)\n        valid_kubernetes_action(action, config_context)\n\n\nvalid_action = ValidateAction()\n\n\ndef valid_cleanup_action_name(value, config_context):\n    if value != CLEANUP_ACTION_NAME:\n        msg = \"Cleanup actions cannot have custom names %s.%s\"\n        raise ConfigError(msg % (config_context.path, value))\n    return CLEANUP_ACTION_NAME\n\n\nclass ValidateCleanupAction(Validator):\n    config_class = ConfigCleanupAction\n    defaults = {\n        \"node\": None,\n        \"name\": CLEANUP_ACTION_NAME,\n        \"retries\": None,\n        \"retries_delay\": None,\n        \"expected_runtime\": datetime.timedelta(hours=24),\n        \"executor\": schema.ExecutorTypes.ssh.value,\n        \"cpus\": None,\n        \"mem\": None,\n        \"disk\": None,\n        \"cap_add\": None,\n        \"cap_drop\": None,\n        \"constraints\": None,\n        \"docker_image\": None,\n        \"docker_parameters\": None,\n        \"env\": None,\n        \"secret_env\": None,\n        \"secret_volumes\": None,\n        \"projected_sa_volumes\": None,\n        \"field_selector_env\": None,\n        \"extra_volumes\": None,\n        \"trigger_downstreams\": None,\n        \"triggered_by\": None,\n        \"on_upstream_rerun\": None,\n        \"trigger_timeout\": None,\n        \"node_selectors\": None,\n        \"node_affinities\": None,\n        \"topology_spread_constraints\": None,\n        \"labels\": None,\n        \"annotations\": None,\n        \"service_account_name\": None,\n        \"ports\": None,\n    }\n    validators = {\n        \"name\": valid_cleanup_action_name,\n        \"command\": build_format_string_validator(action_context),\n        \"node\": valid_node_name,\n        \"retries\": valid_int,\n        \"retries_delay\": config_utils.valid_time_delta,\n        \"expected_runtime\": config_utils.valid_time_delta,\n        \"executor\": config_utils.build_real_enum_validator(schema.ExecutorTypes),\n        \"cpus\": valid_float,\n        \"mem\": valid_float,\n        \"disk\": valid_float,\n        \"cap_add\": valid_list,\n        \"cap_drop\": valid_list,\n        \"constraints\": build_list_of_type_validator(valid_constraint, allow_empty=True),\n        \"docker_image\": valid_string,\n        \"docker_parameters\": build_list_of_type_validator(\n            valid_docker_parameter,\n            allow_empty=True,\n        ),\n        \"env\": valid_dict,\n        \"secret_env\": build_dict_value_validator(valid_secret_source),\n        \"secret_volumes\": build_list_of_type_validator(valid_secret_volume, allow_empty=True),\n        \"projected_sa_volumes\": build_list_of_type_validator(valid_projected_sa_volume, allow_empty=True),\n        \"field_selector_env\": build_dict_value_validator(valid_field_selector_source),\n        \"extra_volumes\": build_list_of_type_validator(valid_volume, allow_empty=True),\n        \"trigger_downstreams\": valid_trigger_downstreams,\n        \"triggered_by\": build_list_of_type_validator(valid_string, allow_empty=True),\n        \"on_upstream_rerun\": config_utils.build_real_enum_validator(schema.ActionOnRerun),\n        \"trigger_timeout\": config_utils.valid_time_delta,\n        \"node_selectors\": valid_dict,\n        \"node_affinities\": build_list_of_type_validator(valid_node_affinity, allow_empty=True),\n        \"topology_spread_constraints\": build_list_of_type_validator(\n            valid_topology_spread_constraints, allow_empty=True\n        ),\n        \"labels\": valid_dict,\n        \"annotations\": valid_dict,\n        \"service_account_name\": valid_string,\n        \"ports\": build_list_of_type_validator(valid_int, allow_empty=True),\n    }\n\n    def post_validation(self, action, config_context):\n        valid_mesos_action(action, config_context)\n        valid_kubernetes_action(action, config_context)\n\n\nvalid_cleanup_action = ValidateCleanupAction()\n\n\nclass ValidateJob(Validator):\n    \"\"\"Validate jobs.\"\"\"\n\n    config_class = ConfigJob\n    defaults: dict[str, Any] = {\n        \"run_limit\": 50,\n        \"all_nodes\": False,\n        \"cleanup_action\": None,\n        \"enabled\": True,\n        \"queueing\": True,\n        \"allow_overlap\": False,\n        \"max_runtime\": None,\n        \"monitoring\": {},\n        \"time_zone\": None,\n        \"expected_runtime\": datetime.timedelta(hours=24),\n        \"use_k8s\": False,\n    }\n\n    validators = {\n        \"name\": valid_name_identifier,\n        \"schedule\": valid_schedule,\n        \"run_limit\": valid_int,\n        \"all_nodes\": valid_bool,\n        \"actions\": build_dict_name_validator(valid_action),\n        \"cleanup_action\": valid_cleanup_action,\n        \"node\": valid_node_name,\n        \"queueing\": valid_bool,\n        \"enabled\": valid_bool,\n        \"allow_overlap\": valid_bool,\n        \"max_runtime\": config_utils.valid_time_delta,\n        \"monitoring\": valid_dict,\n        \"time_zone\": valid_time_zone,\n        \"expected_runtime\": config_utils.valid_time_delta,\n        \"use_k8s\": valid_bool,\n    }\n\n    def cast(self, in_dict, config_context):\n        in_dict[\"namespace\"] = config_context.namespace\n        return in_dict\n\n    # TODO: extract common code to a util function\n    def _validate_dependencies(\n        self,\n        job: dict[str, Any],  # TODO: create TypedDict for this\n        # TODO: setup UniqueNameDict for use with mypy so that the following line\n        # is not a lie\n        actions: dict[str, ConfigAction],\n        base_action: ConfigAction,\n        current_action: ConfigAction | None = None,\n        stack: list[str] | None = None,\n        already_validated: set[tuple[str, str]] | None = None,\n    ) -> None:\n        \"\"\"Check for 
circular or misspelled dependencies.\"\"\"\n        # for large graphs, we can end up validating the same jobs/actions repeatedly\n        # this is unnecessary and we can skip a ton of work simply by caching what we've\n        # already validated\n        already_validated = already_validated or set()\n        current_action = current_action or base_action\n        validated = (job[\"name\"], current_action.name)\n        if validated in already_validated:\n            return None\n        else:\n            already_validated.add(validated)\n\n        stack = stack or []\n        stack.append(current_action.name)\n        for dep in current_action.requires:\n            if dep == base_action.name and len(stack) > 0:\n                msg = \"Circular dependency in job.%s: %s\"\n                raise ConfigError(msg % (job[\"name\"], \" -> \".join(stack)))\n            if dep not in actions:\n                raise ConfigError(\n                    'Action jobs.%s.%s has a dependency \"%s\"'\n                    \" that is not in the same job!\" % (job[\"name\"], current_action.name, dep),\n                )\n            self._validate_dependencies(job, actions, base_action, actions[dep], stack, already_validated)\n\n        stack.pop()\n\n    def post_validation(self, job, config_context):\n        \"\"\"Validate actions for the job.\"\"\"\n        for _, action in job[\"actions\"].items():\n            self._validate_dependencies(job, job[\"actions\"], action)\n\n\nvalid_job = ValidateJob()\n\n\nclass ValidateActionRunner(Validator):\n    config_class = schema.ConfigActionRunner\n    optional = True\n    defaults = {\n        \"runner_type\": None,\n        \"remote_exec_path\": \"\",\n        \"remote_status_path\": \"/tmp\",\n    }\n\n    validators = {\n        \"runner_type\": config_utils.build_real_enum_validator(schema.ActionRunnerTypes),\n        \"remote_status_path\": valid_string,\n        \"remote_exec_path\": valid_string,\n    }\n\n\nclass ValidateStatePersistence(Validator):\n    config_class = schema.ConfigState\n    defaults = {\n        \"buffer_size\": 1,\n        \"dynamodb_region\": None,\n        \"table_name\": None,\n        \"max_transact_write_items\": 8,\n    }\n\n    validators = {\n        \"name\": valid_string,\n        \"store_type\": config_utils.build_real_enum_validator(schema.StatePersistenceTypes),\n        \"buffer_size\": valid_int,\n        \"dynamodb_region\": valid_string,\n        \"table_name\": valid_string,\n        \"max_transact_write_items\": valid_int,\n    }\n\n    def post_validation(self, config, config_context):\n        buffer_size = config.get(\"buffer_size\")\n\n        if buffer_size and buffer_size < 1:\n            path = config_context.path\n            raise ConfigError(\"%s buffer_size must be >= 1.\" % path)\n\n        store_type = config.get(\"store_type\")\n\n        if store_type == schema.StatePersistenceTypes.dynamodb.value:\n            if not config.get(\"table_name\"):\n                raise ConfigError(f\"{config_context.path} table_name is required when store_type is 'dynamodb'\")\n            if not config.get(\"dynamodb_region\"):\n                raise ConfigError(f\"{config_context.path} dynamodb_region is required when store_type is 'dynamodb'\")\n\n            max_transact = config.get(\"max_transact_write_items\")\n\n            # Upper bound is based on boto3 transact_write_items limit\n            if not 1 <= max_transact <= 100:\n                raise ConfigError(\n                    
f\"{config_context.path} max_transact_write_items must be between 1 and 100, got {max_transact}\"\n                )\n\n\nvalid_state_persistence = ValidateStatePersistence()\n\n\nclass ValidateMesos(Validator):\n    config_class = ConfigMesos\n    optional = True\n    defaults = {\n        \"master_address\": None,\n        \"master_port\": 5050,\n        \"secret_file\": None,\n        \"role\": \"*\",\n        \"principal\": \"tron\",\n        \"enabled\": False,\n        \"default_volumes\": (),\n        \"dockercfg_location\": None,\n        \"offer_timeout\": 300,\n    }\n\n    validators = {\n        \"master_address\": valid_master_address,\n        \"master_port\": valid_int,\n        \"secret_file\": valid_string,\n        \"role\": valid_string,\n        \"enabled\": valid_bool,\n        \"default_volumes\": build_list_of_type_validator(valid_volume, allow_empty=True),\n        \"dockercfg_location\": valid_string,\n        \"offer_timeout\": valid_int,\n    }\n\n\nvalid_mesos_options = ValidateMesos()\n\n\nclass ValidateKubernetes(Validator):\n    config_class = ConfigKubernetes\n    optional = True\n    defaults = {\n        \"kubeconfig_path\": None,\n        \"enabled\": False,\n        \"non_retryable_exit_codes\": (),\n        \"default_volumes\": (),\n    }\n\n    validators = {\n        \"kubeconfig_path\": valid_string,\n        \"enabled\": valid_bool,\n        \"non_retryable_exit_codes\": build_list_of_type_validator(valid_exit_code, allow_empty=True),\n        \"default_volumes\": build_list_of_type_validator(valid_volume, allow_empty=True),\n        \"watcher_kubeconfig_paths\": build_list_of_type_validator(valid_string, allow_empty=True),\n    }\n\n\nvalid_kubernetes_options = ValidateKubernetes()\n\n\ndef validate_jobs(config, config_context):\n    \"\"\"Validate jobs\"\"\"\n    valid_jobs = build_dict_name_validator(valid_job, allow_empty=True)\n    validation = [(\"jobs\", valid_jobs)]\n\n    for config_name, valid in validation:\n        child_context = config_context.build_child_context(config_name)\n        config[config_name] = valid(config.get(config_name, []), child_context)\n\n    fmt_string = \"Job names must be unique %s\"\n    config_utils.unique_names(fmt_string, config[\"jobs\"])\n\n\nDEFAULT_STATE_PERSISTENCE = ConfigState(\n    name=\"tron_state\",\n    store_type=\"shelve\",\n    buffer_size=1,\n)\nDEFAULT_NODE = ValidateNode().do_shortcut(node=\"localhost\")\n\n\nclass ValidateConfig(Validator):\n    \"\"\"Given a parsed config file (should be only basic literals and\n    containers), return an immutable, fully populated series of namedtuples and\n    dicts with all defaults filled in, all valid values, and no unused\n    values. 
Throws a ConfigError if any part of the input dict is invalid.\n    \"\"\"\n\n    config_class = TronConfig\n    defaults = {\n        \"action_runner\": {},\n        \"output_stream_dir\": None,\n        \"command_context\": {},\n        \"ssh_options\": ConfigSSHOptions(**ValidateSSHOptions.defaults),\n        \"time_zone\": None,\n        \"state_persistence\": DEFAULT_STATE_PERSISTENCE,\n        \"nodes\": {\n            \"localhost\": DEFAULT_NODE,\n        },\n        \"node_pools\": {},\n        \"jobs\": (),\n        \"mesos_options\": ConfigMesos(**ValidateMesos.defaults),\n        \"k8s_options\": ConfigKubernetes(**ValidateKubernetes.defaults),\n        \"eventbus_enabled\": None,\n        \"read_json\": False,\n    }\n    node_pools = build_dict_name_validator(valid_node_pool, allow_empty=True)\n    nodes = build_dict_name_validator(valid_node, allow_empty=True)\n    validators = {\n        \"action_runner\": ValidateActionRunner(),\n        \"output_stream_dir\": valid_output_stream_dir,\n        \"command_context\": valid_command_context,\n        \"ssh_options\": valid_ssh_options,\n        \"time_zone\": valid_time_zone,\n        \"state_persistence\": valid_state_persistence,\n        \"nodes\": nodes,\n        \"node_pools\": node_pools,\n        \"mesos_options\": valid_mesos_options,\n        \"k8s_options\": valid_kubernetes_options,\n        \"eventbus_enabled\": valid_bool,\n        \"read_json\": valid_bool,\n    }\n    optional = False\n\n    def validate_node_pool_nodes(self, config):\n        \"\"\"Validate that each node in a node_pool is in fact a node, and not\n        another pool.\n        \"\"\"\n        all_node_names = set(config[\"nodes\"])\n        for node_pool in config[\"node_pools\"].values():\n            invalid_names = set(node_pool.nodes) - all_node_names\n            if invalid_names:\n                msg = \"NodePool %s contains other NodePools: \" % node_pool.name\n                raise ConfigError(msg + \",\".join(invalid_names))\n\n    def post_validation(self, config, _):\n        \"\"\"Validate a non-named config.\"\"\"\n        node_names = config_utils.unique_names(\n            \"Node and NodePool names must be unique %s\",\n            config[\"nodes\"],\n            config.get(\"node_pools\", []),\n        )\n\n        if config.get(\"node_pools\"):\n            self.validate_node_pool_nodes(config)\n\n        config_context = ConfigContext(\n            \"config\",\n            node_names,\n            config.get(\"command_context\"),\n            MASTER_NAMESPACE,\n        )\n        validate_jobs(config, config_context)\n\n\nclass ValidateNamedConfig(Validator):\n    \"\"\"A shorter validator for named configurations, which allow for\n    jobs to be defined as configuration fragments that\n    are, in turn, reconciled by Tron.\n    \"\"\"\n\n    config_class = NamedTronConfig\n    type_name = \"NamedConfigFragment\"\n    defaults = {\n        \"jobs\": (),\n    }\n\n    optional = False\n\n    def post_validation(self, config, config_context):\n        validate_jobs(config, config_context)\n\n\nvalid_config = ValidateConfig()\nvalid_named_config = ValidateNamedConfig()\n\n\ndef validate_fragment(name, fragment, master_config=None):\n    \"\"\"Validate a fragment with a partial context.\"\"\"\n    config_context = PartialConfigContext(name, name)\n    if name == MASTER_NAMESPACE:\n        return valid_config(fragment, config_context=config_context)\n    if master_config is None:\n        return valid_named_config(fragment, 
config_context=config_context)\n\n    config_mapping = {MASTER_NAMESPACE: master_config, name: fragment}\n    for config_name, config in validate_config_mapping(config_mapping):\n        if config_name == name:\n            return config\n\n\ndef get_nodes_from_master_namespace(master):\n    return set(itertools.chain(master.nodes, master.node_pools))\n\n\ndef validate_config_mapping(config_mapping):\n    if MASTER_NAMESPACE not in config_mapping:\n        msg = \"A config mapping requires a %s namespace\"\n        raise ConfigError(msg % MASTER_NAMESPACE)\n\n    # we mutate this mapping - so let's make sure that we're making a copy\n    # in case the passed-in mapping is used elsewhere\n    config_mapping_to_validate = deepcopy(config_mapping)\n    master = valid_config(config_mapping_to_validate.pop(MASTER_NAMESPACE))\n    nodes = get_nodes_from_master_namespace(master)\n    yield MASTER_NAMESPACE, master\n\n    for name, content in config_mapping_to_validate.items():\n        context = ConfigContext(\n            name,\n            nodes,\n            master.command_context,\n            name,\n        )\n        yield name, valid_named_config(content, config_context=context)\n\n\nclass ConfigContainer:\n    \"\"\"A container around configuration fragments (and master).\"\"\"\n\n    def __init__(self, config_mapping):\n        self.configs = config_mapping\n\n    def items(self):\n        return self.configs.items()\n\n    @classmethod\n    def create(cls, config_mapping):\n        return cls(dict(validate_config_mapping(config_mapping)))\n\n    # TODO: DRY with get_jobs()\n    def get_job_names(self):\n        job_names = []\n        for config in self.configs.values():\n            job_names.extend(config.jobs)\n        return job_names\n\n    def get_jobs(self):\n        return dict(\n            itertools.chain.from_iterable(config.jobs.items() for _, config in self.configs.items()),\n        )\n\n    def get_master(self):\n        return self.configs[MASTER_NAMESPACE]\n\n    def get_node_names(self):\n        return get_nodes_from_master_namespace(self.get_master())\n\n    def __getitem__(self, name):\n        return self.configs[name]\n\n    def __contains__(self, name):\n        return name in self.configs\n"
  },
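A minimal usage sketch of two of the standalone validators defined in tron/config/config_parse.py above; it is illustrative only (the file name is hypothetical and not part of the repo), and it assumes a tron development environment with the package's dependencies installed. NullConfigContext comes from tron/config/config_utils.py and stands in for a full ConfigContext.

# sketch_config_parse_validators.py -- hypothetical, for illustration only
from tron.config.config_parse import valid_master_address
from tron.config.config_parse import valid_permission_mode
from tron.config.config_utils import NullConfigContext

# A bare host is normalized to an http:// URL; non-HTTP schemes or any
# path/query/fragment component raise ConfigError instead.
assert valid_master_address("mesos-master.example.com", NullConfigContext) == "http://mesos-master.example.com"

# Permission modes are parsed as octal (string or int) and returned as the
# original string; values outside 0..4095 (decimal) raise ConfigError.
assert valid_permission_mode("0644", NullConfigContext) == "0644"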
  {
    "path": "tron/config/config_utils.py",
    "content": "\"\"\"Utilities used for configuration parsing and validation.\"\"\"\nimport datetime\nimport functools\nimport itertools\nimport re\nfrom string import Formatter\n\nfrom tron.config import ConfigError\nfrom tron.config.schema import MASTER_NAMESPACE\n\nMAX_IDENTIFIER_LENGTH = 255\nIDENTIFIER_RE = re.compile(r\"^[A-Za-z_][\\w\\-]{0,254}$\")\n\n\nclass StringFormatter(Formatter):\n    def __init__(self, context=None):\n        Formatter.__init__(self)\n        self.context = context\n\n    def get_value(self, key, args, kwds):\n        if isinstance(key, str):\n            try:\n                return kwds[key]\n            except KeyError:\n                return self.context[key]\n        else:\n            return Formatter.get_value(key, args, kwds)\n\n\nclass UniqueNameDict(dict):\n    \"\"\"A dict like object that throws a ConfigError if a key exists and\n    __setitem__ is called to change the value of that key.\n\n     fmt_string - format string used to create an error message, expects a\n                  single format argument of 'key'\n    \"\"\"\n\n    def __init__(self, fmt_string):\n        super().__init__()\n        self.fmt_string = fmt_string\n\n    def __setitem__(self, key, value):\n        if key in self:\n            raise ConfigError(self.fmt_string % key)\n        super().__setitem__(key, value)\n\n\ndef unique_names(fmt_string, *seqs):\n    \"\"\"Validate that each object in all sequences has a unique name.\"\"\"\n    name_dict = UniqueNameDict(fmt_string)\n    for item in itertools.chain.from_iterable(seqs):\n        name_dict[item] = True\n    return name_dict\n\n\ndef build_type_validator(validator, error_fmt):\n    \"\"\"Create a validator function using `validator` to validate the value.\n    validator - a function which takes a single argument `value`\n    error_fmt - a string which accepts two format variables (path, value)\n\n    Returns a function func(value, config_context) where\n        value - the value to validate\n        config_context - a ConfigContext object\n        Returns True if the value is valid\n    \"\"\"\n\n    def f(value, config_context):\n        if not validator(value):\n            raise ConfigError(error_fmt % (config_context.path, value))\n        return value\n\n    return f\n\n\ndef valid_number(type_func, value, config_context, allow_negative=False):\n    path = config_context.path\n    try:\n        value = type_func(value)\n    except TypeError:\n        name = type_func.__name__\n        raise ConfigError(f\"Value at {path} is not an {name}: {value}\")\n\n    if value < 0 and not allow_negative:\n        raise ConfigError(\"%s must be a positive int.\" % path)\n\n    return value\n\n\nvalid_exit_code = functools.partial(valid_number, int, allow_negative=True)\nvalid_int = functools.partial(valid_number, int)\nvalid_float = functools.partial(valid_number, float)\n\nvalid_identifier = build_type_validator(\n    lambda s: isinstance(s, str) and IDENTIFIER_RE.match(s),\n    \"Identifier at %s is not a valid identifier: %s\",\n)\n\nvalid_list = build_type_validator(\n    lambda s: isinstance(s, list),\n    \"Value at %s is not a list: %s\",\n)\n\nvalid_string = build_type_validator(\n    lambda s: isinstance(s, str),\n    \"Value at %s is not a string: %s\",\n)\n\nvalid_dict = build_type_validator(\n    lambda s: isinstance(s, dict),\n    \"Value at %s is not a dictionary: %s\",\n)\n\nvalid_bool = build_type_validator(\n    lambda s: isinstance(s, bool),\n    \"Value at %s is not a boolean: %s\",\n)\n\n\ndef 
build_enum_validator(enum):\n    enum = set(enum)\n    msg = \"Value at %%s is not in %s: %%s.\" % str(enum)\n    return build_type_validator(enum.__contains__, msg)\n\n\ndef build_real_enum_validator(enum):\n    def enum_validator(value, config_context):\n        try:\n            return enum(value).value\n        except Exception:\n            raise ConfigError(\n                f\"Value at {config_context.path} is not in {enum!r}: {value!r}\",\n            )\n\n    return enum_validator\n\n\ndef valid_time(value, config_context):\n    valid_string(value, config_context)\n    for format in [\"%H:%M\", \"%H:%M:%S\"]:\n        try:\n            return datetime.datetime.strptime(value, format)\n        except ValueError:\n            pass\n    msg = \"Value at %s is not a valid time\"\n    raise ConfigError(msg % config_context.path)\n\n\n# Translations from possible configuration units to the argument to\n# datetime.timedelta\nTIME_INTERVAL_MAPPING = {\n    \"days\": [\"d\", \"day\", \"days\"],\n    \"hours\": [\"h\", \"hr\", \"hrs\", \"hour\", \"hours\"],\n    \"minutes\": [\"m\", \"min\", \"mins\", \"minute\", \"minutes\"],\n    \"seconds\": [\"s\", \"sec\", \"secs\", \"second\", \"seconds\"],\n}\nTIME_INTERVAL_UNITS = {short: long for (long, short_list) in TIME_INTERVAL_MAPPING.items() for short in short_list}\n\nTIME_INTERVAL_RE = re.compile(r\"^\\s*(?P<value>\\d+)\\s*(?P<units>[a-zA-Z]+)\\s*$\")\n\n\ndef valid_time_delta(value, config_context):\n    error_msg = \"Value at %s is not a valid time delta: %s\"\n    matches = TIME_INTERVAL_RE.match(value)\n    if not matches:\n        raise ConfigError(error_msg % (config_context.path, value))\n\n    units = matches.group(\"units\")\n    if units not in TIME_INTERVAL_UNITS:\n        raise ConfigError(error_msg % (config_context.path, value))\n\n    time_spec = {TIME_INTERVAL_UNITS[units]: int(matches.group(\"value\"))}\n    return datetime.timedelta(**time_spec)\n\n\ndef valid_name_identifier(value, config_context):\n    valid_identifier(value, config_context)\n    if config_context.partial:\n        return value\n    return f\"{config_context.namespace}.{value}\"\n\n\ndef build_list_of_type_validator(item_validator, allow_empty=False):\n    \"\"\"Build a validator which validates a list contains items which pass\n    item_validator.\n    \"\"\"\n\n    def validator(value, config_context):\n        if allow_empty and not value:\n            return ()\n        seq = valid_list(value, config_context)\n        if not seq:\n            msg = \"Required non-empty list at %s\"\n            raise ConfigError(msg % config_context.path)\n        return tuple(item_validator(item, config_context) for item in seq)\n\n    return validator\n\n\ndef build_dict_name_validator(item_validator, allow_empty=False):\n    \"\"\"Build a validator which validates a list or dict, and returns a dict.\n    Item validator must expect a \"name\" key, mapped to the key of the dict item\"\"\"\n    valid = build_list_of_type_validator(item_validator, allow_empty)\n\n    def validator(value, config_context):\n        if isinstance(value, dict):\n            value = [\n                {\n                    \"name\": name,\n                    **config,\n                }\n                for name, config in value.items()\n            ]\n\n        msg = \"Duplicate name %%s at %s\" % config_context.path\n        name_dict = UniqueNameDict(msg)\n        for item in valid(value, config_context):\n            name_dict[item.name] = item\n        return name_dict\n\n    return 
validator\n\n\ndef build_dict_value_validator(item_validator, allow_empty=False):\n    \"\"\"Build a validator which validates values of a dict, and returns a dict\"\"\"\n\n    def validator(value, config_context):\n        if not isinstance(value, dict):\n            msg = \"Require a dict of type %s at %s\"\n            raise ConfigError(msg % (item_validator.type_name, config_context.path))\n        result_dict = dict()\n        for k, v in value.items():\n            result_dict[k] = item_validator(v, config_context)\n        return result_dict\n\n    return validator\n\n\nclass ConfigContext:\n    \"\"\"An object to encapsulate the context in a configuration file. Supplied\n    to Validators to perform validation which requires knowledge of\n    configuration outside of the immediate configuration dictionary.\n    \"\"\"\n\n    partial = False\n\n    def __init__(self, path, nodes, command_context, namespace):\n        self.path = path\n        self.nodes = set(nodes or [])\n        self.command_context = command_context or {}\n        self.namespace = namespace\n\n    def build_child_context(self, path):\n        \"\"\"Construct a new ConfigContext based on this one.\"\"\"\n        path = f\"{self.path}.{path}\"\n        args = path, self.nodes, self.command_context, self.namespace\n        return ConfigContext(*args)\n\n\nclass PartialConfigContext:\n    \"\"\"A context object which has only a partial context. It is missing\n    command_context and nodes.  This is likely because it is being used in\n    a named configuration fragment that does not have access to those pieces\n    of the configuration.\n    \"\"\"\n\n    partial = True\n\n    def __init__(self, path, namespace):\n        self.path = path\n        self.namespace = namespace\n\n    def build_child_context(self, path):\n        path = f\"{self.path}.{path}\"\n        return PartialConfigContext(path, self.namespace)\n\n\nclass NullConfigContext:\n    path = \"\"\n    nodes = set()  # type: ignore\n    command_context = {}  # type: ignore\n    namespace = MASTER_NAMESPACE\n    partial = False\n\n    @staticmethod\n    def build_child_context(_):\n        return NullConfigContext\n\n\n# TODO: extract code\nclass Validator:\n    \"\"\"Base class for validating a collection and creating a mutable\n    collection from the source.\n    \"\"\"\n\n    config_class: type | None = None\n    defaults = {}  # type: ignore\n    validators = {}  # type: ignore\n    optional = False\n\n    def validate(self, in_dict, config_context):\n        if self.optional and in_dict is None:\n            return None\n\n        if in_dict is None:\n            raise ConfigError(\"A %s is required.\" % self.type_name)\n\n        shortcut_value = self.do_shortcut(in_dict)\n        if shortcut_value:\n            return shortcut_value\n\n        config_context = self.build_context(in_dict, config_context)\n        in_dict = self.cast(in_dict, config_context)\n        self.validate_required_keys(in_dict)\n        self.validate_extra_keys(in_dict)\n        return self.build_config(in_dict, config_context)\n\n    def __call__(self, in_dict, config_context=NullConfigContext):\n        return self.validate(in_dict, config_context)\n\n    @property\n    def type_name(self):\n        \"\"\"Return a string that represents the config_class being validated.\n        This name is used for error messages, so we strip off the word\n        Config so the name better matches what the user sees in the config.\n        \"\"\"\n        return 
self.config_class.__name__.replace(\"Config\", \"\")\n\n    @property\n    def all_keys(self):\n        return self.config_class.required_keys + self.config_class.optional_keys\n\n    def do_shortcut(self, in_dict):\n        \"\"\"Override if your validator can skip most of the validation by\n        checking this condition.  If this returns a truthy value, the\n        validation will end immediately and return that value.\n        \"\"\"\n        pass\n\n    def cast(self, in_dict, _):\n        \"\"\"If your validator accepts input in different formations, override\n        this method to cast your input into a common format.\n        \"\"\"\n        return in_dict\n\n    def build_context(self, in_dict, config_context):\n        path = self.path_name(in_dict.get(\"name\"))\n        return config_context.build_child_context(path)\n\n    def validate_required_keys(self, in_dict):\n        \"\"\"Check that all required keys are present.\"\"\"\n        missing_keys = set(self.config_class.required_keys) - set(in_dict)\n        if not missing_keys:\n            return\n\n        missing_key_str = \", \".join(missing_keys)\n        if \"name\" in self.all_keys and \"name\" in in_dict:\n            msg = \"%s %s is missing options: %s\"\n            name = in_dict[\"name\"]\n            raise ConfigError(msg % (self.type_name, name, missing_key_str))\n\n        msg = \"Nameless %s is missing options: %s\"\n        raise ConfigError(msg % (self.type_name, missing_key_str))\n\n    def validate_extra_keys(self, in_dict):\n        \"\"\"Check that no unexpected keys are present.\"\"\"\n        extra_keys = set(in_dict) - set(self.all_keys)\n        if not extra_keys:\n            return\n\n        msg = \"Unknown keys in %s %s: %s\"\n        name = in_dict.get(\"name\", \"\")\n        raise ConfigError(msg % (self.type_name, name, \", \".join(extra_keys)))\n\n    def set_defaults(self, output_dict, _config_context):\n        \"\"\"Set any default values for any optional values that were not\n        specified.\n        \"\"\"\n        for key, value in self.defaults.items():\n            output_dict.setdefault(key, value)\n\n    def path_name(self, name=None):\n        return f\"{self.type_name}.{name}\" if name else self.type_name\n\n    def post_validation(self, valid_input, config_context):\n        \"\"\"Hook to perform additional validation steps after key validation\n        completes.\n        \"\"\"\n        pass\n\n    def build_config(self, in_dict, config_context):\n        \"\"\"Construct the configuration by validating the contents, setting\n        defaults, and returning an instance of the config_class.\n        \"\"\"\n        output_dict = self.validate_contents(in_dict, config_context)\n        self.set_defaults(output_dict, config_context)\n        self.post_validation(output_dict, config_context)\n\n        return self.config_class(**output_dict)\n\n    def validate_contents(self, input, config_context):\n        \"\"\"Override this to validate each value in the input.\"\"\"\n        valid_input = {}\n        for key, value in input.items():\n            if key in self.validators:\n                child_context = config_context.build_child_context(key)\n                valid_input[key] = self.validators[key](value, child_context)\n            else:\n                valid_input[key] = value\n        return valid_input\n"
  },
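A small sketch of the generic helpers in tron/config/config_utils.py above; again hypothetical and not a repo file, relying only on behavior visible in that module.

# sketch_config_utils.py -- hypothetical, for illustration only
import datetime

from tron.config.config_utils import build_list_of_type_validator
from tron.config.config_utils import NullConfigContext
from tron.config.config_utils import valid_identifier
from tron.config.config_utils import valid_time_delta

# "30 mins" matches TIME_INTERVAL_RE; the unit is mapped through
# TIME_INTERVAL_UNITS to the datetime.timedelta keyword "minutes".
assert valid_time_delta("30 mins", NullConfigContext) == datetime.timedelta(minutes=30)

# build_list_of_type_validator() runs the item validator over every element and
# returns a tuple; an empty list raises ConfigError unless allow_empty=True.
valid_names = build_list_of_type_validator(valid_identifier)
assert valid_names(["batch_box", "stage_box"], NullConfigContext) == ("batch_box", "stage_box")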
  {
    "path": "tron/config/manager.py",
    "content": "import hashlib\nimport logging\nimport os\nfrom copy import deepcopy\n\nfrom tron import yaml\nfrom tron.config import config_parse\nfrom tron.config import ConfigError\nfrom tron.config import schema\nfrom tron.core.jobgraph import JobGraph\nfrom tron.utils import maybe_decode\nfrom tron.utils import maybe_encode\n\nlog = logging.getLogger(__name__)\n\n\ndef from_string(content):\n    try:\n        return yaml.safe_load(content)\n    except yaml.yaml.error.YAMLError as e:\n        raise ConfigError(\"Invalid config format: %s\" % str(e))\n\n\ndef write(path, content):\n    with open(path, \"w\") as fh:\n        yaml.dump(content, fh)\n\n\ndef read(path):\n    with open(path) as fh:\n        return from_string(fh)\n\n\ndef write_raw(path, content):\n    with open(path, \"w\") as fh:\n        fh.write(\n            maybe_decode(content)\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n\n\ndef read_raw(path: str) -> str:\n    with open(path) as fh:\n        return fh.read()\n\n\ndef hash_digest(content: str | bytes) -> str:\n    return hashlib.sha1(\n        maybe_encode(content)\n    ).hexdigest()  # TODO: TRON-2293 maybe_encode is a relic of Python2->Python3 migration. Remove it.\n\n\nclass ManifestFile:\n    \"\"\"Manage the manifest file, which tracks name to filename.\"\"\"\n\n    MANIFEST_FILENAME = \"_manifest.yaml\"\n\n    def __init__(self, path):\n        self.filename = os.path.join(path, self.MANIFEST_FILENAME)\n\n    def create(self):\n        if os.path.isfile(self.filename):\n            msg = \"Refusing to create manifest. File %s exists.\"\n            log.info(msg % self.filename)\n            return\n\n        write(self.filename, {})\n\n    def add(self, name, filename):\n        manifest = read(self.filename)\n        manifest[name] = filename\n        write(self.filename, manifest)\n\n    def delete(self, name):\n        manifest = read(self.filename)\n        if name not in manifest:\n            msg = \"Namespace %s does not exist in manifest, cannot delete.\"\n            log.info(msg % name)\n            return\n\n        del manifest[name]\n        write(self.filename, manifest)\n\n    def get_file_mapping(self):\n        return read(self.filename)\n\n    def get_file_name(self, name):\n        return self.get_file_mapping().get(name)\n\n    def __contains__(self, name):\n        return name in self.get_file_mapping()\n\n\nclass ConfigManager:\n    \"\"\"Read, load and write configuration.\"\"\"\n\n    DEFAULT_HASH = hash_digest(\"\")\n\n    def __init__(self, config_path, manifest=None):\n        self.config_path = config_path\n        self.manifest = manifest or ManifestFile(config_path)\n        self.name_mapping = None\n\n    def build_file_path(self, name):\n        name = name.replace(\".\", \"_\").replace(os.path.sep, \"_\")\n        return os.path.join(self.config_path, \"%s.yaml\" % name)\n\n    def read_raw_config(self, name: str = schema.MASTER_NAMESPACE) -> str:\n        \"\"\"Read the config file without converting to yaml.\"\"\"\n        filename = self.manifest.get_file_name(name)\n        return read_raw(filename)\n\n    def write_config(self, name: str, content: str) -> None:\n        loaded_content = from_string(content)\n        self.validate_with_fragment(\n            name,\n            content=loaded_content,\n            # TODO: remove this constraint after tron triggers across clusters are supported.\n            should_validate_missing_dependency=False,\n        )\n        # 
validate_with_fragment throws if the updated content is invalid - so if we get here\n        # we know it's safe to reflect the update in our config store\n        self.get_config_name_mapping()[name] = loaded_content\n\n        # ...and then let's also persist the update to disk since memory is temporary, but disk is forever™\n        filename = self.get_filename_from_manifest(name)\n        write_raw(filename, content)\n\n    def delete_config(self, name: str) -> None:\n        filename = self.manifest.get_file_name(name)\n        if not filename:\n            msg = \"Namespace %s does not exist in manifest, cannot delete.\"\n            log.info(msg % name)\n            return\n\n        # to avoid needing to reload from disk on every config load - we need to ensure that\n        # we also persist config deletions into our cache\n        self.get_config_name_mapping().pop(name, None)\n        self.manifest.delete(name)\n        os.remove(filename)\n\n    def get_filename_from_manifest(self, name):\n        def create_filename():\n            filename = self.build_file_path(name)\n            self.manifest.add(name, filename)\n            return filename\n\n        return self.manifest.get_file_name(name) or create_filename()\n\n    def validate_with_fragment(\n        self,\n        name,\n        content,\n        should_validate_missing_dependency=True,\n    ):\n        # NOTE: we deepcopy rather than swap values to keep this a pure function\n        # get_config_name_mapping() returns a shared dict, so this would otherwise\n        # actually update the mapping - which would be unwanted/need to be rolled-back\n        # should validation fail.\n        name_mapping = deepcopy(self.get_config_name_mapping())\n        name_mapping[name] = content\n        try:\n            JobGraph(\n                config_parse.ConfigContainer.create(name_mapping),\n                should_validate_missing_dependency=should_validate_missing_dependency,\n            )\n        except ValueError as e:\n            raise ConfigError(str(e))\n\n    def get_config_name_mapping(self):\n        if self.name_mapping is None:\n            log.info(\"Creating config mapping cache...\")\n            seq = self.manifest.get_file_mapping().items()\n            self.name_mapping = {name: read(filename) for name, filename in seq}\n        return self.name_mapping\n\n    def load(self):\n        \"\"\"Return the fully constructed configuration.\"\"\"\n        log.info(\"Loading full config from %s\" % self.config_path)\n        name_mapping = self.get_config_name_mapping()\n        return config_parse.ConfigContainer.create(name_mapping)\n\n    def get_hash(self, name: str) -> str:\n        \"\"\"Return a hash of the configuration contents for name.\"\"\"\n        if name not in self:\n            return self.DEFAULT_HASH\n\n        if name in self.get_config_name_mapping():\n            # unfortunately, we have the parsed dict in memory.\n            # rather than hit the disk to get the raw string - let's convert\n            # the in-memory dict to a yaml string and hash that to save a couple\n            # ms (in testing, ~3ms over loading from disk and ~1ms over dumping to json :p)\n            # TODO: consider storing the hash alongside the config so that we only calculate\n            # hashes once?\n            return hash_digest(\n                yaml.dump(\n                    self.get_config_name_mapping()[name],\n                    # ensure that the keys are always in a stable order\n                    
sort_keys=True,\n                )\n            )\n\n        # the config for any name should always be in our name mapping\n        # ...but just in case, let's fall back to reading from disk.\n        log.warning(\"%s not found in name mapping - falling back to hashing contents on disk!\" % name)\n        return hash_digest(self.read_raw_config(name))\n\n    def __contains__(self, name):\n        return name in self.manifest\n\n    def get_namespaces(self) -> list[str]:\n        return list(self.manifest.get_file_mapping().keys())\n\n\ndef create_new_config(path, master_content):\n    \"\"\"Create a new configuration directory with master config.\"\"\"\n    os.makedirs(path)\n    manager = ConfigManager(path)\n    manager.manifest.create()\n    filename = manager.get_filename_from_manifest(schema.MASTER_NAMESPACE)\n    write_raw(filename, master_content)\n"
  },
  {
    "path": "tron/config/schedule_parse.py",
    "content": "\"\"\"\nParse and validate scheduler configuration and return immutable structures.\n\"\"\"\nimport calendar\nimport datetime\nimport re\nfrom collections import namedtuple\n\nfrom tron.config import config_utils\nfrom tron.config import ConfigError\nfrom tron.config import schema\nfrom tron.utils import crontab\n\nConfigGenericSchedule = schema.config_object_factory(\n    \"ConfigGenericSchedule\",\n    [\"type\", \"value\"],\n    [\"jitter\"],\n)\n\nConfigGrocScheduler = namedtuple(\n    \"ConfigGrocScheduler\",\n    \"original ordinals weekdays monthdays months timestr jitter\",\n)\n\nConfigCronScheduler = namedtuple(\n    \"ConfigCronScheduler\",\n    \"original minutes hours monthdays months weekdays ordinals jitter\",\n)\n\nConfigDailyScheduler = namedtuple(\n    \"ConfigDailyScheduler\",\n    \"original hour minute second days jitter\",\n)\n\n\nclass ScheduleParseError(ConfigError):\n    pass\n\n\ndef pad_sequence(seq, size, padding=None):\n    \"\"\"Force a sequence to size. Pad with padding if too short, and ignore\n    extra pieces if too long.\"\"\"\n    return (list(seq) + [padding for _ in range(size)])[:size]\n\n\ndef schedule_config_from_string(schedule, config_context):\n    \"\"\"Return a scheduler config object from a string.\"\"\"\n    schedule = schedule.strip()\n    name, schedule_config = pad_sequence(\n        schedule.split(None, 1),\n        2,\n        padding=\"\",\n    )\n    if name not in schedulers:\n        config = ConfigGenericSchedule(\"groc daily\", schedule, jitter=None)\n        return parse_groc_expression(config, config_context)\n\n    config = ConfigGenericSchedule(name, schedule_config, jitter=None)\n    return validate_generic_schedule_config(config, config_context)\n\n\ndef validate_generic_schedule_config(config, config_context):\n    return schedulers[config.type](config, config_context)\n\n\n# TODO: remove in 0.7\ndef schedule_config_from_legacy_dict(schedule, config_context):\n    \"\"\"Support old style schedules as dicts.\"\"\"\n    if \"start_time\" in schedule or \"days\" in schedule:\n        start_time = schedule.get(\"start_time\", \"00:00:00\")\n        days = schedule.get(\"days\", \"\")\n        scheduler_config = f\"{start_time} {days}\"\n        config = ConfigGenericSchedule(\"daily\", scheduler_config, None)\n        return valid_daily_scheduler(config, config_context)\n\n    path = config_context.path\n    raise ConfigError(f\"Unknown scheduler at {path}: {schedule}\")\n\n\ndef valid_schedule(schedule, config_context):\n    if isinstance(schedule, str):\n        return schedule_config_from_string(schedule, config_context)\n\n    if \"type\" not in schedule:\n        return schedule_config_from_legacy_dict(schedule, config_context)\n\n    schedule = ScheduleValidator().validate(schedule, config_context)\n    return validate_generic_schedule_config(schedule, config_context)\n\n\ndef valid_daily_scheduler(config, config_context):\n    \"\"\"Daily scheduler, accepts a time of day and an optional list of days.\"\"\"\n    schedule_config = config.value\n    time_string, days = pad_sequence(schedule_config.split(), 2)\n    time_string = time_string or \"00:00:00\"\n    time_spec = config_utils.valid_time(time_string, config_context)\n    days = config_utils.valid_string(days or \"\", config_context)\n\n    def valid_day(day):\n        if day not in CONVERT_DAYS_INT:\n            raise ConfigError(\n                f\"Unknown day {day} at {config_context.path}\",\n            )\n        return CONVERT_DAYS_INT[day]\n\n  
  original = f\"{time_string} {days}\"\n    weekdays = {valid_day(day) for day in days or ()}\n    return ConfigDailyScheduler(\n        original,\n        time_spec.hour,\n        time_spec.minute,\n        time_spec.second,\n        weekdays,\n        jitter=config.jitter,\n    )\n\n\ndef normalize_weekdays(seq):\n    return seq[6:7] + seq[:6]\n\n\ndef day_canonicalization_map():\n    \"\"\"Build a map of weekday synonym to int index 0-6 inclusive.\"\"\"\n    canon_map = dict()\n\n    # 7-element lists with weekday names in order\n    weekday_lists = [\n        normalize_weekdays(calendar.day_name),\n        normalize_weekdays(calendar.day_abbr),\n        (\n            \"u\",\n            \"m\",\n            \"t\",\n            \"w\",\n            \"r\",\n            \"f\",\n            \"s\",\n        ),\n        (\n            \"su\",\n            \"mo\",\n            \"tu\",\n            \"we\",\n            \"th\",\n            \"fr\",\n            \"sa\",\n        ),\n    ]\n    for day_list in weekday_lists:\n        for day_name_synonym, day_index in zip(day_list, range(7)):\n            canon_map[day_name_synonym] = day_index\n            canon_map[day_name_synonym.lower()] = day_index\n            canon_map[day_name_synonym.upper()] = day_index\n\n    return canon_map\n\n\n# Canonicalize weekday names to integer indices\nCONVERT_DAYS_INT = day_canonicalization_map()  # day name/abbrev => {0123456}\n\n\ndef month_canonicalization_map():\n    \"\"\"Build a map of month synonym to int index 1-12 inclusive.\"\"\"\n    canon_map = dict()\n\n    # calendar stores month data with a useless element in front. cut it off.\n    monthname_lists = (calendar.month_name[1:], calendar.month_abbr[1:])\n    for month_list in monthname_lists:\n        for key, value in zip(month_list, range(1, 13)):\n            canon_map[key] = value\n            canon_map[key.lower()] = value\n    return canon_map\n\n\n# Canonicalize month names to integer indices\n# month name/abbrev => {1 <= k <= 12}\nCONVERT_MONTHS = month_canonicalization_map()\n\n\ndef build_groc_schedule_parser_re():\n    \"\"\"Build a regular expression that matches this:\n\n        (\"every\"|ordinal) (day) [\"of|in\" (monthspec)] ([\"at\"] HH:MM)\n\n    ordinal   - comma-separated list of \"1st\" and so forth\n    days      - comma-separated list of days of the week (for example,\n                \"mon\", \"tuesday\", with both short and long forms being\n                accepted); \"every day\" is equivalent to\n                \"every mon,tue,wed,thu,fri,sat,sun\"\n    monthspec - comma-separated list of month names (for example,\n                \"jan\", \"march\", \"sep\"). 
If omitted, implies every month.\n                You can also say \"month\" to mean every month, as in\n                \"1,8th,15,22nd of month 09:00\".\n    HH:MM     - time of day in 24 hour time.\n\n    This is a slightly more permissive version of Google App Engine's schedule\n    parser, documented here:\n    http://code.google.com/appengine/docs/python/config/cron.html#The_Schedule_Format\n    \"\"\"\n\n    # m|mon|monday|...|day\n    DAY_VALUES = \"|\".join(list(CONVERT_DAYS_INT.keys()) + [\"day\"])\n\n    # jan|january|...|month\n    MONTH_VALUES = \"|\".join(list(CONVERT_MONTHS.keys()) + [\"month\"])\n\n    DATE_SUFFIXES = \"st|nd|rd|th\"\n\n    # every|1st|2nd|3rd (also would accept 3nd, 1rd, 4st)\n    MONTH_DAYS_EXPR = r\"(?P<month_days>every|((\\d+(%s),?)+))?\" % DATE_SUFFIXES\n    DAYS_EXPR = r\"((?P<days>((%s),?)+))?\" % DAY_VALUES\n    MONTHS_EXPR = r\"((in|of)\\s+(?P<months>((%s),?)+))?\" % MONTH_VALUES\n\n    # [at] 00:00\n    TIME_EXPR = r\"((at\\s+)?(?P<time>\\d\\d:\\d\\d))?\"\n\n    DAILY_SCHEDULE_EXPR = \"\".join(\n        [\n            r\"^\",\n            MONTH_DAYS_EXPR,\n            r\"\\s*\",\n            DAYS_EXPR,\n            r\"\\s*\",\n            MONTHS_EXPR,\n            r\"\\s*\",\n            TIME_EXPR,\n            r\"\\s*\",\n            r\"$\",\n        ]\n    )\n    return re.compile(DAILY_SCHEDULE_EXPR)\n\n\n# Matches expressions of the form\n# ``(\"every\"|ordinal) (days) [\"of|in\" (monthspec)] ([\"at\"] HH:MM)``.\n# See :py:func:`daily_schedule_parser_re` for details.\nDAILY_SCHEDULE_RE = build_groc_schedule_parser_re()\n\n\ndef _parse_number(day):\n    return int(\"\".join(c for c in day if c.isdigit()))\n\n\ndef parse_groc_expression(config, config_context):\n    \"\"\"Given an expression of the form in the docstring of\n    daily_schedule_parser_re(), return the parsed values in a\n    ConfigGrocScheduler\n    \"\"\"\n    expression = config.value\n    m = DAILY_SCHEDULE_RE.match(expression.lower())\n    if not m:\n        msg = \"Schedule at %s is not a valid expression: %s\"\n        raise ScheduleParseError(msg % (config_context.path, expression))\n\n    timestr = m.group(\"time\")\n    if timestr is None:\n        timestr = \"00:00\"\n\n    if m.group(\"days\") in (None, \"day\"):\n        weekdays = None\n    else:\n        weekdays = {CONVERT_DAYS_INT[d] for d in m.group(\"days\").split(\",\")}\n\n    monthdays = None\n    ordinals = None\n\n    if m.group(\"month_days\") != \"every\":\n        values = {_parse_number(n) for n in m.group(\"month_days\").split(\",\")}\n        if weekdays is None:\n            monthdays = values\n        else:\n            ordinals = values\n\n    if m.group(\"months\") in (None, \"month\"):\n        months = None\n    else:\n        months = {CONVERT_MONTHS[mo] for mo in m.group(\"months\").split(\",\")}\n\n    return ConfigGrocScheduler(\n        original=expression,\n        ordinals=ordinals,\n        weekdays=weekdays,\n        monthdays=monthdays,\n        months=months,\n        timestr=timestr,\n        jitter=config.jitter,\n    )\n\n\ndef valid_cron_scheduler(config, config_context):\n    \"\"\"Parse a cron schedule.\"\"\"\n    try:\n        crontab_kwargs = crontab.parse_crontab(config.value)\n        return ConfigCronScheduler(\n            original=config.value,\n            jitter=config.jitter,\n            **crontab_kwargs,\n        )\n    except ValueError as e:\n        msg = \"Invalid cron scheduler %s: %s\"\n        raise ConfigError(msg % (config_context.path, e))\n\n\nschedulers = 
{\n    \"daily\": valid_daily_scheduler,\n    \"cron\": valid_cron_scheduler,\n    \"groc daily\": parse_groc_expression,\n}\n\n\nclass ScheduleValidator(config_utils.Validator):\n    \"\"\"Validate the structure of a scheduler config.\"\"\"\n\n    config_class = ConfigGenericSchedule\n    defaults = {\n        \"jitter\": datetime.timedelta(),\n    }\n    validators = {\n        \"type\": config_utils.build_enum_validator(schedulers.keys()),\n        \"jitter\": config_utils.valid_time_delta,\n    }\n"
  },
  {
    "path": "tron/config/schema.py",
    "content": "\"\"\"\nImmutable config schema objects.\nWARNING: it is *NOT* safe to delete these classes (or their attributes) if there are any references to them in DynamoDB until TRON-2200 is complete! (See DAR-2328)\nNOTE: this means that reverting a change that adds a new attribute is not safe :)\n\"\"\"\nfrom collections import namedtuple\nfrom enum import Enum\nfrom typing import Any\nfrom typing import TypeVar\n\nMASTER_NAMESPACE = \"MASTER\"\n\nCLEANUP_ACTION_NAME = \"cleanup\"\n\n\ndef config_object_factory(name, required=None, optional=None):\n    \"\"\"\n    Creates a namedtuple which has two additional attributes:\n        required_keys:\n            all keys required to be set on this configuration object\n        optional keys:\n            optional keys for this configuration object\n\n    The tuple is created from required + optional\n    \"\"\"\n    required = required or []\n    optional = optional or []\n\n    config_class = namedtuple(name, required + optional)\n\n    # make last len(optional) args actually optional\n    config_class.__new__.__defaults__ = (None,) * len(optional)\n    config_class.required_keys = required\n    config_class.optional_keys = optional\n\n    T = TypeVar(\"T\", bound=\"config_class\")  # i'm sorry.\n\n    @classmethod\n    def from_dict(cls: type[T], data: dict[str, Any]) -> T:\n        supported_keys = set(required + optional)\n        filtered_data = {k: v for k, v in data.items() if k in supported_keys}\n        return cls(**filtered_data)\n\n    config_class.from_dict = from_dict\n    return config_class\n\n\nTronConfig = config_object_factory(\n    name=\"TronConfig\",\n    optional=[\n        \"output_stream_dir\",  # str\n        \"action_runner\",  # ConfigActionRunner\n        \"state_persistence\",  # ConfigState\n        \"command_context\",  # dict of str\n        \"ssh_options\",  # ConfigSSHOptions\n        \"time_zone\",  # pytz time zone\n        \"nodes\",  # dict of ConfigNode\n        \"node_pools\",  # dict of ConfigNodePool\n        \"jobs\",  # dict of ConfigJob\n        \"mesos_options\",  # ConfigMesos\n        \"k8s_options\",  # ConfigKubernetes\n        \"eventbus_enabled\",  # bool or None\n        \"read_json\",  # bool, deprecated — accepted but ignored\n    ],\n)\n\nNamedTronConfig = config_object_factory(\n    name=\"NamedTronConfig\",\n    optional=[\n        \"jobs\",\n    ],\n)  # dict of ConfigJob\n\nConfigActionRunner = config_object_factory(\n    \"ConfigActionRunner\",\n    optional=[\"runner_type\", \"remote_status_path\", \"remote_exec_path\"],\n)\n\nConfigSSHOptions = config_object_factory(\n    name=\"ConfigSSHOptions\",\n    optional=[\n        \"agent\",\n        \"identities\",\n        \"known_hosts_file\",\n        \"connect_timeout\",\n        \"idle_connection_timeout\",\n        \"jitter_min_load\",\n        \"jitter_max_delay\",\n        \"jitter_load_factor\",\n    ],\n)\n\nConfigNode = config_object_factory(\n    name=\"ConfigNode\",\n    required=[\"hostname\"],\n    optional=[\"name\", \"username\", \"port\"],\n)\n\nConfigNodePool = config_object_factory(\"ConfigNodePool\", [\"nodes\"], [\"name\"])\n\nConfigState = config_object_factory(\n    name=\"ConfigState\",\n    required=[\n        \"name\",\n        \"store_type\",\n    ],\n    optional=[\n        \"buffer_size\",\n        \"dynamodb_region\",\n        \"table_name\",\n        \"max_transact_write_items\",\n    ],\n)\n\nConfigMesos = config_object_factory(\n    name=\"ConfigMesos\",\n    optional=[\n        
\"master_address\",\n        \"master_port\",\n        \"secret_file\",\n        \"principal\",\n        \"role\",\n        \"enabled\",\n        \"default_volumes\",\n        \"dockercfg_location\",\n        \"offer_timeout\",\n    ],\n)\n\nConfigKubernetes = config_object_factory(\n    name=\"ConfigKubernetes\",\n    optional=[\n        \"kubeconfig_path\",\n        \"enabled\",\n        \"non_retryable_exit_codes\",\n        \"default_volumes\",\n        \"watcher_kubeconfig_paths\",\n    ],\n)\n\nConfigJob = config_object_factory(\n    name=\"ConfigJob\",\n    required=[\n        \"name\",  # str\n        \"node\",  # str\n        \"schedule\",  # Config*Scheduler\n        \"actions\",  # dict of ConfigAction\n        \"namespace\",  # str\n    ],\n    optional=[\n        \"monitoring\",  # dict\n        \"queueing\",  # bool\n        \"run_limit\",  # int\n        \"all_nodes\",  # bool\n        \"cleanup_action\",  # ConfigAction\n        \"enabled\",  # bool\n        \"allow_overlap\",  # bool\n        \"max_runtime\",  # datetime.Timedelta\n        \"time_zone\",  # pytz time zone\n        \"expected_runtime\",  # datetime.Timedelta\n        # TODO: cleanup once we're fully off of Mesos and all non-SSH jobs *only* use k8s\n        \"use_k8s\",  # bool\n    ],\n)\n\nConfigAction = config_object_factory(\n    name=\"ConfigAction\",\n    required=[\n        \"name\",\n        \"command\",\n    ],  # str  # str\n    optional=[\n        \"requires\",  # tuple of str\n        \"node\",  # str\n        \"retries\",  # int\n        \"retries_delay\",  # datetime.Timedelta\n        \"executor\",  # str\n        \"cpus\",  # float\n        \"mem\",  # float\n        \"disk\",  # float\n        \"cap_add\",  # List of str\n        \"cap_drop\",  # List of str\n        \"constraints\",  # List of ConfigConstraint\n        \"docker_image\",  # str\n        \"docker_parameters\",  # List of ConfigParameter\n        \"env\",  # dict\n        \"secret_env\",  # dict of str, ConfigSecretSource\n        \"secret_volumes\",  # List of ConfigSecretVolume\n        \"projected_sa_volumes\",  # List of ConfigProjectedSAVolume\n        \"field_selector_env\",  # dict of str, ConfigFieldSelectorSource\n        \"extra_volumes\",  # List of ConfigVolume\n        \"expected_runtime\",  # datetime.Timedelta\n        \"trigger_downstreams\",  # None, bool or dict\n        \"triggered_by\",  # list or None\n        \"on_upstream_rerun\",  # ActionOnRerun or None\n        \"trigger_timeout\",  # datetime.deltatime or None\n        \"node_selectors\",  # Dict of str, str\n        \"node_affinities\",  # List of ConfigNodeAffinity\n        \"topology_spread_constraints\",  # List of ConfigTopologySpreadConstraints\n        \"idempotent\",  # bool\n        \"labels\",  # Dict of str, str\n        \"annotations\",  # Dict of str, str\n        \"service_account_name\",  # str\n        \"ports\",  # List of int\n    ],\n)\n\nConfigCleanupAction = config_object_factory(\n    name=\"ConfigCleanupAction\",\n    required=[\n        \"command\",\n    ],  # str\n    optional=[\n        \"name\",  # str\n        \"node\",  # str\n        \"retries\",  # int\n        \"retries_delay\",  # datetime.Timedelta\n        \"expected_runtime\",  # datetime.Timedelta\n        \"executor\",  # str\n        \"cpus\",  # float\n        \"mem\",  # float\n        \"disk\",  # float\n        \"cap_add\",  # List of str\n        \"cap_drop\",  # List of str\n        \"constraints\",  # List of ConfigConstraint\n        \"docker_image\",  # 
str\n        \"docker_parameters\",  # List of ConfigParameter\n        \"env\",  # dict\n        \"secret_env\",  # dict of str, ConfigSecretSource\n        \"secret_volumes\",  # List of ConfigSecretVolume\n        \"projected_sa_volumes\",  # List of ConfigProjectedSAVolume\n        \"field_selector_env\",  # dict of str, ConfigFieldSelectorSource\n        \"extra_volumes\",  # List of ConfigVolume\n        \"trigger_downstreams\",  # None, bool or dict\n        \"triggered_by\",  # list or None\n        \"on_upstream_rerun\",  # ActionOnRerun or None\n        \"trigger_timeout\",  # datetime.deltatime or None\n        \"node_selectors\",  # Dict of str, str\n        \"node_affinities\",  # List of ConfigNodeAffinity\n        \"topology_spread_constraints\",  # List of ConfigTopologySpreadConstraints\n        \"idempotent\",  # bool\n        \"labels\",  # Dict of str, str\n        \"annotations\",  # Dict of str, str\n        \"service_account_name\",  # str\n        \"ports\",  # List of int\n    ],\n)\n\nConfigConstraint = config_object_factory(\n    name=\"ConfigConstraint\",\n    required=[\n        \"attribute\",\n        \"operator\",\n        \"value\",\n    ],\n    optional=[],\n)\n\nConfigVolume = config_object_factory(\n    name=\"ConfigVolume\",\n    required=[\n        \"container_path\",\n        \"host_path\",\n    ],\n    optional=[\"mode\"],\n)\n\n\nConfigSecretVolumeItem = config_object_factory(\n    name=\"ConfigSecretVolumeItem\",\n    required=[\n        \"key\",\n        \"path\",\n    ],\n    optional=[\"mode\"],\n)\n\n_ConfigSecretVolume = config_object_factory(\n    name=\"ConfigSecretVolume\",\n    required=[\"secret_volume_name\", \"secret_name\", \"container_path\"],\n    optional=[\"default_mode\", \"items\"],\n)\n\n\nclass ConfigSecretVolume(_ConfigSecretVolume):  # type: ignore\n    def _asdict(self) -> dict:\n        d = super()._asdict().copy()\n        items = d.get(\"items\", [])\n        if items is not None and items:\n            # the config parsing code appears to be turning arrays into tuples - however, updating the\n            # code we think is at fault breaks a non-trivial amount of tests. 
in the interest of time, we're\n            # just casting to a list here, but we should eventually circle back here\n            # and either ensure that we always get a list from the config parse code OR document that we're\n            # expecting Tron's config parsing code to return immutable data if this is behavior we want to depend on.\n            d[\"items\"] = list(d[\"items\"])\n            for i, item in enumerate(items):\n                if isinstance(item, ConfigSecretVolumeItem):\n                    d[\"items\"][i] = item._asdict()\n        return d  # type: ignore\n\n\nConfigSecretSource = config_object_factory(\n    name=\"ConfigSecretSource\",\n    required=[\"secret_name\", \"key\"],\n    optional=[],\n)\n\nConfigProjectedSAVolume = config_object_factory(\n    name=\"ConfigProjectedSAVolume\",\n    required=[\"container_path\", \"audience\"],\n    optional=[\"expiration_seconds\"],\n)\n\nConfigFieldSelectorSource = config_object_factory(\n    name=\"ConfigFieldSelectorSource\",\n    required=[\"field_path\"],\n    optional=[],\n)\n\nConfigNodeAffinity = config_object_factory(\n    name=\"ConfigNodeAffinity\",\n    required=[\"key\", \"operator\", \"value\"],\n    optional=[],\n)\n\nConfigTopologySpreadConstraints = config_object_factory(\n    name=\"ConfigTopologySpreadConstraints\",\n    required=[\"max_skew\", \"label_selector\", \"topology_key\", \"when_unsatisfiable\"],\n    optional=[],\n)\n\nConfigParameter = config_object_factory(\n    name=\"ConfigParameter\",\n    required=[\n        \"key\",\n        \"value\",\n    ],\n    optional=[],\n)\n\nStatePersistenceTypes = Enum(  # type: ignore\n    \"StatePersistenceTypes\",\n    dict(shelve=\"shelve\", yaml=\"yaml\", dynamodb=\"dynamodb\"),\n)\n\n\nclass ExecutorTypes(Enum):\n    ssh = \"ssh\"\n    mesos = \"mesos\"\n    kubernetes = \"kubernetes\"\n    spark = \"spark\"\n\n\nActionRunnerTypes = Enum(\"ActionRunnerTypes\", dict(none=\"none\", subprocess=\"subprocess\"))  # type: ignore\n\nVolumeModes = Enum(\"VolumeModes\", dict(RO=\"RO\", RW=\"RW\"))  # type: ignore\n\nActionOnRerun = Enum(\"ActionOnRerun\", dict(rerun=\"rerun\"))  # type: ignore\n\n# WARNING: it is *NOT* safe to delete these classes (or their attributes) if there are any references to them in DynamoDB until TRON-2200 is complete! (See DAR-2328)\n# NOTE: this means that reverting a change that adds a new attribute is not safe :)\n"
  },
  {
    "path": "tron/config/static_config.py",
    "content": "from functools import partial\n\nimport staticconf\nfrom staticconf import config\n\nFILENAME = \"/nail/srv/configs/tron.yaml\"\nNAMESPACE = \"tron\"\n\n\ndef load_yaml_file() -> None:\n    staticconf.YamlConfiguration(filename=FILENAME, namespace=NAMESPACE)\n\n\ndef build_configuration_watcher(filename: str, namespace: str) -> config.ConfigurationWatcher:\n    config_loader = partial(staticconf.YamlConfiguration, filename, namespace=namespace)\n    reloader = config.ReloadCallbackChain(namespace)\n    return config.ConfigurationWatcher(config_loader, filename, min_interval=0, reloader=reloader)\n\n\n# Load configuration from 'tron.yaml' into namespace 'tron'\ndef get_config_watcher() -> config.ConfigurationWatcher:\n    load_yaml_file()\n    return build_configuration_watcher(FILENAME, NAMESPACE)\n"
  },
  {
    "path": "tron/config/tronfig_schema.json",
    "content": "{\n    \"$schema\": \"http://json-schema.org/draft-06/schema#\",\n    \"description\": \"http://tron.readthedocs.io/en/latest/config.html\",\n    \"type\": \"object\",\n    \"additionalProperties\": false,\n    \"properties\": {\n        \"ssh_options\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"agent\": {\n                    \"type\": \"boolean\",\n                    \"default\": false\n                },\n                \"identities\": {\n                    \"type\": \"array\",\n                    \"items\": {\n                        \"type\": \"string\"\n                    }\n                },\n                \"known_hosts_file\": {\n                    \"type\": \"string\"\n                },\n                \"connect_timeout\": {\n                    \"type\": \"number\",\n                    \"default\": 30\n                },\n                \"idle_connection_timeout\": {\n                    \"type\": \"number\",\n                    \"default\": 3600\n                },\n                \"jitter_min_load\": {\n                    \"type\": \"number\",\n                    \"default\": 4\n                },\n                \"jitter_max_delay\": {\n                    \"type\": \"number\",\n                    \"default\": 20\n                },\n                \"jitter_load_factor\": {\n                    \"type\": \"number\",\n                    \"default\": 20\n                }\n            }\n        },\n        \"time_zone\": {\n            \"type\": \"string\"\n        },\n        \"command_context\": {\n            \"type\": \"object\"\n        },\n        \"output_stream_dir\": {\n            \"type\": \"string\"\n        },\n        \"state_persistence\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"store_type\": {\n                    \"type\": \"string\"\n                },\n                \"name\": {\n                    \"type\": \"string\"\n                },\n                \"buffer_size\": {\n                    \"type\": \"number\",\n                    \"default\": 1\n                },\n                \"table_name\": {\n                    \"type\": \"string\"\n                },\n                \"dynamodb_region\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"action_runner\": {\n            \"type\": \"object\",\n            \"properties\": {\n                \"runner_type\": {\n                    \"type\": \"string\"\n                },\n                \"remote_status_path\": {\n                    \"type\": \"string\"\n                },\n                \"remote_exec_path\": {\n                    \"type\": \"string\"\n                }\n            }\n        },\n        \"nodes\": {\n            \"type\": \"array\",\n            \"items\": {\n                \"type\": \"object\",\n                \"properties\": {\n                    \"hostname\": {\n                        \"type\": \"string\"\n                    },\n                    \"name\": {\n                        \"type\": \"string\"\n                    },\n                    \"username\": {\n                        \"type\": \"string\"\n                    },\n                    \"port\": {\n                        \"type\": \"number\"\n                    }\n                }\n            }\n        },\n        \"node_pools\": {\n            \"type\": \"array\",\n            \"items\": {\n                
\"type\": \"object\",\n                \"properties\": {\n                    \"name\": {\n                        \"type\": \"string\",\n                        \"nodes\": {\n                            \"type\": \"array\",\n                            \"items\": {\n                                \"type\": \"string\"\n                            }\n                        }\n                    }\n                }\n            }\n        },\n        \"jobs\": {\n            \"type\": \"array\",\n            \"items\": {\n                \"type\": \"object\",\n                \"properties\": {\n                    \"use_k8s\": {\n                        \"type\": \"boolean\",\n                        \"default\": false\n                    },\n                    \"name\": {\n                        \"type\": \"string\"\n                    },\n                    \"node\": {\n                        \"type\": \"string\"\n                    },\n                    \"schedule\": {\n                        \"oneOf\": [\n                            {\n                                \"type\": \"string\"\n                            },\n                            {\n                                \"type\": \"object\"\n                            }\n                        ]\n                    },\n                    \"actions\": {\n                        \"type\": \"array\",\n                        \"items\": {\n                            \"type\": \"object\",\n                            \"properties\": {\n                                \"name\": {\n                                    \"type\": \"string\"\n                                },\n                                \"command\": {\n                                    \"type\": \"string\"\n                                },\n                                \"requires\": {\n                                    \"type\": \"array\",\n                                    \"items\": {\n                                        \"type\": \"string\"\n                                    }\n                                },\n                                \"node\": {\n                                    \"type\": \"string\"\n                                }\n                            }\n                        }\n                    },\n                    \"queuing\": {\n                        \"type\": \"boolean\",\n                        \"default\": true\n                    },\n                    \"allow_overlap\": {\n                        \"type\": \"boolean\",\n                        \"default\": false\n                    },\n                    \"run_limit\": {\n                        \"type\": \"number\",\n                        \"default\": 50\n                    },\n                    \"all_nodes\": {\n                        \"type\": \"boolean\",\n                        \"default\": false\n                    },\n                    \"cleanup_action\": {\n                        \"type\": \"object\"\n                    },\n                    \"enabled\": {\n                        \"type\": \"boolean\",\n                        \"default\": true\n                    },\n                    \"max_runtime\": {\n                        \"type\": \"string\"\n                    },\n                    \"expected_runtime\": {\n                        \"type\": \"string\"\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "tron/core/__init__.py",
    "content": "\"\"\"\n tron.core contains all the core objects for running and scheduling jobs.\n\"\"\"\n"
  },
  {
    "path": "tron/core/action.py",
    "content": "import datetime\nimport json\nimport logging\nfrom dataclasses import dataclass\nfrom dataclasses import field\nfrom dataclasses import fields\nfrom typing import Any\n\nfrom tron import node\nfrom tron.config.schema import CLEANUP_ACTION_NAME\nfrom tron.config.schema import ConfigAction\nfrom tron.config.schema import ConfigConstraint\nfrom tron.config.schema import ConfigFieldSelectorSource\nfrom tron.config.schema import ConfigNodeAffinity\nfrom tron.config.schema import ConfigParameter\nfrom tron.config.schema import ConfigProjectedSAVolume\nfrom tron.config.schema import ConfigSecretSource\nfrom tron.config.schema import ConfigSecretVolume\nfrom tron.config.schema import ConfigTopologySpreadConstraints\nfrom tron.config.schema import ConfigVolume\nfrom tron.utils.persistable import Persistable\n\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass ActionCommandConfig(Persistable):\n    \"\"\"A configurable data object for one try of an Action.\"\"\"\n\n    command: str\n    cpus: float | None = None\n    mem: float | None = None\n    disk: float | None = None\n    cap_add: list[str] = field(default_factory=list)\n    cap_drop: list[str] = field(default_factory=list)\n    constraints: set = field(default_factory=set)\n    docker_image: str | None = None\n    docker_parameters: set = field(default_factory=set)\n    env: dict = field(default_factory=dict)\n    secret_env: dict = field(default_factory=dict)\n    secret_volumes: list[ConfigSecretVolume] = field(default_factory=list)\n    projected_sa_volumes: list[ConfigProjectedSAVolume] = field(default_factory=list)\n    field_selector_env: dict = field(default_factory=dict)\n    extra_volumes: set = field(default_factory=set)\n    node_selectors: dict = field(default_factory=dict)\n    node_affinities: list[ConfigNodeAffinity] = field(default_factory=list)\n    topology_spread_constraints: list[ConfigTopologySpreadConstraints] = field(default_factory=list)\n    labels: dict = field(default_factory=dict)\n    idempotent: bool = False\n    annotations: dict = field(default_factory=dict)\n    service_account_name: str | None = None\n    ports: list[int] = field(default_factory=list)\n\n    @property\n    def state_data(self):\n        return {field.name: getattr(self, field.name) for field in fields(self)}\n\n    def copy(self):\n        return ActionCommandConfig(**self.state_data)\n\n    @staticmethod\n    def from_json(\n        state_data: str,\n    ) -> dict[str, Any]:  # TODO: use a TypedDict (or return an ActionCommandConfig instance)\n        \"\"\"Deserialize a JSON string to an ActionCommandConfig dict.\"\"\"\n        try:\n            json_data = json.loads(state_data)\n            deserialized_data = {\n                \"constraints\": [\n                    ConfigConstraint.from_dict(val) for val in json_data[\"constraints\"]\n                ],  # convert back the list of dictionaries to a list of ConfigConstraint\n                \"docker_parameters\": [ConfigParameter.from_dict(val) for val in json_data[\"docker_parameters\"]],\n                \"extra_volumes\": [ConfigVolume.from_dict(val) for val in json_data[\"extra_volumes\"]],\n                \"node_affinities\": [ConfigNodeAffinity.from_dict(val) for val in json_data[\"node_affinities\"]],\n                \"topology_spread_constraints\": [\n                    ConfigTopologySpreadConstraints.from_dict(val) for val in json_data[\"topology_spread_constraints\"]\n                ],\n                \"secret_volumes\": 
[ConfigSecretVolume.from_dict(val) for val in json_data[\"secret_volumes\"]],\n                \"projected_sa_volumes\": [\n                    ConfigProjectedSAVolume.from_dict(val) for val in json_data[\"projected_sa_volumes\"]\n                ],\n                \"secret_env\": {key: ConfigSecretSource.from_dict(val) for key, val in json_data[\"secret_env\"].items()},\n                \"field_selector_env\": {\n                    key: ConfigFieldSelectorSource.from_dict(val)\n                    for key, val in json_data[\"field_selector_env\"].items()\n                },\n                \"command\": json_data[\"command\"],\n                \"cpus\": json_data[\"cpus\"],\n                \"mem\": json_data[\"mem\"],\n                \"disk\": json_data[\"disk\"],\n                \"cap_add\": json_data[\"cap_add\"],\n                \"cap_drop\": json_data[\"cap_drop\"],\n                \"docker_image\": json_data[\"docker_image\"],\n                \"env\": json_data[\"env\"],\n                \"node_selectors\": json_data[\"node_selectors\"],\n                \"labels\": json_data[\"labels\"],\n                \"idempotent\": json_data.get(\"idempotent\", False),\n                \"annotations\": json_data[\"annotations\"],\n                \"service_account_name\": json_data[\"service_account_name\"],\n                \"ports\": json_data[\"ports\"],\n            }\n        except Exception:\n            log.exception(\"Error deserializing ActionCommandConfig from JSON\")\n            raise\n        return deserialized_data\n\n    @staticmethod\n    def to_json(state_data: dict) -> str:\n        \"\"\"Serialize the ActionCommandConfig instance to a JSON string.\"\"\"\n\n        def serialize_namedtuple(obj):\n            if isinstance(obj, tuple) and hasattr(obj, \"_fields\"):\n                # checks if obj is a namedtuple and convert it to a dict\n                # TODO: future improvement here would be to use a custom json encoder to\n                # have json.dumps() automatically convert namedtuples to dictionaries\n                return obj._asdict()\n            return obj\n\n        try:\n            # NOTE: you'll notice that there's a lot of get() accesses of state_data for\n            # pretty common fields - this is because ActionCommandConfig is used by more\n            # than one type of ActionRun (Kubernetes, Mesos, SSH) and these generally look\n            # different. 
Alternatively, some of these fields are used by KubernetesActionRun\n            # but are relatively new and older runs do not have data for them.\n            # Once we get rid of the SSH and Mesos code as well as older runs in DynamoDB,\n            # we'll likely be able to clean this up.\n            return json.dumps(\n                {\n                    \"command\": state_data[\"command\"],\n                    \"cpus\": state_data[\"cpus\"],\n                    \"mem\": state_data[\"mem\"],\n                    \"disk\": state_data[\"disk\"],\n                    \"cap_add\": state_data[\"cap_add\"],\n                    \"cap_drop\": state_data[\"cap_drop\"],\n                    \"constraints\": [\n                        serialize_namedtuple(constraint) for constraint in state_data.get(\"constraints\", [])\n                    ],  # convert each ConfigConstraint to dictionary, so it would be a list of dicts\n                    \"docker_image\": state_data[\"docker_image\"],\n                    \"docker_parameters\": [\n                        serialize_namedtuple(parameter) for parameter in state_data.get(\"docker_parameters\", [])\n                    ],\n                    \"env\": state_data.get(\"env\", {}),\n                    \"secret_env\": {\n                        key: serialize_namedtuple(val) for key, val in state_data.get(\"secret_env\", {}).items()\n                    },\n                    \"secret_volumes\": [serialize_namedtuple(volume) for volume in state_data.get(\"secret_volumes\", [])],\n                    \"projected_sa_volumes\": [\n                        serialize_namedtuple(volume) for volume in state_data.get(\"projected_sa_volumes\", [])\n                    ],\n                    \"field_selector_env\": {\n                        key: serialize_namedtuple(val) for key, val in state_data.get(\"field_selector_env\", {}).items()\n                    },\n                    \"extra_volumes\": [serialize_namedtuple(volume) for volume in state_data.get(\"extra_volumes\", [])],\n                    \"node_selectors\": state_data.get(\"node_selectors\", {}),\n                    \"node_affinities\": [\n                        serialize_namedtuple(affinity) for affinity in state_data.get(\"node_affinities\", [])\n                    ],\n                    \"labels\": state_data.get(\"labels\", {}),\n                    \"idempotent\": state_data.get(\"idempotent\", False),\n                    \"annotations\": state_data.get(\"annotations\", {}),\n                    \"service_account_name\": state_data.get(\"service_account_name\"),\n                    \"ports\": state_data.get(\"ports\", []),\n                    \"topology_spread_constraints\": [\n                        serialize_namedtuple(constraint)\n                        for constraint in state_data.get(\"topology_spread_constraints\", [])\n                    ],\n                }\n            )\n        except KeyError:\n            log.exception(\"Missing key in state_data:\")\n            raise\n        except Exception:\n            log.exception(\"Error serializing ActionCommandConfig to JSON:\")\n            raise\n\n\n@dataclass\nclass Action:\n    \"\"\"A configurable data object for an Action.\"\"\"\n\n    name: str\n    command_config: ActionCommandConfig\n    node_pool: str\n    retries: int | None = None\n    retries_delay: datetime.timedelta | None = None\n    expected_runtime: datetime.timedelta | None = None\n    executor: str | None = None\n    trigger_downstreams: 
bool | dict | None = None\n    triggered_by: set | None = None\n    on_upstream_rerun: str | None = None\n    trigger_timeout: datetime.timedelta | None = None\n    idempotent: bool = False\n\n    @property\n    def is_cleanup(self):\n        return self.name == CLEANUP_ACTION_NAME\n\n    @property\n    def command(self):\n        return self.command_config.command\n\n    @classmethod\n    def from_config(cls, config: ConfigAction) -> \"Action\":\n        \"\"\"Factory method for creating a new Action.\"\"\"\n        node_repo = node.NodePoolRepository.get_instance()\n        command_config = ActionCommandConfig(\n            command=config.command,\n            cpus=config.cpus,\n            mem=config.mem,\n            disk=(1024.0 if config.disk is None else config.disk),\n            docker_image=config.docker_image,\n            constraints=set(config.constraints or []),\n            docker_parameters=set(config.docker_parameters or []),\n            extra_volumes=set(config.extra_volumes or []),\n            env=config.env or {},\n            secret_env=config.secret_env or {},\n            secret_volumes=config.secret_volumes or [],\n            projected_sa_volumes=config.projected_sa_volumes or [],\n            field_selector_env=config.field_selector_env or {},\n            cap_add=config.cap_add or [],\n            cap_drop=config.cap_drop or [],\n            node_selectors=config.node_selectors or {},\n            node_affinities=config.node_affinities or [],\n            topology_spread_constraints=config.topology_spread_constraints or [],\n            labels=config.labels or {},\n            idempotent=config.idempotent,\n            annotations=config.annotations or {},\n            service_account_name=config.service_account_name or None,\n            ports=config.ports or [],\n        )\n        kwargs = dict(\n            name=config.name,\n            command_config=command_config,\n            node_pool=node_repo.get_by_name(config.node),\n            retries=config.retries,\n            retries_delay=config.retries_delay,\n            expected_runtime=config.expected_runtime,\n            executor=config.executor,\n            trigger_downstreams=config.trigger_downstreams,\n            triggered_by=config.triggered_by,\n            on_upstream_rerun=config.on_upstream_rerun,\n            trigger_timeout=config.trigger_timeout,\n            idempotent=config.idempotent,\n        )\n\n        return cls(**kwargs)\n"
  },
  {
    "path": "tron/core/actiongraph.py",
    "content": "import logging\nfrom collections import namedtuple\nfrom collections.abc import Mapping\nfrom collections.abc import Sequence\n\nfrom tron.core.action import Action\nfrom tron.utils.timeutils import delta_total_seconds\n\nlog = logging.getLogger(__name__)\nTrigger = namedtuple(\"Trigger\", [\"name\", \"command\"])\n\n\nclass ActionGraph:\n    \"\"\"A directed graph of actions and their requirements for a specific job.\"\"\"\n\n    def __init__(\n        self,\n        action_map: Mapping[str, Action],\n        required_actions: Mapping[str, set[str]],\n        required_triggers: Mapping[str, set[str]],\n    ) -> None:\n        self.action_map = action_map\n        self.required_actions = required_actions\n        self.required_triggers = required_triggers\n        self.all_triggers = set(self.required_triggers)\n        for action_triggers in self.required_triggers.values():\n            self.all_triggers |= action_triggers\n        self.all_triggers -= set(self.action_map)\n\n    def get_dependencies(self, action_name: str, include_triggers: bool = False) -> Sequence[Action | Trigger]:\n        \"\"\"Given an Action's name return the Actions (and optionally, Triggers)\n        required to run before that Action.\n        \"\"\"\n        if action_name not in set(self.action_map) | self.all_triggers:\n            return []\n\n        dependencies = [self.action_map[action] for action in self.required_actions[action_name]]\n        if include_triggers:\n            dependencies += [self[trigger_name] for trigger_name in self.required_triggers[action_name]]\n        return dependencies\n\n    def names(self, include_triggers=False):\n        names = set(self.action_map)\n        if include_triggers:\n            names |= self.all_triggers\n        return names\n\n    @property\n    def expected_runtime(self):\n        return {name: delta_total_seconds(self.action_map[name].expected_runtime) for name in self.action_map.keys()}\n\n    def __getitem__(self, name):\n        if name in self.action_map:\n            return self.action_map[name]\n        elif name in self.all_triggers:\n            # we don't have the Trigger config to know what the real command is,\n            # so we just fill in the command with 'TRIGGER'\n            return Trigger(name, \"TRIGGER\")\n        else:\n            raise KeyError(f\"{name} is not a valid action\")\n\n    def __eq__(self, other):\n        return (\n            self.action_map == other.action_map\n            and self.required_actions == other.required_actions\n            and self.required_triggers == other.required_triggers\n        )\n\n    def __ne__(self, other):\n        return not self == other\n"
  },
  {
    "path": "tron/core/actionrun.py",
    "content": "\"\"\"\ntron.core.actionrun\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport os\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\nfrom dataclasses import fields\nfrom typing import Any\nfrom typing import cast\nfrom typing import Literal\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom twisted.internet import reactor\nfrom twisted.internet.base import DelayedCall\n\nfrom tron import command_context\nfrom tron import node\nfrom tron import prom_metrics\nfrom tron.actioncommand import ActionCommand\nfrom tron.actioncommand import NoActionRunnerFactory\nfrom tron.actioncommand import SubprocessActionRunnerFactory\nfrom tron.bin.action_runner import build_environment\nfrom tron.bin.action_runner import build_labels\nfrom tron.command_context import CommandContext\nfrom tron.config import schema\nfrom tron.config.config_utils import StringFormatter\nfrom tron.config.schema import ExecutorTypes\nfrom tron.core import action\nfrom tron.core.action import ActionCommandConfig\nfrom tron.core.actiongraph import ActionGraph\nfrom tron.eventbus import EventBus\nfrom tron.kubernetes import KubernetesClusterRepository\nfrom tron.kubernetes import KubernetesTask\nfrom tron.mesos import MesosCluster\nfrom tron.mesos import MesosClusterRepository\nfrom tron.mesos import MesosTask\nfrom tron.serialize import filehandler\nfrom tron.utils import exitcode\nfrom tron.utils import maybe_decode\nfrom tron.utils import proxy\nfrom tron.utils import timeutils\nfrom tron.utils.observer import Observable\nfrom tron.utils.observer import Observer\nfrom tron.utils.persistable import Persistable\nfrom tron.utils.state import Machine\n\nif TYPE_CHECKING:\n    from twisted.internet.epollreactor import EPollReactor\n\n\nlog = logging.getLogger(__name__)\nMAX_RECOVER_TRIES = 5\nINITIAL_RECOVER_DELAY = 3\nKUBERNETES_ACTIONRUN_EXECUTORS: set[str] = {\n    ExecutorTypes.kubernetes.value,\n    ExecutorTypes.spark.value,\n}\n\n\nclass ActionRunFactory:\n    \"\"\"Construct ActionRuns and ActionRunCollections for a JobRun and\n    ActionGraph.\n    \"\"\"\n\n    @classmethod\n    def build_action_run_collection(cls, job_run, action_runner):\n        \"\"\"Create an ActionRunCollection from an ActionGraph and JobRun.\"\"\"\n        action_run_map = {\n            maybe_decode(\n                name\n            ): cls.build_run_for_action(  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n                job_run,\n                action_inst,\n                action_runner,\n            )\n            for name, action_inst in job_run.action_graph.action_map.items()\n        }\n        return ActionRunCollection(job_run.action_graph, action_run_map)\n\n    @classmethod\n    def action_run_collection_from_state(\n        cls,\n        job_run,\n        runs_state_data,\n        cleanup_action_state_data,\n    ):\n        action_runs = [cls.action_run_from_state(job_run, state_data) for state_data in runs_state_data]\n        if cleanup_action_state_data:\n            action_runs.append(\n                cls.action_run_from_state(\n                    job_run,\n                    cleanup_action_state_data,\n                    cleanup=True,\n                ),\n            )\n\n        # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        action_run_map = {maybe_decode(action_run.action_name): action_run for action_run in action_runs}\n        return ActionRunCollection(job_run.action_graph, action_run_map)\n\n    @classmethod\n    def build_run_for_action(cls, job_run, action, action_runner):\n        \"\"\"Create an ActionRun for a JobRun and Action.\"\"\"\n        run_node = action.node_pool.next() if action.node_pool else job_run.node\n\n        if action.trigger_timeout:\n            trigger_timeout = job_run.run_time + action.trigger_timeout\n        else:\n            trigger_timeout = job_run.run_time + datetime.timedelta(days=1)\n\n        args = {\n            \"job_run_id\": job_run.id,\n            \"name\": action.name,\n            \"node\": run_node,\n            \"command_config\": action.command_config,\n            \"parent_context\": job_run.context,\n            \"output_path\": job_run.output_path.clone(),\n            \"cleanup\": action.is_cleanup,\n            \"action_runner\": action_runner,\n            \"retries_remaining\": action.retries,\n            \"retries_delay\": action.retries_delay,\n            \"executor\": action.executor,\n            \"trigger_downstreams\": action.trigger_downstreams,\n            \"triggered_by\": action.triggered_by,\n            \"on_upstream_rerun\": action.on_upstream_rerun,\n            \"trigger_timeout_timestamp\": trigger_timeout.timestamp(),\n        }\n        if action.executor == ExecutorTypes.mesos.value:\n            return MesosActionRun(**args)\n        elif action.executor in KUBERNETES_ACTIONRUN_EXECUTORS:\n            return KubernetesActionRun(**args)\n        return SSHActionRun(**args)\n\n    @classmethod\n    def action_run_from_state(cls, job_run, state_data, cleanup=False):\n        \"\"\"Restore an ActionRun for this JobRun from the state data.\"\"\"\n        args = {\n            \"state_data\": state_data,\n            \"parent_context\": job_run.context,\n            \"output_path\": job_run.output_path.clone(),\n            \"job_run_node\": job_run.node,\n            \"cleanup\": cleanup,\n            \"action_graph\": job_run.action_graph,\n        }\n\n        if state_data.get(\"executor\") == ExecutorTypes.mesos.value:\n            return MesosActionRun.from_state(**args)\n        if state_data.get(\"executor\") in KUBERNETES_ACTIONRUN_EXECUTORS:\n            return KubernetesActionRun.from_state(**args)\n        return SSHActionRun.from_state(**args)\n\n\n@dataclass\nclass ActionRunAttempt(Persistable):\n    \"\"\"Stores state about one try of an action run.\"\"\"\n\n    command_config: action.ActionCommandConfig\n    start_time: datetime.datetime | None = None\n    end_time: datetime.datetime | None = None\n    rendered_command: str | None = None\n    exit_status: int | None = None\n    mesos_task_id: str | None = None\n    kubernetes_task_id: str | None = None\n\n    def exit(self, exit_status, end_time=None):\n        if self.end_time is None:\n            self.exit_status = exit_status\n            self.end_time = end_time or timeutils.current_time()\n\n    @property\n    def display_command(self):\n        return self.rendered_command or self.command_config.command\n\n    @property\n    def state_data(self):\n        state_data = {\n            \"command_config\": self.command_config.state_data,\n        }\n        for field in fields(self):\n            if field.name not in state_data:\n                state_data[field.name] = getattr(self, field.name)\n        return state_data\n\n    @staticmethod\n 
   def to_json(state_data: dict) -> str:\n        \"\"\"Serialize the ActionRunAttempt instance to a JSON string.\"\"\"\n        try:\n            return json.dumps(\n                {\n                    \"command_config\": ActionCommandConfig.to_json(state_data[\"command_config\"]),\n                    \"start_time\": state_data[\"start_time\"].isoformat() if state_data[\"start_time\"] else None,\n                    \"end_time\": state_data[\"end_time\"].isoformat() if state_data[\"end_time\"] else None,\n                    \"rendered_command\": state_data[\"rendered_command\"],\n                    \"exit_status\": state_data[\"exit_status\"],\n                    # NOTE: mesos_task_id can be deleted once we delete all Mesos\n                    # code and run data - and kubernetes_task_id can then be\n                    # accessed unconditionally :)\n                    # (see note in ActionCommandConfig::to_json() for more\n                    # information about why we do this)\n                    \"mesos_task_id\": state_data.get(\"mesos_task_id\"),\n                    \"kubernetes_task_id\": state_data.get(\"kubernetes_task_id\"),\n                }\n            )\n        except KeyError:\n            log.exception(\"Missing key in state_data:\")\n            raise\n        except Exception:\n            log.exception(\"Error serializing ActionRunAttempt to JSON:\")\n            raise\n\n    @staticmethod\n    def from_json(state_data: str) -> dict[str, Any]:  # TODO: use a TypedDict\n        \"\"\"Deserialize the ActionRunAttempt instance from a JSON string.\"\"\"\n        try:\n            json_data = json.loads(state_data)\n            deserialized_data = {\n                \"command_config\": ActionCommandConfig.from_json(json_data[\"command_config\"]),\n                \"start_time\": (\n                    datetime.datetime.fromisoformat(json_data[\"start_time\"]) if json_data[\"start_time\"] else None\n                ),\n                \"end_time\": datetime.datetime.fromisoformat(json_data[\"end_time\"]) if json_data[\"end_time\"] else None,\n                \"rendered_command\": json_data[\"rendered_command\"],\n                \"exit_status\": json_data[\"exit_status\"],\n                \"mesos_task_id\": json_data[\"mesos_task_id\"],\n                \"kubernetes_task_id\": json_data[\"kubernetes_task_id\"],\n            }\n        except Exception:\n            log.exception(\"Error deserializing ActionRunAttempt from JSON\")\n            raise\n        return deserialized_data\n\n    @classmethod\n    def from_state(cls, state_data):\n        # it's possible that we've rolled back to an older Tron version that doesn't support data that we've persisted\n        # (e.g., new fields for an ActionCommandConfig) so ensure that we only load what we currently support\n        valid_command_config_entries_from_state = {\n            field.name: state_data[\"command_config\"][field.name]\n            for field in fields(action.ActionCommandConfig)\n            if field.name in state_data[\"command_config\"]\n        }\n        state_data[\"command_config\"] = action.ActionCommandConfig(**valid_command_config_entries_from_state)\n\n        valid_actionrun_attempt_entries_from_state = {\n            field.name: state_data[field.name] for field in fields(cls) if field.name in state_data\n        }\n        return cls(**valid_actionrun_attempt_entries_from_state)\n\n\nclass ActionRun(Observable, Persistable):\n    \"\"\"Base class for tracking the state of a single run 
of an Action.\n\n    ActionRun's state machine is observed by a parent JobRun.\n    \"\"\"\n\n    CANCELLED = \"cancelled\"\n    FAILED = \"failed\"\n    QUEUED = \"queued\"\n    RUNNING = \"running\"\n    SCHEDULED = \"scheduled\"\n    SKIPPED = \"skipped\"\n    STARTING = \"starting\"\n    SUCCEEDED = \"succeeded\"\n    WAITING = \"waiting\"\n    UNKNOWN = \"unknown\"\n\n    default_transitions = {\"fail\": FAILED, \"success\": SUCCEEDED}\n    STATE_MACHINE = Machine(\n        SCHEDULED,\n        **{\n            CANCELLED: {\n                \"skip\": SKIPPED,\n                # special case for when a human manually runs a cancelled run and wants to update Tron\n                **default_transitions,\n            },\n            FAILED: {\n                \"skip\": SKIPPED,\n                # NOTE: This is a special case for when a human manually runs a failed run and wants to update Tron\n                \"success\": SUCCEEDED,\n            },\n            SUCCEEDED: {\n                # NOTE: this entire set of transitions is a special case for when a human wants to update Tron's state\n                # (e.g., maybe the run succeeded, but a human noticed issues with the output, deleted it, and wants to\n                # track this failure in Tron)\n                \"skip\": SKIPPED,\n                **default_transitions,\n            },\n            RUNNING: {\n                \"cancel\": CANCELLED,\n                \"fail_unknown\": UNKNOWN,\n                **default_transitions,\n            },\n            STARTING: {\n                \"started\": RUNNING,\n                \"fail\": FAILED,\n                \"fail_unknown\": UNKNOWN,\n                \"cancel\": CANCELLED,\n                # special case for when Tron gets into a state where it lost events for whatever reason\n                \"success\": SUCCEEDED,\n            },\n            UNKNOWN: {\n                \"running\": RUNNING,\n                \"fail_unknown\": UNKNOWN,\n                **default_transitions,\n            },\n            WAITING: {\n                \"cancel\": CANCELLED,\n                \"start\": STARTING,\n                **default_transitions,\n            },\n            QUEUED: {\n                \"ready\": WAITING,\n                \"cancel\": CANCELLED,\n                \"start\": STARTING,\n                \"schedule\": SCHEDULED,\n                **default_transitions,\n            },\n            SCHEDULED: {\n                \"ready\": WAITING,\n                \"queue\": QUEUED,\n                \"cancel\": CANCELLED,\n                \"start\": STARTING,\n                **default_transitions,\n            },\n        },\n    )\n\n    # The set of states that are considered end states. Technically some of\n    # these states can be manually transitioned to other states.\n    END_STATES = {FAILED, SUCCEEDED, CANCELLED, SKIPPED, UNKNOWN}\n\n    # Failed render command is false to ensure that it will fail when run\n    FAILED_RENDER = \"false # Command failed to render correctly. 
See the Tron error log.\"\n    NOTIFY_TRIGGER_READY = \"trigger_ready\"\n\n    # This is a list of \"alternate locations\" that we can look for stdout/stderr in\n    # The PR in question is https://github.com/Yelp/Tron/pull/735/files, which changed\n    # the format of the stdout/stderr paths\n    STDOUT_PATHS = [\n        os.path.join(\n            \"{namespace}.{jobname}\",\n            \"{namespace}.{jobname}.{run_num}\",\n            \"{namespace}.{jobname}.{run_num}.{action}\",\n        ),  # old style paths (pre-#735 PR)\n        os.path.join(\n            \"{namespace}.{jobname}\",\n            \"{namespace}.{jobname}.{run_num}\",\n            \"{namespace}.{jobname}.{run_num}.{action}\",\n            \"{namespace}.{jobname}.{run_num}.recovery-{namespace}.{jobname}.{run_num}.{action}\",\n        ),  # old style recovery paths (pre-#735 PR)\n        os.path.join(\n            \"{namespace}\",\n            \"{jobname}\",\n            \"{run_num}\",\n            \"{action}-recovery\",\n        ),  # new style recovery paths (post-#735 PR)\n    ]\n\n    context_class = command_context.ActionRunContext\n\n    # TODO: create a class for ActionRunId, JobRunId, Etc\n    def __init__(\n        self,\n        job_run_id: str,\n        name: str,\n        node: node.Node,\n        command_config: action.ActionCommandConfig,\n        parent_context: CommandContext | None = None,\n        output_path: filehandler.OutputPath | None = None,\n        cleanup: bool = False,\n        start_time: datetime.datetime | None = None,\n        end_time: datetime.datetime | None = None,\n        run_state: str = SCHEDULED,\n        exit_status: int | None = None,\n        attempts: list[ActionRunAttempt] | None = None,\n        action_runner: NoActionRunnerFactory | SubprocessActionRunnerFactory | None = None,\n        retries_remaining: int | None = None,\n        retries_delay: datetime.timedelta | None = None,\n        machine: Machine | None = None,\n        executor: str | None = None,\n        trigger_downstreams: bool | dict | None = None,\n        triggered_by: list[str] | None = None,\n        on_upstream_rerun: schema.ActionOnRerun | None = None,\n        trigger_timeout_timestamp: float | None = None,\n        original_command: str | None = None,\n    ):\n        super().__init__()\n        self.job_run_id = maybe_decode(\n            job_run_id\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n        self.action_name = maybe_decode(\n            name\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        self.node = node\n        self.start_time = start_time\n        self.end_time = end_time\n        self.exit_status = exit_status\n        self.action_runner = action_runner or NoActionRunnerFactory()\n        self.machine = machine or Machine.from_machine(\n            ActionRun.STATE_MACHINE,\n            None,\n            run_state,\n        )\n        self.is_cleanup = cleanup\n\n        self.executor = executor\n        self.command_config = command_config\n        self.original_command = original_command or command_config.command\n        self.attempts = attempts or []\n        self.output_path = output_path or filehandler.OutputPath()\n        self.output_path.append(self.action_name)\n        self.context = command_context.build_context(self, parent_context)\n        self.retries_remaining = retries_remaining\n        self.retries_delay = retries_delay\n        self.trigger_downstreams = trigger_downstreams\n        self.triggered_by = triggered_by\n        self.on_upstream_rerun = on_upstream_rerun\n        self.trigger_timeout_timestamp = trigger_timeout_timestamp\n        self.trigger_timeout_call = None\n\n        self.action_command = None\n        self.in_delay = None  # type: Optional[DelayedCall]\n\n    @property\n    def state(self) -> str:\n        return self.machine.state\n\n    @property\n    def id(self):\n        return f\"{self.job_run_id}.{self.action_name}\"\n\n    @property\n    def name(self):\n        return self.action_name\n\n    @property\n    def last_attempt(self):\n        if self.attempts:\n            return self.attempts[-1]\n        return None\n\n    @property\n    def exit_statuses(self):\n        if self.attempts:\n            return [a.exit_status for a in self.attempts if a.end_time]\n        return []\n\n    @property\n    def command(self):\n        if self.attempts:\n            return self.attempts[-1].display_command\n        else:\n            return self.command_config.command\n\n    @property\n    def rendered_command(self):\n        if self.attempts:\n            return self.attempts[-1].rendered_command\n        return None\n\n    @classmethod\n    def attempts_from_state(cls, state_data, command_config):\n        attempts = []\n        if \"attempts\" in state_data:\n            attempts = [ActionRunAttempt.from_state(a) for a in state_data[\"attempts\"]]\n        else:\n            rendered_command = maybe_decode(\n                state_data.get(\"rendered_command\")\n            )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n            exit_statuses = state_data.get(\"exit_statuses\", [])\n            # If the action has started, add an attempt for the final try\n            if state_data.get(\"start_time\"):\n                exit_statuses = exit_statuses + [state_data.get(\"exit_status\")]\n            for exit_status in exit_statuses:\n                attempts.append(\n                    ActionRunAttempt(\n                        command_config=command_config,\n                        rendered_command=rendered_command,\n                        exit_status=exit_status,\n                        start_time=\"unknown\",\n                        end_time=\"unknown\",\n                    ),\n                )\n            if attempts:\n                # only one of these should ever be valid - and we'll want to clean\n                # this up once we're off of mesos such that we only restore the k8s\n                # task id\n                attempts[-1].mesos_task_id = state_data.get(\"mesos_task_id\")\n                attempts[-1].kubernetes_task_id = state_data.get(\"kubernetes_task_id\")\n        return attempts\n\n    @classmethod\n    def from_state(\n        cls,\n        state_data,\n        parent_context,\n        output_path,\n        job_run_node,\n        action_graph,\n        cleanup=False,\n    ):\n        \"\"\"Restore the state of this ActionRun from a serialized state.\"\"\"\n        pool_repo = node.NodePoolRepository.get_instance()\n\n        # Support state from older version\n        if \"id\" in state_data:\n            job_run_id, action_name = state_data[\"id\"].rsplit(\".\", 1)\n        else:\n            job_run_id = state_data[\"job_run_id\"]\n            action_name = state_data[\"action_name\"]\n\n        job_run_node = pool_repo.get_node(\n            state_data.get(\"node_name\"),\n            job_run_node,\n        )\n\n        action_runner_data = state_data.get(\"action_runner\")\n        if action_runner_data:\n            action_runner = SubprocessActionRunnerFactory(**action_runner_data)\n        else:\n            action_runner = NoActionRunnerFactory()\n\n        action_config = action_graph.action_map.get(action_name)\n        if action_config:\n            command_config = action_config.command_config\n        else:\n            command_config = action.ActionCommandConfig(command=\"\")\n\n        attempts = cls.attempts_from_state(state_data, command_config)\n        run = cls(\n            job_run_id=job_run_id,\n            name=action_name,\n            node=job_run_node,\n            parent_context=parent_context,\n            output_path=output_path,\n            command_config=command_config,\n            original_command=state_data.get(\"original_command\"),\n            cleanup=cleanup,\n            start_time=state_data[\"start_time\"],\n            end_time=state_data[\"end_time\"],\n            run_state=state_data[\"state\"],\n            exit_status=state_data.get(\"exit_status\"),\n            attempts=attempts,\n            retries_remaining=state_data.get(\"retries_remaining\"),\n            retries_delay=state_data.get(\"retries_delay\"),\n            action_runner=action_runner,\n            executor=state_data.get(\"executor\", ExecutorTypes.ssh.value),\n            trigger_downstreams=state_data.get(\"trigger_downstreams\"),\n            triggered_by=state_data.get(\"triggered_by\"),\n            on_upstream_rerun=state_data.get(\"on_upstream_rerun\"),\n            
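# note: optional / newer fields are read with .get() so that state persisted by older Tron versions still loads\n            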
trigger_timeout_timestamp=state_data.get(\"trigger_timeout_timestamp\"),\n        )\n\n        # Transition running to fail unknown because exit status was missed\n        # Recovery will look for unknown runs\n        if run.is_active:\n            run.transition_and_notify(\"fail_unknown\")\n        return run\n\n    def start(self, original_command: bool = True) -> bool | ActionCommand | None:\n        \"\"\"Start this ActionRun.\"\"\"\n        if self.in_delay is not None:\n            log.warning(f\"{self} cancelling suspend timer\")\n            self.in_delay.cancel()\n            self.in_delay = None\n\n        if not self.machine.check(\"start\"):\n            return False\n\n        if len(self.attempts) == 0:\n            log.info(f\"{self} starting\")\n        else:\n            log.info(f\"{self} restarting, retry {len(self.attempts)}\")\n\n        new_attempt = self.create_attempt(original_command=original_command)\n        self.start_time = new_attempt.start_time\n        self.transition_and_notify(\"start\")\n\n        if not self.command_config.command:\n            log.error(f\"{self} no longer configured in tronfig, cannot run\")\n            self.fail(exitcode.EXIT_INVALID_COMMAND)\n\n        if not self.is_valid_command(new_attempt.rendered_command):\n            log.error(f\"{self} invalid command: {new_attempt.command_config.command}\")\n            self.fail(exitcode.EXIT_INVALID_COMMAND)\n            return None\n\n        result = self.submit_command(new_attempt)\n\n        # We only count ActionRuns that were successfully submitted for execution. We do\n        # this instead of counting all ActionRuns that were created because submission\n        # represents the meaningful work boundary.\n        if result:\n            prom_metrics.tron_action_runs_created_counter.labels(executor=self.executor).inc()\n\n        return result\n\n    def create_attempt(self, original_command=True):\n        current_time = timeutils.current_time()\n        command_config = self.command_config.copy()\n        if original_command:\n            command_config.command = self.original_command\n        rendered_command = self.render_command(command_config.command)\n        new_attempt = ActionRunAttempt(\n            command_config=command_config,\n            start_time=current_time,\n            rendered_command=rendered_command,\n        )\n        self.attempts.append(new_attempt)\n        return new_attempt\n\n    def submit_command(self, attempt: ActionRunAttempt) -> bool | ActionCommand | None:\n        raise NotImplementedError()\n\n    def stop(self):\n        raise NotImplementedError()\n\n    def kill(self, final=True):\n        raise NotImplementedError()\n\n    def recover(self) -> ActionCommand | None:\n        raise NotImplementedError()\n\n    def _done(self, target: str, exit_status: int | None = 0) -> bool | None:\n        if self.machine.check(target):\n            if self.triggered_by:\n                EventBus.clear_subscriptions(self.__hash__())\n            self.clear_trigger_timeout()\n            self.exit_status = exit_status\n            self.end_time = timeutils.current_time()\n            if self.last_attempt is not None and self.last_attempt.end_time is None:\n                self.last_attempt.exit(exit_status, self.end_time)\n\n            prom_metrics.tron_action_runs_completed_counter.labels(\n                executor=self.executor, outcome=target, exit_status=str(exit_status)\n            ).inc()\n\n            log.info(\n                f\"{self} 
completed with {target}, transitioned to \" f\"{self.state}, exit status: {exit_status}\",\n            )\n            return self.transition_and_notify(target)\n        else:\n            log.debug(\n                f\"{self} cannot transition from {self.state} via {target}\",\n            )\n        return None\n\n    def retry(self, original_command=True):\n        \"\"\"Invoked externally (via API) when action needs to be re-tried\n        manually.\n        \"\"\"\n\n        # Manually retrying means we force the retries to be 1 and\n        # Cancel any delay, so the retry is kicked off asap\n        if self.retries_remaining is None or self.retries_remaining <= 0:\n            self.retries_remaining = 1\n        if self.in_delay is not None:\n            self.in_delay.cancel()\n            self.in_delay = None\n        self.retries_delay = None\n\n        if self.is_done:\n            self.machine.reset()\n            return self._exit_unsuccessful(self.exit_status, retry_original_command=original_command)\n        else:\n            log.info(f\"{self} getting killed for a retry\")\n            return self.kill(final=False)\n\n    def start_after_delay(self):\n        log.info(f\"{self} resuming after retry delay\")\n        self.machine.reset()\n        self.in_delay = None\n        self.start()\n\n    def restart(self, original_command: bool = True) -> bool | ActionCommand | None:\n        \"\"\"Used by `fail` when action run has to be re-tried\"\"\"\n        if self.retries_delay is not None:\n            self.in_delay = reactor.callLater(  # type: ignore  # no twisted stubs\n                self.retries_delay.total_seconds(),\n                self.start_after_delay,\n            )\n            log.info(f\"{self} delaying for a retry in {self.retries_delay}s\")\n            return True\n        else:\n            self.machine.reset()\n            return self.start(original_command=original_command)\n\n    def fail(self, exit_status=None):\n        if self.retries_remaining:\n            self.retries_remaining = -1\n\n        return self._done(\"fail\", exit_status)\n\n    def _exit_unsuccessful(\n        self,\n        exit_status: int | None = None,\n        retry_original_command: bool = True,\n        # TODO: delete this feature or refactor to not have a mutable default value\n        non_retryable_exit_codes: list[int] | None = [],\n    ) -> bool | ActionCommand | None:\n        if non_retryable_exit_codes is None:\n            non_retryable_exit_codes = []\n\n        if self.is_done:\n            log.info(\n                f\"{self} got exit code {exit_status} but already in terminal \" f'state \"{self.state}\", not retrying',\n            )\n            return None\n        if self.last_attempt is not None:\n            self.last_attempt.exit(exit_status)\n        if self.retries_remaining is not None:\n            if exit_status in non_retryable_exit_codes:\n                self.retries_remaining = 0\n                log.info(f\"{self} skipping auto-retries, received non-retryable exit code ({exit_status}).\")\n            else:\n                if self.retries_remaining > 0:\n                    self.retries_remaining -= 1\n                    return self.restart(original_command=retry_original_command)\n                else:\n                    log.info(\n                        f\"Reached maximum number of retries: {len(self.attempts)}\",\n                    )\n        if exit_status is None or exit_status in non_retryable_exit_codes:\n            return 
self._done(\"fail_unknown\", exit_status)\n        else:\n            return self._done(\"fail\", exit_status)\n\n    def triggers_to_emit(self) -> list[str]:\n        if not self.trigger_downstreams:\n            return []\n\n        if isinstance(self.trigger_downstreams, bool):\n            templates = [\"shortdate.{shortdate}\"]\n        elif isinstance(self.trigger_downstreams, dict):\n            templates = [f\"{k}.{v}\" for k, v in self.trigger_downstreams.items()]\n\n        return [self.render_template(trig) for trig in templates]\n\n    def emit_triggers(self):\n        triggers = self.triggers_to_emit()\n        if not triggers:\n            return\n\n        log.info(f\"{self} publishing triggers: [{', '.join(triggers)}]\")\n        job_id = \".\".join(self.job_run_id.split(\".\")[:-1])\n        for trigger in triggers:\n            EventBus.publish(f\"{job_id}.{self.action_name}.{trigger}\")\n\n    # TODO: cache if safe\n    @property\n    def rendered_triggers(self) -> list[str]:\n        return [self.render_template(trig) for trig in self.triggered_by or []]\n\n    # TODO: subscribe for events and maintain a list of remaining triggers\n    @property\n    def remaining_triggers(self):\n        return [trig for trig in self.rendered_triggers if not EventBus.has_event(trig)]\n\n    def success(self) -> bool | None:\n        transition_valid = self._done(\"success\")\n        if transition_valid:\n            if self.trigger_downstreams:\n                self.emit_triggers()\n\n        return transition_valid\n\n    def fail_unknown(self):\n        \"\"\"Failed with unknown reason.\"\"\"\n        log.warning(f\"{self} failed with no exit code\")\n        return self._done(\"fail_unknown\", None)\n\n    def cancel_delay(self):\n        if self.in_delay is not None:\n            self.in_delay.cancel()\n            self.in_delay = None\n            self.fail(exitcode.EXIT_STOP_KILL)\n            return True\n\n    @property\n    def state_data(self):\n        \"\"\"This data is used to serialize the state of this action run.\"\"\"\n\n        if isinstance(self.action_runner, NoActionRunnerFactory):\n            action_runner = None\n        else:\n            action_runner = dict(\n                status_path=self.action_runner.status_path,\n                exec_path=self.action_runner.exec_path,\n            )\n\n        return {\n            \"job_run_id\": self.job_run_id,\n            \"action_name\": self.action_name,\n            \"state\": self.state,\n            \"original_command\": self.original_command,\n            \"start_time\": self.start_time,\n            \"end_time\": self.end_time,\n            \"node_name\": self.node.get_name() if self.node else None,\n            \"exit_status\": self.exit_status,\n            \"attempts\": [a.state_data for a in self.attempts],\n            \"retries_remaining\": self.retries_remaining,\n            \"retries_delay\": self.retries_delay,\n            \"action_runner\": action_runner,\n            \"executor\": self.executor,\n            \"trigger_downstreams\": self.trigger_downstreams,\n            \"triggered_by\": self.triggered_by,\n            \"on_upstream_rerun\": self.on_upstream_rerun,\n            \"trigger_timeout_timestamp\": self.trigger_timeout_timestamp,\n        }\n\n    @staticmethod\n    def from_json(\n        state_data: str,\n    ) -> dict[str, Any]:  # TODO: would be nice to have a TypedDict here\n        \"\"\"Deserialize the ActionRun instance from a JSON Dictionary.\"\"\"\n        try:\n            
json_data = json.loads(state_data)\n            if json_data.get(\"action_runner\") is None:\n                action_runner_json = NoActionRunnerFactory.from_json()\n            else:\n                action_runner_json = SubprocessActionRunnerFactory.from_json(json_data[\"action_runner\"])\n            deserialized_data = {\n                \"job_run_id\": json_data[\"job_run_id\"],\n                \"action_name\": json_data[\"action_name\"],\n                \"state\": json_data[\"state\"],\n                \"original_command\": json_data[\"original_command\"],\n                \"start_time\": (\n                    datetime.datetime.fromisoformat(json_data[\"start_time\"]) if json_data[\"start_time\"] else None\n                ),\n                \"end_time\": datetime.datetime.fromisoformat(json_data[\"end_time\"]) if json_data[\"end_time\"] else None,\n                \"node_name\": json_data[\"node_name\"],\n                \"exit_status\": json_data[\"exit_status\"],\n                \"attempts\": [ActionRunAttempt.from_json(a) for a in json_data[\"attempts\"]],\n                \"retries_remaining\": json_data[\"retries_remaining\"],\n                \"retries_delay\": (\n                    datetime.timedelta(seconds=json_data[\"retries_delay\"]) if json_data[\"retries_delay\"] else None\n                ),\n                \"executor\": json_data[\"executor\"],\n                \"trigger_downstreams\": json_data[\"trigger_downstreams\"],\n                \"triggered_by\": json_data[\"triggered_by\"],\n                \"on_upstream_rerun\": json_data[\"on_upstream_rerun\"],\n                \"trigger_timeout_timestamp\": json_data[\"trigger_timeout_timestamp\"],\n                \"action_runner\": action_runner_json,\n            }\n        except Exception:\n            log.exception(\"Error deserializing ActionRun from JSON\")\n            raise\n        return deserialized_data\n\n    @staticmethod\n    def to_json(state_data: dict) -> str:\n        \"\"\"Serialize the ActionRun instance to a JSON string.\"\"\"\n\n        action_runner = state_data.get(\"action_runner\")\n        if action_runner is None:\n            action_runner_json = NoActionRunnerFactory.to_json()\n        else:\n            action_runner_json = SubprocessActionRunnerFactory.to_json(action_runner)\n\n        try:\n            return json.dumps(\n                {\n                    \"job_run_id\": state_data[\"job_run_id\"],\n                    \"action_name\": state_data[\"action_name\"],\n                    \"state\": state_data[\"state\"],\n                    \"original_command\": state_data.get(\"original_command\"),\n                    \"start_time\": state_data[\"start_time\"].isoformat() if state_data[\"start_time\"] else None,\n                    \"end_time\": state_data[\"end_time\"].isoformat() if state_data[\"end_time\"] else None,\n                    \"node_name\": state_data[\"node_name\"],\n                    \"exit_status\": state_data[\"exit_status\"],\n                    \"attempts\": [ActionRunAttempt.to_json(attempt) for attempt in state_data.get(\"attempts\", [])],\n                    \"retries_remaining\": state_data[\"retries_remaining\"],\n                    \"retries_delay\": (\n                        state_data[\"retries_delay\"].total_seconds() if state_data[\"retries_delay\"] is not None else None\n                    ),\n                    \"action_runner\": action_runner_json,\n                    \"executor\": state_data[\"executor\"],\n                    
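# trigger_downstreams is either a bool or a dict of name/value templates; both forms are JSON-serializable as-is\n                    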
\"trigger_downstreams\": state_data[\"trigger_downstreams\"],\n                    \"triggered_by\": state_data[\"triggered_by\"],\n                    \"on_upstream_rerun\": state_data[\"on_upstream_rerun\"],\n                    \"trigger_timeout_timestamp\": state_data[\"trigger_timeout_timestamp\"],\n                }\n            )\n        except KeyError:\n            log.exception(\"Missing key in state_data:\")\n            raise\n        except Exception:\n            log.exception(\"Error serializing ActionRun to JSON:\")\n            raise\n\n    def render_template(self, template):\n        \"\"\"Render our configured command using the command context.\"\"\"\n        return StringFormatter(self.context).format(template)\n\n    def render_command(self, command):\n        \"\"\"Render our configured command using the command context.\"\"\"\n        try:\n            return self.render_template(command)\n        except Exception as e:\n            log.error(f\"{self} failed rendering command: {e}\")\n            # Return a command string that will always fail\n            return self.FAILED_RENDER\n\n    def is_valid_command(self, command):\n        return command != self.FAILED_RENDER\n\n    @property\n    def is_done(self):\n        return self.state in self.END_STATES\n\n    @property\n    def is_complete(self):\n        return self.is_succeeded or self.is_skipped\n\n    @property\n    def is_broken(self):\n        return self.is_failed or self.is_cancelled or self.is_unknown\n\n    @property\n    def is_active(self):\n        return self.is_starting or self.is_running\n\n    def cleanup(self):\n        self.clear_observers()\n        if self.triggered_by:\n            EventBus.clear_subscriptions(self.__hash__())\n        self.clear_trigger_timeout()\n        self.cancel()\n\n    def clear_trigger_timeout(self):\n        if self.trigger_timeout_call:\n            self.trigger_timeout_call.cancel()\n            self.trigger_timeout_call = None\n\n    def setup_subscriptions(self):\n        remaining_triggers = self.remaining_triggers\n        if not remaining_triggers:\n            return\n\n        if self.trigger_timeout_timestamp:\n            now = timeutils.current_time().timestamp()\n            delay = max(self.trigger_timeout_timestamp - now, 1)\n            self.trigger_timeout_call = reactor.callLater(\n                delay,\n                self.trigger_timeout_reached,\n            )\n        else:\n            log.error(f\"{self} has no trigger_timeout_timestamp\")\n\n        for trigger in remaining_triggers:\n            EventBus.subscribe(trigger, self.__hash__(), self.trigger_notify)\n\n    def trigger_timeout_reached(self):\n        if self.remaining_triggers:\n            self.trigger_timeout_call = None\n            log.warning(\n                f\"{self} reached timeout waiting for: {self.remaining_triggers}\",\n            )\n            self.fail(exitcode.EXIT_TRIGGER_TIMEOUT)\n        else:\n            self.notify(ActionRun.NOTIFY_TRIGGER_READY)\n\n    def trigger_notify(self, *_):\n        if not self.remaining_triggers:\n            self.clear_trigger_timeout()\n            self.notify(ActionRun.NOTIFY_TRIGGER_READY)\n\n    @property\n    def is_blocked_on_trigger(self):\n        return not self.is_done and bool(self.remaining_triggers)\n\n    def clear_end_state(self):\n        self.exit_status = None\n        self.end_time = None\n        last_attempt = self.last_attempt\n        if last_attempt:\n            last_attempt.exit_status = None\n      
      last_attempt.end_time = None\n\n    def __getattr__(self, name: str) -> Callable[[], bool | None] | bool:\n        \"\"\"Support convenience properties for checking if this ActionRun is in\n        a specific state (Ex: self.is_running would check if self.state is\n        STATE_RUNNING) or for transitioning to a new state (ex: ready).\n        \"\"\"\n        if name in self.machine.transition_names:\n            return lambda: self.transition_and_notify(name)\n\n        if name.startswith(\"is_\"):\n            state_name = name.replace(\"is_\", \"\")\n            if state_name not in self.machine.states:\n                raise AttributeError(f\"{name} is not a state\")\n            return self.state == state_name\n        else:\n            raise AttributeError(name)\n\n    def __str__(self):\n        return f\"ActionRun: {self.id}\"\n\n    def transition_and_notify(self, target: str) -> bool | None:\n        if self.machine.transition(target):\n            self.notify(self.state)\n            return True\n        return None\n\n\nclass SSHActionRun(ActionRun, Observer):\n    \"\"\"An ActionRun that executes the command on a node through SSH.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.recover_tries = 0\n\n    def submit_command(self, attempt):\n        action_command = self.build_action_command(attempt)\n        try:\n            self.node.submit_command(action_command)\n        except node.Error as e:\n            log.warning(\"Failed to start %s: %r\", self.id, e)\n            self._exit_unsuccessful(exitcode.EXIT_NODE_ERROR)\n            return\n        return True\n\n    def stop(self):\n        if self.retries_remaining is not None:\n            self.retries_remaining = -1\n\n        if self.cancel_delay():\n            return\n\n        stop_command = self.action_runner.build_stop_action_command(\n            self.id,\n            \"terminate\",\n        )\n        self.node.submit_command(stop_command)\n\n    def kill(self, final=True):\n        if self.retries_remaining is not None and final:\n            self.retries_remaining = -1\n\n        if self.cancel_delay():\n            return\n\n        kill_command = self.action_runner.build_stop_action_command(\n            self.id,\n            \"kill\",\n        )\n        self.node.submit_command(kill_command)\n\n    def build_action_command(self, attempt):\n        \"\"\"Create a new ActionCommand instance to send to the node.\"\"\"\n        serializer = filehandler.OutputStreamSerializer(self.output_path)\n        self.action_command = self.action_runner.create(\n            id=self.id,\n            command=attempt.rendered_command,\n            serializer=serializer,\n        )\n        self.watch(self.action_command)\n        return self.action_command\n\n    def handle_unknown(self):\n        if isinstance(self.action_runner, NoActionRunnerFactory):\n            log.info(\n                f\"Unable to recover action_run {self.id}: \" \"action_run has no action_runner\",\n            )\n            return self.fail_unknown()\n\n        if self.recover_tries >= MAX_RECOVER_TRIES:\n            log.info(f\"Reached maximum tries {MAX_RECOVER_TRIES} for recovering {self.id}\")\n            return self.fail_unknown()\n\n        desired_delay = INITIAL_RECOVER_DELAY * (3**self.recover_tries)\n        self.recover_tries += 1\n        log.info(f\"Starting try #{self.recover_tries} to recover {self.id}, waiting {desired_delay}\")\n        return 
self.do_recover(delay=desired_delay)\n\n    def recover(self) -> DelayedCall | Literal[True] | None:  # type: ignore[override]  # this ActionRun subclass is not long for this world (hopefully)\n        log.info(f\"Creating recovery run for actionrun {self.id}\")\n        if isinstance(self.action_runner, NoActionRunnerFactory):\n            log.info(\n                f\"Unable to recover action_run {self.id}: \" \"action_run has no action_runner\",\n            )\n            return None\n\n        if not self.machine.check(\"running\"):\n            log.error(\n                f\"Unable to transition action run {self.id} \"\n                f\"from {self.machine.state} to running. \"\n                f\"Only UNKNOWN actions can be recovered. \",\n            )\n            return None\n\n        return self.do_recover(delay=0)\n\n    def do_recover(self, delay: float) -> DelayedCall | Literal[True] | None:\n        recovery_command = f\"{self.action_runner.exec_path}/recover_batch.py {self.action_runner.status_path}/{self.id}/status\"  # type: ignore[union-attr]  # hopefully we can remove the Union with TRON-2304\n        command_config = action.ActionCommandConfig(command=recovery_command)\n        rendered_command = self.render_command(recovery_command)\n        attempt = ActionRunAttempt(\n            command_config=command_config,\n            rendered_command=rendered_command,\n        )\n\n        # Put the \"recovery\" output at the same directory level as the original action_run's output\n        self.output_path.parts = []\n\n        # Might not need a separate action run\n        # Using for the separate name\n        recovery_run = SSHActionRun(\n            job_run_id=self.job_run_id,\n            name=f\"{self.name}-recovery\",\n            node=self.node,\n            command_config=command_config,\n            output_path=self.output_path,\n        )\n        recovery_action_command = recovery_run.build_action_command(attempt)\n        recovery_action_command.write_stdout(\n            f\"Recovering action run {self.id}\",\n        )\n        # Put action command in \"running\" state so if it fails to connect\n        # and exits with no exit code, the real action run will not retry.\n        recovery_action_command.started()\n\n        # this line is where the magic happens.\n        # the action run watches another actioncommand,\n        # and updates its internal state according to its result.\n        self.watch(recovery_action_command)\n\n        self.clear_end_state()\n        self.machine.transition(\"running\")\n\n        # Still want the action to appear running while we're waiting to submit the recovery\n        # So we do the delay at the end, after the transition to 'running' above\n        if not delay:\n            return self.submit_recovery_command(recovery_run, recovery_action_command)\n        else:\n            return cast(\"EPollReactor\", reactor).callLater(\n                delay,\n                self.submit_recovery_command,\n                recovery_run,\n                recovery_action_command,\n            )\n\n    def submit_recovery_command(\n        self, recovery_run: \"SSHActionRun\", recovery_action_command: ActionCommand\n    ) -> Literal[True] | None:\n        log.info(\n            f\"Submitting recovery job with command {recovery_action_command.command} \" f\"to node {recovery_run.node}\",\n        )\n        try:\n            deferred = recovery_run.node.submit_command(recovery_action_command)\n            deferred.addCallback(\n       
         lambda x: log.info(f\"Completed recovery run {recovery_run.id}\"),\n            )\n            return True\n        except node.Error as e:\n            log.warning(f\"Failed to submit recovery for {self.id}: {e!r}\")\n\n        return None\n\n    def handle_action_command_state_change(self, action_command, event, event_data=None):\n        \"\"\"Observe ActionCommand state changes.\"\"\"\n        log.debug(\n            f\"{self} action_command state change: {action_command.state}\",\n        )\n\n        if event == ActionCommand.RUNNING:\n            return self.transition_and_notify(\"started\")\n\n        if event == ActionCommand.FAILSTART:\n            return self._exit_unsuccessful(exitcode.EXIT_NODE_ERROR)\n\n        if event == ActionCommand.EXITING:\n            if action_command.exit_status is None:\n                return self.handle_unknown()\n\n            if not action_command.exit_status:\n                return self.success()\n\n            return self._exit_unsuccessful(action_command.exit_status)\n\n    handler = handle_action_command_state_change\n\n\nclass MesosActionRun(ActionRun, Observer):\n    \"\"\"An ActionRun that executes the command on a Mesos cluster.\"\"\"\n\n    def _create_mesos_task(\n        self,\n        mesos_cluster: MesosCluster,\n        serializer: filehandler.OutputStreamSerializer,\n        attempt: ActionRunAttempt,\n        task_id: str | None = None,\n    ) -> MesosTask | None:\n        command_config = attempt.command_config\n        return mesos_cluster.create_task(\n            action_run_id=self.id,\n            command=attempt.rendered_command,\n            cpus=command_config.cpus,\n            mem=command_config.mem,\n            disk=1024.0 if command_config.disk is None else command_config.disk,\n            constraints=[[c.attribute, c.operator, c.value] for c in command_config.constraints],\n            docker_image=command_config.docker_image,\n            docker_parameters=[e._asdict() for e in command_config.docker_parameters],\n            env=build_environment(original_env=command_config.env, run_id=self.id),\n            extra_volumes=[e._asdict() for e in command_config.extra_volumes],\n            serializer=serializer,\n            task_id=task_id,\n        )\n\n    def submit_command(self, attempt):\n        serializer = filehandler.OutputStreamSerializer(self.output_path)\n        mesos_cluster = MesosClusterRepository.get_cluster()\n        task = self._create_mesos_task(mesos_cluster, serializer, attempt)\n        if not task:  # Mesos is disabled\n            self.fail(exitcode.EXIT_MESOS_DISABLED)\n            return None\n\n        attempt.mesos_task_id = task.get_mesos_id()\n\n        # Watch before submitting, in case submit causes a transition\n        self.watch(task)\n        mesos_cluster.submit(task)\n        return task\n\n    def recover(self) -> MesosTask | None:\n        if not self.machine.check(\"running\"):\n            log.error(\n                f\"{self} unable to transition from {self.machine.state}\" \"to running for recovery\",\n            )\n            return None\n\n        if not self.attempts or self.attempts[-1].mesos_task_id is None:\n            log.error(f\"{self} no task ID, cannot recover\")\n            self.fail_unknown()\n            return None\n\n        last_attempt = self.attempts[-1]\n\n        log.info(f\"{self} recovering Mesos run\")\n\n        serializer = filehandler.OutputStreamSerializer(self.output_path)\n        mesos_cluster = 
MesosClusterRepository.get_cluster()\n        task = self._create_mesos_task(\n            mesos_cluster,\n            serializer,\n            last_attempt,\n            last_attempt.mesos_task_id,\n        )\n        if not task:\n            log.warning(\n                f\"{self} cannot recover, Mesos is disabled or invalid task ID {last_attempt.mesos_task_id!r}\",\n            )\n            self.fail_unknown()\n            return None\n\n        self.watch(task)\n        mesos_cluster.recover(task)\n\n        # Reset status\n        self.clear_end_state()\n        self.transition_and_notify(\"running\")\n\n        return task\n\n    def stop(self):\n        if self.retries_remaining is not None:\n            self.retries_remaining = -1\n\n        if self.cancel_delay():\n            return\n\n        return self._kill_mesos_task()\n\n    def kill(self, final=True):\n        if self.retries_remaining is not None and final:\n            self.retries_remaining = -1\n\n        if self.cancel_delay():\n            return\n\n        return self._kill_mesos_task()\n\n    def _kill_mesos_task(self):\n        msgs = []\n        if not self.is_active:\n            msgs.append(\n                f\"Action is {self.state}, not running. Continuing anyway.\",\n            )\n\n        mesos_cluster = MesosClusterRepository.get_cluster()\n        last_attempt = self.last_attempt\n        if last_attempt is None or last_attempt.mesos_task_id is None:\n            msgs.append(\"Error: Can't find task id for the action.\")\n        else:\n            msgs.append(f\"Sending kill for {last_attempt.mesos_task_id}...\")\n            succeeded = mesos_cluster.kill(last_attempt.mesos_task_id)\n            if succeeded:\n                msgs.append(\n                    \"Sent! It can take up to docker_stop_timeout (current setting is 2 mins) to stop.\",\n                )\n            else:\n                msgs.append(\n                    \"Error while sending kill request. 
Please try again.\",\n                )\n\n        return \"\\n\".join(msgs)\n\n    def handle_action_command_state_change(self, action_command, event, event_data=None):\n        \"\"\"Observe ActionCommand state changes.\"\"\"\n        log.debug(\n            f\"{self} action_command state change: {action_command.state}\",\n        )\n\n        if event == ActionCommand.RUNNING:\n            return self.transition_and_notify(\"started\")\n\n        if event == ActionCommand.FAILSTART:\n            return self._exit_unsuccessful(action_command.exit_status)\n\n        if event == ActionCommand.EXITING:\n            if action_command.exit_status is None:\n                # This is different from SSHActionRun\n                # Allows retries to happen, if configured\n                return self._exit_unsuccessful(None)\n\n            if not action_command.exit_status:\n                return self.success()\n\n            return self._exit_unsuccessful(action_command.exit_status)\n\n    handler = handle_action_command_state_change\n\n\nclass KubernetesActionRun(ActionRun, Observer):\n    \"\"\"An ActionRun that executes the command on a Kubernetes cluster.\"\"\"\n\n    def submit_command(self, attempt: ActionRunAttempt) -> KubernetesTask | None:\n        \"\"\"\n        Attempt to run a given ActionRunAttempt on the configured Kubernetes cluster.\n\n        If k8s usage is not toggled off, a KubernetesTask representing what was scheduled\n        onto the cluster will be returned - otherwise, None.\n        \"\"\"\n        k8s_cluster = KubernetesClusterRepository.get_cluster()\n        if not k8s_cluster:\n            self.fail(exitcode.EXIT_KUBERNETES_NOT_CONFIGURED)\n            return None\n\n        if attempt.rendered_command is None:\n            self.fail(exitcode.EXIT_INVALID_COMMAND)\n            return None\n\n        if attempt.command_config.docker_image is None:\n            self.fail(exitcode.EXIT_KUBERNETES_TASK_INVALID)\n            return None\n        try:\n            task = k8s_cluster.create_task(\n                action_run_id=self.id,\n                command=attempt.rendered_command,\n                cpus=attempt.command_config.cpus,\n                mem=attempt.command_config.mem,\n                disk=attempt.command_config.disk,\n                docker_image=attempt.command_config.docker_image,\n                env=build_environment(original_env=attempt.command_config.env, run_id=self.id),\n                secret_env=attempt.command_config.secret_env,\n                secret_volumes=attempt.command_config.secret_volumes,\n                projected_sa_volumes=attempt.command_config.projected_sa_volumes,\n                field_selector_env=attempt.command_config.field_selector_env,\n                serializer=filehandler.OutputStreamSerializer(self.output_path),\n                volumes=attempt.command_config.extra_volumes,\n                cap_add=attempt.command_config.cap_add,\n                cap_drop=attempt.command_config.cap_drop,\n                node_selectors=attempt.command_config.node_selectors,\n                node_affinities=attempt.command_config.node_affinities,\n                topology_spread_constraints=attempt.command_config.topology_spread_constraints,\n                pod_labels=build_labels(\n                    run_id=self.id,\n                    original_labels=attempt.command_config.labels,\n                    attempt_number=len(self.attempts) - 1,\n                ),\n                pod_annotations=attempt.command_config.annotations,\n  
              service_account_name=attempt.command_config.service_account_name,\n                ports=attempt.command_config.ports,\n            )\n        except Exception:\n            log.exception(f\"Unable to create task for ActionRun {self.id}\")\n            self.fail(exitcode.EXIT_KUBERNETES_TASK_INVALID)\n            return None\n\n        if not task:\n            # generally, if we didn't get a task back that means that k8s usage is disabled\n            self.fail(exitcode.EXIT_KUBERNETES_DISABLED)\n            return None\n\n        attempt.kubernetes_task_id = task.get_kubernetes_id()\n\n        # Watch before submitting, in case submit causes a transition\n        self.watch(task)\n\n        try:\n            k8s_cluster.submit(task)\n        except Exception:\n            log.exception(f\"Unable to submit task for ActionRun {self.id}\")\n            self.fail(exitcode.EXIT_KUBERNETES_TASK_INVALID)\n            return None\n\n        return task\n\n    def recover(self) -> KubernetesTask | None:\n        \"\"\"\n        Called on Tron restart for each previously running ActionRun to attempt to restart Tron's tracking\n        of this run. See tron.core.recovery.\n\n        If we're able to successfully recover, a KubernetesTask representing what is currently being run\n        will be returned - otherwise, None.\n        \"\"\"\n        k8s_cluster = KubernetesClusterRepository.get_cluster()\n        if not k8s_cluster:\n            self.fail(exitcode.EXIT_KUBERNETES_NOT_CONFIGURED)\n            return None\n\n        # We cannot recover if we can't transition to running\n        if not self.machine.check(\"running\"):\n            log.error(f\"{self} unable to transition from {self.machine.state} to running for recovery\")\n            return None\n\n        if not self.attempts or self.attempts[-1].kubernetes_task_id is None:\n            log.error(f\"{self} no task ID, cannot recover\")\n            self.fail_unknown()\n            return None\n        last_attempt = self.attempts[-1]\n\n        if last_attempt.rendered_command is None:\n            log.error(f\"{self} rendered_command is None, cannot recover\")\n            self.fail(exitcode.EXIT_INVALID_COMMAND)\n            return None\n\n        if last_attempt.command_config.docker_image is None:\n            log.error(f\"{self} docker_image is None, cannot recover\")\n            self.fail(exitcode.EXIT_KUBERNETES_TASK_INVALID)\n            return None\n\n        log.info(f\"{self} recovering Kubernetes run\")\n        # the try/except block here is necessary because if this fails, jobs will get reset to 0 and we don't want that to happen\n        try:\n            task = k8s_cluster.create_task(\n                action_run_id=self.id,\n                command=last_attempt.rendered_command,\n                cpus=last_attempt.command_config.cpus,\n                mem=last_attempt.command_config.mem,\n                disk=last_attempt.command_config.disk,\n                docker_image=last_attempt.command_config.docker_image,\n                env=build_environment(original_env=last_attempt.command_config.env, run_id=self.id),\n                secret_env=last_attempt.command_config.secret_env,\n                # the field_selector_env = {'PAASTA_POD_IP': ['status.podIP']} is in a different format from\n                # the field_selector_env in the submit_command function.\n                field_selector_env=last_attempt.command_config.field_selector_env,\n                
serializer=filehandler.OutputStreamSerializer(self.output_path),\n                secret_volumes=last_attempt.command_config.secret_volumes,\n                projected_sa_volumes=last_attempt.command_config.projected_sa_volumes,\n                volumes=last_attempt.command_config.extra_volumes,\n                cap_add=last_attempt.command_config.cap_add,\n                cap_drop=last_attempt.command_config.cap_drop,\n                task_id=last_attempt.kubernetes_task_id,\n                node_selectors=last_attempt.command_config.node_selectors,\n                node_affinities=last_attempt.command_config.node_affinities,\n                topology_spread_constraints=last_attempt.command_config.topology_spread_constraints,\n                pod_labels=build_labels(\n                    run_id=self.id,\n                    original_labels=last_attempt.command_config.labels,\n                    attempt_number=len(self.attempts) - 1,\n                ),\n                pod_annotations=last_attempt.command_config.annotations,\n                service_account_name=last_attempt.command_config.service_account_name,\n                ports=last_attempt.command_config.ports,\n            )\n        except Exception:\n            log.exception(f\"Unable to create task for ActionRun {self.id}\")\n            raise\n        if not task:\n            log.warning(\n                f\"{self} cannot recover, Kubernetes is disabled or \"\n                f\"invalid task ID {last_attempt.kubernetes_task_id!r}\",\n            )\n            self.fail_unknown()\n            return None\n\n        self.watch(task)\n        k8s_cluster.recover(task)\n\n        # Reset status\n        self.clear_end_state()\n        self.transition_and_notify(\"running\")\n\n        return task\n\n    def stop(self) -> str | None:\n        \"\"\"\n        Compatibility alias for KubernetesActionRun::kill().\n\n        Kills the Kubernetes Pod for this ActionRun and consumes a retry.\n        May return an error/diagnostic message suitable for displaying to users.\n        \"\"\"\n        return self.kill()\n\n    def kill(self, final: bool = True) -> str | None:\n        \"\"\"\n        Kills the Kubernetes Pod for this ActionRun and consumes a retry.\n\n        May return an error/diagnostic message suitable for displaying to users.\n        \"\"\"\n        if self.retries_remaining is not None and final:\n            self.retries_remaining = -1\n\n        # it's possible that a user wants to kill an action that has delayed its start\n        # (e.g., they're killing a retry of a failed action that has a retry_delay set),\n        # so let's check if there's such a delay present and cancel that since in this case\n        # there's nothing actually running in k8s yet\n        if self.cancel_delay():\n            return None\n\n        msgs = []\n        if not self.is_active:\n            msgs.append(f\"Action is {self.state}, not running. 
Continuing anyway.\")\n\n        k8s_cluster = KubernetesClusterRepository.get_cluster()\n        if not k8s_cluster:\n            return f\"Unable to kill action {self.action_name} - could not get Kubernetes cluster.\"\n        last_attempt = self.last_attempt\n        if last_attempt is None or last_attempt.kubernetes_task_id is None:\n            msgs.append(\"Error: Can't find task id for the action.\")\n        else:\n            msgs.append(f\"Sending kill for {last_attempt.kubernetes_task_id}...\")\n            succeeded = k8s_cluster.kill(last_attempt.kubernetes_task_id)\n            if succeeded:\n                msgs.append(\"Sent! Note: the Docker container may not stop immediately.\")\n            else:\n                msgs.append(\"Error while sending kill request. Please try again.\")\n\n        return \"\\n\".join(msgs)\n\n    def _exit_unsuccessful(\n        self,\n        exit_status: int | None = None,\n        retry_original_command: bool = True,\n        # TODO: remove this feature or refactor so that we don't have this useless parameter on the subclass\n        non_retryable_exit_codes: list[int] | None = None,\n    ) -> bool | ActionCommand | None:\n\n        k8s_cluster = KubernetesClusterRepository.get_cluster()\n        real_non_retryable_exit_codes = [] if not k8s_cluster else k8s_cluster.non_retryable_exit_codes\n\n        return super()._exit_unsuccessful(\n            exit_status=exit_status,\n            retry_original_command=retry_original_command,\n            non_retryable_exit_codes=real_non_retryable_exit_codes,\n        )\n\n    def handle_action_command_state_change(\n        self, action_command: ActionCommand, event: str, event_data: Any | None = None\n    ) -> bool | ActionCommand | None:\n        \"\"\"\n        Observe ActionCommand state changes and transition the ActionCommand state machine to a new state.\n        \"\"\"\n        log.debug(f\"{self} action_command state change: {action_command.state} for event: {event}.\")\n\n        if event == ActionCommand.RUNNING:\n            return self.transition_and_notify(\"started\")\n\n        if event == ActionCommand.FAILSTART:\n            return self._exit_unsuccessful(action_command.exit_status)\n\n        if event == ActionCommand.EXITING:\n            if action_command.exit_status is None:\n                # This is different from SSHActionRun - allows retries to happen, if configured\n                return self._exit_unsuccessful(None)\n\n            if not action_command.exit_status:\n                return self.success()\n\n            return self._exit_unsuccessful(action_command.exit_status)\n        return None\n\n    handler = handle_action_command_state_change\n\n\ndef min_filter(seq):\n    seq = list(filter(None, seq))\n    return min(seq) if any(seq) else None\n\n\ndef eager_all(seq):\n    return all(list(seq))\n\n\nclass ActionRunCollection:\n    \"\"\"A collection of ActionRuns used by a JobRun.\"\"\"\n\n    def __init__(self, action_graph: ActionGraph, run_map: dict[str, ActionRun]):\n        self.action_graph = action_graph\n        self.run_map: dict[str, ActionRun] = run_map\n        # Setup proxies\n        self.proxy_action_runs_with_cleanup = proxy.CollectionProxy(\n            self.get_action_runs_with_cleanup,\n            [\n                proxy.attr_proxy(\"is_running\", any),\n                proxy.attr_proxy(\"is_starting\", any),\n                proxy.attr_proxy(\"is_scheduled\", any),\n                proxy.attr_proxy(\"is_cancelled\", any),\n                
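# roughly: attr_proxy(name, fn) reduces the named attribute across all runs with fn, while func_proxy(name, fn) calls the named method on each run and reduces the results\n                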
proxy.attr_proxy(\"is_active\", any),\n                proxy.attr_proxy(\"is_waiting\", any),\n                proxy.attr_proxy(\"is_queued\", all),\n                proxy.attr_proxy(\"is_complete\", all),\n                proxy.func_proxy(\"queue\", eager_all),\n                proxy.func_proxy(\"cancel\", eager_all),\n                proxy.func_proxy(\"success\", eager_all),\n                proxy.func_proxy(\"fail\", eager_all),\n                proxy.func_proxy(\"ready\", eager_all),\n                proxy.func_proxy(\"cleanup\", eager_all),\n                proxy.func_proxy(\"stop\", eager_all),\n                proxy.attr_proxy(\"start_time\", min_filter),\n                proxy.attr_proxy(\"state_data\", eager_all),\n            ],\n        )\n\n    def action_runs_for_actions(self, actions):\n        return (self.run_map[a.name] for a in actions if a.name in self.run_map)\n\n    def get_action_runs_with_cleanup(self):\n        return self.run_map.values()\n\n    action_runs_with_cleanup = property(get_action_runs_with_cleanup)\n\n    def get_action_runs(self):\n        return (run for run in self.run_map.values() if not run.is_cleanup)\n\n    action_runs = property(get_action_runs)\n\n    def update_action_config(self, action_graph):\n        # If there are new command configs that match the action name, update them\n        # Do not update the actual action_graph\n        updated = False\n        for action_run in self.get_action_runs_with_cleanup():\n            new_action = action_graph.action_map.get(action_run.action_name)\n            if new_action and new_action.command_config != action_run.command_config:\n                action_run.command_config = new_action.command_config\n                updated = True\n        return updated\n\n    @property\n    def cleanup_action_run(self) -> ActionRun | None:\n        return self.run_map.get(action.CLEANUP_ACTION_NAME)\n\n    @property\n    def state_data(self):\n        return [run.state_data for run in self.action_runs]\n\n    @property\n    def cleanup_action_state_data(self):\n        if self.cleanup_action_run:\n            return self.cleanup_action_run.state_data\n\n    def get_startable_action_runs(self):\n        \"\"\"Returns any actions that are scheduled or queued that can be run.\"\"\"\n\n        return [r for r in self.action_runs if r.machine.check(\"start\") and not self._is_run_blocked(r)]\n\n    @property\n    def has_startable_action_runs(self):\n        return any(self.get_startable_action_runs())\n\n    def _is_run_blocked(self, action_run, in_job_only=False):\n        \"\"\"Returns True if the ActionRun is waiting on a required run to\n        finish before it can run.\n\n        If in_job_only is True, only considers required actions in this job,\n        not triggers.\n        \"\"\"\n        if action_run.is_done or action_run.is_active:\n            return False\n\n        required_actions = self.action_graph.get_dependencies(\n            action_run.action_name,\n        )\n\n        if required_actions:\n            required_runs = self.action_runs_for_actions(required_actions)\n            if any(not run.is_complete for run in required_runs):\n                return True\n\n        if action_run.is_blocked_on_trigger and not in_job_only:\n            return True\n\n        return False\n\n    @property\n    def is_blocked_on_trigger(self):\n        return any(r.is_blocked_on_trigger for r in self.action_runs)\n\n    @property\n    def is_done(self):\n        \"\"\"Returns True when there are no running 
ActionRuns and all\n        non-blocked ActionRuns are done.\n        \"\"\"\n        if self.is_running:\n            return False\n\n        def done_or_blocked(action_run):\n            # Can't make progress if blocked by actions in the job, and other actions are done.\n            # On the other hand, not necessarily done if still waiting for cross-job dependencies.\n            return action_run.is_done or self._is_run_blocked(action_run, in_job_only=True)\n\n        return all(done_or_blocked(run) for run in self.action_runs)\n\n    @property\n    def is_failed(self):\n        \"\"\"Return True if there are failed actions and all ActionRuns are\n        done or blocked.\n        \"\"\"\n        return self.is_done and any(run.is_failed for run in self.action_runs)\n\n    @property\n    def is_complete_without_cleanup(self):\n        return all(run.is_complete for run in self.action_runs)\n\n    @property\n    def names(self):\n        return self.run_map.keys()\n\n    @property\n    def end_time(self):\n        if not self.is_done:\n            return None\n        end_times = list(run.end_time for run in self.get_action_runs_with_cleanup() if run.end_time)\n        return max(end_times) if any(end_times) else None\n\n    def __str__(self):\n        def blocked_state(action_run):\n            return \":blocked\" if self._is_run_blocked(action_run) else \"\"\n\n        run_states = \", \".join(f\"{a.action_name}({a.state}{blocked_state(a)})\" for a in self.run_map.values())\n        return f\"{self.__class__.__name__}[{run_states}]\"\n\n    def __getattr__(self, name):\n        return self.proxy_action_runs_with_cleanup.perform(name)\n\n    def __getitem__(self, name):\n        return self.run_map[name]\n\n    def __contains__(self, name):\n        return name in self.run_map\n\n    def __iter__(self):\n        return iter(self.run_map.values())\n\n    def get(self, name):\n        return self.run_map.get(name)\n"
  },
  {
    "path": "tron/core/job.py",
    "content": "import datetime\nimport json\nimport logging\nfrom typing import Any\nfrom typing import TypeVar\n\nfrom tron import command_context\nfrom tron import node\nfrom tron.actioncommand import SubprocessActionRunnerFactory\nfrom tron.core import jobrun\nfrom tron.core.actiongraph import ActionGraph\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.jobrun import JobRunCollection\nfrom tron.node import NodePool\nfrom tron.scheduler import GeneralScheduler\nfrom tron.serialize import filehandler\nfrom tron.utils import maybe_decode\nfrom tron.utils.observer import Observable\nfrom tron.utils.observer import Observer\nfrom tron.utils.persistable import Persistable\n\n\nclass Error(Exception):\n    pass\n\n\nclass ConfigBuildMismatchError(Error):\n    pass\n\n\nclass InvalidStartStateError(Error):\n    pass\n\n\nlog = logging.getLogger(__name__)\n\nT = TypeVar(\"T\", bound=\"Job\")\n\n\nclass Job(Observable, Observer, Persistable):\n    \"\"\"A configurable data object.\n\n    Job uses JobRunCollection to manage its runs, and ActionGraph to manage its\n    actions and their dependency graph.\n    \"\"\"\n\n    STATUS_DISABLED = \"disabled\"\n    STATUS_ENABLED = \"enabled\"\n    STATUS_UNKNOWN = \"unknown\"\n    STATUS_RUNNING = \"running\"\n\n    NOTIFY_STATE_CHANGE = \"notify_state_change\"\n    NOTIFY_RUN_DONE = \"notify_run_done\"\n    NOTIFY_NEW_RUN = \"notify_new_run\"\n\n    context_class = command_context.JobContext\n\n    # These attributes determine equality between two Job objects\n    equality_attributes = [\n        \"name\",\n        \"queueing\",\n        \"scheduler\",\n        \"node_pool\",\n        \"all_nodes\",\n        \"action_graph\",\n        \"output_path\",\n        \"action_runner\",\n        \"max_runtime\",\n        \"allow_overlap\",\n        \"monitoring\",\n        \"time_zone\",\n        \"expected_runtime\",\n        \"run_limit\",\n    ]\n\n    def __init__(\n        self,\n        name: str,\n        scheduler: GeneralScheduler,\n        queueing: bool = True,\n        all_nodes: bool = False,\n        monitoring: dict[str, Any] | None = None,\n        node_pool: NodePool | None = None,\n        enabled: bool = True,\n        action_graph: ActionGraph | None = None,\n        run_collection: JobRunCollection | None = None,\n        parent_context: command_context.CommandContext | None = None,\n        output_path: filehandler.OutputPath | None = None,\n        allow_overlap: bool | None = None,\n        action_runner: SubprocessActionRunnerFactory | None = None,\n        max_runtime: datetime.timedelta | None = None,\n        time_zone: datetime.tzinfo | None = None,\n        expected_runtime: datetime.timedelta | None = None,\n        run_limit: int | None = None,\n    ):\n        super().__init__()\n        self.name = maybe_decode(\n            name\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        self.monitoring = monitoring\n        self.action_graph = action_graph\n        self.scheduler = scheduler\n        self.runs = run_collection\n        self.queueing = queueing\n        self.all_nodes = all_nodes\n        self.enabled = enabled  # current enabled setting\n        self.config_enabled = enabled  # enabled attribute from file\n        self.node_pool = node_pool\n        self.allow_overlap = allow_overlap\n        self.action_runner = action_runner\n        self.max_runtime = max_runtime\n        self.time_zone = time_zone\n        self.expected_runtime = expected_runtime\n        self.output_path = output_path or filehandler.OutputPath()\n        # if the name doesn't have a period, the \"namespace\" and the \"job-name\" will\n        # be the same, we don't have to worry about a crash here\n        self.output_path.append(name.split(\".\")[0])  # namespace\n        self.output_path.append(name.split(\".\")[-1])  # job-name\n        self.context = command_context.build_context(self, parent_context)\n        self.run_limit = run_limit\n        log.info(f\"{self} created\")\n\n    @staticmethod\n    def from_json(state_data: str) -> dict[str, Any]:  # TODO: make a TypedDict for this\n        \"\"\"deserialize the JSON string to python objects.\"\"\"\n        # We store the following fields for jobs in DynamoDB: enabled and list of run numbers\n        try:\n            json_data = json.loads(state_data)\n            deserialized_data = {\n                \"enabled\": json_data[\"enabled\"],\n                \"run_nums\": json_data[\"run_nums\"],\n            }\n            return deserialized_data\n        except Exception:\n            log.exception(\"Error deserializing Job from JSON:\")\n            raise\n\n    @staticmethod\n    def to_json(state_data: dict) -> str:\n        \"\"\"Serialize the Job instance to a JSON string.\"\"\"\n        try:\n            return json.dumps(state_data)\n        except Exception:\n            log.exception(\"Error serializing Job to JSON:\")\n            raise\n\n    @classmethod\n    def from_config(\n        cls,\n        job_config,\n        scheduler,\n        parent_context,\n        output_path,\n        action_runner,\n        action_graph,\n    ):\n        \"\"\"Factory method to create a new Job instance from configuration.\"\"\"\n        runs = jobrun.JobRunCollection.from_config(job_config)\n        node_repo = node.NodePoolRepository.get_instance()\n\n        return cls(\n            name=job_config.name,\n            monitoring=job_config.monitoring,\n            time_zone=job_config.time_zone,\n            queueing=job_config.queueing,\n            all_nodes=job_config.all_nodes,\n            node_pool=node_repo.get_by_name(job_config.node),\n            scheduler=scheduler,\n            enabled=job_config.enabled,\n            run_collection=runs,\n            action_graph=action_graph,\n            parent_context=parent_context,\n            output_path=output_path,\n            allow_overlap=job_config.allow_overlap,\n            action_runner=action_runner,\n            max_runtime=job_config.max_runtime,\n            expected_runtime=job_config.expected_runtime,\n            run_limit=job_config.run_limit,\n        )\n\n    def watch(self, observable, event=True):\n        # Overrides default method from Observer.\n        # Allows job's watchers to handle updates from job runs independently.\n        super().watch(observable, event)\n        if isinstance(observable, jobrun.JobRun):\n            
self.notify(self.NOTIFY_NEW_RUN, event_data=observable)\n\n    def update_from_job(self, job):\n        \"\"\"Update this Jobs configuration from a new config. This method\n        actually takes an already constructed job and copies out its\n        configuration data.\n        \"\"\"\n        for attr in self.equality_attributes:\n            setattr(self, attr, getattr(job, attr))\n\n        self.update_action_config()\n\n        # the run_limit is a property on the JobRunCollection, not on the\n        # Job itself so we need to handle that separately\n        self.runs.run_limit = job.run_limit\n        log.info(f\"{self} reconfigured\")\n\n    def update_action_config(self):\n        for job_run in self.runs:\n            job_run.update_action_config(self.action_graph)\n\n    @property\n    def status(self):\n        \"\"\"Current status.\"\"\"\n        if not self.enabled:\n            return self.STATUS_DISABLED\n        if self.runs.get_active():\n            return self.STATUS_RUNNING\n\n        if self.runs.get_run_by_state(ActionRun.SCHEDULED):\n            return self.STATUS_ENABLED\n\n        log.warning(f\"{self} in an unknown state: {self.runs}\")\n        return self.STATUS_UNKNOWN\n\n    def get_name(self):\n        return self.name\n\n    def get_monitoring(self):\n        return self.monitoring\n\n    def get_time_zone(self):\n        return self.time_zone\n\n    def get_runs(self):\n        return self.runs\n\n    @property\n    def state_data(self):\n        \"\"\"\n        This data is used to serialize the state of this job.\n        State of job runs is serialized separately.\n        \"\"\"\n        return {\n            \"run_nums\": self.runs.get_run_nums(),\n            \"enabled\": self.enabled,\n        }\n\n    def get_job_runs_from_state(self, state_data):\n        \"\"\"Apply a previous state to this Job.\"\"\"\n        self.enabled = state_data[\"enabled\"]\n        job_runs = jobrun.job_runs_from_state(\n            state_data[\"runs\"],\n            self.action_graph,\n            self.output_path.clone(),\n            self.context,\n            self.node_pool,\n        )\n        return job_runs\n\n    def build_new_runs(self, run_time, manual=False):\n        \"\"\"Uses its JobCollection to build new JobRuns. If all_nodes is set,\n        build a run for every node, otherwise just builds a single run on a\n        single node.\n        \"\"\"\n        pool = self.node_pool\n        nodes = pool.nodes if self.all_nodes else [pool.next()]\n        for n in nodes:\n            run = self.runs.build_new_run(self, run_time, n, manual=manual)\n            self.watch(run)\n            yield run\n\n    def handle_job_run_state_change(self, _job_run, event, event_data=None):\n        \"\"\"Handle state changes from JobRuns and propagate changes to any\n        observers.\n        \"\"\"\n        # Propagate state change for serialization\n        if event == jobrun.JobRun.NOTIFY_STATE_CHANGED:\n            self.notify(self.NOTIFY_STATE_CHANGE)\n            return\n\n        # Propagate DONE JobRun notifications to JobScheduler\n        if event == jobrun.JobRun.NOTIFY_DONE:\n            self.notify(self.NOTIFY_RUN_DONE)\n            return\n\n    handler = handle_job_run_state_change\n\n    def __eq__(self, other):\n        return all(getattr(other, attr, None) == getattr(self, attr, None) for attr in self.equality_attributes)\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __str__(self):\n        return \"Job:%s\" % self.name\n"
  },
  {
    "path": "tron/core/job_collection.py",
    "content": "import logging\n\nfrom tron.core.job import Job\nfrom tron.utils import collections\nfrom tron.utils import proxy\n\nlog = logging.getLogger(__name__)\n\n\nclass JobCollection:\n    \"\"\"A collection of jobs.\"\"\"\n\n    def __init__(self):\n        self.jobs = collections.MappingCollection(\"jobs\")\n        self.proxy = proxy.CollectionProxy(\n            self.jobs.values,\n            [\n                proxy.func_proxy(\"enable\", lambda seq: all(list(seq))),\n                proxy.func_proxy(\"disable\", lambda seq: all(list(seq))),\n                proxy.func_proxy(\"schedule\", lambda seq: all(list(seq))),\n                proxy.func_proxy(\"run_queue_schedule\", lambda seq: all(list(seq))),\n            ],\n        )\n\n    def update_from_config(self, job_configs, factory, reconfigure, namespace_to_reconfigure=None):\n        \"\"\"Apply a configuration to this collection and return a generator of\n        jobs which were added.\n        \"\"\"\n        self.jobs.filter_by_name(job_configs)\n\n        def map_to_job_and_schedule(job_schedulers):\n            for job_scheduler in job_schedulers:\n                if reconfigure:\n                    job_scheduler.schedule()\n                yield job_scheduler.get_job()\n\n        def reconfigure_filter(config):\n            if not reconfigure or not namespace_to_reconfigure:\n                return True\n            else:\n                return config.namespace == namespace_to_reconfigure\n\n        # NOTE: as this is a generator expression, we will only go through job configs\n        # and build a scheduler for them once something iterates over us (i.e, once\n        # `self.state_watcher.watch_all()` is called)\n        seq = (factory.build(config) for config in job_configs.values() if reconfigure_filter(config))\n        return map_to_job_and_schedule(filter(self.add, seq))\n\n    def add(self, job_scheduler):\n        return self.jobs.add(job_scheduler, self.update)\n\n    def move(self, old_name, new_name):\n        job_scheduler = self.get_by_name(old_name)\n\n        # check if job is running\n        if job_scheduler.get_job().status == Job.STATUS_RUNNING:\n            return f\"Moving {old_name} to {new_name} failed. Job is still running.\"\n\n        log.info(f\"Moving {old_name} to {new_name}\")\n        job_scheduler.update_name(new_name)\n        self.add(self.jobs.pop(old_name))\n\n        return f\"Moving {old_name} to {new_name} succeeded.\"\n\n    def update(self, new_job_scheduler):\n        log.info(f\"Updating {new_job_scheduler}\")\n        job_scheduler = self.get_by_name(new_job_scheduler.get_name())\n        job_scheduler.update_from_job_scheduler(new_job_scheduler)\n        job_scheduler.schedule_reconfigured()\n        return True\n\n    def restore_state(self, job_state_data, config_action_runner):\n        \"\"\"\n        Loops through the jobs and their runs in order to load their\n        state for each run. 
As we load the state, we will also schedule the next\n        runs for each job\n        \"\"\"\n        for name, state in job_state_data.items():\n            self.jobs[name].restore_state(state, config_action_runner)\n        log.info(f\"Loaded state for {len(job_state_data)} jobs\")\n\n    def get_by_name(self, name):\n        return self.jobs.get(name)\n\n    def get_names(self):\n        return self.jobs.keys()\n\n    def get_jobs(self):\n        return [sched.get_job() for sched in self]\n\n    def get_job_run_collections(self):\n        return [sched.get_job_runs() for sched in self]\n\n    def __iter__(self):\n        yield from self.jobs.values()\n\n    def __getattr__(self, name):\n        return self.proxy.perform(name)\n\n    def __contains__(self, name):\n        return name in self.jobs\n"
  },
  {
    "path": "tron/core/job_scheduler.py",
    "content": "import logging\n\nimport humanize\nfrom twisted.internet import reactor\n\nfrom tron.core import recovery\nfrom tron.core.job import Job\nfrom tron.core.jobrun import JobRun\nfrom tron.scheduler import scheduler_from_config\nfrom tron.serialize import filehandler\nfrom tron.utils import timeutils\nfrom tron.utils.observer import Observer\n\nlog = logging.getLogger(__name__)\n\n\nclass JobScheduler(Observer):\n    \"\"\"A JobScheduler is responsible for scheduling Jobs and running JobRuns\n    based on a Jobs configuration. Runs jobs by setting a callback to fire\n    x seconds into the future.\n    \"\"\"\n\n    def __init__(self, job: Job):\n        self.job = job\n        self.watch(job)\n\n    def restore_state(self, job_state_data, config_action_runner):\n        \"\"\"Load the job state and schedule any JobRuns.\"\"\"\n        job_runs = self.job.get_job_runs_from_state(job_state_data)\n        for run in job_runs:\n            self.job.watch(run)\n        self.job.runs.runs.extend(job_runs)\n        log.info(f\"{self} restored\")\n\n        # Tron will recover any action run that has UNKNOWN status\n        # and will start connecting to task_proc\n        recovery.launch_recovery_actionruns_for_job_runs(\n            job_runs=job_runs,\n            master_action_runner=config_action_runner,\n        )\n\n        scheduled = self.job.runs.get_scheduled()\n        # for those that were already scheduled, we reschedule them to run.\n        for job_run in scheduled:\n            self._set_callback(job_run)\n\n        # Ensure we have at least 1 scheduled run\n        self.schedule()\n\n    def enable(self):\n        \"\"\"Enable the job and start its scheduling cycle.\"\"\"\n        if self.job.enabled:\n            return\n\n        self.job.enabled = True\n        self.create_and_schedule_runs(next_run_time=None)\n\n    def create_and_schedule_runs(self, next_run_time=None):\n        runs_to_schedule = self.get_runs_to_schedule(next_run_time)\n        if not runs_to_schedule:\n            return\n        # Eagerly save new runs in case tron gets restarted\n        # runs_to_schedule is a generator, so we can only iterate\n        # through it once\n        for r in runs_to_schedule:\n            r.notify(JobRun.NOTIFY_STATE_CHANGED)\n            self._set_callback(r)\n        self.job.notify(Job.NOTIFY_STATE_CHANGE)\n\n    def disable(self):\n        \"\"\"Disable the job and cancel and pending scheduled jobs.\"\"\"\n        self.job.enabled = False\n        self.job.runs.cancel_pending()\n\n    def manual_start(self, run_time=None):\n        \"\"\"Trigger a job run manually (instead of from the scheduler).\"\"\"\n        run_time = run_time or timeutils.current_time(tz=self.job.time_zone)\n        manual_runs = list(self.job.build_new_runs(run_time, manual=True))\n        for r in manual_runs:\n            r.start()\n        return manual_runs\n\n    def schedule_reconfigured(self):\n        \"\"\"Remove the pending run and create new runs with the new JobScheduler.\"\"\"\n        if not self.job.enabled:\n            return\n\n        # when reconfiguring, preserve the latest scheduled run's time\n        pending_run_times = [j.run_time for j in list(self.job.runs.get_pending())]\n        if len(pending_run_times) != 1:\n            log.warning(f\"{self.job} has {len(pending_run_times)} pending runs, not 1\")\n        next_run_time = None if len(pending_run_times) == 0 else pending_run_times[0]\n\n        self.job.runs.remove_pending()\n        
self.create_and_schedule_runs(next_run_time=next_run_time)\n\n    def schedule(self):\n        \"\"\"Schedule the next run for this job by setting a callback to fire\n        at the appropriate time.\n        \"\"\"\n        if not self.job.enabled:\n            return\n        last_run = self.job.runs.get_newest(include_manual=False)\n        last_run_time = last_run.run_time if last_run else None\n        next_run_time = self.job.scheduler.next_run_time(last_run_time)\n        self.create_and_schedule_runs(next_run_time=next_run_time)\n\n    def update_from_job_scheduler(self, job_scheduler):\n        \"\"\"Update a job scheduler by copying another.\"\"\"\n        curr_job = self.get_job()\n        new_job = job_scheduler.get_job()\n\n        curr_job.update_from_job(new_job)\n\n        # Since job updating only copies equality attributes (defined in the Job\n        # class), we need to now enable or disable the job depending on if the\n        # new job says so.\n        if curr_job.enabled is not new_job.enabled and curr_job.config_enabled is not new_job.config_enabled:\n            if new_job.config_enabled:\n                log.info(f\"{curr_job} re-enabled during reconfiguration\")\n                self.enable()\n            else:\n                log.info(f\"{curr_job} disabled during reconfiguration\")\n                self.disable()\n        curr_job.config_enabled = new_job.config_enabled\n\n    def _set_callback(self, job_run):\n        \"\"\"Set a callback for JobRun to fire at the appropriate time.\"\"\"\n        seconds = job_run.seconds_until_run_time()\n        human_time = humanize.naturaltime(seconds, future=True)\n        log.info(f\"Scheduling {job_run} {human_time} ({seconds} seconds)\")\n        reactor.callLater(seconds, self.run_job, job_run)\n\n    # TODO: new class for this method\n    def run_job(self, job_run, run_queued=False):\n        \"\"\"Triggered by a callback to actually start the JobRun. Also\n        schedules the next JobRun.\n        \"\"\"\n        # If the Job has been disabled after this run was scheduled, then cancel\n        # the JobRun and do not schedule another\n        if not self.job.enabled:\n            log.info(f\"Cancelled {job_run} because job has been disabled.\")\n            return job_run.cancel()\n\n        # This is a callback on a job run that has been already cleaned up due to\n        # reconfiguration. Do nothing.\n        if not job_run.action_runs:\n            return\n\n        # If the JobRun was cancelled we won't run it.  A JobRun may be\n        # cancelled if the job was disabled, or manually by a user. 
It's\n        # also possible this job was run (or is running) manually by a user.\n        # Alternatively, if run_queued is True, this job_run is already queued.\n        if not run_queued and not job_run.is_scheduled:\n            log.info(\n                f\"{job_run} in state {job_run.state} is not scheduled, \" \"scheduling a new run instead of running\",\n            )\n            return self.schedule()\n\n        node = job_run.node if self.job.all_nodes else None\n        # If there is another job run still running, queue or cancel this one\n        if not self.job.allow_overlap and any(self.job.runs.get_active(node)):\n            self._queue_or_cancel_active(job_run)\n            return\n        job_run.start()\n        self.schedule_termination(job_run)\n        if not self.job.scheduler.schedule_on_complete:\n            self.schedule()\n\n    def schedule_termination(self, job_run):\n        if self.job.max_runtime:\n            seconds = timeutils.delta_total_seconds(self.job.max_runtime)\n            reactor.callLater(seconds, job_run.stop)\n\n    def _queue_or_cancel_active(self, job_run):\n        if self.job.queueing:\n            log.info(f\"{self.job} still running, queueing {job_run}\")\n            return job_run.queue()\n\n        log.info(f\"{self.job} still running, cancelling {job_run}\")\n        job_run.cancel()\n        self.schedule()\n\n    def handle_job_events(self, _observable, event, event_data=None):\n        \"\"\"Handle notifications from observables. If a JobRun has completed\n        look for queued JobRuns that may need to start now.\n        \"\"\"\n        if event != Job.NOTIFY_RUN_DONE:\n            return\n        self.run_queue_schedule()\n\n    def run_queue_schedule(self):\n        # TODO: this should only start runs on the same node if this is an\n        # all_nodes job, but that is currently not possible\n        queued_run = self.job.runs.get_first_queued()\n        if queued_run:\n            reactor.callLater(0, self.run_job, queued_run, run_queued=True)\n\n        # Attempt to schedule a new run. 
This will only schedule a run if the\n        # previous run was cancelled from a scheduled state, or if the job\n        # scheduler is `schedule_on_complete`.\n        self.schedule()\n\n    handler = handle_job_events\n\n    def get_runs_to_schedule(self, next_run_time):\n        \"\"\"Build and return the runs to schedule.\"\"\"\n        if self.job.runs.has_pending:\n            log.info(f\"{self.job} has pending runs, can't schedule more.\")\n            return []\n\n        if next_run_time is None:\n            next_run_time = self.job.scheduler.next_run_time(None)\n        return self.job.build_new_runs(next_run_time)\n\n    def update_name(self, name):\n        self.job.name = name\n        for job_run in self.get_job_runs():\n            for action_run in job_run._get_action_runs():\n                action_run.job_run_id = action_run.job_run_id.replace(job_run.job_name, name, 1)\n            job_run.job_name = name\n\n    def __str__(self):\n        return f\"{self.__class__.__name__}({self.job})\"\n\n    def get_name(self):\n        return self.job.name\n\n    def get_job(self):\n        return self.job\n\n    def get_job_runs(self):\n        return self.job.runs\n\n    def __eq__(self, other):\n        return bool(other and self.get_job() == other.get_job())\n\n    def __ne__(self, other):\n        return not self == other\n\n\nclass JobSchedulerFactory:\n    \"\"\"Construct JobScheduler instances from configuration.\"\"\"\n\n    def __init__(self, context, output_stream_dir, time_zone, action_runner, job_graph):\n        self.context = context\n        self.output_stream_dir = output_stream_dir\n        self.time_zone = time_zone\n        self.action_runner = action_runner\n        self.job_graph = job_graph\n\n    def build(self, job_config):\n        log.debug(f\"Building new job scheduler {job_config.name}\")\n        output_path = filehandler.OutputPath(self.output_stream_dir)\n        time_zone = job_config.time_zone or self.time_zone\n        scheduler = scheduler_from_config(job_config.schedule, time_zone)\n        action_graph = self.job_graph.get_action_graph_for_job(job_config.name)\n        job = Job.from_config(\n            job_config=job_config,\n            scheduler=scheduler,\n            parent_context=self.context,\n            output_path=output_path,\n            action_runner=self.action_runner,\n            action_graph=action_graph,\n        )\n        return JobScheduler(job)\n"
  },
  {
    "path": "tron/core/jobgraph.py",
    "content": "from collections import defaultdict\nfrom collections import namedtuple\nfrom typing import DefaultDict\n\nfrom tron.config.config_parse import ConfigContainer\nfrom tron.core.action import Action\nfrom tron.core.actiongraph import ActionGraph\nfrom tron.utils import maybe_decode\n\nAdjListEntry = namedtuple(\"AdjListEntry\", [\"action_name\", \"is_trigger\"])\n\n\nclass JobGraph:\n    \"\"\"A JobGraph stores the entire DAG of jobs and actions, including\n    cross-job dependencies (aka triggers)\n    \"\"\"\n\n    def __init__(self, config_container: ConfigContainer, should_validate_missing_dependency: bool | None = False):\n        \"\"\"Build an adjacency list and a reverse adjacency list for the graph,\n        and store all the actions as well as which actions belong to which job\n        \"\"\"\n        self.action_map: dict[str, Action] = {}\n        self._actions_for_job: DefaultDict[str, list[str]] = defaultdict(list)\n        self._adj_list: DefaultDict[str, list[AdjListEntry]] = defaultdict(list)\n        self._rev_adj_list: DefaultDict[str, list[AdjListEntry]] = defaultdict(list)\n\n        all_actions = set()\n        for job_name, job_config in config_container.get_jobs().items():\n            for action_name, action_config in job_config.actions.items():\n                full_name = self._save_action(action_name, job_name, action_config)\n                all_actions.add(full_name)\n\n                for required_action in action_config.requires or []:\n                    required_action_name = f\"{job_name}.{required_action}\"\n                    self._rev_adj_list[full_name].append(AdjListEntry(required_action_name, False))\n\n                for trigger in action_config.triggered_by or []:\n                    trigger_action_name = \".\".join(trigger.split(\".\")[:3])\n                    self._rev_adj_list[full_name].append(AdjListEntry(trigger_action_name, True))\n\n                for parent_action, is_trigger in self._rev_adj_list[full_name]:\n                    self._adj_list[parent_action].append(AdjListEntry(full_name, is_trigger))\n\n            cleanup_action_config = job_config.cleanup_action\n            if cleanup_action_config:\n                self._save_action(cleanup_action_config.name, job_name, cleanup_action_config)\n\n        if should_validate_missing_dependency:\n            missing_dependent_actions = defaultdict(list)\n            for action_name in self._rev_adj_list:\n                for dependent_action_entry in self._rev_adj_list[action_name]:\n                    if dependent_action_entry.action_name not in all_actions:\n                        missing_dependent_actions[dependent_action_entry.action_name].append(action_name)\n\n            error_messages = []\n            for action_name, child_action_names in missing_dependent_actions.items():\n                error_messages.append(\n                    \"Action {} is dependency of actions:\\n{}\".format(\n                        action_name,\n                        \"\\n\".join(\n                            [f\"  - {child_action_name}\" for child_action_name in child_action_names],\n                        ),\n                    ),\n                )\n\n            if error_messages:\n                raise ValueError(\n                    (\n                        \"The following actions are dependencies of other actions but missing:\\n\"\n                        \"{}\\n\"\n                        \"Please check if you have deleted/renamed any of them or their containing 
jobs.\"\n                    ).format(\n                        \"\\n\".join(error_messages),\n                    ),\n                )\n\n    def get_action_graph_for_job(self, job_name):\n        \"\"\"Traverse the JobGraph for a specific job to construct an ActionGraph for it\"\"\"\n        job_action_map = {}\n        required_actions, required_triggers = defaultdict(set), defaultdict(set)\n\n        for action_name in self._actions_for_job[job_name]:\n            # Any actions that belong to _this job_ are not prefixed by the job name\n            short_action_name = action_name.split(\".\")[-1]\n            job_action_map[short_action_name] = self.action_map[action_name]\n            required_actions[short_action_name] = {\n                entry.action_name.split(\".\")[-1] for entry in self._rev_adj_list[action_name] if not entry.is_trigger\n            }\n\n            # We call this twice to build the complete DAG for the job; the first time\n            # we search the forward adjacency list and the second time we search the\n            # reverse adjancency list.  This ensures we don't miss any triggers\n            required_triggers = self._get_required_triggers(action_name, required_triggers)\n            required_triggers = self._get_required_triggers(action_name, required_triggers, search_up=False)\n        return ActionGraph(job_action_map, required_actions, required_triggers)\n\n    def _save_action(self, action_name, job_name, config):\n        action_name = maybe_decode(\n            action_name\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n        full_name = f\"{job_name}.{action_name}\"\n        self.action_map[full_name] = Action.from_config(config)\n        self._actions_for_job[job_name].append(full_name)\n        return full_name\n\n    def _get_required_triggers(self, action_name, triggers, search_up=True):\n        stack = [action_name]\n        visited = set()\n\n        # Do DFS to search the adjacency list and find all of the required triggers\n        # for a particular action\n        while stack:\n            current_action = stack.pop()\n            visited.add(current_action)\n            adj_list = self._rev_adj_list if search_up else self._adj_list\n            for next_action, is_trigger in adj_list[current_action]:\n                if not is_trigger:\n                    continue\n\n                if next_action not in visited:\n                    stack.append(next_action)\n\n                if current_action == action_name:\n                    current_action = current_action.split(\".\")[-1]\n\n                if search_up:\n                    triggers[current_action].add(next_action)\n                else:\n                    triggers[next_action].add(current_action)\n\n        return triggers\n"
  },
  {
    "path": "tron/core/jobrun.py",
    "content": "\"\"\"\n Classes to manage job runs.\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport time\nfrom collections import deque\nfrom typing import Any\n\nimport pytz\n\nimport tron.metrics as metrics\nfrom tron import command_context\nfrom tron import node\nfrom tron import prom_metrics\nfrom tron.core.actiongraph import ActionGraph\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.actionrun import ActionRunCollection\nfrom tron.core.actionrun import ActionRunFactory\nfrom tron.serialize import filehandler\nfrom tron.utils import maybe_decode\nfrom tron.utils import next_or_none\nfrom tron.utils import proxy\nfrom tron.utils import timeutils\nfrom tron.utils.observer import Observable\nfrom tron.utils.observer import Observer\nfrom tron.utils.persistable import Persistable\n\nlog = logging.getLogger(__name__)\nstate_logger = logging.getLogger(f\"{__name__}.state_changes\")\n\n\nclass Error(Exception):\n    pass\n\n\ndef get_job_run_id(job_name: str, run_num: int) -> str:\n    return f\"{job_name}.{run_num}\"\n\n\nclass JobRun(Observable, Observer, Persistable):\n    \"\"\"A JobRun is an execution of a Job.  It has a list of ActionRuns and is\n    responsible for starting ActionRuns in the correct order and managing their\n    dependencies.\n    \"\"\"\n\n    NOTIFY_DONE = \"notify_done\"\n    NOTIFY_STATE_CHANGED = \"notify_state_changed\"\n    NOTIFY_REMOVED = \"notify_removed\"\n\n    context_class = command_context.JobRunContext\n\n    # TODO: use config object\n    def __init__(\n        self,\n        job_name: str,\n        run_num: int,\n        run_time: datetime.datetime,\n        node: node.Node,\n        output_path: filehandler.OutputPath | None = None,\n        base_context: command_context.CommandContext | None = None,\n        action_runs: ActionRunCollection | None = None,\n        action_graph: ActionGraph | None = None,\n        manual: bool | None = None,\n    ):\n        super().__init__()\n        self.job_name = maybe_decode(\n            job_name\n        )  # TODO: TRON-2293 - maybe_decode is a relic of Python2->Python3 migration. 
Remove it.\n        self.run_num = run_num\n        self.run_time = run_time\n        self.node = node\n        self.output_path = output_path or filehandler.OutputPath()\n        self.output_path.append(str(self.run_num))\n        self.action_runs_proxy = None\n        self._action_runs = None\n        self.action_graph = action_graph\n        self.manual = manual\n\n        if action_runs:\n            self.action_runs = action_runs\n\n        self.context = command_context.build_context(self, base_context)\n\n    @staticmethod\n    def to_json(state_data: dict) -> str:\n        \"\"\"Serialize the JobRun instance to a JSON string.\"\"\"\n        try:\n            return json.dumps(\n                {\n                    \"job_name\": state_data[\"job_name\"],\n                    \"run_num\": state_data[\"run_num\"],\n                    \"run_time\": state_data[\"run_time\"].isoformat() if state_data[\"run_time\"] else None,\n                    \"time_zone\": (\n                        state_data[\"run_time\"].tzinfo.zone\n                        if state_data[\"run_time\"] and state_data[\"run_time\"].tzinfo\n                        else None\n                    ),\n                    \"node_name\": state_data[\"node_name\"],\n                    \"runs\": [ActionRun.to_json(run) for run in state_data[\"runs\"]],\n                    \"cleanup_run\": ActionRun.to_json(state_data[\"cleanup_run\"]) if state_data[\"cleanup_run\"] else None,\n                    \"manual\": state_data[\"manual\"],\n                }\n            )\n        except KeyError:\n            log.exception(\"Missing key in state_data:\")\n            raise\n        except Exception:\n            log.exception(\"Error serializing JobRun to JSON:\")\n            raise\n\n    @staticmethod\n    def from_json(state_data: str) -> dict[str, Any]:  # TODO: make a TypedDict for this\n        \"\"\"Deserialize the JobRun instance from a JSON string.\"\"\"\n        try:\n            json_data = json.loads(state_data)\n            raw_run_time = json_data[\"run_time\"]\n            if raw_run_time:\n                run_time = datetime.datetime.fromisoformat(raw_run_time)\n                if json_data[\"time_zone\"]:\n                    tz = pytz.timezone(json_data[\"time_zone\"])\n                    if run_time.tzinfo is None:\n                        # if runtime is timezone naive (i.e has no tz information) then localize it\n                        # otherwise we would get a ValueError if we attempt to localize a datetime object that has tz info\n                        run_time = tz.localize(run_time)\n                    else:\n                        # Convert to the desired timezone if it already has timezone information\n                        run_time = run_time.astimezone(tz)\n            else:\n                run_time = None\n            deserialized_data = {\n                \"job_name\": json_data[\"job_name\"],\n                \"run_num\": json_data[\"run_num\"],\n                \"node_name\": json_data[\"node_name\"],\n                \"manual\": json_data[\"manual\"],\n                \"runs\": [ActionRun.from_json(run) for run in json_data[\"runs\"]],\n                \"cleanup_run\": ActionRun.from_json(json_data[\"cleanup_run\"]) if json_data[\"cleanup_run\"] else None,\n                \"run_time\": run_time,\n                \"time_zone\": json_data[\"time_zone\"],\n            }\n        except Exception:\n            log.exception(\"Error deserializing JobRun from JSON\")\n            
raise\n        return deserialized_data\n\n    @property\n    def id(self):\n        return get_job_run_id(self.job_name, self.run_num)\n\n    @property\n    def name(self):\n        \"\"\"Property used by state manager to identify objects.\"\"\"\n        return self.id\n\n    @classmethod\n    def for_job(cls, job, run_num, run_time, node, manual):\n        \"\"\"Create a JobRun for a job.\"\"\"\n        run = cls(\n            job.get_name(),\n            run_num,\n            run_time,\n            node,\n            output_path=job.output_path.clone(),\n            base_context=job.context,\n            action_graph=job.action_graph,\n            manual=manual,\n        )\n\n        # We do this at creation to ensure each JobRun is counted once, regardless of when it actually executes.\n        prom_metrics.tron_job_runs_created_counter.inc()\n\n        action_runs = ActionRunFactory.build_action_run_collection(\n            run,\n            job.action_runner,\n        )\n        run.action_runs = action_runs\n        return run\n\n    @classmethod\n    def from_state(\n        cls,\n        state_data,\n        action_graph,\n        output_path,\n        context,\n        run_node,\n    ):\n        \"\"\"Restore a JobRun from a serialized state.\"\"\"\n        pool_repo = node.NodePoolRepository.get_instance()\n        run_node = pool_repo.get_node(state_data.get(\"node_name\"), run_node)\n        job_name = state_data[\"job_name\"]\n\n        job_run = cls(\n            job_name,\n            state_data[\"run_num\"],\n            state_data[\"run_time\"],\n            run_node,\n            action_graph=action_graph,\n            manual=state_data.get(\"manual\", False),\n            output_path=output_path,\n            base_context=context,\n        )\n        action_runs = ActionRunFactory.action_run_collection_from_state(\n            job_run,\n            state_data[\"runs\"],\n            state_data[\"cleanup_run\"],\n        )\n        job_run.action_runs = action_runs\n        return job_run\n\n    @property\n    def state_data(self):\n        \"\"\"This data is used to serialize the state of this job run.\"\"\"\n        return {\n            \"job_name\": self.job_name,\n            \"run_num\": self.run_num,\n            \"run_time\": self.run_time,\n            \"node_name\": self.node.get_name() if self.node else None,\n            \"runs\": self.action_runs.state_data,\n            \"cleanup_run\": self.action_runs.cleanup_action_state_data,\n            \"manual\": self.manual,\n        }\n\n    def _get_action_runs(self):\n        return self._action_runs\n\n    def _set_action_runs(self, run_collection):\n        \"\"\"Store action runs and register callbacks.\"\"\"\n        if self._action_runs is not None:\n            raise ValueError(\"ActionRunCollection already set on %s\" % self)\n\n        self._action_runs = run_collection\n        for action_run in run_collection.action_runs_with_cleanup:\n            self.watch(action_run)\n            action_run.setup_subscriptions()\n\n        self.action_runs_proxy = proxy.AttributeProxy(\n            run_collection,\n            [\n                \"queue\",\n                \"cancel\",\n                \"success\",\n                \"fail\",\n                \"start_time\",\n                \"end_time\",\n            ],\n        )\n\n    def _del_action_runs(self):\n        self._action_runs = None\n        self.action_runs_proxy = None\n\n    action_runs = property(\n        _get_action_runs,\n        
_set_action_runs,\n        _del_action_runs,\n    )\n\n    def update_action_config(self, action_graph):\n        self.action_graph = action_graph\n        self.action_runs.update_action_config(action_graph)\n\n    def seconds_until_run_time(self):\n        run_time = self.run_time\n        if run_time.tzinfo:\n            now = timeutils.current_time(tz=run_time.tzinfo)\n        else:\n            now = timeutils.current_time()\n        return max(0, timeutils.delta_total_seconds(run_time - now))\n\n    def start(self):\n        self.log_state_update(state=\"start\")\n        if self._do_start():\n            return True\n\n    def _do_start(self):\n        log.info(f\"{self} starting\")\n        self.action_runs.ready()\n        if any(self._start_action_runs()):\n            log.info(f\"{self} started\")\n            return True\n\n    def stop(self):\n        if self.action_runs.is_done:\n            return\n        self.action_runs.stop()\n\n    def _start_action_runs(self):\n        \"\"\"Start all startable action runs, and return any that were\n        successfully started.\n        \"\"\"\n        started_runs = [action_run for action_run in self.action_runs.get_startable_action_runs() if action_run.start()]\n\n        if not started_runs:\n            for r in self.action_runs:\n                if r.is_blocked_on_trigger:\n                    log.debug(f\"{r} is blocked on triggers: {r.remaining_triggers}\")\n\n        return started_runs\n\n    def handle_action_run_state_change(self, action_run: ActionRun, event: str, event_data: Any | None = None) -> None:\n        \"\"\"Handle events triggered by JobRuns.\"\"\"\n        log.info(f\"{self} got an event: {event}\")\n        metrics.meter(f\"tron.actionrun.{event}\")\n\n        if event == ActionRun.NOTIFY_TRIGGER_READY:\n            if self.is_scheduled or self.is_queued:\n                log.info(f\"{self} triggers are satisfied but run not started yet\")\n                return None\n\n            started = self._start_action_runs()\n            if any(started):\n                log.info(\n                    f\"{self} action runs triggered: \" f\"{', '.join(str(s) for s in started)}\",\n                )\n            return None\n\n        # propagate all state changes (from action runs) up to state serializer\n        self.notify(self.NOTIFY_STATE_CHANGED)\n        self.log_state_update(\n            state=action_run.state,\n            action_name=action_run.name,\n        )\n\n        if not action_run.is_done:\n            return None\n\n        if action_run.is_skipped and self.action_runs.is_scheduled:\n            return None\n\n        if not action_run.is_broken:\n            started = self._start_action_runs()\n            if any(started):\n                log.info(\n                    f\"{self} action runs started: \" f\"{', '.join(str(s) for s in started)}\",\n                )\n                return None\n\n        if not self.action_runs.is_done:\n            log.info(f\"{self} still has running or waiting actions\")\n            return None\n\n        # If we can't make any progress, we're done\n        cleanup_run: ActionRun = self.action_runs.cleanup_action_run\n        if not cleanup_run or cleanup_run.is_done:\n            return self.finalize()\n\n        cleanup_run.start()\n\n    handler = handle_action_run_state_change\n\n    def finalize(self) -> None:\n        \"\"\"The last step of a JobRun. 
Called when the cleanup action\n        completes or, if the job has no cleanup action, once all action\n        runs have reached a 'done' state.\n\n        Triggers an event to notify the Job that it is done.\n        \"\"\"\n        if self.action_runs.is_failed:\n            prom_metrics.tron_job_runs_completed_counter.labels(outcome=\"fail\").inc()\n            log.error(f\"{self} failed\")\n        else:\n            prom_metrics.tron_job_runs_completed_counter.labels(outcome=\"success\").inc()\n            log.info(f\"{self} succeeded\")\n\n        # Notify Job that this JobRun is complete\n        self.notify(self.NOTIFY_DONE)\n        self.log_state_update(state=self.state)\n\n    def cleanup(self):\n        \"\"\"Clean up any resources used by this JobRun.\"\"\"\n        log.info(f\"{self} removed\")\n        self.notify(self.NOTIFY_REMOVED)\n        self.clear_observers()\n        self.action_runs.cleanup()\n        self.node = None\n        self.action_graph = None\n        self._del_action_runs()\n        self.output_path.delete()\n\n    def get_action_run(self, action_name):\n        return self.action_runs.get(action_name)\n\n    def log_state_update(self, state: str, action_name: str | None = None) -> None:\n        if action_name is None:\n            state = f\"job_{state}\"\n        else:\n            action_name = str(action_name)\n\n        # Tron currently manages two major types of workloads: batch jobs and Spark jobs\n        # and the team that manages Spark would like to be able to monitor the runtime\n        # of Spark actions in Tron at an aggregate level. We currently ingest these state updates\n        # into various systems and have dashboards on this data, but it's currently non-trivial to\n        # separate out what actions belong to which workload without tagging these state updates.\n        executor = None  # we log job state updates with this function - which don't have an executor\n        if self.action_graph and action_name and self.action_graph.action_map.get(action_name):\n            action = self.action_graph.action_map[action_name]\n            # TODO: replace this with checking if the action executor is spark once we're running\n            # the spark driver in k8s\n            if \"spark-submit\" in action.command:\n                executor = \"spark\"\n            else:\n                executor = action.executor\n\n        data = {\n            \"job_name\": str(self.job_name),\n            \"run_num\": str(self.run_num),\n            \"action_name\": action_name,\n            \"state\": str(state),\n            \"executor\": executor,\n            \"timestamp\": time.time(),\n        }\n        state_logger.info(json.dumps(data))\n\n    @property\n    def state(self):\n        \"\"\"The overall state of this job run. 
Based on the state of its actions.\"\"\"\n        if not self.action_runs:\n            log.info(f\"{self} has no action runs to determine state\")\n            return ActionRun.UNKNOWN\n\n        if self.action_runs.is_complete:\n            return ActionRun.SUCCEEDED\n        if self.action_runs.is_cancelled:\n            return ActionRun.CANCELLED\n        if self.action_runs.is_running:\n            return ActionRun.RUNNING\n        if self.action_runs.is_starting:\n            return ActionRun.STARTING\n        if self.action_runs.is_failed:\n            return ActionRun.FAILED\n        if self.action_runs.is_waiting and self.action_runs.is_blocked_on_trigger:\n            return ActionRun.WAITING\n        if self.action_runs.is_scheduled:\n            return ActionRun.SCHEDULED\n        if self.action_runs.is_queued:\n            return ActionRun.QUEUED\n\n        return ActionRun.UNKNOWN\n\n    def cancel(self):\n        return self.action_runs.cancel()\n\n    def __getattr__(self, name):\n        if name.startswith(\"is_\"):\n            state_name = name[3:]\n            if state_name not in ActionRun.STATE_MACHINE.states:\n                raise RuntimeError(f\"{state_name} not in ActionRun.VALID_STATES\")\n            return self.state == state_name\n        elif self.action_runs_proxy:\n            return self.action_runs_proxy.perform(name)\n        else:\n            raise AttributeError(name)\n\n    def __str__(self):\n        return f\"JobRun:{self.id}\"\n\n\nclass JobRunCollection:\n    \"\"\"A JobRunCollection is a deque of JobRun objects. Responsible for\n    ordering and logic related to a group of JobRuns which should all be runs\n    for the same Job.\n\n    A JobRunCollection is created in two stages. First it's populated from a\n    configuration object, and second its state is loaded from a serialized\n    state dict.\n\n    Runs in a JobRunCollection should always remain sorted by their run_num.\n    \"\"\"\n\n    def __init__(self, run_limit):\n        self.run_limit = run_limit\n        self.runs = deque()\n\n    @classmethod\n    def from_config(cls, job_config):\n        \"\"\"Factory method for creating a JobRunCollection from a config.\"\"\"\n        return cls(job_config.run_limit)\n\n    def build_new_run(self, job, run_time, node, manual=False):\n        \"\"\"Create a new run for the job, add it to the runs list,\n        and return it.\n        \"\"\"\n        run_num = self.next_run_num()\n        run = JobRun.for_job(job, run_num, run_time, node, manual)\n        log.info(f\"{run} created on {node.name} at {run_time}\")\n        self.runs.appendleft(run)\n        self.remove_old_runs()\n        return run\n\n    def cancel_pending(self):\n        \"\"\"Find any queued or scheduled runs and cancel them.\"\"\"\n        for pending in self.get_pending():\n            pending.cancel()\n\n    def remove_pending(self):\n        \"\"\"Remove pending runs from the run list.\"\"\"\n        for pending in list(self.get_pending()):\n            pending.cleanup()\n            self.runs.remove(pending)\n\n    def get_run_by_state(self, state):\n        \"\"\"Returns the most recent run which matches the state.\"\"\"\n        return next_or_none(r for r in self.runs if r.state == state)\n\n    def get_run_by_num(self, num):\n        \"\"\"Return a the run with run number which matches num.\"\"\"\n        return next_or_none(r for r in self.runs if r.run_num == num)\n\n    def get_run_by_index(self, index):\n        \"\"\"Return the job run at index. 
Jobs are indexed from oldest to newest.\"\"\"\n        try:\n            return self.runs[index * -1 - 1]\n        except IndexError:\n            return None\n\n    def get_newest(self, include_manual=True):\n        \"\"\"Returns the most recently created JobRun.\"\"\"\n        return next_or_none(r for r in self.runs if include_manual or not r.manual)\n\n    def get_pending(self):\n        \"\"\"Return the job runs that are queued or scheduled.\"\"\"\n        return [r for r in self.runs if r.state in (ActionRun.SCHEDULED, ActionRun.QUEUED)]\n\n    @property\n    def has_pending(self):\n        return any(self.get_pending())\n\n    def get_active(self, node=None):\n        active_states = {ActionRun.RUNNING, ActionRun.STARTING, ActionRun.WAITING}\n        return [r for r in self.runs if (r.state in active_states) and (not node or r.node == node)]\n\n    def get_first_queued(self, node=None):\n        return next_or_none(\n            r for r in reversed(self.runs) if (not node or r.node == node) and r.state == ActionRun.QUEUED\n        )\n\n    def get_scheduled(self):\n        # Find the scheduled runs for the jobs and return it\n        # in most cases, there should just be a single run - but it's possible that a delayed job could have N scheduled runs built up\n        return [r for r in self.runs if r.state == ActionRun.SCHEDULED]\n\n    def next_run_num(self):\n        \"\"\"Return the next run number to use.\"\"\"\n        if not self.runs:\n            return 0\n        return max(r.run_num for r in self.runs) + 1\n\n    def remove_old_runs(self):\n        \"\"\"Remove old runs to reduce the number of completed runs\n        to within RUN_LIMIT.\n        \"\"\"\n        while len(self.runs) > self.run_limit:\n            run = self.runs.pop()\n            run.cleanup()\n\n    def get_action_runs(self, action_name):\n        return [job_run.get_action_run(action_name) for job_run in self.runs]\n\n    def get_run_nums(self):\n        return [r.run_num for r in self.runs]\n\n    @property\n    def state_data(self):\n        \"\"\"Return the state data to serialize.\"\"\"\n        return [r.state_data for r in self.runs]\n\n    @property\n    def last_success(self):\n        return self.get_run_by_state(ActionRun.SUCCEEDED)\n\n    @property\n    def next_run(self):\n        return self.get_run_by_state(ActionRun.SCHEDULED)\n\n    def __iter__(self):\n        return iter(self.runs)\n\n    def __str__(self):\n        return \"{}[{}]\".format(\n            type(self).__name__,\n            \", \".join(f\"{r.run_num}({r.state})\" for r in self.runs),\n        )\n\n\ndef job_runs_from_state(\n    runs,\n    action_graph,\n    output_path,\n    context,\n    node_pool,\n):\n    return [\n        JobRun.from_state(\n            run,\n            action_graph,\n            output_path.clone(),\n            context,\n            node_pool.next(),\n        )\n        for run in runs\n    ]\n"
  },
  {
    "path": "tron/core/recovery.py",
    "content": "import logging\n\nfrom tron.core.actionrun import ActionRun\nfrom tron.core.actionrun import KubernetesActionRun\nfrom tron.core.actionrun import MesosActionRun\nfrom tron.core.actionrun import SSHActionRun\n\nlog = logging.getLogger(__name__)\n\n\ndef filter_action_runs_needing_recovery(action_runs):\n    ssh_runs = []\n    mesos_runs = []\n    kubernetes_runs = []\n    for action_run in action_runs:\n        if isinstance(action_run, SSHActionRun):\n            if action_run.state == ActionRun.UNKNOWN:\n                ssh_runs.append(action_run)\n        elif isinstance(action_run, MesosActionRun):\n            if action_run.state == ActionRun.UNKNOWN and action_run.end_time is None:\n                mesos_runs.append(action_run)\n        elif isinstance(action_run, KubernetesActionRun):\n            if action_run.state == ActionRun.UNKNOWN and action_run.end_time is None:\n                kubernetes_runs.append(action_run)\n    return ssh_runs, mesos_runs, kubernetes_runs\n\n\ndef launch_recovery_actionruns_for_job_runs(job_runs, master_action_runner):\n    for run in job_runs:\n        if not run._action_runs:\n            log.info(f\"Skipping recovery of {run} with no action runs (may have been cleaned up)\")\n            continue\n\n        # TODO: Why do we do this separately if we just need to call recover()\n        ssh_runs, mesos_runs, kubernetes_runs = filter_action_runs_needing_recovery(run._action_runs)\n        for action_run in ssh_runs:\n            action_run.recover()\n\n        for action_run in mesos_runs:\n            action_run.recover()\n\n        for action_run in kubernetes_runs:\n            action_run.recover()\n"
  },
  {
    "path": "tron/default_config.yaml",
    "content": "ssh_options:\n    ## Tron needs SSH keys to allow the effective user to login to each of the\n    ## nodes specified in the \"nodes\" section. You can choose to use either an\n    ## SSH agent or list\n    # identities:\n    #     - /home/tron/.ssh/id_dsa\n    agent: false\n\n## Directory used to store stdout/stderr from jobs. Defaults\n## to the working directory\n# output_stream_dir: /tmp/tron/streams/\n\n#state_persistence:\n    ## Configuration for how to store Tron state data\n    # name: 'shelve'\n    # store_type: 'tron_State.shelve'\n    # buffer_size:\n\nnodes:\n    ## You'll need to list out all the available nodes for doing work.\n    # - name: \"node\"\n    #   hostname: 'localhost'\n    #   username: 'tronuser'\n\n## Optionally you can list 'pools' of nodes where selection of a node will\n## be randomly determined or jobs can be configured to be run on all nodes\n## in the pool\n# node_pools:\n    # - name: NodePool\n    #   nodes: [node]\n\ncommand_context:\n    # Variable subsitution\n    # There are some built-in values such as 'node', 'runid', 'actionname' and\n    # run-time based variables such as 'shortdate'. (See tronfig.1 for\n    # reference.) You can specify whatever else you want similiar to\n    # environment variables:\n    # PYTHON: \"/usr/bin/python\"\n\njobs:\n    ## Configure your jobs here by specifing a name, node, schedule and the\n    ## work flow that should executed.\n    # - name: \"sample_job\"\n    #   node: node\n    #   schedule: \"daily\"\n    #   actions:\n    #     - name: \"uname\"\n    #       command: \"uname -a\"\n    #   cleanup_action:\n    #     command: \"rm -rf /tmp/sample_job_scratch\"\n"
  },
  {
    "path": "tron/eventbus.py",
    "content": "import logging\nimport os\nimport pickle\nimport signal\nimport time\nfrom collections import defaultdict\nfrom collections import deque\n\nfrom twisted.internet import reactor\n\nlog = logging.getLogger(__name__)\n\n\ndef consume_dequeue(queue, func):\n    queue_length = len(queue)\n    for _ in range(queue_length):\n        func(queue.popleft())\n\n\nclass EventBus:\n    instance = None\n\n    @staticmethod\n    def create(log_dir):\n        \"\"\"Create log directory and link to current log if those don't\n        already exist\"\"\"\n        EventBus.shutdown()\n        eb = EventBus(log_dir)\n\n        if not os.path.exists(eb.log_dir):\n            log.warning(f\"creating {eb.log_dir}\")\n            os.mkdir(eb.log_dir)\n\n        if not os.path.exists(eb.log_current) or not os.path.exists(\n            os.readlink(eb.log_current),\n        ):\n            log.warning(f\"creating {eb.log_current}\")\n            eb.sync_save_log(\"initial save\")\n\n        EventBus.instance = eb\n        return eb\n\n    @staticmethod\n    def start():\n        if not EventBus.instance:\n            return\n        return EventBus.instance._start()\n\n    @staticmethod\n    def shutdown():\n        if not EventBus.instance:\n            return\n        EventBus.instance._shutdown()\n        EventBus.instance = None\n\n    @staticmethod\n    def publish(event):\n        if not EventBus.instance:\n            return\n        return EventBus.instance._publish(event)\n\n    @staticmethod\n    def subscribe(prefix, subscriber, callback):\n        if not EventBus.instance:\n            return\n        return EventBus.instance._subscribe(prefix, subscriber, callback)\n\n    @staticmethod\n    def clear_subscriptions(subscriber):\n        if not EventBus.instance:\n            return\n        return EventBus.instance._clear_subscriptions(subscriber)\n\n    @staticmethod\n    def has_event(event):\n        if not EventBus.instance:\n            return\n        return EventBus.instance._has_event(event)\n\n    @staticmethod\n    def discard(event):\n        if not EventBus.instance:\n            return\n        return EventBus.instance._discard(event)\n\n    def __init__(self, log_dir):\n        self.enabled = False\n        self.event_log = {}\n        self.event_subscribers = defaultdict(list)\n        self.publish_queue = deque()\n        self.subscribe_queue = deque()\n        self.clear_subscription_queue = deque()\n        self.log_dir = log_dir\n        self.log_current = os.path.join(self.log_dir, \"current\")\n        self.log_updates = 0\n        self.log_last_save = 0\n        self.log_save_interval = 60  # save every minute\n        self.log_save_updates = 100  # save every 100 updates\n\n    def _start(self):\n        self.enabled = True\n        log.info(\"starting\")\n        self.sync_load_log()\n        reactor.callLater(0, self.sync_loop)\n\n    def _shutdown(self):\n        if self.enabled:\n            self.enabled = False\n            self.sync_save_log(\"shutdown\")\n            log.info(\"shutdown completed\")\n\n    def _publish(self, event):\n        if isinstance(event, str):\n            event = {\"id\": event}\n        if isinstance(event, dict):\n            self.publish_queue.append(event)\n            log.debug(f\"publish of {event['id']} enqueued\")\n            return True\n        else:\n            log.error(f\"can't publish {event!r}, must be dict\")\n            return False\n\n    def _discard(self, event: str) -> bool:\n        if not 
self._has_event(event):\n            return False\n        del self.event_log[event]\n        return True\n\n    def _subscribe(self, prefix, subscriber, callback):\n        self.subscribe_queue.append((prefix, subscriber, callback))\n        log.debug(f\"subscription ({prefix}, {subscriber}) enqueued\")\n\n    def _clear_subscriptions(self, subscriber):\n        self.clear_subscription_queue.append(subscriber)\n        log.debug(f\"clearing subscriptions for {subscriber}\")\n\n    def _has_event(self, event_id: str) -> bool:\n        return event_id in self.event_log\n\n    def sync_load_log(self):\n        started = time.time()\n        with open(self.log_current, \"rb\") as f:\n            self.event_log = pickle.load(f)\n        duration = time.time() - started\n        log.info(f\"log read from disk, took {duration:.4}s\")\n\n    def sync_save_log(self, reason: str) -> bool:\n        started = time.time()\n        new_file = os.path.join(self.log_dir, f\"{int(started)}.pickle\")\n        previous_file: str | None = os.path.realpath(os.path.join(self.log_dir, \"current\"))\n        # if we're starting  a fresh Tron server, there won't be a current symlink\n        # and the above line will give us the path to what will eventually be the current\n        # symlink...which is undesirable since we clean up whatever this points to :p\n        # we can tell if this is happening since this previous_file variable should\n        # always point to a file that ends in .pickle under normal operation\n        if previous_file and previous_file.endswith(\"current\"):\n            previous_file = None\n\n        try:\n            with open(new_file, \"xb\") as f:\n                pickle.dump(self.event_log, f)\n        except FileExistsError:\n            log.exception(\n                f\"unable to dump the log, file {new_file} already exists, \"\n                f\"too many updates/sec? 
current: {self.log_updates}, \"\n                f\"threshold: {self.log_save_updates}\",\n            )\n            return False\n\n        # atomically replace `current` symlink\n        tmplink = os.path.join(self.log_dir, \"tmp\")\n        try:\n            os.remove(tmplink)\n        except FileNotFoundError:\n            pass\n        os.symlink(new_file, tmplink)\n        os.replace(src=tmplink, dst=self.log_current)\n        # once we get here, `self.log_current` is now pointing to `new_file`\n        # so we can safely delete the previous `self.log_current` target without\n        # fear of losing data\n        if previous_file:\n            try:\n                os.remove(previous_file)\n            except Exception:\n                # this shouldn't happen - but we also shouldn't crash if the impossible happens\n                log.exception(f\"unable to delete {previous_file} - continuing anyway.\")\n\n        duration = time.time() - started\n        log.info(f\"log dumped to disk because {reason}, took {duration:.4}s\")\n        return True\n\n    def sync_loop(self):\n        if not self.enabled:\n            return\n\n        try:\n            self.sync_process()\n        except Exception:\n            log.error(\"eventbus exception:\", exc_info=1)\n            os.kill(os.getpid(), signal.SIGTERM)\n\n        reactor.callLater(1, self.sync_loop)\n\n    def sync_process(self):\n        save_reason = None\n        if time.time() > self.log_last_save + self.log_save_interval:\n            if self.log_updates > 0:\n                save_reason = f\"{self.log_save_interval}s passed, \" f\"{self.log_updates} updates\"\n            else:\n                self.log_last_save = time.time()\n                log.debug(\"skipping save, no updates\")\n        elif self.log_updates > self.log_save_updates:\n            save_reason = f\"{self.log_save_updates} updates\"\n\n        if save_reason and self.sync_save_log(save_reason):\n            self.log_last_save = time.time()\n            self.log_updates = 0\n\n        consume_dequeue(self.subscribe_queue, self.sync_subscribe)\n        consume_dequeue(\n            self.clear_subscription_queue,\n            self.sync_clear_subscriptions,\n        )\n        consume_dequeue(self.publish_queue, self.sync_publish)\n\n    def sync_publish(self, event):\n        event = pickle.loads(pickle.dumps(event))\n        event_id = event[\"id\"]\n        del event[\"id\"]\n        if event_id in self.event_log:\n            if self.event_log[event_id] != event:\n                log.info(f\"replacing event: {event_id}\")\n            else:\n                log.debug(f\"duplicate event: {event}\")\n                return\n\n        self.event_log[event_id] = event\n        self.log_updates += 1\n        log.debug(f\"event stored: {event_id} {event}\")\n\n        reactor.callLater(0, self.sync_notify, event_id)\n\n    def sync_subscribe(self, prefix_subscriber_cb):\n        prefix, subscriber, cb = prefix_subscriber_cb\n        self.event_subscribers[prefix].append((subscriber, cb))\n        log.debug(f\"subscriber registered: {prefix_subscriber_cb}\")\n\n    def sync_unsubscribe(self, prefix_sub):\n        prefix, sub = prefix_sub\n\n        if prefix not in self.event_subscribers:\n            log.debug(f\"can't unsubscribe, not found for prefix {prefix}\")\n            return\n\n        new_subs = [sub_cb for sub_cb in self.event_subscribers[prefix] if sub_cb[0] != sub]\n        if new_subs:\n            self.event_subscribers[prefix] = new_subs\n     
   else:\n            del self.event_subscribers[prefix]\n        log.debug(f\"subscription removed: {prefix} / {sub}\")\n\n    def sync_clear_subscriptions(self, subscriber):\n        new_subscriptions = defaultdict(list)\n        removed = 0\n        for prefix, subs in self.event_subscribers.items():\n            for (sub, cb) in subs:\n                if sub == subscriber:\n                    removed += 1\n                    continue\n                new_subscriptions[prefix].append((sub, cb))\n        self.event_subscribers = new_subscriptions\n\n        if removed > 0:\n            log.debug(f\"subscriptions of {subscriber} removed: {removed}\")\n\n    def sync_notify(self, event_id):\n        event = self.event_log[event_id]\n        log.debug(f\"notifying subscribers about {event_id}\")\n        for prefix, subscribers in self.event_subscribers.items():\n            log.debug(f\"check {prefix}: {event_id.startswith(prefix)}\")\n            if event_id.startswith(prefix):\n                for (sub, cb) in subscribers:\n                    log.debug(f\"notifying {sub} about {event_id}\")\n                    reactor.callLater(0, cb, dict(id=event_id, **event))\n"
  },
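The eventbus.py entry above implements a prefix-matched publish/subscribe log: every event is a dict keyed by "id", and sync_notify delivers it to each subscriber whose prefix is a leading substring of that id. The following is a minimal standalone sketch of just that matching rule; it uses plain dicts and illustrative names, with no Twisted reactor or pickled log involved.

```python
# Standalone sketch of the prefix-based dispatch that EventBus.sync_notify
# performs: an event id is delivered to every subscriber whose prefix is a
# leading substring of the id. All names below are illustrative only.
from collections import defaultdict

event_subscribers = defaultdict(list)


def subscribe(prefix, subscriber, callback):
    event_subscribers[prefix].append((subscriber, callback))


def notify(event_id, event):
    for prefix, subscribers in event_subscribers.items():
        if event_id.startswith(prefix):
            for subscriber, callback in subscribers:
                # mirror sync_notify, which passes dict(id=event_id, **event)
                callback(dict(id=event_id, **event))


subscribe("job.foo.", "watcher-1", lambda e: print("watcher-1 got", e["id"]))
subscribe("job.", "watcher-2", lambda e: print("watcher-2 got", e["id"]))

# both subscribers match: "job.foo.1.run" starts with "job." and with "job.foo."
notify("job.foo.1.run", {"state": "succeeded"})
```

In the real class the same dispatch happens asynchronously: publish/subscribe calls are enqueued, drained once a second by sync_process, and callbacks are fired via reactor.callLater.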
  {
    "path": "tron/kubernetes.py",
    "content": "import logging\nfrom collections.abc import Collection\nfrom logging import Logger\nfrom typing import cast\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom task_processing.interfaces.event import Event\nfrom task_processing.plugins.kubernetes.task_config import KubernetesTaskConfig\nfrom task_processing.runners.subscription import Subscription\nfrom task_processing.task_processor import TaskProcessor\nfrom twisted.internet.defer import Deferred\nfrom twisted.internet.defer import logError\n\nimport tron.metrics as metrics\nimport tron.prom_metrics as prom_metrics\nfrom tron import __version__\nfrom tron.actioncommand import ActionCommand\nfrom tron.config.schema import ConfigFieldSelectorSource\nfrom tron.config.schema import ConfigKubernetes\nfrom tron.config.schema import ConfigNodeAffinity\nfrom tron.config.schema import ConfigProjectedSAVolume\nfrom tron.config.schema import ConfigSecretSource\nfrom tron.config.schema import ConfigSecretVolume\nfrom tron.config.schema import ConfigTopologySpreadConstraints\nfrom tron.config.schema import ConfigVolume\nfrom tron.serialize.filehandler import OutputStreamSerializer\nfrom tron.utils import exitcode\nfrom tron.utils.queue import PyDeferredQueue\n\nif TYPE_CHECKING:\n    from tron.serialize.runstate.statemanager import StateChangeWatcher\n\nDEFAULT_POD_LAUNCH_TIMEOUT_S = 300  # arbitrary number, same as Mesos offer timeout of yore\nDEFAULT_DISK_LIMIT = 1024.0  # arbitrary, same as what was chosen for Mesos-based Tronjobs\n\nKUBERNETES_TASK_LOG_FORMAT = \"%(asctime)s %(name)s %(levelname)s %(message)s\"\nKUBERNETES_TASK_OUTPUT_LOGGER = \"tron.kubernetes.task_output\"\nKUBERNETES_TERMINAL_TYPES = {\"finished\", \"failed\", \"killed\"}\nKUBERNETES_FAILURE_TYPES = {\"failed\", \"killed\"}\nKUBERNETES_LOST_NODE_EXIT_CODES = {exitcode.EXIT_KUBERNETES_SPOT_INTERRUPTION, exitcode.EXIT_KUBERNETES_NODE_SCALEDOWN}\n\nlog = logging.getLogger(__name__)\n\n\ndef combine_volumes(\n    defaults: Collection[ConfigVolume],\n    overrides: Collection[ConfigVolume],\n) -> list[ConfigVolume]:\n    \"\"\"Helper to reconcile lists of volume mounts.\n\n    If any volumes have the same container path, the one in overrides wins.\n    \"\"\"\n    result = {mount.container_path: mount for mount in defaults}\n    for mount in overrides:\n        result[mount.container_path] = mount\n    return list(result.values())\n\n\nclass KubernetesTask(ActionCommand):\n    def __init__(\n        self, action_run_id: str, task_config: KubernetesTaskConfig, serializer: OutputStreamSerializer | None = None\n    ) -> None:\n        super().__init__(id=action_run_id, command=task_config.command, serializer=serializer)\n\n        self.task_config = task_config\n\n        self.log = self.get_event_logger()\n\n        self.log.info(f\"Kubernetes task {self.get_kubernetes_id()} created with config {self.get_config()}\")\n\n    def get_event_logger(self) -> Logger:\n        \"\"\"\n        Get or create a logger for a the action run associated with this task.\n\n        Used to make it easier to disambiguate what the log messages emitted\n        for event handling and such belong to.\n        \"\"\"\n        event_log = logging.getLogger(f\"{__name__}.{self.id}\")\n        # Every time a task gets created, this function runs and will add\n        # more stderr handlers to the logger, which results in duplicate log\n        # output. 
We only want to add the stderr handler if the logger does not\n        # have a handler yet.\n        if not len(event_log.handlers):\n            handler = logging.StreamHandler(self.stderr)\n            handler.setFormatter(logging.Formatter(KUBERNETES_TASK_LOG_FORMAT))\n            event_log.addHandler(handler)\n\n        return event_log\n\n    def report_resources(self, decrement: bool = False) -> None:\n        \"\"\"\n        Update internal resource utilization statistics of all tronjobs running for this task's Tron master.\n        \"\"\"\n        multiplier = -1 if decrement else 1\n\n        prom_metrics.tron_cpu_gauge.inc(self.task_config.cpus * multiplier)\n        prom_metrics.tron_memory_gauge.inc(self.task_config.memory * multiplier)\n        prom_metrics.tron_disk_gauge.inc(self.task_config.disk * multiplier)\n\n        metrics.count(\"tron.mesos.cpus\", self.task_config.cpus * multiplier)\n        metrics.count(\"tron.mesos.mem\", self.task_config.memory * multiplier)\n        metrics.count(\"tron.mesos.disk\", self.task_config.disk * multiplier)\n\n    def get_kubernetes_id(self) -> str:\n        \"\"\"\n        Get the Kubernetes identifier representing this task.\n\n        This will generally be of the form {pod_name}.{unique_suffix}\n        \"\"\"\n        return self.task_config.pod_name\n\n    def get_config(self) -> KubernetesTaskConfig:\n        \"\"\"\n        Get the task_processing config used to create this task.\n        \"\"\"\n        return self.task_config\n\n    def log_event_info(self, event: Event) -> None:\n        \"\"\"\n        Helper to log nice-to-have information (may fail).\n        \"\"\"\n        k8s_type = getattr(event, \"platform_type\", None)\n        # when Tron restarts, we'll get a number of events with an unfilled raw attribute\n        # these are safe to skip since we'll already have printed out the hostname of the\n        # box running the task corresponding to this event\n        if k8s_type == \"running\" and event.raw:\n            hostname = event.raw.get(\"spec\", {}).get(\"nodeName\", \"UNKNOWN\")\n            self.log.info(f\"Running on hostname: {hostname}\")\n\n    # TODO: TRON-2436: Refactor this\n    def handle_event(self, event: Event) -> None:\n        \"\"\"\n        Transitions Tron's state machine for this task based on events from task_processing.\n        \"\"\"\n        try:\n            # we wrap this entire thing in a try-except as otherwise an error in\n            # logging (which is useful, but not critical) will result in us not\n            # processing an event at all (which is critical!)\n            event_id = getattr(event, \"task_id\", None)\n            if event_id != self.get_kubernetes_id():\n                self.log.warning(\n                    f\"Event task id (id={event_id}) does not match current task id (id={self.get_kubernetes_id()}), ignoring.\",\n                )\n                return\n\n            k8s_type = getattr(event, \"platform_type\", None)\n            self.log.info(f\"Got event for task={event_id} (Kubernetes type={k8s_type}).\")\n\n            try:\n                self.log_event_info(event=event)\n            except Exception:\n                self.log.exception(f\"Unable to log event info for id={event_id}.\")\n\n            if k8s_type == \"running\":\n                self.started()\n            elif k8s_type in KUBERNETES_TERMINAL_TYPES:\n                raw_object = getattr(event, \"raw\", {}) or {}\n                pod_status = raw_object.get(\"status\", {}) or {}\n 
               container_statuses = pod_status.get(\"containerStatuses\", []) or []\n                exit_code = 0 if k8s_type == \"finished\" else exitcode.EXIT_KUBERNETES_ABNORMAL\n\n                if len(container_statuses) > 1 or len(container_statuses) == 0:\n                    # shouldn't happen right now, but who knows what future us will do :p\n                    self.log.error(\n                        \"Got an event for a Pod with zero or multiple containers - not inspecting payload to verify success.\"\n                    )\n                    self.log.error(f\"Event with >1 || 0 containers: {raw_object}\")\n                else:\n                    main_container_statuses = container_statuses[0]\n                    main_container_state = main_container_statuses.get(\"state\", {}) or {}\n                    main_container_last_state = main_container_statuses.get(\"lastState\", {}) or {}\n\n                    event_missing_state = not main_container_state\n                    event_missing_previous_state = not main_container_last_state\n\n                    # We are expecting this code to never be hit as we are expecting both state and last_state have values\n                    # The else statement should handle the situation gracefully when either current/last state are missing\n                    if event_missing_state and event_missing_previous_state:\n                        self.log.error(\n                            f\"Got an event with missing state - assuming {'success' if exit_code==0 else 'failure'}.\"\n                        )\n                        self.log.error(f\"Event with missing state: {raw_object}\")\n                    else:\n                        state_termination_metadata = main_container_state.get(\"terminated\", {}) or {}\n                        # Assume the current state is the 'last' state if last state does not exist\n                        last_state_termination_metadata = (\n                            main_container_last_state.get(\"terminated\", {}) or state_termination_metadata\n                        )\n                        if k8s_type == \"finished\":\n                            # this is kinda wild: we're seeing that a kubelet will sometimes fail to start a container (usually\n                            # due to what appear to be race conditions like those mentioned in\n                            # https://github.com/kubernetes/kubernetes/issues/100047#issuecomment-797624208) and then decide that\n                            # these Pods should be phase=Succeeded with an exit code of 0 - even though the container never actually\n                            # started. 
So far, we've noticed that when this happens, the finished_at and reason fields will be None\n                            # and thus we'll check for at least one of these conditions to detect an abnormal exit and actually \"fail\"\n                            # the affected action\n                            # NOTE: hopefully this won't change too drastically in future k8s upgrades without the actual problem (incorrect\n                            # success) being fixed :p\n                            if state_termination_metadata.get(\"exitCode\") == 0 and (\n                                state_termination_metadata.get(\"finishedAt\") is None\n                                and state_termination_metadata.get(\"reason\") is None\n                            ):\n                                exit_code = exitcode.EXIT_KUBERNETES_ABNORMAL\n                                self.log.warning(\"Container never started due to a Kubernetes/infra flake!\")\n                                self.log.warning(\n                                    f\"If automatic retries are not enabled, run `tronctl retry {self.id}` to retry.\"\n                                )\n                        elif k8s_type in KUBERNETES_FAILURE_TYPES:\n                            pod_status_reason = pod_status.get(\"reason\")\n                            pod_status_message = pod_status.get(\"message\", \"\").lower()\n\n                            # pod killed due to ephemeral storage eviction\n                            # XXX: if we need to handle more messages, we should probably regex this instead :p\n                            if pod_status_reason == \"Evicted\" and (\n                                \"ephemeral storage\" in pod_status_message\n                                or \"ephemeral local storage\" in pod_status_message\n                            ):\n                                exit_code = exitcode.EXIT_KUBERNETES_EPHEMERAL_STORAGE_EVICTION\n                                self.log.warning(\n                                    f\"Tronjob failed due to ephemeral storage eviction: {pod_status_message}\"\n                                )\n                            # pod killed before it reached terminal state, assume node scaledown\n                            elif not (state_termination_metadata or last_state_termination_metadata):\n                                self.log.warning(\"Container did not complete, likely due to scaling down a node.\")\n                                exit_code = exitcode.EXIT_KUBERNETES_NODE_SCALEDOWN\n                            # Handling spot terminations\n                            elif (\n                                last_state_termination_metadata.get(\"exitCode\") == 137\n                                and last_state_termination_metadata.get(\"reason\") == \"ContainerStatusUnknown\"\n                            ):\n                                exit_code = exitcode.EXIT_KUBERNETES_SPOT_INTERRUPTION\n                                self.log.warning(\"Tronjob failed due to spot interruption.\")\n                            # Handling K8s scaling down a node\n                            elif state_termination_metadata.get(\"exitCode\") == 143 and (\n                                state_termination_metadata.get(\"reason\") == \"Error\"\n                            ):\n                                exit_code = exitcode.EXIT_KUBERNETES_NODE_SCALEDOWN\n                                self.log.warning(\"Tronjob failed due to Kubernetes scaling down a node.\")\n   
                         else:\n                                # Capture the real exit code\n                                state_exit_code = state_termination_metadata.get(\"exitCode\")\n                                last_state_exit_code = last_state_termination_metadata.get(\"exitCode\")\n                                if state_exit_code:\n                                    exit_code = state_exit_code\n                                elif last_state_exit_code:\n                                    exit_code = last_state_exit_code\n\n                            if exit_code in KUBERNETES_LOST_NODE_EXIT_CODES:\n                                self.log.warning(\n                                    f\"If automatic retries are not enabled, run `tronctl retry {self.id}` to retry.\"\n                                )\n                                self.log.warning(\n                                    \"If this action is idempotent, then please consider enabling automatic retries for your action. If your action is not idempotent, then please configure this action to run on the stable pool rather than the default.\"\n                                )\n                self.exited(exit_code)\n            elif k8s_type == \"lost\":\n                # Using 'lost' instead of 'unknown' for now until we are sure that before reconcile() is called,\n                # the tasks inside task_metadata map are all UNKNOWN\n                self.log.warning(\"Kubernetes does not know anything about this task, it is LOST\")\n                self.log.warning(\n                    \"This can happen for any number of reasons, and Tron can't know if the task ran or not at all!\"\n                )\n                self.log.warning(\"If you want Tron to RUN it (again) anyway, retry it with:\")\n                self.log.warning(f\"    tronctl retry {self.id}\")\n                self.log.warning(\"If you want Tron to NOT run it and consider it as a success, skip it with:\")\n                self.log.warning(f\"    tronctl skip {self.id}\")\n                self.log.warning(\"If you want Tron to NOT run it and consider it as a failure, fail it with:\")\n                self.log.warning(f\"    tronctl fail {self.id}\")\n                self.exited(exitcode.EXIT_KUBERNETES_TASK_LOST)\n            else:\n                self.log.info(\n                    f\"Did not handle unknown kubernetes event type: {event}\",\n                )\n\n            if event.terminal:\n                self.log.info(\"This Kubernetes event was terminal, ending this action\")\n                self.report_resources(decrement=True)\n\n                exit_code = int(not getattr(event, \"success\", False))\n                # Returns False if we've already exited normally above\n                unexpected_error = self.exited(exit_code)\n                if unexpected_error:\n                    self.log.error(\"Unexpected failure, exiting\")\n\n                self.done()\n        except Exception:\n            self.log.exception(f\"unable to handle an event for id={event_id} for event={str(event)}\")\n\n\nclass KubernetesCluster:\n    def __init__(\n        self,\n        kubeconfig_path: str,\n        enabled: bool = True,\n        default_volumes: list[ConfigVolume] | None = None,\n        pod_launch_timeout: int | None = None,\n        watcher_kubeconfig_paths: list[str] | None = None,\n        non_retryable_exit_codes: list[int] | None = [],\n    ):\n        # general k8s config\n        self.kubeconfig_path = kubeconfig_path\n        
self.enabled = enabled\n        self.non_retryable_exit_codes = non_retryable_exit_codes\n        self.default_volumes: list[ConfigVolume] | None = default_volumes or []\n        self.pod_launch_timeout = pod_launch_timeout or DEFAULT_POD_LAUNCH_TIMEOUT_S\n        self.watcher_kubeconfig_paths = watcher_kubeconfig_paths or []\n        # creating a task_proc executor has a couple steps:\n        # * create a TaskProcessor\n        # * load the desired plugin (in this case, the k8s one)\n        # * and then actually create the executor (which we call a runner in tron)\n        # this last step requires a bit of setup, which is why we don't do it in-line\n        # in this constructor\n        self.processor = TaskProcessor()\n        self.processor.load_plugin(provider_module=\"task_processing.plugins.kubernetes\")\n        self.runner: Subscription | None = None\n\n        # queue to use for tron<->task_proc communication - will hold k8s events seen\n        # by task_processing and held for tron to process.\n        self.queue = PyDeferredQueue()\n        # this will hold the current event to process (retrieved from the PyDeferredQueue above)\n        # which we will eventually wrap with some callbacks to actually process using the Twisted\n        # reactor started as part of tron's startup process\n        self.deferred: Deferred | None = None\n\n        # map from k8s pod names to the task that said pod corresponds to\n        self.tasks: dict[str, KubernetesTask] = {}\n\n        # actually create the executor/runner, as mentioned above.\n        self.connect()\n        log.info(\"Tron connected to task_proc. task_proc will now start scheduling jobs on k8s\")\n\n    def connect(self) -> None:\n        \"\"\"\n        Starts running our Kubernetes task_processing.\n        \"\"\"\n        self.runner = self.get_runner(kubeconfig_path=self.kubeconfig_path, queue=self.queue)\n        self.handle_next_event()\n\n    def get_runner(self, kubeconfig_path: str, queue: PyDeferredQueue) -> Subscription | None:\n        \"\"\"\n        Gets or creates an instance of our Kubernetes task_processing plugin.\n        \"\"\"\n        if not self.enabled:\n            log.info(\"Kubernetes usage is disabled, not creating a runner.\")\n            return None\n\n        # TODO: Add a stopping/terminating state to the task_proc runner\n        if self.runner is not None:\n            log.info(\"Reusing previously created runner.\")\n            return self.runner\n\n        try:\n            # TODO(TRON-1701): we'll need to figure out a good way to support multiple clusters here\n            # (with each cluster only using a single namespace for tron purposes)\n            executor = self.processor.executor_from_config(\n                provider=\"kubernetes\",\n                provider_config={\n                    \"namespace\": \"tron\",\n                    \"version\": __version__,\n                    \"kubeconfig_path\": self.kubeconfig_path,\n                    \"watcher_kubeconfig_paths\": self.watcher_kubeconfig_paths,\n                    \"task_configs\": [task.get_config() for task in self.tasks.values()],\n                },\n            )\n\n            return Subscription(executor, queue)\n        except Exception:\n            log.exception(\"Unhandled exception while attempting to instantiate k8s task_proc plugin\")\n            return None\n\n    def handle_next_event(self, _: None = None) -> None:\n        \"\"\"\n        Pops events off of the shared tron<->task_proc queue 
and processes them.\n\n        We only expect two types of events:\n        * control: events regarding how the task_proc plugin is running - handled directly\n        * task: events regarding how the actual tasks/Pods we're running are doing - forwarded to KubernetesTask\n        \"\"\"\n        if self.deferred is not None and not self.deferred.called:\n            log.warning(\"Already have handlers waiting for next event in queue, not adding more\")\n            return\n\n        self.deferred = self.queue.get()\n        if self.deferred is None:\n            log.warning(\"Unable to get a handler for next event in queue - this should never happen!\")\n            # TODO: figure out how to recover if we were unable to get a handler\n            # Not adding a callback is very bad here as this means we will never handle future events\n        # we want to process the event we just popped off the queue, but we also want\n        # to form a sort of event loop, so we add two callbacks:\n        # * one to actually deal with the event\n        # * and another to grab the next event, in this way creating an event loop :)\n        self.deferred.addCallback(self.process_event)  # type: ignore\n        self.deferred.addCallback(self.handle_next_event)  # type: ignore\n\n        # should an exception be thrown, these callbacks will be run instead\n        self.deferred.addErrback(logError)  # type: ignore\n        self.deferred.addErrback(self.handle_next_event)  # type: ignore\n\n    def process_event(self, event: Event) -> None:\n        \"\"\"\n        Central router for all events received from task_processing.\n        \"\"\"\n        if event.kind == \"control\":\n            self._handle_control_event(event)\n        elif event.kind == \"task\":\n            self._handle_task_event(event)\n        else:\n            log.warning(f\"Unknown event type ({event.kind}): {event}\")\n\n    def _handle_control_event(self, event: Event) -> None:\n        \"\"\"\n        Helper method to handle any control-plane events sent from task_processing.\n        \"\"\"\n        message = getattr(event, \"message\", None)\n        log.info(f\"Processing a control event with message: {message}\")\n\n    def _handle_task_event(self, event: Event) -> None:\n        \"\"\"\n        Helper method to correctly route task-related events to the appropriate task.\n        \"\"\"\n        task_id = getattr(event, \"task_id\", None)\n        if task_id is None:\n            log.warning(f\"Received a malformed event with no task_id: {event}\")\n            return\n\n        if task_id not in self.tasks.keys():\n            # NOTE: we don't log killed events for tasks we don't know about, as we do some slightly\n            # funky things with these events: namely, we'll send our own synthetic killed event to\n            # work around some weird k8s event behavior we've seen in the past where the coalesced\n            # event that we get in the task_processing watch loop either doesn't have the correct state\n            # or is missing entirely. 
This is a bit of a hack, I'm sorry :(\n            # That said, without this we'd get somewhat annoying logspam in the tron logs whenever our\n            # workaround logic runs but k8s sends the correct event faster than we can send our synthetic\n            # one and the hackiness of this is somewhat removed by the `event.raw` check - that should only\n            # exclude our synthetic event.\n            if not (event.platform_type == \"killed\" and event.raw is None):\n                log.warning(f\"Got event for unknown task ({task_id} not in {self.tasks.keys()}): {event}\")\n            return\n\n        task = self.tasks[task_id]\n        task.handle_event(event)\n        if task.is_done and event.task_id is not None:\n            del self.tasks[event.task_id]\n\n    def kill(self, task_id: str) -> bool:\n        \"\"\"\n        Instructs task_processing to stop running a given task given a Pod name.\n        \"\"\"\n        return self.runner.kill(task_id)  # type: ignore  # we need to add type annotation to task_proc\n\n    def stop(self, fail_tasks: bool = False) -> None:\n        \"\"\"\n        Stops the configured task_processing runner and optionally fails all currently running tasks.\n\n        Will also clear the message queue and any unprocessed events.\n        \"\"\"\n        if self.runner:\n            self.runner.stop()\n\n        # Clear message queue\n        if self.deferred:\n            self.deferred.cancel()\n            self.deferred = None\n        self.queue = PyDeferredQueue()\n\n        if fail_tasks:\n            # NOTE: we're turning this into a list on purpose: otherwise we're modifying the dict we're iterating over\n            for key, task in list(self.tasks.items()):\n                # set the task status to unknown\n                task.exited(exit_status=None)\n                del self.tasks[key]\n\n    def set_enabled(self, is_enabled: bool) -> None:\n        \"\"\"\n        Toggles use of the configured Kubernetes cluster.\n\n        Will fail all running tasks if toggled off.\n        \"\"\"\n        self.enabled = is_enabled\n        if self.enabled:\n            self.connect()\n        else:\n            self.stop(fail_tasks=True)\n\n    def configure_tasks(self, default_volumes: list[ConfigVolume] | None) -> None:\n        self.default_volumes = default_volumes\n\n    def create_task(\n        self,\n        action_run_id: str,\n        serializer: OutputStreamSerializer,\n        command: str,\n        cpus: float | None,\n        mem: float | None,\n        disk: float | None,\n        docker_image: str,\n        env: dict[str, str],\n        secret_env: dict[str, ConfigSecretSource],\n        secret_volumes: Collection[ConfigSecretVolume],\n        projected_sa_volumes: Collection[ConfigProjectedSAVolume],\n        field_selector_env: dict[str, ConfigFieldSelectorSource],\n        volumes: Collection[ConfigVolume],\n        cap_add: Collection[str],\n        cap_drop: Collection[str],\n        node_selectors: dict[str, str],\n        node_affinities: list[ConfigNodeAffinity],\n        topology_spread_constraints: list[ConfigTopologySpreadConstraints],\n        pod_labels: dict[str, str],\n        pod_annotations: dict[str, str],\n        service_account_name: str | None,\n        ports: list[int],\n        task_id: str | None = None,\n    ) -> KubernetesTask | None:\n        \"\"\"\n        Given the execution parameters for a task, create a KubernetesTask that encapsulate those parameters.\n\n        This task will not actually be run 
until KubernetesCluster::submit() is called.\n        \"\"\"\n        if self.runner is None:\n            log.error(\n                f\"Attempted to create a task for {action_run_id}, but no task_processing runner has been started.\"\n            )\n            return None\n\n        task_config = cast(\n            KubernetesTaskConfig,\n            self.runner.TASK_CONFIG_INTERFACE(\n                name=action_run_id,\n                command=command,\n                image=docker_image,\n                cpus=cpus,\n                memory=mem,\n                disk=DEFAULT_DISK_LIMIT if disk is None else disk,\n                environment=env,\n                secret_environment={k: v._asdict() for k, v in secret_env.items()},\n                secret_volumes=[volume._asdict() for volume in secret_volumes],\n                projected_sa_volumes=[volume._asdict() for volume in projected_sa_volumes],\n                field_selector_environment={k: v._asdict() for k, v in field_selector_env.items()},\n                cap_add=cap_add,\n                cap_drop=cap_drop,\n                volumes=[\n                    volume._asdict()\n                    for volume in combine_volumes(defaults=self.default_volumes or [], overrides=volumes)\n                ],\n                node_selectors=node_selectors,\n                node_affinities=[affinity._asdict() for affinity in node_affinities],\n                topology_spread_constraints=[tsc._asdict() for tsc in topology_spread_constraints],\n                labels=pod_labels,\n                annotations=pod_annotations,\n                service_account_name=service_account_name,\n                ports=ports,\n            ),\n        )\n\n        # this should only ever be non-null when we're recovering from a Tron restart\n        # and are recreating the previous state - when actually creating a new task\n        # we'll always let task_processing come up with a Pod name for us\n        if task_id is not None:\n            try:\n                task_config = task_config.set_pod_name(task_id)\n            except ValueError:\n                log.error(f\"Invalid {task_id} for {action_run_id}\")\n                return None\n\n        return KubernetesTask(\n            action_run_id=action_run_id,\n            task_config=task_config,\n            serializer=serializer,\n        )\n\n    def _check_connection(self) -> None:\n        \"\"\"\n        Helper to ensure that the task_processing plugin is in a running state and event handling\n        is correctly setup in case we've disabled k8s at some point during operation.\n        \"\"\"\n        if self.runner is None or self.runner.stopping:\n            log.info(\"k8s plugin never created or stopped, restarting.\")\n            self.connect()\n        # re-add callbacks just in case they're missing\n        elif self.deferred is None or self.deferred.called:\n            self.handle_next_event()\n\n    def submit(self, task: KubernetesTask) -> None:\n        \"\"\"\n        Given a KubernetesTask, submit it to the configured Kubernetes cluster in order to attempt to run it.\n        \"\"\"\n        # Submitting a task while k8s usage is disabled should fail the task so that\n        # users know that they have to take action and re-run whatever was scheduled\n        # during the time this killswitch is active\n        if not self.enabled:\n            task.log.info(\"Not starting task, Kubernetes usage is disabled.\")\n            task.exited(1)\n            return\n\n        # it's 
possible that we're the first task submission following k8s going from\n        # disabled -> enabled, so make sure everything is correctly setup\n        self._check_connection()\n        assert self.runner is not None, \"Unable to correctly setup k8s runner!\"\n\n        # store the task to be launched before actually launching it so that there's\n        # no race conditions later on with processing an event for that Pod before\n        # Tron know that that Pod is for a task it cares about\n        self.tasks[task.get_kubernetes_id()] = task\n\n        # XXX: if spark-on-k8s ends up running through task_processing, we'll need to revisit\n        # reimplementing the clusterman resource reporting that MesosCluster::submit() used to do\n        if not self.runner.run(task.get_config()):\n            log.warning(f\"Unable to submit task {task.get_kubernetes_id()} to configured k8s cluster.\")\n            task.exited(1)\n        log.info(f\"Submitted task {task.get_kubernetes_id()} to configured k8s cluster.\")\n\n        # update internal resource usage tracker (this isn't connected at all to clusterman)\n        task.report_resources()\n\n    def recover(self, task: KubernetesTask) -> None:\n        \"\"\"\n        Given an instance of a KubernetesTask, attempt to reconcile the current state of the task from Kubernetes.\n        \"\"\"\n        if not task:\n            return\n\n        if not self.enabled:\n            task.log.info(\"Could not recover task, Kubernetes usage is disabled.\")\n            task.exited(None)\n            return\n\n        self._check_connection()\n        assert self.runner is not None, \"Unable to correctly setup k8s runner!\"\n\n        # the task/kubernetes id is really just the pod name\n        task_id = task.get_kubernetes_id()\n        self.tasks[task_id] = task\n        task.log.info(\"TRON RESTARTED! 
Starting recovery procedure by reconciling state for this task from Kubernetes\")\n        task.started()\n        self.runner.reconcile(task.get_config())\n        task.report_resources()\n\n\nclass KubernetesClusterRepository:\n    # Kubernetes config\n    kubernetes_enabled: bool = False\n    kubernetes_non_retryable_exit_codes: list[int] | None = []\n    kubeconfig_path: str | None = None\n    pod_launch_timeout: int | None = None\n    default_volumes: list[ConfigVolume] | None = None\n    watcher_kubeconfig_paths: list[str] | None = None\n    non_retryable_exit_codes: list[int] | None = None\n\n    # metadata config\n    clusters: dict[str, KubernetesCluster] = {}\n\n    # state management config\n    state_data = {}  # type: ignore  # not used yet\n    state_watcher: Optional[\"StateChangeWatcher\"] = None\n\n    @classmethod\n    def attach(cls, _, observer):\n        cls.state_watcher = observer\n\n    @classmethod\n    def get_cluster(cls, kubeconfig_path: str | None = None) -> KubernetesCluster | None:\n        if kubeconfig_path is None:\n            if cls.kubeconfig_path is None:\n                return None\n            kubeconfig_path = cls.kubeconfig_path\n\n        if kubeconfig_path not in cls.clusters:\n            # will create the task_proc executor\n            cluster = KubernetesCluster(\n                kubeconfig_path=kubeconfig_path,\n                enabled=cls.kubernetes_enabled,\n                default_volumes=cls.default_volumes,\n                watcher_kubeconfig_paths=cls.watcher_kubeconfig_paths,\n                non_retryable_exit_codes=cls.non_retryable_exit_codes,\n            )\n            cls.clusters[kubeconfig_path] = cluster\n\n        return cls.clusters[kubeconfig_path]\n\n    @classmethod\n    def shutdown(cls) -> None:\n        for cluster in cls.clusters.values():\n            cluster.stop()\n\n    @classmethod\n    def configure(cls, kubernetes_options: ConfigKubernetes) -> None:\n        cls.kubeconfig_path = kubernetes_options.kubeconfig_path\n        cls.kubernetes_enabled = kubernetes_options.enabled\n        cls.kubernetes_non_retryable_exit_codes = kubernetes_options.non_retryable_exit_codes\n        cls.default_volumes = kubernetes_options.default_volumes\n        cls.watcher_kubeconfig_paths = kubernetes_options.watcher_kubeconfig_paths\n        cls.non_retryable_exit_codes = kubernetes_options.non_retryable_exit_codes\n\n        for cluster in cls.clusters.values():\n            cluster.set_enabled(cls.kubernetes_enabled)\n            cluster.configure_tasks(default_volumes=cls.default_volumes)\n"
  },
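In kubernetes.py above, combine_volumes reconciles default and per-action volume mounts keyed on container_path, with the override winning on conflicts. Below is a small self-contained sketch of that merge; the helper is repeated verbatim so the snippet runs without tron's dependencies, and the Volume namedtuple is only a stand-in for tron.config.schema.ConfigVolume, whose full field list is not shown here (host_path/mode are assumptions).

```python
# Sketch of the container_path-keyed merge done by combine_volumes.
from collections import namedtuple

# stand-in for ConfigVolume; field names beyond container_path are illustrative
Volume = namedtuple("Volume", ["container_path", "host_path", "mode"])


def combine_volumes(defaults, overrides):
    result = {mount.container_path: mount for mount in defaults}
    for mount in overrides:
        result[mount.container_path] = mount
    return list(result.values())


defaults = [Volume("/nail/etc", "/nail/etc", "RO"), Volume("/tmp", "/tmp", "RW")]
overrides = [Volume("/nail/etc", "/srv/configs", "RO")]

# /nail/etc now comes from overrides; /tmp is kept from defaults
for volume in combine_volumes(defaults, overrides):
    print(volume)
```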
  {
    "path": "tron/logging.conf",
    "content": "[loggers]\nkeys=root, twisted, tron, tron.serialize, task_processing, tron.mesos.task_output, pymesos\n\n[handlers]\nkeys=timedRotatingFileHandler, syslogHandler, nullHandler\n\n[formatters]\nkeys=defaultFormatter, syslogFormatter\n\n[logger_root]\nlevel=WARNING\nhandlers=timedRotatingFileHandler\n\n[logger_twisted]\nlevel=WARNING\nhandlers=timedRotatingFileHandler\nqualname=twisted\npropagate=0\n\n[logger_tron]\nlevel=WARNING\nhandlers=timedRotatingFileHandler\nqualname=tron\npropagate=0\n\n[logger_tron.serialize]\nlevel=CRITICAL\nhandlers=timedRotatingFileHandler\nqualname=tron\npropagate=0\n\n[logger_task_processing]\nlevel=WARNING\nhandlers=timedRotatingFileHandler\nqualname=task_processing\npropagate=0\n\n[logger_pymesos]\nlevel=DEBUG\nhandlers=syslogHandler\nqualname=pymesos\npropagate=0\n\n[logger_tron.mesos.task_output]\nlevel=INFO\nhandlers=nullHandler\nqualname=tron.mesos.task_output\npropagate=0\n\n[handler_timedRotatingFileHandler]\nclass=logging.handlers.TimedRotatingFileHandler\nlevel=INFO\nformatter=defaultFormatter\nargs=('/var/log/tron/tron.log', 'D')\n\n[handler_syslogHandler]\nclass=logging.handlers.SysLogHandler\nlevel=WARNING\nformatter=syslogFormatter\nargs=('/dev/log',)\n\n[handler_nullHandler]\nclass=logging.NullHandler\nlevel=DEBUG\nargs=()\n\n[formatter_defaultFormatter]\nformat=%(asctime)s %(name)s %(levelname)s %(message)s\n\n[formatter_syslogFormatter]\nformat=tron[%(process)d]: %(message)s\n"
  },
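logging.conf above is in the stdlib logging.config.fileConfig format, wiring the tron, twisted, and task_processing loggers to a timed rotating file handler. A hedged sketch of loading it directly follows; note that the handlers reference /var/log/tron/tron.log and /dev/log, so this only succeeds on a host where those paths exist, and tron's own startup may configure logging differently than shown here.

```python
# Hedged sketch: load the fileConfig-style logging.conf and inspect the result.
import logging
import logging.config

logging.config.fileConfig("tron/logging.conf")  # path is illustrative
log = logging.getLogger("tron")
log.warning("tron logger is wired to: %s", log.handlers)
```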
  {
    "path": "tron/manhole.py",
    "content": "from twisted.conch.insults import insults\nfrom twisted.conch.manhole import ColoredManhole\nfrom twisted.conch.telnet import TelnetBootstrapProtocol\nfrom twisted.conch.telnet import TelnetTransport\nfrom twisted.internet import protocol\n\n\ndef make_manhole(namespace):\n    f = protocol.ServerFactory()\n    f.protocol = lambda: TelnetTransport(\n        TelnetBootstrapProtocol,\n        insults.ServerProtocol,\n        ColoredManhole,\n        namespace,\n    )\n    return f\n"
  },
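make_manhole in manhole.py returns a plain twisted ServerFactory wrapping a telnet-accessible ColoredManhole. The sketch below shows one way such a factory could be bound to a port; the port number and namespace are illustrative assumptions, not tron's actual wiring.

```python
# Hedged usage sketch for make_manhole: attach the returned factory to a
# local-only listening port and hand the reactor control.
from twisted.internet import reactor

from tron.manhole import make_manhole

namespace = {"hello": "from the manhole"}  # objects exposed to the REPL
reactor.listenTCP(4445, make_manhole(namespace), interface="127.0.0.1")
reactor.run()
```

Telnetting to the bound port then drops you into an interactive Python session whose globals are the supplied namespace.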
  {
    "path": "tron/mcp.py",
    "content": "import logging\nimport time\n\nfrom tron import actioncommand\nfrom tron import command_context\nfrom tron import node\nfrom tron import prom_metrics\nfrom tron.config import manager\nfrom tron.config.schema import MASTER_NAMESPACE\nfrom tron.core.job import Job\nfrom tron.core.job_collection import JobCollection\nfrom tron.core.job_scheduler import JobSchedulerFactory\nfrom tron.core.jobgraph import JobGraph\nfrom tron.eventbus import EventBus\nfrom tron.kubernetes import KubernetesClusterRepository\nfrom tron.mesos import MesosClusterRepository\nfrom tron.serialize.runstate import statemanager\n\nlog = logging.getLogger(__name__)\n\n\ndef apply_master_configuration(mapping, master_config):\n    def get_config_value(seq):\n        return [getattr(master_config, item) for item in seq]\n\n    # Map various MASTER.yaml config options to functions that will apply said options\n    # for example, we will have MasterControlProgram.configure_eventbus function mapped to eventbus_enabled option\n    for entry in mapping:\n        func, args = entry[0], get_config_value(entry[1:])\n        func(*args)\n\n\nclass MasterControlProgram:\n    \"\"\"Central state object for the Tron daemon.\"\"\"\n\n    def __init__(self, working_dir, config_path, boot_time):\n        super().__init__()\n        self.jobs = JobCollection()\n        self.working_dir = working_dir\n        self.config = manager.ConfigManager(config_path)\n        self.context = command_context.CommandContext()\n        self.state_watcher = statemanager.StateChangeWatcher()\n        self.boot_time = boot_time\n        current_time = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime(boot_time))\n        log.info(f\"Initialized. Tron started on {current_time}!\")\n\n    def shutdown(self):\n        EventBus.shutdown()\n        self.state_watcher.shutdown()\n\n    def reconfigure(self, namespace=None):\n        \"\"\"Reconfigure MCP while Tron is already running.\"\"\"\n        log.info(\"reconfigured\")\n        try:\n            self._load_config(reconfigure=True, namespace_to_reconfigure=namespace)\n        except Exception as e:\n            log.exception(f\"reconfigure failure: {e.__class__.__name__}: {e}\")\n            raise e\n\n    def _load_config(self, reconfigure=False, namespace_to_reconfigure=None):\n        \"\"\"Read config data and apply it.\"\"\"\n        with self.state_watcher.disabled():\n            self.apply_config(\n                self.config.load(),\n                reconfigure=reconfigure,\n                namespace_to_reconfigure=namespace_to_reconfigure,\n            )\n\n    def _update_metrics(self) -> None:\n        \"\"\"Update Prometheus metrics related to jobs and actions\"\"\"\n        try:\n            job_names = self.jobs.get_names()\n            job_count = len(job_names) if job_names else 0\n            prom_metrics.tron_job_count_gauge.set(job_count)\n\n            total_actions = 0\n            if self.jobs:\n                for job_scheduler in self.jobs:\n                    job = job_scheduler.get_job()\n                    if job and job.action_graph and job.action_graph.action_map:\n                        num_actions_in_job = len(job.action_graph.action_map)\n                        total_actions += num_actions_in_job\n\n            prom_metrics.tron_action_count_gauge.set(total_actions)\n        except Exception:\n            log.exception(\"Failed to update job and action count metrics\")\n\n    def initial_setup(self):\n        \"\"\"When the MCP is initialized the 
config is applied before the state.\n        In this case jobs shouldn't be scheduled until the state is applied.\n        \"\"\"\n        overall_startup_start_time = time.time()\n\n        # The job schedule factories will be created in the function below\n        self._load_config()\n\n        # Jobs will also get scheduled (internally) once the state for action runs are restored in restore_state\n        with prom_metrics.timer(\n            operation_name=\"full_restore_process\",\n            log=log,\n            histogram_metric=prom_metrics.tron_restore_duration_seconds_histogram,\n            gauge_metric=prom_metrics.tron_last_restore_duration_seconds_gauge,\n        ):\n            self.restore_state(\n                actioncommand.create_action_runner_factory_from_config(\n                    self.config.load().get_master().action_runner,\n                ),\n            )\n\n        # Any job with existing state would have been scheduled already. Jobs\n        # without any state will be scheduled here.\n        self.jobs.run_queue_schedule()\n\n        overall_startup_duration = time.time() - overall_startup_start_time\n        prom_metrics.tron_startup_duration_seconds_histogram.observe(overall_startup_duration)\n        prom_metrics.tron_last_startup_duration_seconds_gauge.set(overall_startup_duration)\n        log.info(f\"Tron total startup finished in {overall_startup_duration:.2f}s.\")\n\n    def apply_config(self, config_container, reconfigure=False, namespace_to_reconfigure=None):\n        \"\"\"Apply a configuration.\"\"\"\n        master_config_directives = [\n            (self.update_state_watcher_config, \"state_persistence\"),\n            (self.set_context_base, \"command_context\"),\n            (\n                node.NodePoolRepository.update_from_config,\n                \"nodes\",\n                \"node_pools\",\n                \"ssh_options\",\n            ),\n            (MesosClusterRepository.configure, \"mesos_options\"),\n            (KubernetesClusterRepository.configure, \"k8s_options\"),\n            (self.configure_eventbus, \"eventbus_enabled\"),\n        ]\n        master_config = config_container.get_master()\n        apply_master_configuration(master_config_directives, master_config)\n\n        self.state_watcher.watch(MesosClusterRepository)\n        self.state_watcher.watch(KubernetesClusterRepository)\n\n        # If the master namespace was updated, we should update jobs in all namespaces\n        if namespace_to_reconfigure == MASTER_NAMESPACE:\n            namespace_to_reconfigure = None\n\n        # TODO: unify NOTIFY_STATE_CHANGE and simplify this\n        self.job_graph = JobGraph(config_container)\n        # This factory is how Tron internally manages scheduling jobs\n        factory = self.build_job_scheduler_factory(master_config, self.job_graph)\n        updated_jobs = self.jobs.update_from_config(\n            config_container.get_jobs(),\n            factory,\n            reconfigure,\n            namespace_to_reconfigure,\n        )\n\n        # We will build the schedulers once the watcher is invoked\n        log.info(\n            f\"Tron built the schedulers for Tron jobs internally! 
Time elapsed since Tron started {time.time() - self.boot_time}s\"\n        )\n        self.state_watcher.watch_all(updated_jobs, [Job.NOTIFY_STATE_CHANGE, Job.NOTIFY_NEW_RUN])\n\n        # Do this last so that all Job objects, schedulers, and action graphs are fully built and linked within the JobCollection\n        self._update_metrics()\n\n    def build_job_scheduler_factory(self, master_config, job_graph):\n        \"\"\"Creates JobSchedulerFactory, which is how Tron tracks job schedules internally\"\"\"\n        output_stream_dir = master_config.output_stream_dir or self.working_dir\n        action_runner = actioncommand.create_action_runner_factory_from_config(\n            master_config.action_runner,\n        )\n        return JobSchedulerFactory(\n            self.context,\n            output_stream_dir,\n            master_config.time_zone,\n            action_runner,\n            job_graph,\n        )\n\n    def update_state_watcher_config(self, state_config):\n        \"\"\"Update the StateChangeWatcher, and save all state if the state config\n        changed.\n        \"\"\"\n        if self.state_watcher.update_from_config(state_config):\n            for job_scheduler in self.jobs:\n                self.state_watcher.save_job(job_scheduler.get_job())\n\n    def set_context_base(self, command_context):\n        self.context.base = command_context\n\n    def configure_eventbus(self, enabled):\n        if enabled:\n            if not EventBus.instance:\n                EventBus.create(f\"{self.working_dir}/_events\")\n                EventBus.start()\n        else:\n            EventBus.shutdown()\n\n    def get_job_collection(self):\n        return self.jobs\n\n    def get_config_manager(self):\n        return self.config\n\n    def restore_state(self, action_runner):\n        \"\"\"Use the state manager to retrieve the persisted state from DynamoDB and apply it\n        to the configured Jobs.\n        \"\"\"\n        log.info(\"Restoring from DynamoDB\")\n\n        with prom_metrics.timer(\n            operation_name=\"state_data_retrieval_from_dynamodb\",\n            log=log,\n            histogram_metric=prom_metrics.tron_dynamodb_data_retrieval_duration_seconds_histogram,\n            gauge_metric=prom_metrics.tron_last_dynamodb_data_retrieval_duration_seconds_gauge,\n        ):\n            # restores the state of the jobs and their runs from DynamoDB\n            states = self.state_watcher.restore(self.jobs.get_names())\n\n        log.info(\"Applying retrieved state to Tron objects...\")\n\n        with prom_metrics.timer(\n            operation_name=\"apply_state_to_job_objects\",\n            log=log,\n            histogram_metric=prom_metrics.tron_job_state_application_duration_seconds_histogram,\n            gauge_metric=prom_metrics.tron_last_job_state_application_duration_seconds_gauge,\n        ):\n            self.jobs.restore_state(states.get(\"job_state\", {}), action_runner)\n\n        log.info(\"Tron state restore complete.\")\n\n    def __str__(self):\n        return \"MCP\"\n"
  },
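apply_master_configuration in mcp.py above drives reconfiguration from a declarative mapping: each entry is a callable followed by the names of the master-config attributes to read and pass to it positionally. Here is a standalone sketch of that dispatch pattern; the handlers and the SimpleNamespace config object are made up for illustration.

```python
# Standalone sketch of the (callable, *attribute_names) dispatch used by
# apply_master_configuration. The config object and handlers are illustrative.
from types import SimpleNamespace


def apply_master_configuration(mapping, master_config):
    def get_config_value(seq):
        return [getattr(master_config, item) for item in seq]

    for entry in mapping:
        func, args = entry[0], get_config_value(entry[1:])
        func(*args)


def set_context_base(command_context):
    print("command_context:", command_context)


def configure_eventbus(enabled):
    print("eventbus enabled:", enabled)


master_config = SimpleNamespace(command_context={"runner": "batch"}, eventbus_enabled=True)
apply_master_configuration(
    [
        (set_context_base, "command_context"),
        (configure_eventbus, "eventbus_enabled"),
    ],
    master_config,
)
```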
  {
    "path": "tron/mesos.py",
    "content": "import json\nimport logging\nimport re\nimport socket\nimport time\nfrom typing import Any\nfrom urllib.parse import urlparse\n\nimport requests\nimport staticconf\nfrom task_processing.runners.subscription import Subscription\nfrom task_processing.task_processor import TaskProcessor\nfrom twisted.internet.defer import logError\n\nimport tron.metrics as metrics\nfrom tron.actioncommand import ActionCommand\nfrom tron.utils.queue import PyDeferredQueue\n\nTASK_LOG_FORMAT = \"%(asctime)s %(name)s %(levelname)s %(message)s\"\nTASK_OUTPUT_LOGGER = \"tron.mesos.task_output\"\nCLUSTERMAN_YAML_FILE_PATH = \"/nail/srv/configs/clusterman.yaml\"\nCLUSTERMAN_METRICS_YAML_FILE_PATH = \"/nail/srv/configs/clusterman_metrics.yaml\"\n\nlog = logging.getLogger(__name__)\n\n\ndef get_clusterman_metrics():\n    try:\n        import clusterman_metrics\n        import clusterman_metrics.util.costs\n\n        staticconf.YamlConfiguration(\n            CLUSTERMAN_YAML_FILE_PATH,\n            namespace=\"clusterman\",\n        )\n        staticconf.YamlConfiguration(\n            CLUSTERMAN_METRICS_YAML_FILE_PATH,\n            namespace=\"clusterman_metrics\",\n        )\n    except (ImportError, FileNotFoundError):\n        clusterman_metrics = None\n\n    return clusterman_metrics\n\n\ndef get_mesos_leader(master_address, mesos_master_port):\n    url = f\"{master_address}:{mesos_master_port}/redirect\"\n    response = requests.get(url)\n    return f\"{urlparse(response.url).hostname}:{mesos_master_port}\"\n\n\ndef combine_volumes(defaults, overrides):\n    \"\"\"Helper to reconcile lists of volume mounts.\n\n    If any volumes have the same container path, the one in overrides wins.\n    \"\"\"\n    result = {mount[\"container_path\"]: mount for mount in defaults}\n    for mount in overrides:\n        result[mount[\"container_path\"]] = mount\n    return list(result.values())\n\n\ndef get_secret_from_file(file_path):\n    if file_path is not None:\n        with open(file_path) as f:\n            secret = f.read().strip()\n    else:\n        secret = None\n    return secret\n\n\nclass MesosClusterRepository:\n    \"\"\"A class that stores MesosCluster objects and configuration.\"\"\"\n\n    # Config values\n    mesos_enabled = False\n    master_address = None\n    master_port = None\n    secret_file = None\n    role = None\n    principal = None\n    default_volumes = ()\n    dockercfg_location = None\n    offer_timeout = None\n    secret = None\n\n    name = \"frameworks\"\n    clusters: dict[str, \"MesosCluster\"] = {}\n    state_data: dict[str, Any] = {}\n    state_watcher = None\n\n    @classmethod\n    def attach(cls, _, observer):\n        cls.state_watcher = observer\n\n    @classmethod\n    def get_cluster(cls, master_address=None):\n        if master_address is None:\n            master_address = cls.master_address\n        if master_address not in cls.clusters:\n            framework_id = cls.state_data.get(master_address)\n            cluster = MesosCluster(\n                mesos_address=master_address,\n                mesos_master_port=cls.master_port,\n                secret=cls.secret,\n                principal=cls.principal,\n                mesos_role=cls.role,\n                framework_id=framework_id,\n                enabled=cls.mesos_enabled,\n                default_volumes=cls.default_volumes,\n                dockercfg_location=cls.dockercfg_location,\n                offer_timeout=cls.offer_timeout,\n            )\n            cls.clusters[master_address] = cluster\n    
    return cls.clusters[master_address]\n\n    @classmethod\n    def shutdown(cls):\n        for cluster in cls.clusters.values():\n            cluster.stop()\n\n    @classmethod\n    def configure(cls, mesos_options):\n        cls.master_address = mesos_options.master_address\n        cls.master_port = mesos_options.master_port\n        cls.secret_file = mesos_options.secret_file\n        cls.role = mesos_options.role\n        cls.secret = get_secret_from_file(cls.secret_file)\n        cls.principal = mesos_options.principal\n        cls.mesos_enabled = mesos_options.enabled\n        cls.default_volumes = [vol._asdict() for vol in mesos_options.default_volumes]\n        cls.dockercfg_location = mesos_options.dockercfg_location\n        cls.offer_timeout = mesos_options.offer_timeout\n\n        for cluster in cls.clusters.values():\n            cluster.set_enabled(cls.mesos_enabled)\n            cluster.configure_tasks(\n                default_volumes=cls.default_volumes,\n                dockercfg_location=cls.dockercfg_location,\n                offer_timeout=cls.offer_timeout,\n            )\n\n    @classmethod\n    def restore_state(cls, mesos_state):\n        cls.state_data = mesos_state.get(cls.name, {})\n\n    @classmethod\n    def save(cls, master_address, framework_id):\n        cls.state_data[master_address] = framework_id\n        cls.state_watcher.handler(cls, None)\n\n    @classmethod\n    def remove(cls, master_address):\n        if master_address in cls.state_data:\n            del cls.state_data[master_address]\n            cls.state_watcher.handler(cls, None)\n\n\nclass MesosTask(ActionCommand):\n    ERROR_STATES = frozenset([\"failed\", \"killed\", \"error\"])\n\n    def __init__(self, id, task_config, serializer=None):\n        super().__init__(id, task_config.cmd, serializer)\n        self.task_config = task_config\n\n        self.log = self.get_event_logger()\n        self.setup_output_logging()\n\n        config_str = str(self.get_config())\n        # AWS_SECRET_ACCESS_KEYs are base64-encoded so it uses alphanumerics plus +, /, and =\n        config_str = re.sub(\n            \"'AWS_SECRET_ACCESS_KEY': '[a-zA-Z0-9+/=]+'\",\n            \"AWS_SECRET_ACCESS_KEY_REDACTED\",\n            config_str,\n        )\n        config_str = re.sub(\n            \"'AWS_ACCESS_KEY_ID': '[a-zA-Z0-9]+'\",\n            \"AWS_ACCESS_KEY_ID_REDACTED\",\n            config_str,\n        )\n        self.log.info(\n            f\"Mesos task {self.get_mesos_id()} created with config {config_str}\",\n        )\n\n    def get_event_logger(self):\n        log = logging.getLogger(__name__ + \".\" + self.id)\n        # Every time a task gets created, this function runs and will add\n        # more stderr handlers to the logger, which results in duplicate log\n        # output. 
We only want to add the stderr handler if the logger does not\n        # have a handler yet.\n        if not len(log.handlers):\n            handler = logging.StreamHandler(self.stderr)\n            handler.setFormatter(logging.Formatter(TASK_LOG_FORMAT))\n            log.addHandler(handler)\n        return log\n\n    def setup_output_logging(self):\n        task_id = self.get_mesos_id()\n        stdout_logger = logging.getLogger(\n            \"{}.{}.{}\".format(TASK_OUTPUT_LOGGER, task_id, \"stdout\"),\n        )\n        stdout_logger.addHandler(logging.StreamHandler(self.stdout))\n        stderr_logger = logging.getLogger(\n            \"{}.{}.{}\".format(TASK_OUTPUT_LOGGER, task_id, \"stderr\"),\n        )\n        stderr_logger.addHandler(logging.StreamHandler(self.stderr))\n\n    def get_mesos_id(self):\n        return self.task_config.task_id\n\n    def get_config(self):\n        return self.task_config\n\n    def report_resources(self, decrement=False):\n        multiplier = -1 if decrement else 1\n        metrics.count(\"tron.mesos.cpus\", self.task_config.cpus * multiplier)\n        metrics.count(\"tron.mesos.mem\", self.task_config.mem * multiplier)\n        metrics.count(\"tron.mesos.disk\", self.task_config.disk * multiplier)\n\n    def log_event_info(self, event):\n        # Separate out so task still transitions even if this nice-to-have logging fails.\n        mesos_type = getattr(event, \"platform_type\", None)\n        if mesos_type == \"staging\":\n            # TODO: Save these in state?\n            agent = event.raw.get(\"offer\", {}).get(\"agent_id\", {}).get(\"value\")\n            hostname = event.raw.get(\"offer\", {}).get(\"hostname\")\n            self.log.info(\n                f\"Staging task on agent {agent} (hostname {hostname})\",\n            )\n        elif mesos_type == \"running\":\n            agent = event.raw.get(\"agent_id\", {}).get(\"value\")\n            self.log.info(f\"Running on agent {agent}\")\n        elif mesos_type == \"finished\":\n            pass\n        elif mesos_type in self.ERROR_STATES:\n            self.log.error(f\"Error from Mesos: {event.raw}\")\n        elif mesos_type is None:\n            self.log.info(f\"Non-Mesos event: {event.raw}\")\n            if \"Failed due to offer timeout\" in str(event.raw):\n                self.log.info(\"Explanation:\")\n                self.log.info(\"This error means that Tron timed out waiting for Mesos to give it the\")\n                self.log.info(\"resources requested (ram, cpu, disk, pool, etc).\")\n                self.log.info(\"This can happen if the cluster is low on resources, or if the resource\")\n                self.log.info(\"requests are too high.\")\n                self.log.info(\"Try reducing the resource request, or adding retries + retries_delay.\")\n                self.log.info(\"\")\n\n        # Mesos events may have task reasons\n        if mesos_type:\n            message = event.raw.get(\"message\", \"\")\n            reason = event.raw.get(\"reason\", \"\")\n            if message or reason:\n                self.log.info(f\"More info: {reason}: {message}\")\n\n    def handle_event(self, event):\n        event_id = getattr(event, \"task_id\", None)\n        if event_id != self.get_mesos_id():\n            self.log.warning(\n                f\"Event task id {event_id} does not match, ignoring\",\n            )\n            return\n        mesos_type = getattr(event, \"platform_type\", None)\n\n        self.log.info(\n            f\"Got event for task 
{event_id}, Mesos type {mesos_type}\",\n        )\n        try:\n            self.log_event_info(event)\n        except Exception as e:\n            self.log.warning(f\"Exception while logging event: {e}\")\n\n        if mesos_type == \"staging\":\n            pass\n        elif mesos_type == \"starting\":\n            self.started()\n        elif mesos_type == \"running\":\n            self.started()\n        elif mesos_type == \"finished\":\n            self.exited(0)\n        elif mesos_type == \"lost\":\n            self.log.warning(\"Mesos does not know anything about this task, it is LOST\")\n            self.log.warning(\n                \"This can happen for any number of reasons, and Tron can't know if the task ran or not at all!\"\n            )\n            self.log.warning(\"If you want Tron to RUN it (again) anyway, retry it with:\")\n            self.log.warning(f\"    tronctl retry {self.id}\")\n            self.log.warning(\"If you want Tron to NOT run it and consider it as a success, skip it with:\")\n            self.log.warning(f\"    tronctl skip {self.id}\")\n            self.log.warning(\"If you want Tron to NOT run it and consider it as a failure, fail it with:\")\n            self.log.warning(f\"    tronctl fail {self.id}\")\n            self.exited(None)\n        elif mesos_type in self.ERROR_STATES:\n            self.exited(1)\n        elif mesos_type is None:\n            pass\n        else:\n            self.log.info(\n                f\"Did not handle unknown mesos event type: {event}\",\n            )\n\n        if event.terminal:\n            self.log.info(\"This Mesos event was terminal, ending this action\")\n            self.report_resources(decrement=True)\n\n            exit_code = int(not getattr(event, \"success\", False))\n            # Returns False if we've already exited normally above\n            unexpected_error = self.exited(exit_code)\n            if unexpected_error:\n                self.log.error(\"Unexpected failure, exiting\")\n\n            self.done()\n\n\nclass MesosCluster:\n    def __init__(\n        self,\n        mesos_address,\n        mesos_master_port=None,\n        secret=None,\n        principal=None,\n        mesos_role=None,\n        framework_id=None,\n        enabled=True,\n        default_volumes=None,\n        dockercfg_location=None,\n        offer_timeout=None,\n    ):\n        self.mesos_address = mesos_address\n        self.mesos_master_port = mesos_master_port\n        self.secret = secret\n        self.principal = principal\n        self.mesos_role = mesos_role\n        self.enabled = enabled\n        self.default_volumes = default_volumes or []\n        self.dockercfg_location = dockercfg_location\n        self.offer_timeout = offer_timeout\n        self.framework_id = framework_id\n\n        self.processor = TaskProcessor()\n        self.queue = PyDeferredQueue()\n        self.deferred = None\n        self.runner = None\n        self.tasks = {}\n\n        self.processor.load_plugin(\n            provider_module=\"task_processing.plugins.mesos\",\n        )\n        self.connect()\n\n    def set_enabled(self, is_enabled):\n        self.enabled = is_enabled\n        if is_enabled:\n            self.connect()\n        else:\n            self.stop(fail_tasks=True)\n\n    def configure_tasks(\n        self,\n        default_volumes,\n        dockercfg_location,\n        offer_timeout,\n    ):\n        self.default_volumes = default_volumes\n        self.dockercfg_location = dockercfg_location\n        
self.offer_timeout = offer_timeout\n\n    def connect(self):\n        self.runner = self.get_runner(self.mesos_address, self.queue)\n        self.handle_next_event()\n\n    def handle_next_event(self, deferred_result=None):\n        if self.deferred and not self.deferred.called:\n            log.warning(\n                \"Already have handlers waiting for next event in queue, \" \"not adding more\",\n            )\n            return\n        self.deferred = self.queue.get()\n        self.deferred.addCallback(self._process_event)\n        self.deferred.addCallback(self.handle_next_event)\n        self.deferred.addErrback(logError)\n        self.deferred.addErrback(self.handle_next_event)\n\n    def _check_connection(self):\n        if self.runner.stopping:\n            # Last framework was terminated for some reason, re-connect.\n            log.info(\"Last framework stopped, re-connecting\")\n            self.connect()\n        elif self.deferred.called:\n            # Just in case callbacks are missing, re-add.\n            self.handle_next_event()\n\n    def submit(self, task):\n        if not task:\n            return\n\n        if not self.enabled:\n            task.log.info(\"Task failed to start, Mesos is disabled.\")\n            task.exited(1)\n            return\n        self._check_connection()\n\n        mesos_task_id = task.get_mesos_id()\n        self.tasks[mesos_task_id] = task\n        env = task.get_config()[\"environment\"]\n        clusterman_resource_str = env.get(\"CLUSTERMAN_RESOURCES\")\n        clusterman_metrics = get_clusterman_metrics()\n        if clusterman_resource_str and clusterman_metrics:\n            clusterman_resources = json.loads(clusterman_resource_str)\n            cluster = env.get(\"EXECUTOR_CLUSTER\", env.get(\"PAASTA_CLUSTER\"))\n            pool = env.get(\"EXECUTOR_POOL\", env.get(\"PAASTA_POOL\"))\n            aws_region = staticconf.read(f\"clusters.{cluster}.aws_region\", namespace=\"clusterman\")\n            metrics_client = clusterman_metrics.ClustermanMetricsBotoClient(\n                region_name=aws_region,\n                app_identifier=pool,\n            )\n            with metrics_client.get_writer(\n                clusterman_metrics.APP_METRICS,\n                aggregate_meteorite_dims=True,\n            ) as writer:\n                for metric_key, metric_value in clusterman_resources.items():\n                    writer.send((metric_key, int(time.time()), metric_value))\n        self.runner.run(task.get_config())\n        log.info(\n            f\"Submitting task {mesos_task_id} to {self.mesos_address}\",\n        )\n        task.report_resources()\n\n    def recover(self, task):\n        if not task:\n            return\n\n        if not self.enabled:\n            task.log.info(\"Could not recover task, Mesos is disabled.\")\n            task.exited(None)\n            return\n        self._check_connection()\n\n        mesos_task_id = task.get_mesos_id()\n        self.tasks[mesos_task_id] = task\n        task.log.info(\"TRON RESTARTED! 
Starting recovery procedure by reconciling state for this task from Mesos\")\n        task.started()\n        self.runner.reconcile(task.get_config())\n        task.report_resources()\n\n    def create_task(  # type: ignore[no-untyped-def]  # this file is not long for this world\n        self,\n        action_run_id,\n        command,\n        cpus,\n        mem,\n        disk,\n        constraints,\n        docker_image,\n        docker_parameters,\n        env,\n        extra_volumes,\n        serializer,\n        task_id=None,\n    ) -> MesosTask | None:\n        if not self.runner:\n            return None\n\n        uris = [self.dockercfg_location] if self.dockercfg_location else []\n        volumes = combine_volumes(self.default_volumes, extra_volumes)\n        task_kwargs = {\n            \"name\": action_run_id,\n            \"cmd\": command,\n            \"cpus\": cpus,\n            \"mem\": mem,\n            \"disk\": disk,\n            \"constraints\": constraints,\n            \"image\": docker_image,\n            \"docker_parameters\": docker_parameters,\n            \"environment\": env,\n            \"volumes\": volumes,\n            \"uris\": uris,\n            \"offer_timeout\": self.offer_timeout,\n        }\n        task_config = self.runner.TASK_CONFIG_INTERFACE(**task_kwargs)\n\n        if task_id is not None:\n            try:\n                task_config = task_config.set_task_id(task_id)\n            except ValueError:\n                log.error(f\"Invalid {task_id} for {action_run_id}\")\n                return None\n\n        return MesosTask(action_run_id, task_config, serializer)\n\n    def get_runner(self, mesos_address, queue):\n        if not self.enabled:\n            log.info(\"Mesos is disabled, not creating a framework.\")\n            return None\n\n        if self.runner and not self.runner.stopping:\n            log.info(\"Already have a running framework, not creating one.\")\n            return self.runner\n\n        framework_name = f\"tron-{socket.gethostname()}\"\n        executor = self.processor.executor_from_config(\n            provider=\"mesos_task\",\n            provider_config={\n                \"secret\": self.secret,\n                \"principal\": self.principal,\n                \"mesos_address\": get_mesos_leader(mesos_address, self.mesos_master_port),\n                \"role\": self.mesos_role,\n                \"framework_name\": framework_name,\n                \"framework_id\": self.framework_id,\n                \"failover\": True,\n            },\n        )\n\n        def log_output(task_id, message, stream):\n            logger = logging.getLogger(\n                f\"{TASK_OUTPUT_LOGGER}.{task_id}.{stream}\",\n            )\n            logger.info(message)\n\n        logging_executor = self.processor.executor_from_config(\n            provider=\"logging\",\n            provider_config={\n                \"downstream_executor\": executor,\n                \"handler\": log_output,\n                \"format_string\": \"{line}\",\n            },\n        )\n        return Subscription(logging_executor, queue)\n\n    def _process_event(self, event):\n        if event.kind == \"control\":\n            message = getattr(event, \"message\", None)\n            if message == \"stop\":\n                # Framework has been removed, stop it.\n                log.warning(f\"Framework has been stopped: {event.raw}\")\n                self.stop()\n                MesosClusterRepository.remove(self.mesos_address)\n            elif message 
== \"unknown\":\n                log.warning(\n                    f\"Unknown error from Mesos master: {event.raw}\",\n                )\n            elif message == \"registered\":\n                framework_id = event.raw[\"framework_id\"][\"value\"]\n                MesosClusterRepository.save(self.mesos_address, framework_id)\n            else:\n                log.warning(f\"Unknown type of control event: {event}\")\n\n        elif event.kind == \"task\":\n            if not hasattr(event, \"task_id\"):\n                log.warning(f\"Task event missing task_id: {event}\")\n                return\n            if event.task_id not in self.tasks:\n                log.warning(\n                    f\"Received event for unknown task {event.task_id}: {event}\",\n                )\n                return\n            task = self.tasks[event.task_id]\n            task.handle_event(event)\n            if task.is_done:\n                del self.tasks[event.task_id]\n        else:\n            log.warning(f\"Unknown type of event: {event}\")\n\n    def stop(self, fail_tasks=False):\n        self.framework_id = None\n        if self.runner:\n            self.runner.stop()\n\n        # Clear message queue\n        if self.deferred:\n            self.deferred.cancel()\n            self.deferred = None\n        self.queue = PyDeferredQueue()\n\n        if fail_tasks:\n            for key, task in list(self.tasks.items()):\n                task.exited(None)\n                del self.tasks[key]\n\n    def kill(self, task_id):\n        return self.runner.kill(task_id)\n"
  },
  {
    "path": "tron/metrics.py",
    "content": "from pyformance.meters import Counter  # type: ignore\nfrom pyformance.meters import Histogram\nfrom pyformance.meters import Meter\nfrom pyformance.meters import SimpleGauge\nfrom pyformance.meters import Timer\n\nall_metrics = {}  # type: ignore\n\n\ndef get_metric(metric_type, name, dimensions, default):\n    global all_metrics\n    dimensions = tuple(sorted(dimensions.items())) if dimensions else ()\n    key = (metric_type, name, dimensions)\n    return all_metrics.setdefault(key, default)\n\n\ndef timer(name, delta, dimensions=None):\n    timer = get_metric(\"timer\", name, dimensions, Timer())\n    timer._update(delta)\n\n\ndef count(name, inc=1, dimensions=None):\n    counter = get_metric(\"counter\", name, dimensions, Counter())\n    counter.inc(inc)\n\n\ndef meter(name, dimensions=None):\n    meter = get_metric(\"meter\", name, dimensions, Meter())\n    meter.mark()\n\n\ndef gauge(name, value, dimensions=None):\n    gauge = get_metric(\"gauge\", name, dimensions, SimpleGauge())\n    gauge.set_value(value)\n\n\ndef histogram(name, value, dimensions=None):\n    histogram = get_metric(\"histogram\", name, dimensions, Histogram())\n    histogram.add(value)\n\n\ndef view_timer(timer):\n    data = view_meter(timer)\n    data.update(view_histogram(timer))\n    return data\n\n\ndef view_counter(counter):\n    return {\"count\": counter.get_count()}\n\n\ndef view_meter(meter):\n    return {\n        \"count\": meter.get_count(),\n        \"m1_rate\": meter.get_one_minute_rate(),\n        \"m5_rate\": meter.get_five_minute_rate(),\n        \"m15_rate\": meter.get_fifteen_minute_rate(),\n    }\n\n\ndef view_gauge(gauge):\n    return {\"value\": gauge.get_value()}\n\n\ndef view_histogram(histogram):\n    snapshot = histogram.get_snapshot()\n    return {\n        \"count\": histogram.get_count(),\n        \"mean\": histogram.get_mean(),\n        \"min\": histogram.get_min(),\n        \"max\": histogram.get_max(),\n        \"p50\": snapshot.get_median(),\n        \"p75\": snapshot.get_75th_percentile(),\n        \"p95\": snapshot.get_95th_percentile(),\n        \"p99\": snapshot.get_99th_percentile(),\n    }\n\n\nmetrics_to_viewers = {\n    \"counter\": view_counter,\n    \"gauge\": view_gauge,\n    \"histogram\": view_histogram,\n    \"meter\": view_meter,\n    \"timer\": view_timer,\n}\n\n\ndef view_all_metrics():\n    all_data = {metric_type: [] for metric_type in metrics_to_viewers}\n    for (metric_type, name, dims), metric in all_metrics.items():\n        data = {\"name\": name, **metrics_to_viewers[metric_type](metric)}\n        if dims:\n            data.update({\"dimensions\": dict(dims)})\n        all_data[metric_type].append(data)\n    return all_data\n"
  },
  {
    "path": "tron/node.py",
    "content": "import itertools\nimport logging\nimport random\n\nfrom twisted.conch.client.knownhosts import KnownHostsFile\nfrom twisted.internet import defer\nfrom twisted.internet import protocol\nfrom twisted.internet import reactor\nfrom twisted.python import failure\nfrom twisted.python.filepath import FilePath\n\nfrom tron import ssh\nfrom tron.utils import collections\nfrom tron.utils import twistedutils\n\nlog = logging.getLogger(__name__)\n\n# We should also only wait a certain amount of time for a new channel to be\n# established when we already have an open connection.  This timeout will\n# usually get triggered prior to even a TCP timeout, so essentially it's our\n# shortcut to discovering the connection died.\nRUN_START_TIMEOUT = 120\n\n# Love to run this, but we need to finish connecting to our node first\nRUN_STATE_CONNECTING = 0\n\n# We are connected and trying to open a channel to exec the process\nRUN_STATE_STARTING = 5\n\n# Process has been exec'ed, just waiting for it to exit\nRUN_STATE_RUNNING = 10\n\n# Process has exited\nRUN_STATE_COMPLETE = 100\n\n\nclass Error(Exception):\n    pass\n\n\nclass ConnectError(Error):\n    \"\"\"There was a problem connecting, run was never started\"\"\"\n\n    pass\n\n\nclass ResultError(Error):\n    \"\"\"There was a problem retrieving the result from this run\n\n    We did try to execute the command, but we don't know if it succeeded or\n    failed.\n    \"\"\"\n\n    pass\n\n\nclass NodePoolRepository:\n    \"\"\"A Singleton to store Node and NodePool objects.\"\"\"\n\n    _instance = None\n\n    def __init__(self):\n        if self._instance is not None:\n            raise ValueError(\"NodePoolRepository is already instantiated.\")\n        super().__init__()\n        self.nodes = collections.MappingCollection(\"nodes\")\n        self.pools = collections.MappingCollection(\"pools\")\n\n    @classmethod\n    def get_instance(cls):\n        if cls._instance is None:\n            cls._instance = cls()\n        return cls._instance\n\n    def filter_by_name(self, node_configs, node_pool_configs):\n        self.nodes.filter_by_name(node_configs)\n        self.pools.filter_by_name(\n            list(node_configs.keys()) + list(node_pool_configs.keys()),\n        )\n\n    @classmethod\n    def update_from_config(cls, node_configs, node_pool_configs, ssh_config):\n        instance = cls.get_instance()\n        ssh_options = ssh.SSHAuthOptions.from_config(ssh_config)\n        known_hosts = KnownHosts.from_path(ssh_config.known_hosts_file)\n        instance.filter_by_name(node_configs, node_pool_configs)\n        instance._update_nodes(\n            node_configs,\n            ssh_options,\n            known_hosts,\n            ssh_config,\n        )\n        instance._update_node_pools(node_pool_configs)\n\n    def _update_nodes(\n        self,\n        node_configs,\n        ssh_options,\n        known_hosts,\n        ssh_config,\n    ):\n        for config in node_configs.values():\n            pub_key = known_hosts.get_public_key(config.hostname)\n            node = Node.from_config(config, ssh_options, pub_key, ssh_config)\n            self.add_node(node)\n\n    def _update_node_pools(self, node_pool_configs):\n        for config in node_pool_configs.values():\n            nodes = self._get_nodes_by_name(config.nodes)\n            pool = NodePool.from_config(config, nodes)\n            self.pools.replace(pool)\n\n    def add_node(self, node):\n        self.nodes.replace(node)\n        self.pools.replace(NodePool.from_node(node))\n\n   
 def get_node(self, node_name, default=None):\n        return self.nodes.get(node_name, default)\n\n    def __contains__(self, node):\n        return node.get_name() in self.pools\n\n    def get_by_name(self, name, default=None):\n        return self.pools.get(name, default)\n\n    def _get_nodes_by_name(self, names):\n        return [self.nodes[name] for name in names]\n\n    def clear(self):\n        self.nodes.clear()\n        self.pools.clear()\n\n\nclass NodePool:\n    \"\"\"A pool of Node objects.\"\"\"\n\n    def __init__(self, nodes, name):\n        self.nodes = nodes\n        self.disabled = False\n        self.name = name or \"_\".join(n.get_name() for n in nodes)\n        self.iter = itertools.cycle(self.nodes)\n\n    @classmethod\n    def from_config(cls, node_pool_config, nodes):\n        return cls(nodes, node_pool_config.name)\n\n    @classmethod\n    def from_node(cls, node):\n        return cls([node], node.get_name())\n\n    def __eq__(self, other):\n        return isinstance(other, NodePool) and self.nodes == other.nodes\n\n    def __ne__(self, other):\n        return not self == other\n\n    def get_name(self):\n        return self.name\n\n    def get_nodes(self):\n        return self.nodes\n\n    def next(self):\n        \"\"\"Return a random node from the pool.\"\"\"\n        return random.choice(self.nodes)\n\n    def next_round_robin(self):\n        \"\"\"Return the next node cycling in a consistent order.\"\"\"\n        return next(self.iter)\n\n    def disable(self):\n        \"\"\"Required for MappingCollection.Item interface.\"\"\"\n        self.disabled = True\n\n    def get_by_hostname(self, hostname):\n        for node in self.nodes:\n            if node.hostname == hostname:\n                return node\n\n    def __str__(self):\n        return \"NodePool:%s\" % self.name\n\n\nclass KnownHosts(KnownHostsFile):\n    \"\"\"Lookup host key for a hostname.\"\"\"\n\n    @classmethod\n    def from_path(cls, file_path):\n        if not file_path:\n            return cls(None)\n        return cls.fromPath(FilePath(file_path))\n\n    def get_public_key(self, hostname):\n        for entry in self.iterentries():\n            if entry.matchesHost(hostname):\n                return entry.publicKey\n        log.warning(\"Missing host key for: %s\", hostname)\n\n\nclass RunState:\n    def __init__(self, action_run):\n        self.run = action_run\n        self.state = RUN_STATE_CONNECTING\n        self.deferred = defer.Deferred()\n        self.channel = None\n\n    def __repr__(self):\n        return f\"RunState(run: {self.run!r}, state: {self.state!r}, channel: {self.channel!r})\"\n\n\ndef determine_jitter(count, node_settings):\n    \"\"\"Return a pseudo-random number of seconds to delay a run.\"\"\"\n    count *= node_settings.jitter_load_factor\n    min_count = node_settings.jitter_min_load\n    max_jitter = max(0.0, count - min_count)\n    max_jitter = min(node_settings.jitter_max_delay, max_jitter)\n    return random.random() * float(max_jitter)\n\n\nclass Node:\n    \"\"\"A node is tron's interface to communicating with an actual machine.\"\"\"\n\n    def __init__(self, config, ssh_options, pub_key, node_settings):\n        self.config = config\n        self.node_settings = node_settings\n\n        # SSH Options\n        self.conch_options = ssh_options\n\n        # The SSH connection we use to open channels on. 
If present, means we\n        # are connected.\n        self.connection = None\n\n        # If present, means we are trying to connect\n        self.connection_defer = None\n\n        # Map of run id to instance of RunState\n        self.run_states = {}\n\n        self.idle_timer = None\n        self.disabled = False\n        self.pub_key = pub_key\n\n    @property\n    def hostname(self):\n        return self.config.hostname\n\n    @property\n    def username(self):\n        return self.config.username\n\n    @property\n    def port(self):\n        return self.config.port\n\n    @classmethod\n    def from_config(cls, node_config, ssh_options, pub_key, node_settings):\n        return cls(node_config, ssh_options, pub_key, node_settings)\n\n    def get_name(self):\n        return self.config.name\n\n    name = property(get_name)\n\n    def disable(self):\n        \"\"\"Required for MappingCollection.Item interface.\"\"\"\n        self.disabled = True\n\n    def __eq__(self, other):\n        if not isinstance(other, self.__class__):\n            return False\n        return (\n            self.config == other.config\n            and self.conch_options == other.conch_options\n            and self.pub_key == other.pub_key\n            and self.node_settings == other.node_settings\n        )\n\n    def __ne__(self, other):\n        return not self == other\n\n    # TODO: Test\n    def submit_command(self, command):\n        \"\"\"Submit an ActionCommand to be run on this node. Optionally provide\n        an error callback which will be called on error.\n        \"\"\"\n        deferred = self.run(command)\n        deferred.addErrback(command.handle_errback)\n        return deferred\n\n    def run(self, run):\n        \"\"\"Execute the specified run\n\n        A run consists of a very specific set of interfaces which allow us to\n        execute a command on this remote machine and return results.\n        \"\"\"\n        log.info(\"Running %s for %s on %s\", run.command, run.id, self.hostname)\n\n        # When this run completes, for good or bad, we'll inform the caller by\n        # calling 'succeed' or 'fail' on the run Since the defined interface\n        # is on these specific callbacks, we won't bother returning the\n        # deferred here. 
This allows the caller to not really care about\n        # twisted specific stuff at all, all it needs to know is that one of\n        # those functions will eventually be called back\n\n        if run.id in self.run_states:\n            log.warning(\n                \"Run %s(%r) already running !?!\",\n                run.id,\n                self.run_states[run.id],\n            )\n\n        if self.idle_timer and self.idle_timer.active():\n            self.idle_timer.cancel()\n\n        self.run_states[run.id] = RunState(run)\n\n        # TODO: have this return a runner instead of number\n        fudge_factor = determine_jitter(\n            len(self.run_states),\n            self.node_settings,\n        )\n        if fudge_factor == 0.0:\n            self._do_run(run)\n        else:\n            log.info(\n                \"Delaying execution of %s for %.2f secs\",\n                run.id,\n                fudge_factor,\n            )\n            reactor.callLater(fudge_factor, self._do_run, run)\n\n        # We return the deferred here, but really we're trying to keep the rest\n        # of the world from getting too involved with twisted.\n        return self.run_states[run.id].deferred\n\n    def stop(self, command):\n        \"\"\"Stop this command by marking it as failed.\"\"\"\n        exc = failure.Failure(exc_value=ResultError(\"Run stopped\"))\n        self._fail_run(command, exc)\n\n    def _do_run(self, run):\n        \"\"\"Finish starting to execute a run\n\n        This step may have been delayed.\n        \"\"\"\n\n        # Now let's see if we need to start this off by establishing a\n        # connection or if we are already connected\n        if self.connection is None:\n            self._connect_then_run(run)\n        else:\n            self._open_channel(run)\n\n    def _cleanup(self, run):\n        # TODO: why set to None before deleting it?\n        self.run_states[run.id].channel = None\n        del self.run_states[run.id]\n\n        if not self.run_states:\n            self.idle_timer = reactor.callLater(\n                self.node_settings.idle_connection_timeout,\n                self._connection_idle_timeout,\n            )\n\n    def _connection_idle_timeout(self):\n        if self.connection:\n            log.info(\n                \"Connection to %s idle for %d secs. 
Closing.\",\n                self.hostname,\n                self.node_settings.idle_connection_timeout,\n            )\n            self.connection.transport.loseConnection()\n\n    def _fail_run(self, run, result):\n        \"\"\"Indicate the run has failed, and cleanup state\"\"\"\n        log.debug(\"Run %s has failed\", run.id)\n        if not self._is_run_id_tracked(run):\n            log.warning(\"Run %s no longer tracked (_fail_run)\", run.id)\n            return\n\n        # Add a dummy errback handler to prevent Unhandled error messages.\n        # Unless someone is explicitly caring about this defer the error will\n        # have been reported elsewhere.\n        self.run_states[run.id].deferred.addErrback(lambda failure: None)\n\n        cb = self.run_states[run.id].deferred.errback\n\n        self._cleanup(run)\n\n        log.info(\"Calling fail_run callbacks\")\n        run.exited(None)\n        cb(result)\n\n    def _is_run_id_tracked(self, run):\n        return run.id in self.run_states and self.run_states[run.id].run is run\n\n    def _connect_then_run(self, run):\n        # Have we started the connection process ?\n        if self.connection_defer is None:\n            self.connection_defer = self._connect()\n\n        def call_open_channel(arg):\n            self._open_channel(run)\n            return arg\n\n        def connect_fail(result):\n            log.warning(\n                \"Cannot run %s, Failed to connect to %s: %s\",\n                run,\n                self.hostname,\n                repr(result),\n            )\n            self.connection_defer = None\n            self._fail_run(\n                run,\n                failure.Failure(\n                    exc_value=ConnectError(\n                        \"Connection to %s@%s:%d failed\"\n                        % (\n                            self.username,\n                            self.hostname,\n                            self.port,\n                        ),\n                    ),\n                ),\n            )\n\n        self.connection_defer.addCallback(call_open_channel)\n        self.connection_defer.addErrback(connect_fail)\n\n    def _service_stopped(self, connection):\n        \"\"\"Called when the SSH service has disconnected fully.\n\n        We should be in a state where we know there are no runs in progress\n        because all the SSH channels should have disconnected them.\n        \"\"\"\n        if self.connection is not connection:\n            log.warning(\"Service stop has been called twice\")\n            return\n        self.connection = None\n\n        log.info(f\"Service to node {self.hostname} stopped\")\n\n        for run_id, run in self.run_states.items():\n            if run.state == RUN_STATE_CONNECTING:\n                # Now we can trigger a reconnect and re-start any waiting runs.\n                self._connect_then_run(run)\n            elif run.state == RUN_STATE_RUNNING:\n                self._fail_run(run, None)\n            elif run.state == RUN_STATE_STARTING:\n                if run.channel and run.channel.start_defer is not None:\n\n                    # This means our run IS still waiting to start. There\n                    # should be an outstanding timeout sitting on this guy as\n                    # well. 
We'll just short circuit it.\n                    twistedutils.defer_timeout(run.channel.start_defer, 0)\n                else:\n                    # Doesn't seem like this should ever happen.\n                    log.warning(\n                        \"Run %r caught in starting state, but\" \" start_defer is over.\",\n                        run_id,\n                    )\n                    self._fail_run(run, None)\n            else:\n                # Service ended. The open channels should know how to handle\n                # this (and cleanup) themselves, so there should not be any\n                # runs except those waiting to connect\n                raise Error(\n                    \"Run %s in state %s when service stopped\" % (run_id, run.state),\n                )\n\n    def _connect(self):\n        # This is complicated because we have to deal with a few different\n        # steps before our connection is really available for us:\n        #  1. Transport is created (our client creator does this)\n        #  2. Our transport is secure, and we can create our connection\n        #  3. The connection service is started, so we can use it\n\n        client_creator = protocol.ClientCreator(\n            reactor,\n            ssh.ClientTransport,\n            self.username,\n            self.conch_options,\n            self.pub_key,\n        )\n        create_defer = client_creator.connectTCP(\n            self.hostname,\n            self.config.port,\n            timeout=self.node_settings.connect_timeout,\n        )\n\n        # We're going to create a deferred, returned to the caller, that will\n        # be called back when we have an established, secure connection ready\n        # for opening channels. 
The value will be this instance of node.\n        connect_defer = defer.Deferred()\n        twistedutils.defer_timeout(\n            connect_defer,\n            self.node_settings.connect_timeout,\n        )\n\n        def on_service_started(connection):\n            # Booyah, time to start doing stuff\n            if self.connection:\n                log.error(\n                    \"Host %s service started called before disconnect(%s, %s)\",\n                    self.hostname,\n                    self.connection,\n                    connection,\n                )\n            self.connection = connection\n            self.connection_defer = None\n\n            connect_defer.callback(self)\n            return connection\n\n        def on_connection_secure(connection):\n            # We have a connection, but it might not be fully ready....\n            connection.service_start_defer = defer.Deferred()\n            connection.service_stop_defer = defer.Deferred()\n\n            connection.service_start_defer.addCallback(on_service_started)\n            connection.service_stop_defer.addCallback(self._service_stopped)\n            return connection\n\n        def on_transport_create(transport):\n            transport.connection_defer = defer.Deferred()\n            transport.connection_defer.addCallback(on_connection_secure)\n            return transport\n\n        def on_transport_fail(fail):\n            log.warning(\"Cannot connect to %s\", self.hostname)\n            connect_defer.errback(fail)\n\n        create_defer.addCallback(on_transport_create)\n        create_defer.addErrback(on_transport_fail)\n\n        return connect_defer\n\n    def _open_channel(self, run):\n        assert self.connection\n        if not self._is_run_id_tracked(run):\n            log.warning(\"Run %s no longer tracked (_open_channel)\", run.id)\n            return\n        assert self.run_states[run.id].state < RUN_STATE_RUNNING\n\n        self.run_states[run.id].state = RUN_STATE_STARTING\n\n        chan = ssh.ExecChannel(conn=self.connection)\n\n        chan.addOutputCallback(run.write_stdout)\n        chan.addErrorCallback(run.write_stderr)\n        chan.addEndCallback(run.done)\n\n        chan.command = run.command\n        chan.start_defer = defer.Deferred()\n        chan.start_defer.addCallback(self._run_started, run)\n        chan.start_defer.addErrback(self._run_start_error, run)\n\n        chan.exit_defer = defer.Deferred()\n        chan.exit_defer.addCallback(self._channel_complete, run)\n        chan.exit_defer.addErrback(self._channel_complete_unknown, run)\n\n        twistedutils.defer_timeout(chan.start_defer, RUN_START_TIMEOUT)\n\n        self.run_states[run.id].channel = chan\n        # TODO: I believe this needs to be checking the health of the connection\n        # before trying to open a new channel.  
If the connection is gone it\n        # needs to re-establish, or if the connection is not responding\n        # we shouldn't create this new channel\n        self.connection.openChannel(chan)\n\n    def _channel_complete(self, channel, run):\n        \"\"\"Callback once our channel has completed its operation\n\n        This is how we let our run know that we succeeded or failed.\n        \"\"\"\n        log.info(\"Run %s has completed with %r\", run.id, channel.exit_status)\n        if not self._is_run_id_tracked(run):\n            log.warning(\"Run %s no longer tracked\", run.id)\n            return\n\n        assert self.run_states[run.id].state < RUN_STATE_COMPLETE\n\n        self.run_states[run.id].state = RUN_STATE_COMPLETE\n        cb = self.run_states[run.id].deferred.callback\n        self._cleanup(run)\n\n        run.exited(channel.exit_status)\n        cb(channel.exit_status)\n\n    def _channel_complete_unknown(self, result, run):\n        \"\"\"Channel has closed on a running process without a proper exit\n\n        We don't actually know if the run succeeded\n        \"\"\"\n        log.error(\"Failure waiting on channel completion: %s\", repr(result))\n        self._fail_run(run, failure.Failure(exc_value=ResultError()))\n\n    def _run_started(self, channel, run):\n        \"\"\"Our run is actually a running process now, update the state\"\"\"\n        log.info(\"Run %s started for %s\", run.id, self.hostname)\n        channel.start_defer = None\n        if not self._is_run_id_tracked(run):\n            log.warning(\"Run %s no longer tracked (_run_started)\", run.id)\n            return\n        run.started()\n\n    def _run_start_error(self, result, run):\n        \"\"\"We failed to even run the command due to communication difficulties\n\n        Once all the runs have closed out we can try to reconnect.\n        \"\"\"\n        log.error(\n            \"Error running %s, disconnecting from %s: %s\",\n            run.id,\n            self.hostname,\n            repr(result),\n        )\n\n        # We clear out the deferred that likely called us because there is\n        # actually more than one error path because of user timeouts.\n        if run.id in self.run_states and self.run_states[run.id].channel:\n            self.run_states[run.id].channel.start_defer = None\n\n        self._fail_run(\n            run,\n            failure.Failure(\n                exc_value=ConnectError(\n                    \"Connection to %s@%s:%d failed\"\n                    % (\n                        self.username,\n                        self.hostname,\n                        self.port,\n                    ),\n                ),\n            ),\n        )\n\n        # We want to hard hangup on this connection. It could theoretically\n        # come back thanks to the magic of TCP, but something is up, best to\n        # fail right now than limp along for an unknown amount of time.\n        # self.connection.transport.connectionLost(failure.Failure())\n\n    def __str__(self):\n        return \"Node:{}@{}:{}\".format(\n            self.username or \"<default>\",\n            self.hostname,\n            self.config.port,\n        )\n\n    def __repr__(self):\n        return self.__str__()\n"
  },
  {
    "path": "tron/prom_metrics.py",
    "content": "import logging\nimport time\nfrom collections.abc import Generator\nfrom contextlib import contextmanager\n\nfrom prometheus_client import Counter\nfrom prometheus_client import Gauge\nfrom prometheus_client import Histogram\n\n\ntron_cpu_gauge = Gauge(\"tron_k8s_cpus\", \"Total number of CPUs allocated to Tron-launched containers\")\ntron_memory_gauge = Gauge(\"tron_k8s_mem\", \"Total amount of memory allocated to Tron-launched containers (in megabytes)\")\ntron_disk_gauge = Gauge(\"tron_k8s_disk\", \"Total amount of disk allocated to Tron-launched containers (in megabytes)\")\n\n# TODO: prefix with tron_ to be consistent with other metrics\njson_serialization_errors_counter = Counter(\n    \"json_serialization_errors_total\",\n    \"Total number of errors encountered while serializing state_data as JSON. These errors occur before writing to DynamoDB.\",\n)\n\n# TODO: prefix with tron_ to be consistent with other metrics\njson_deserialization_errors_counter = Counter(\n    \"json_deserialization_errors_total\",\n    \"Total number of errors encountered while deserializing state_data from JSON. These errors occur after reading from DynamoDB.\",\n)\n\n# Our current peak is about 10-12 partitions, so this should be more than sufficient.\n# Anything above 20 would get grouped into an inf bucket until we expand this.\n# This Histogram tracks the distribution of partition counts *per save/set operation*.\ntron_dynamodb_partitions_histogram = Histogram(\n    \"tron_dynamodb_partitions\",\n    \"Distribution of partitions per item observed during save operations in DynamoDB\",\n    buckets=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, float(\"inf\")],\n)\n\ntron_job_count_gauge = Gauge(\"tron_job_count\", \"Total number of Jobs configured in Tron\")\ntron_job_runs_created_counter = Counter(\"tron_job_runs_created\", \"Total number of JobRuns created\")\ntron_job_runs_completed_counter = Counter(\n    \"tron_job_runs_completed\",\n    \"Total number of JobRuns that completed execution\",\n    [\"outcome\"],\n)\n\ntron_action_count_gauge = Gauge(\"tron_action_count\", \"Total number of Actions configured in Tron (sum across all jobs)\")\ntron_action_runs_created_counter = Counter(\n    \"tron_action_runs_created\",\n    \"Total number of ActionRuns successfully submitted to executors for execution\",\n    [\"executor\"],\n)\ntron_action_runs_completed_counter = Counter(\n    \"tron_action_runs_completed\",\n    \"Total number of ActionRuns that completed execution\",\n    [\"executor\", \"outcome\", \"exit_status\"],\n)\n# We experience some variability in the time it takes to restore, but\n# this captures the distribution in different environments pretty well.\nduration_buckets_sec = [\n    1.0,\n    2.5,\n    5.0,\n    10.0,\n    15.0,\n    30.0,\n    45.0,\n    60.0,\n    90.0,\n    120.0,\n    180.0,\n    240.0,\n    300.0,\n    360.0,\n    420.0,\n    480.0,\n    540.0,\n    600.0,\n    750.0,\n    900.0,\n    1200.0,\n    float(\"inf\"),\n]\n\n# We can get more granular with these, but it's a good start. 
As it is right now, this looks like:\n#\n# Total Startup Time (tron_last_startup_duration_seconds)\n#   |-- _load_config()\n#   |-- Total Restore Time (tron_last_restore_duration_seconds)\n#   |     |-- state_watcher.restore() (tron_last_dynamodb_data_retrieval_duration_seconds_gauge)\n#   |     |-- jobs.restore_state() (tron_last_job_state_application_duration_seconds_gauge)\n#   |     |-- overhead within restore_state()\n#   |-- jobs.run_queue_schedule()\n#   |-- initial_setup()\ntron_restore_duration_seconds_histogram = Histogram(\n    \"tron_restore_duration_seconds\",\n    \"Distribution of time taken for the complete state restore process during startup\",\n    buckets=duration_buckets_sec,\n)\ntron_dynamodb_data_retrieval_duration_seconds_histogram = Histogram(\n    \"tron_dynamodb_data_retrieval_duration_seconds\",\n    \"Distribution of time taken to retrieve all state data from DynamoDB during restore\",\n    buckets=duration_buckets_sec,\n)\ntron_job_state_application_duration_seconds_histogram = Histogram(\n    \"tron_job_state_application_duration_seconds\",\n    \"Distribution of time taken to apply retrieved state data to Job/JobRun objects during restore\",\n    buckets=duration_buckets_sec,\n)\ntron_startup_duration_seconds_histogram = Histogram(\n    \"tron_startup_duration_seconds\",\n    \"Distribution of total time taken for Tron to start up and be ready\",\n    buckets=duration_buckets_sec,\n)\n\n# We use a gauge for the *last* restore. Histogram is useful for quantiles (p90, p95, etc.),\n# but this is nice for breaking down the most recent restore duration into components.\ntron_last_restore_duration_seconds_gauge = Gauge(\n    \"tron_last_restore_duration_seconds\", \"Duration of the most recent complete state restore process during startup\"\n)\ntron_last_dynamodb_data_retrieval_duration_seconds_gauge = Gauge(\n    \"tron_last_dynamodb_data_retrieval_duration_seconds\",\n    \"Duration of the most recent retrieval of all state data from DynamoDB during restore\",\n)\ntron_last_job_state_application_duration_seconds_gauge = Gauge(\n    \"tron_last_job_state_application_duration_seconds\",\n    \"Duration of the most recent application of retrieved state data to Job/JobRun objects\",\n)\ntron_last_startup_duration_seconds_gauge = Gauge(\n    \"tron_last_startup_duration_seconds\", \"Duration of the most recent total Tron startup process\"\n)\n\ntron_dynamodb_consecutive_save_errors_gauge = Gauge(\n    \"tron_dynamodb_consecutive_save_errors\",\n    \"Consecutive save loop iterations with errors\",\n)\n\ntron_dynamodb_save_errors_counter = Counter(\n    \"tron_dynamodb_save_errors_total\",\n    \"Total DynamoDB save errors\",\n)\n\n\n@contextmanager\ndef timer(\n    operation_name: str,\n    log: logging.Logger,\n    histogram_metric: Histogram | None = None,\n    gauge_metric: Gauge | None = None,\n) -> Generator[None, None, None]:\n    \"\"\"Context manager for timing operations with optional Prometheus metrics.\"\"\"\n    start_time = time.time()\n    log.info(f\"Starting {operation_name}...\")\n    duration = 0.0\n    try:\n        yield\n    except Exception:\n        log.exception(f\"Exception during timed operation: {operation_name}\")\n        raise\n    finally:\n        end_time = time.time()\n        duration = end_time - start_time\n        if histogram_metric:\n            histogram_metric.observe(duration)\n        if gauge_metric:\n            gauge_metric.set(duration)\n        log.info(f\"Execution time for {operation_name}: {duration:.2f}s\")\n"
  },
  {
    "path": "tron/scheduler.py",
    "content": "\"\"\"\nTron schedulers\n\n A scheduler has a simple interface.\n\n class Scheduler(object):\n\n    schedule_on_complete = <bool>\n\n    def next_run_time(self, last_run_time):\n        <returns datetime>\n\n\n next_run_time() should return a datetime which is the time the next job run\n will be run.\n\n schedule_on_complete is a bool that identifies if this scheduler should have\n jobs scheduled with the start_time of the previous run (False), or the\n end time of the previous run (True).\n\"\"\"\nimport datetime\nimport logging\nimport random\n\nfrom tron.config import schedule_parse\nfrom tron.utils import timeutils\nfrom tron.utils import trontimespec\n\nlog = logging.getLogger(__name__)\n\n\ndef scheduler_from_config(config, time_zone):\n    \"\"\"A factory for creating a scheduler from a configuration object.\"\"\"\n    if isinstance(config, schedule_parse.ConfigGrocScheduler):\n        return GeneralScheduler(\n            time_zone=time_zone,\n            timestr=config.timestr or \"00:00\",\n            ordinals=config.ordinals,\n            monthdays=config.monthdays,\n            months=config.months,\n            weekdays=config.weekdays,\n            name=\"groc\",\n            original=config.original,\n            jitter=config.jitter,\n        )\n\n    if isinstance(config, schedule_parse.ConfigCronScheduler):\n        return GeneralScheduler(\n            time_zone=time_zone,\n            minutes=config.minutes,\n            hours=config.hours,\n            monthdays=config.monthdays,\n            months=config.months,\n            weekdays=config.weekdays,\n            ordinals=config.ordinals,\n            seconds=[0],\n            name=\"cron\",\n            original=config.original,\n            jitter=config.jitter,\n        )\n\n    if isinstance(config, schedule_parse.ConfigDailyScheduler):\n        return GeneralScheduler(\n            hours=[config.hour],\n            time_zone=time_zone,\n            minutes=[config.minute],\n            seconds=[config.second],\n            weekdays=config.days,\n            name=\"daily\",\n            original=config.original,\n            jitter=config.jitter,\n        )\n\n\ndef get_jitter(time_delta):\n    if not time_delta:\n        return datetime.timedelta()\n    seconds = timeutils.delta_total_seconds(time_delta)\n    return datetime.timedelta(seconds=random.randint(-seconds, seconds))\n\n\ndef get_jitter_str(time_delta):\n    if not time_delta:\n        return \"\"\n    return \" (+/- %s)\" % time_delta\n\n\nclass GeneralScheduler:\n    \"\"\"Scheduler which uses a TimeSpecification.\"\"\"\n\n    schedule_on_complete = False\n\n    def __init__(\n        self,\n        ordinals=None,\n        weekdays=None,\n        months=None,\n        monthdays=None,\n        timestr=None,\n        minutes=None,\n        hours=None,\n        seconds=None,\n        time_zone=None,\n        name=None,\n        original=None,\n        jitter=None,\n    ):\n        \"\"\"Parameters:\n        timestr     - the time of day to run, as 'HH:MM'\n        ordinals    - first, second, third &c, as a set of integers in 1..5 to\n                      be used with \"1st <weekday>\", etc.\n        monthdays   - set of integers to be used with \"<month> 3rd\", etc.\n        months      - the months that this should run, as a set of integers in\n                      1..12\n        weekdays    - the days of the week that this should run, as a set of\n                      integers, 0=Sunday, 6=Saturday\n        timezone    - the 
optional timezone as a string for this specification.\n                      Defaults to UTC - valid entries are things like\n                      Australia/Victoria or PST8PDT.\n        \"\"\"\n        self.time_zone = time_zone\n        self.jitter = jitter\n        self.name = name or \"daily\"\n        self.original = original or \"\"\n        self.time_spec = trontimespec.TimeSpecification(\n            ordinals=ordinals,\n            weekdays=weekdays,\n            months=months,\n            monthdays=monthdays,\n            timestr=timestr,\n            hours=hours,\n            minutes=minutes,\n            seconds=seconds,\n            timezone=time_zone.zone if time_zone else None,\n        )\n\n    def next_run_time(self, start_time):\n        \"\"\"Find the next time to run.\"\"\"\n        if not start_time:\n            start_time = timeutils.current_time(tz=self.time_zone)\n        elif self.time_zone:\n            if (\n                start_time.tzinfo is None\n                or start_time.tzinfo.utcoffset(\n                    start_time,\n                )\n                is None\n            ):\n                # tz-naive start times need to be localized first to the requested\n                # time zone.\n                start_time = trontimespec.naive_as_timezone(start_time, self.time_zone)\n\n        return self.time_spec.get_match(start_time) + get_jitter(self.jitter)\n\n    def __str__(self):\n        return f\"{self.name} {self.original}{get_jitter_str(self.jitter)}\"\n\n    def __eq__(self, other):\n        return (\n            hasattr(\n                other,\n                \"time_spec\",\n            )\n            and self.time_spec == other.time_spec\n        )\n\n    def __ne__(self, other):\n        return not self == other\n\n    def get_jitter(self):\n        return self.jitter\n\n    def get_name(self):\n        return self.name\n\n    def get_value(self):\n        return self.original\n"
  },
  {
    "path": "tron/serialize/__init__.py",
    "content": ""
  },
  {
    "path": "tron/serialize/filehandler.py",
    "content": "\"\"\"\nTools for managing and properly closing file handles.\n\"\"\"\nimport logging\nimport os.path\nimport shutil\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom subprocess import PIPE\nfrom subprocess import Popen\nfrom threading import RLock\n\nfrom tron.utils import maybe_encode\n\nlog = logging.getLogger(__name__)\n\n\nclass NullFileHandle:\n    \"\"\"A No-Op object that supports a File interface.\"\"\"\n\n    closed = True\n\n    @classmethod\n    def write(cls, _):\n        pass\n\n    @classmethod\n    def close(cls):\n        pass\n\n\nclass FileHandleWrapper:\n    \"\"\"Acts as a proxy to file handles.  Wraps a file handle and stores\n    access time and metadata.  These objects should only be created\n    by FileHandleManager. Do not instantiate them on their own.\n    \"\"\"\n\n    __slots__ = [\"manager\", \"name\", \"last_accessed\", \"_fh_lock\", \"_fh\"]\n\n    def __init__(self, manager, name):\n        self.manager = manager\n        self.name = name\n        self.last_accessed = time.time()\n        self._fh_lock = RLock()\n        self._fh = NullFileHandle\n\n    def close(self):\n        self.close_wrapped()\n        self.manager.remove(self)\n\n    def close_wrapped(self):\n        \"\"\"Close only the underlying file handle.\"\"\"\n        with self._fh_lock:\n            if self._fh is not None:\n                self._fh.close()\n                self._fh = NullFileHandle\n\n    def write(self, content):\n        \"\"\"Write content to the fh. Re-open if necessary.\"\"\"\n        with self._fh_lock:\n            if self._fh is not None:\n                if self._fh == NullFileHandle:\n                    try:\n                        self._fh = open(self.name, \"ab\")\n                    except OSError as e:\n                        log.error(\"Failed to open %s: %s\", self.name, e)\n                        return\n\n                self.last_accessed = time.time()\n                self._fh.write(\n                    maybe_encode(content)\n                )  # TODO: TRON-2293 maybe_encode is a relic of Python2->Python3 migration. Remove it.\n                self.manager.update(self)\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, _exc_type, _exc_val, _exc_tb):\n        with self._fh_lock:\n            self.close()\n            self._fh = None\n\n\nclass FileHandleManager:\n    \"\"\"Creates FileHandleWrappers, closes handles when they have\n    been inactive for a period of time, and transparently re-opens them the next\n    time they are needed. All files are opened in append mode.\n\n    This class is a singleton.  An already configured instance can be\n    retrieved by using get_instance() (and will be created if None);\n    max_idle_time can be set by calling the classmethod set_max_idle_time()\n    \"\"\"\n\n    _instance = None\n\n    def __init__(self, max_idle_time=60):\n        \"\"\"\n        Create a new instance.\n        max_idle_time           - max idle time in seconds\n        \"\"\"\n        if self.__class__._instance:\n            msg = \"FileHandleManager is a singleton. 
Call get_instance()\"\n            raise ValueError(msg)\n        self.max_idle_time = max_idle_time\n        self.cache = OrderedDict()\n        self.__class__._instance = self\n\n    @classmethod\n    def set_max_idle_time(cls, max_idle_time):\n        inst = cls.get_instance()\n        inst.max_idle_time = max_idle_time\n\n    @classmethod\n    def get_instance(cls):\n        if cls._instance is None:\n            cls._instance = cls()\n        return cls._instance\n\n    @classmethod\n    def reset(cls):\n        \"\"\"Empty the cache and reset the instance to its original state.\"\"\"\n        inst = cls.get_instance()\n        for fh_wrapper in list(inst.cache.values()):\n            inst.remove(fh_wrapper)\n\n    def open(self, filename):\n        \"\"\"Retrieve a file handle from the cache based on name.  Returns a\n        FileHandleWrapper. If the handle is not in the cache, create a new\n        instance.\n        \"\"\"\n        if filename in self.cache:\n            return self.cache[filename]\n        fhw = FileHandleWrapper(self, filename)\n        self.cache[filename] = fhw\n        return fhw\n\n    def cleanup(self, time_func=time.time):\n        \"\"\"Close any file handles that have been idle for longer than\n        max_idle_time. time_func is primarily used for testing.\n        \"\"\"\n        if not self.cache:\n            return\n\n        cur_time = time_func()\n        for name, fh_wrapper in list(self.cache.items()):\n            if cur_time - fh_wrapper.last_accessed > self.max_idle_time:\n                fh_wrapper.close()\n            else:\n                break\n\n    def remove(self, fh_wrapper):\n        \"\"\"Remove the fh_wrapper from the cache and access_order.\"\"\"\n        if fh_wrapper.name in self.cache:\n            del self.cache[fh_wrapper.name]\n\n    def update(self, fh_wrapper):\n        \"\"\"Remove and re-add the file handle to the cache so that its keys\n        are still ordered by last access. 
Calls cleanup() to remove any file\n        handles that have been idle for too long.\n        \"\"\"\n        self.remove(fh_wrapper)\n        self.cache[fh_wrapper.name] = fh_wrapper\n        self.cleanup()\n\n\nclass OutputStreamSerializer:\n    \"\"\"Manage writing to and reading from files in a directory hierarchy.\"\"\"\n\n    def __init__(self, base_path):\n        self.base_path = os.path.join(*base_path)\n        if not os.path.exists(self.base_path):\n            os.makedirs(self.base_path)\n\n    def full_path(self, filename):\n        return os.path.join(self.base_path, filename)\n\n    # TODO: do not use subprocess\n    def tail(self, filename: str, num_lines: int | None = None) -> list[str]:\n        \"\"\"Tail a file using `tail`.\"\"\"\n        path = self.full_path(filename)\n        if not path or not os.path.exists(path):\n            return []\n        if not num_lines:\n            num_lines = sys.maxsize\n\n        try:\n            cmd = (\"tail\", \"-n\", str(num_lines), path)\n            tail_sub = Popen(cmd, stdout=PIPE)\n            return list(line.rstrip().decode() for line in (tail_sub.stdout if tail_sub.stdout else []))\n        except OSError as e:\n            log.error(f\"Could not tail {path}: {e}\")\n            return []\n\n    def open(self, filename):\n        \"\"\"Return a FileHandleManager for the output path.\"\"\"\n        path = self.full_path(filename)\n        return FileHandleManager.get_instance().open(path)\n\n\nclass OutputPath:\n    \"\"\"A list like object used to construct a file path for output. The\n    file path is constructed by joining the base path with any additional\n    path elements.\n    \"\"\"\n\n    __slots__ = [\"base\", \"parts\"]\n\n    def __init__(self, base=\".\", *path_parts):\n        self.base = base\n        self.parts = list(path_parts or [])\n\n    def append(self, part):\n        self.parts.append(part)\n\n    def __iter__(self):\n        yield self.base\n        yield from self.parts\n\n    def __str__(self):\n        return os.path.join(*self)\n\n    def clone(self, *parts):\n        \"\"\"Return a new OutputPath object which has a base of the str value\n        of this object.\n        \"\"\"\n        return type(self)(str(self), *parts)\n\n    def delete(self):\n        \"\"\"Remove the directory and its contents.\"\"\"\n        try:\n            shutil.rmtree(str(self))\n        except OSError as e:\n            log.warning(f\"Failed to delete {self}: {e}\")\n\n    def __eq__(self, other):\n        return self.base == other.base and self.parts == other.parts\n\n    def __ne__(self, other):\n        return not self == other\n"
  },
  {
    "path": "tron/serialize/runstate/__init__.py",
    "content": "# State types\nJOB_STATE = \"job_state\"\nJOB_RUN_STATE = \"job_run_state\"\nMESOS_STATE = \"mesos_state\"\n"
  },
  {
    "path": "tron/serialize/runstate/dynamodb_state_store.py",
    "content": "import concurrent.futures\nimport copy\nimport gzip\nimport logging\nimport math\nimport pickle\nimport sys\nimport threading\nimport time\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom collections.abc import Sequence\nfrom typing import Any\nfrom typing import Literal\nfrom typing import TypeVar\n\nimport boto3\nimport botocore\nfrom botocore.config import Config\n\nimport tron.prom_metrics as prom_metrics\nfrom tron.core.job import Job\nfrom tron.core.jobrun import JobRun\nfrom tron.metrics import timer\nfrom tron.serialize import runstate\n\n# Max DynamoDB object size is 400KB. Since we save two copies of the object (pickled and JSON),\n# we need to consider this max size applies to the entire item, so we use a max size of 200KB\n# for each version.\n#\n# In testing I could get away with 201_000 for both partitions so this should be enough overhead\n# to contain other attributes like object name and number of partitions.\nOBJECT_SIZE = 150_000  # TODO: TRON-2240 - consider swapping back to 400_000 now that we've removed pickles\nMAX_SAVE_QUEUE = 500\n# This is distinct from the number of retries in the retry_config as this is used for handling unprocessed\n# keys outside the bounds of something like retrying on a ThrottlingException. We need this limit to avoid\n# infinite loops in the case where a key is truly unprocessable. We allow for more retries than it should\n# ever take to avoid failing restores due to transient issues.\nMAX_UNPROCESSED_KEYS_RETRIES = 30\nlog = logging.getLogger(__name__)\nT = TypeVar(\"T\")\n\n\nclass DynamoDBStateStore:\n    def __init__(\n        self, name: str, dynamodb_region: str, stopping: bool = False, max_transact_write_items: int = 8\n    ) -> None:\n        # Standard mode includes an exponential backoff by a base factor of 2 for a\n        # maximum backoff time of 20 seconds (min(b*r^i, MAX_BACKOFF) where b is a\n        # random number between 0 and 1 and r is the base factor of 2). This might\n        # look like:\n        #\n        # seconds_to_sleep = min(1 × 2^1, 20) = min(2, 20) = 2 seconds\n        #\n        # By our 5th retry (2^5 is 32) we will be sleeping *up to* 20 seconds, depending\n        # on the random jitter.\n        #\n        # It handles transient errors like RequestTimeout and ConnectionError, as well\n        # as Service-side errors like Throttling, SlowDown, and LimitExceeded.\n        retry_config = Config(retries={\"max_attempts\": 5, \"mode\": \"standard\"})\n\n        self.dynamodb = boto3.resource(\"dynamodb\", region_name=dynamodb_region, config=retry_config)\n        self.client = boto3.client(\"dynamodb\", region_name=dynamodb_region, config=retry_config)\n        self.name = name\n        self.dynamodb_region = dynamodb_region\n        self.table = self.dynamodb.Table(name)\n        self.stopping = stopping\n        self.max_transact_write_items = max_transact_write_items\n        self.save_queue: OrderedDict = OrderedDict()\n        self.save_lock = threading.Lock()\n        self.save_errors = 0\n        self.save_thread = threading.Thread(target=self._save_loop, args=(), daemon=True)\n        self.save_thread.start()\n\n    def build_key(self, type: str, iden: str) -> str:\n        \"\"\"\n        It builds a unique partition key. 
The key could be objects with __str__ method.\n        \"\"\"\n        return f\"{type} {iden}\"\n\n    def restore(self, keys: list[str]) -> dict[str, Any]:\n        \"\"\"\n        Fetch all under the same partition key(s).\n        ret: <dict of key to states>\n        \"\"\"\n        # format of the keys always passed here is\n        # job_state job_name --> high level info about the job: enabled, run_nums\n        # job_run_state job_run_name --> high level info about the job run\n        first_items = self._get_first_partitions(keys)\n        remaining_items = self._get_remaining_partitions(first_items)\n        vals = self._merge_items(first_items, remaining_items)\n        return vals\n\n    def chunk_keys(self, keys: Sequence[T]) -> list[Sequence[T]]:\n        \"\"\"Generates a list of chunks of keys to be used to read from DynamoDB\"\"\"\n        # have a for loop here for all the key chunks we want to go over\n        cand_keys_chunks = []\n        for i in range(0, len(keys), 100):\n            # chunks of at most 100 keys will be in this list as there could be smaller chunks\n            cand_keys_chunks.append(keys[i : min(len(keys), i + 100)])\n        return cand_keys_chunks\n\n    def _calculate_backoff_delay(self, attempt: int) -> int:\n        # Clamp attempt to 1 to avoid negative or zero exponent\n        safe_attempt = max(attempt, 1)\n        base_delay_seconds = 1\n        max_delay_seconds = 10\n        delay: int = min(base_delay_seconds * (2 ** (safe_attempt - 1)), max_delay_seconds)\n        return delay\n\n    def _get_items(self, table_keys: list[dict[str, Any]]) -> list[dict[str, Any]]:\n        items = []\n        # let's avoid potentially mutating our input :)\n        cand_keys_list = copy.copy(table_keys)\n        attempts = 0\n\n        # TODO: TRON-2363 - We should refactor this to not consume attempts when we are still making progress\n        while len(cand_keys_list) != 0 and attempts < MAX_UNPROCESSED_KEYS_RETRIES:\n            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n                responses = [\n                    executor.submit(\n                        self.client.batch_get_item,\n                        RequestItems={self.name: {\"Keys\": chunked_keys, \"ConsistentRead\": True}},\n                    )\n                    for chunked_keys in self.chunk_keys(cand_keys_list)\n                ]\n                # let's wipe the state so that we can loop back around\n                # if there are any un-processed keys\n                # NOTE: we'll re-chunk when submitting to the threadpool\n                # since it's possible that we've had several chunks fail\n                # enough keys that we'd otherwise send > 100 keys in a\n                # request otherwise\n                cand_keys_list = []\n            for resp in concurrent.futures.as_completed(responses):\n                try:\n                    result = resp.result()\n                    items.extend(result.get(\"Responses\", {}).get(self.name, []))\n\n                    # If DynamoDB returns unprocessed keys, we need to collect them and retry\n                    unprocessed_keys = result.get(\"UnprocessedKeys\", {}).get(self.name, {}).get(\"Keys\", [])\n                    if unprocessed_keys:\n                        cand_keys_list.extend(unprocessed_keys)\n                except botocore.exceptions.ClientError as e:\n                    log.exception(f\"ClientError during batch_get_item: {e.response}\")\n                    raise\n     
           except Exception:\n                    log.exception(\"Encountered issues retrieving data from DynamoDB\")\n                    raise\n            if cand_keys_list:\n                # We use _calculate_backoff_delay to get a delay that increases exponentially\n                # with each retry. These retry attempts are distinct from the boto3 retry_config\n                # and are used specifically to handle unprocessed keys.\n                attempts += 1\n                delay = self._calculate_backoff_delay(attempts)\n                log.warning(\n                    f\"Attempt {attempts}/{MAX_UNPROCESSED_KEYS_RETRIES} - \"\n                    f\"Retrying {len(cand_keys_list)} unprocessed keys after {delay}s delay.\"\n                )\n                time.sleep(delay)\n        if cand_keys_list:\n            msg = f\"tron_dynamodb_restore_failure: failed to retrieve items with keys \\n{cand_keys_list}\\n from dynamodb after {MAX_UNPROCESSED_KEYS_RETRIES} retries.\"\n            log.error(msg)\n\n            raise KeyError(msg)\n        return items\n\n    def _get_first_partitions(self, keys: list[str]) -> list[dict[str, Any]]:\n        new_keys = [{\"key\": {\"S\": key}, \"index\": {\"N\": \"0\"}} for key in keys]\n        return self._get_items(new_keys)\n\n    def _get_remaining_partitions(self, items: list) -> list[dict[str, Any]]:\n        \"\"\"Get items in the remaining partitions: N = 1 and beyond\"\"\"\n        keys_for_remaining_items = []\n        for item in items:\n            num_partitions = int(item[\"num_json_val_partitions\"][\"N\"])\n\n            prom_metrics.tron_dynamodb_partitions_histogram.observe(num_partitions)\n\n            # We already have the 0th partition, so fetch partitions 1 through num_partitions-1\n            remaining_items = [\n                {\"key\": {\"S\": str(item[\"key\"][\"S\"])}, \"index\": {\"N\": str(i)}} for i in range(1, num_partitions)\n            ]\n            keys_for_remaining_items.extend(remaining_items)\n        return self._get_items(keys_for_remaining_items)\n\n    def _merge_items(self, first_items: list[dict[str, Any]], remaining_items: list[dict[str, Any]]) -> dict[str, Any]:\n        \"\"\"\n        Helper to merge multi-partition compressed JSON data into a single entry\n        and deserialize it.\n        \"\"\"\n        partitions_by_key = defaultdict(list)\n\n        if remaining_items:\n            first_items.extend(remaining_items)\n\n        for item in first_items:\n            key = item[\"key\"][\"S\"]\n            partitions_by_key[key].append(item)\n\n        # Sort all partitions upfront\n        for key_items in partitions_by_key.values():\n            key_items.sort(key=lambda x: int(x[\"index\"][\"N\"]))\n\n        # Reassemble compressed JSON partitions and decompress\n        json_items: dict[str, str] = {}\n        for key, key_items in partitions_by_key.items():\n            compressed_data = bytearray()\n            for part in key_items:\n                if \"json_val\" in part and \"B\" in part[\"json_val\"]:\n                    compressed_data += part[\"json_val\"][\"B\"]\n\n            if not compressed_data:\n                raise ValueError(f\"No compressed json_val found for key {key}\")\n\n            json_items[key] = gzip.decompress(compressed_data).decode(\"utf-8\")\n\n        return {k: self._deserialize_item(k, v) for k, v in json_items.items()}\n\n    def save(self, key_value_pairs: list[tuple[str, dict[str, Any] | None]]) -> None:\n        \"\"\"Add items to the 
save_queue to be later consumed by _consume_save_queue\"\"\"\n        for key, val in key_value_pairs:\n            while True:\n                qlen = len(self.save_queue)\n                if qlen > MAX_SAVE_QUEUE:\n                    log.info(f\"save queue size {qlen} > {MAX_SAVE_QUEUE}, sleeping 5s\")\n                    time.sleep(5)\n                    continue\n                with self.save_lock:\n                    if val is None:\n                        self.save_queue[key] = (val, None)\n                    else:\n                        state_type = self.get_type_from_key(key)\n                        json_val = self._serialize_item(state_type, val)\n                        self.save_queue[key] = (val, json_val)\n                break\n\n    def _consume_save_queue(self) -> None:\n        \"\"\"Consume the save_queue and save the items to dynamodb\"\"\"\n        qlen = len(self.save_queue)\n        saved = 0\n        start = time.time()\n\n        for _ in range(qlen):\n            try:\n                with self.save_lock:\n                    key, (val, json_val) = self.save_queue.popitem(last=False)\n\n                # If val is non-None but json_val is None, serialization failed.\n                # Re-attempt serialization and requeue on failure since we don't\n                # want any pickle-only items these days.\n                if val is not None and json_val is None:\n                    state_type = self.get_type_from_key(key)\n                    json_val = self._serialize_item(state_type, val)\n                    if json_val is None:\n                        log.error(\n                            f'tron_dynamodb_save_failure: json serialization failed for key \"{key}\", '\n                            f\"preserving existing row and requeuing\"\n                        )\n                        prom_metrics.tron_dynamodb_save_errors_counter.inc()\n                        with self.save_lock:\n                            self.save_queue[key] = (val, None)\n                        continue\n\n                # Remove all previous data with the same partition key\n                # TODO: only remove excess partitions if new data has fewer\n                self._delete_item(key)\n                if val is not None:\n                    self[key] = (pickle.dumps(val), json_val)\n                # reset errors count if we can successfully save\n                saved += 1\n            except Exception as e:\n                log.error(f'tron_dynamodb_save_failure: failed to save key \"{key}\" to dynamodb:\\n{repr(e)}')\n                prom_metrics.tron_dynamodb_save_errors_counter.inc()\n\n                with self.save_lock:\n                    self.save_queue[key] = (val, json_val)\n\n        duration = time.time() - start\n        log.info(f\"saved {saved} items in {duration}s\")\n\n        if saved < qlen:\n            self.save_errors += 1\n        else:\n            self.save_errors = 0\n\n        prom_metrics.tron_dynamodb_consecutive_save_errors_gauge.set(self.save_errors)\n\n    def get_type_from_key(self, key: str) -> str:\n        return key.split()[0]\n\n    # TODO: TRON-2305 - In an ideal world, we wouldn't be passing around state/state_data dicts. 
It would be a lot nicer to have regular objects here\n    def _serialize_item(self, key: Literal[runstate.JOB_STATE, runstate.JOB_RUN_STATE], state: dict[str, Any]) -> bytes | None:  # type: ignore\n        try:\n            if key == runstate.JOB_STATE:\n                serialized_data = Job.to_json(state)\n            elif key == runstate.JOB_RUN_STATE:\n                serialized_data = JobRun.to_json(state)\n            else:\n                raise ValueError(f\"Unknown type: key {key}\")\n\n            if serialized_data:\n                return gzip.compress(serialized_data.encode(\"utf-8\"))\n            return None\n        except Exception:\n            log.exception(f\"Serialization error for key {key}\")\n            prom_metrics.json_serialization_errors_counter.inc()\n            return None\n\n    def _deserialize_item(self, key: str, state: str) -> dict[str, Any]:\n        try:\n            json_key = key.split(\" \")[0]\n            if json_key == runstate.JOB_STATE:\n                job_data = Job.from_json(state)\n                return job_data\n            elif json_key == runstate.JOB_RUN_STATE:\n                job_run_data = JobRun.from_json(state)\n                return job_run_data\n            else:\n                raise ValueError(f\"Unknown type: key {key}\")\n        except Exception:\n            log.exception(f\"Deserialization error for key {key}\")\n            prom_metrics.json_deserialization_errors_counter.inc()\n            raise\n\n    def _save_loop(self) -> None:\n        while True:\n            if self.stopping:\n                self._consume_save_queue()\n                return\n\n            if len(self.save_queue) == 0:\n                log.debug(\"save queue empty, sleeping 5s\")\n                time.sleep(5)\n                continue\n\n            self._consume_save_queue()\n            if self.save_errors > 100:\n                log.error(\"too many dynamodb errors in a row, crashing\")\n                sys.exit(1)\n\n    def __setitem__(self, key: str, value: tuple[bytes, bytes | None]) -> None:\n        \"\"\"\n        Partition the item and write up to self.max_transact_write_items\n        partitions atomically using TransactWriteItems.\n\n        The function examines the size of pickled_val and json_val,\n        splitting them into multiple segments based on OBJECT_SIZE,\n        storing each segment under the same partition key.\n\n        It relies on the boto3/botocore retry_config to handle\n        certain errors (e.g. throttling). If an error is not\n        addressed by boto3's internal logic, the transaction fails\n        and raises an exception. 
It is the caller's responsibility\n        to implement further retries.\n        \"\"\"\n        start = time.time()\n\n        pickled_val, json_val = value\n        num_partitions = math.ceil(len(pickled_val) / OBJECT_SIZE)\n        num_json_val_partitions = math.ceil(len(json_val) / OBJECT_SIZE) if json_val else 0\n        items = []\n\n        max_partitions = max(num_partitions, num_json_val_partitions)\n        prom_metrics.tron_dynamodb_partitions_histogram.observe(max_partitions)\n\n        for index in range(max_partitions):\n            item: dict[str, Any] = {  # TODO: replace this with a TypedDict\n                \"Put\": {\n                    \"Item\": {\n                        \"key\": {\n                            \"S\": key,\n                        },\n                        \"index\": {\n                            \"N\": str(index),\n                        },\n                        \"val\": {\n                            \"B\": pickled_val[\n                                index * OBJECT_SIZE : min(index * OBJECT_SIZE + OBJECT_SIZE, len(pickled_val))\n                            ],\n                        },\n                        \"num_partitions\": {\n                            \"N\": str(num_partitions),\n                        },\n                    },\n                    \"TableName\": self.name,\n                },\n            }\n\n            if json_val:\n                item[\"Put\"][\"Item\"][\"json_val\"] = {\n                    \"B\": json_val[index * OBJECT_SIZE : min(index * OBJECT_SIZE + OBJECT_SIZE, len(json_val))]\n                }\n                item[\"Put\"][\"Item\"][\"num_json_val_partitions\"] = {\n                    \"N\": str(num_json_val_partitions),\n                }\n\n            items.append(item)\n\n            # We want to write the items when we've either reached the max number of items\n            # for a transaction, or when we're done processing all partitions\n            if len(items) == self.max_transact_write_items or index == max_partitions - 1:\n                try:\n                    self.client.transact_write_items(TransactItems=items)\n                    items = []\n                except Exception:\n                    timer(\n                        name=\"tron.dynamodb.setitem\",\n                        delta=time.time() - start,\n                    )\n                    # TODO: TRON-2419 - We should be smarter here. While each batch is atomic, a sufficiently\n                    # large JobRun could exceed the max size of a single transaction (e.g. a JobRun with 12\n                    # partitions). While one batch might succeed (saving partitions 1-8), the next one (for\n                    # partitions 9-12) might fail. 
We should to handle this case or we will see more hanging\n                    # chads in DynamoDB.\n                    log.exception(f\"Failed to save partition for key: {key}\")\n                    raise\n        timer(\n            name=\"tron.dynamodb.setitem\",\n            delta=time.time() - start,\n        )\n\n    def _delete_item(self, key: str) -> None:\n        start = time.time()\n        try:\n            num_partitions, num_json_val_partitions = self._get_num_of_partitions(key)\n            max_partitions = max(num_partitions, num_json_val_partitions)\n            with self.table.batch_writer() as batch:\n                for index in range(max_partitions):\n                    batch.delete_item(\n                        Key={\n                            \"key\": key,\n                            \"index\": index,\n                        },\n                    )\n        finally:\n            timer(\n                name=\"tron.dynamodb.delete\",\n                delta=time.time() - start,\n            )\n\n    def _get_num_of_partitions(self, key: str) -> tuple[int, int]:\n        \"\"\"\n        Return the number of partitions an item is divided into for both pickled and JSON data.\n        \"\"\"\n        try:\n            partition = self.table.get_item(\n                Key={\n                    \"key\": key,\n                    \"index\": 0,\n                },\n                ProjectionExpression=\"num_partitions, num_json_val_partitions\",\n                ConsistentRead=True,\n            )\n            num_partitions = int(partition.get(\"Item\", {}).get(\"num_partitions\", 0))\n            num_json_val_partitions = int(partition.get(\"Item\", {}).get(\"num_json_val_partitions\", 0))\n            return num_partitions, num_json_val_partitions\n        except self.client.exceptions.ResourceNotFoundException:\n            return 0, 0\n\n    def cleanup(self) -> None:\n        self.stopping = True\n        self.save_thread.join()\n        return\n"
  },
  {
    "path": "tron/serialize/runstate/shelvestore.py",
    "content": "import logging\nimport operator\nimport pickle\nimport shelve\nimport sys\nfrom io import BytesIO\nfrom typing import Any\n\nimport bsddb3  # type: ignore\n\nfrom tron.utils import maybe_decode\n\nlog = logging.getLogger(__name__)\n\n\n# TODO: TRON-2293 This class does some Python 2 and Python 3 handling shenanigans. It should be cleaned up.\nclass Py2Shelf(shelve.Shelf):\n    def __init__(self, filename, flag=\"c\", protocol=2, writeback=False):\n        db = bsddb3.hashopen(filename, flag)\n        args = [self, db, protocol, writeback]\n        if sys.version_info[0] == 3:\n            args.append(\"utf8\")\n        shelve.Shelf.__init__(*args)\n\n    def __getitem__(self, key):\n        try:\n            value = self.cache[key]\n        except KeyError:\n            f = BytesIO(self.dict[key.encode(\"utf8\")])\n            if sys.version_info[0] == 3:\n                value = pickle.load(f, encoding=\"bytes\")\n            else:\n                value = pickle.load(f)\n            if self.writeback:\n                self.cache[key] = value\n        return value\n\n    def __setitem__(self, key, value):\n        if self.writeback:\n            self.cache[key] = value\n        f = BytesIO()\n        pickle.dump(obj=value, file=f, protocol=self._protocol)\n        self.dict[key.encode(\"utf8\")] = f.getvalue()\n\n    def delete(self, key):\n        if key in self.cache:\n            del self.cache[key]\n        encoded_key = key.encode(\"utf8\")\n        if encoded_key in self.dict:\n            del self.dict[encoded_key]\n\n\nclass ShelveKey:\n    __slots__ = [\"type\", \"iden\"]\n\n    def __init__(self, type, iden):\n        self.type = maybe_decode(\n            type\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n        self.iden = maybe_decode(\n            iden\n        )  # TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\n\n    @property\n    def key(self):\n        return f\"{self.type}___{self.iden}\"\n\n    def __str__(self):\n        return f\"{self.type} {self.iden}\"\n\n    def __eq__(self, other):\n        return self.type == other.type and self.iden == other.iden\n\n    def __hash__(self):\n        return hash(self.key)\n\n\nclass ShelveStateStore:\n    \"\"\"Persist state using `shelve`.\"\"\"\n\n    def __init__(self, filename):\n        self.filename = filename\n        self.shelve = Py2Shelf(self.filename)\n\n    def build_key(self, type: str, iden: str) -> ShelveKey:\n        return ShelveKey(type, iden)\n\n    def save(self, key_value_pairs):\n        for key, state_data in key_value_pairs:\n            shelve_key = str(key.key)\n            if state_data is None:\n                self.shelve.delete(shelve_key)\n            else:\n                self.shelve[shelve_key] = state_data\n        self.shelve.sync()\n\n    def restore(self, keys: list[ShelveKey]) -> dict[ShelveKey, Any]:\n        items = zip(\n            keys,\n            (self.shelve.get(str(key.key)) for key in keys),\n        )\n        return dict(filter(operator.itemgetter(1), items))\n\n    def cleanup(self):\n        self.shelve.close()\n\n    def __repr__(self):\n        return \"ShelveStateStore('%s')\" % self.filename\n"
  },
  {
    "path": "tron/serialize/runstate/statemanager.py",
    "content": "import concurrent.futures\nimport copy\nimport itertools\nimport logging\nimport sys\nimport time\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import cast\n\nfrom tron.config import schema\nfrom tron.core import job\nfrom tron.core import jobrun\nfrom tron.mesos import MesosClusterRepository\nfrom tron.serialize import runstate\nfrom tron.serialize.runstate.dynamodb_state_store import DynamoDBStateStore\nfrom tron.serialize.runstate.shelvestore import ShelveStateStore\nfrom tron.serialize.runstate.yamlstore import YamlStateStore\nfrom tron.utils import observer\n\nlog = logging.getLogger(__name__)\n\n\nclass VersionMismatchError(ValueError):\n    \"\"\"Raised when the state has a newer version than tron.__version__.\"\"\"\n\n\nclass PersistenceStoreError(ValueError):\n    \"\"\"Raised if the store cannot be created or fails a read or write.\"\"\"\n\n\nclass PersistenceManagerFactory:\n    \"\"\"Create a PersistentStateManager.\"\"\"\n\n    @classmethod\n    def from_config(cls, persistence_config):\n        store_type = schema.StatePersistenceTypes(persistence_config.store_type)\n        name = persistence_config.name\n        buffer_size = persistence_config.buffer_size\n        store = None\n\n        if store_type == schema.StatePersistenceTypes.shelve:\n            store = ShelveStateStore(name)\n\n        if store_type == schema.StatePersistenceTypes.yaml:\n            store = YamlStateStore(name)\n\n        if store_type == schema.StatePersistenceTypes.dynamodb:\n            table_name = persistence_config.table_name\n            dynamodb_region = persistence_config.dynamodb_region\n            max_transact_write_items = persistence_config.max_transact_write_items\n            store = DynamoDBStateStore(table_name, dynamodb_region, max_transact_write_items=max_transact_write_items)\n\n        buffer = StateSaveBuffer(buffer_size)\n        return PersistentStateManager(store, buffer)\n\n\nclass StateSaveBuffer:\n    \"\"\"Buffer calls to save, and perform the saves when the buffer reaches\n    buffer size. 
This buffer will only store one state_data for each key.\n    \"\"\"\n\n    def __init__(self, buffer_size):\n        self.buffer_size = buffer_size\n        self.buffer = {}\n        self.counter = itertools.cycle(range(buffer_size))\n\n    def save(self, key, state_data):\n        \"\"\"Save the state_data indexed by key and return True if the buffer\n        is full.\n        \"\"\"\n        self.buffer[key] = state_data\n        return not next(self.counter)\n\n    def __iter__(self):\n        \"\"\"Return all buffered data and clear the buffer.\"\"\"\n        yield from self.buffer.items()\n        self.buffer.clear()\n\n\nclass PersistentStateManager:\n    \"\"\"Provides an interface to persist the state of Tron.\n\n    The implementation of persisting and restoring the state from disk is\n    handled by a class which supports the StateStore interface:\n\n    class IStateStore(object):\n\n        def build_key(self, type, identifier):\n            return <a key>\n\n        def restore(self, keys):\n            return <dict of key to states>\n\n        def save(self, key, state_data):\n            pass\n\n        def cleanup(self):\n            pass\n\n    \"\"\"\n\n    def __init__(self, persistence_impl, buffer):\n        self.enabled = True\n        self._buffer = buffer\n        self._impl = persistence_impl\n\n    # TODO: get rid of the Any here - hopefully with a TypedDict\n    def restore(self, job_names: list[str]) -> dict[str, Any]:\n        \"\"\"Return the most recent serialized state.\"\"\"\n        log.info(f\"Restoring {len(job_names)} jobs\")\n        jobs = self._restore_dicts(runstate.JOB_STATE, job_names)\n        # jobs is a dict of job name -> job state\n        # e.g. {'MASTER.k8s': {'run_nums': [0], 'enabled': True}}\n\n        log.info(f\"Restoring JobRun state for {len(jobs)} jobs\")\n        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n            # Map each future to its job name so we can associate results later\n            results = {\n                executor.submit(self._restore_runs_for_job, job_name, job_state): job_name\n                for job_name, job_state in jobs.items()\n            }\n            for result in concurrent.futures.as_completed(results):\n                try:\n                    jobs[results[result]][\"runs\"] = result.result()\n                except Exception:\n                    log.exception(f\"Unable to restore state for {results[result]} - exiting to avoid corrupting data.\")\n                    sys.exit(1)\n\n        state = {\n            runstate.JOB_STATE: jobs,\n        }\n        return state\n\n    # TODO: get rid of the Any here - hopefully with a TypedDict\n    def _restore_runs_for_job(self, job_name: str, job_state: dict[str, Any]) -> list[dict[str, Any]]:\n        \"\"\"Restore the state for the runs of each job\"\"\"\n        run_nums = job_state[\"run_nums\"]\n        keys = [jobrun.get_job_run_id(job_name, run_num) for run_num in run_nums]\n        job_runs_restored_states = self._restore_dicts(runstate.JOB_RUN_STATE, keys)\n        all_job_runs = copy.copy(job_runs_restored_states)\n        for run_id, state in all_job_runs.items():\n            if state == {}:\n                log.error(f\"Failed to restore {run_id}, no state found for it!\")\n                job_runs_restored_states.pop(run_id)\n\n        runs = list(job_runs_restored_states.values())\n        # We need to sort below otherwise the runs will not be in order\n        runs.sort(key=lambda x: x[\"run_num\"], 
reverse=True)\n        return runs\n\n    def _keys_for_items(self, item_type, names):\n        \"\"\"Returns a dict of item to the key for that item.\"\"\"\n        keys = (self._impl.build_key(item_type, name) for name in names)\n        return dict(zip(keys, names))\n\n    # TODO: get rid of the Any here - hopefully with a TypedDict\n    def _restore_dicts(self, item_type: str, items: list[str]) -> dict[str, Any]:\n        \"\"\"Return a dict mapping of the items name to its state data.\"\"\"\n        key_to_item_map = self._keys_for_items(item_type, items)\n        key_to_state_map = self._impl.restore(key_to_item_map.keys())\n        return {key_to_item_map[key]: state_data for key, state_data in key_to_state_map.items()}\n\n    def delete(self, type_enum, name):\n        # A hack to use the save buffer, implementations of save\n        # need to delete if data is None.\n        self.save(type_enum, name, None)\n\n    def save(self, type_enum, name, state_data):\n        \"\"\"Persist an items state.\"\"\"\n        key = self._impl.build_key(type_enum, name)\n        log.debug(\"Buffering state save for: %s\", key)\n        if self._buffer.save(key, state_data):\n            if not self.enabled:\n                log.debug(f\"State manager disabled, not persisting {key}\")\n                return\n            self._save_from_buffer()\n\n    def _save_from_buffer(self):\n        key_state_pairs = list(self._buffer)\n        if not key_state_pairs:\n            return\n\n        with self._timeit():\n            try:\n                self._impl.save(key_state_pairs)\n            except Exception as e:\n                msg = f\"Error while saving: {repr(e)}\"\n                log.warning(msg)\n                raise PersistenceStoreError(msg)\n\n    def cleanup(self):\n        self._save_from_buffer()\n        self._impl.cleanup()\n\n    @contextmanager\n    def _timeit(self):\n        \"\"\"Log the time spent saving the state.\"\"\"\n        start_time = time.time()\n        yield\n        duration = time.time() - start_time\n        log.info(f\"State saved using {self._impl} in {duration:0.3f}s.\")\n\n    @contextmanager\n    def disabled(self):\n        \"\"\"Temporarily disable the state manager.\"\"\"\n        self.enabled, prev_enabled = False, self.enabled\n        try:\n            yield\n        finally:\n            self.enabled = prev_enabled\n\n\nclass NullStateManager:\n    enabled = False\n\n    @staticmethod\n    def cleanup():\n        pass\n\n    @classmethod\n    def disabled(cls):\n        return cls()\n\n    def __enter__(self):\n        return\n\n    def __exit__(self, *args):\n        return\n\n\nclass StateChangeWatcher(observer.Observer):\n    \"\"\"Observer of stateful objects.\"\"\"\n\n    def __init__(self):\n        self.state_manager = NullStateManager\n        self.config = None\n\n    def update_from_config(self, state_config):\n        if self.config == state_config:\n            return False\n\n        self.shutdown()\n        # NOTE: this will spin up a thread that will constantly persist data into dynamodb\n        self.state_manager = PersistenceManagerFactory.from_config(\n            state_config,\n        )\n        self.config = state_config\n        return True\n\n    def handler(self, observable, event, event_data=None):\n        \"\"\"Handle a state change in an observable by saving its state.\"\"\"\n        if observable == MesosClusterRepository:\n            self.save_frameworks(observable)\n        elif isinstance(observable, job.Job):\n       
     if event == job.Job.NOTIFY_NEW_RUN:\n                if event_data is None or not isinstance(event_data, jobrun.JobRun):\n                    log.warning(f\"Notified of new run, but no run to watch. Got {event_data}\")\n                else:\n                    log.debug(f\"Watching new run {event_data}\")\n                    self.watch(event_data)\n            else:\n                self.save_job(observable)\n        elif isinstance(observable, jobrun.JobRun):\n            if event == jobrun.JobRun.NOTIFY_REMOVED:\n                self.delete_job_run(observable)\n            else:\n                self.save_job_run(observable)\n\n    def save_job(self, job):\n        self._save_object(runstate.JOB_STATE, job)\n\n    def save_job_run(self, job_run):\n        self._save_object(runstate.JOB_RUN_STATE, job_run)\n\n    def delete_job_run(self, job_run):\n        # HACK: this cast is nasty, but we should probably refactor things so that the default self.state_manager\n        # in not a NullStateManager\n        cast(PersistentStateManager, self.state_manager).delete(runstate.JOB_RUN_STATE, job_run.name)\n\n    def save_frameworks(self, clusters):\n        self._save_object(runstate.MESOS_STATE, clusters)\n\n    def _save_object(self, state_type, obj):\n        # HACK: this cast is nasty, but we should probably refactor things so that the default self.state_manager\n        # in not a NullStateManager\n        cast(PersistentStateManager, self.state_manager).save(state_type, obj.name, obj.state_data)\n\n    def shutdown(self):\n        self.state_manager.enabled = False\n        self.state_manager.cleanup()\n\n    def disabled(self):\n        return self.state_manager.disabled()\n\n    def restore(self, jobs: list[str]) -> dict[str, Any]:\n        # HACK: this cast is nasty, but we should probably refactor things so that the default self.state_manager\n        # in not a NullStateManager\n        return cast(PersistentStateManager, self.state_manager).restore(jobs)\n"
  },
  {
    "path": "tron/serialize/runstate/yamlstore.py",
    "content": "\"\"\"Store state in a local YAML file.\n\nWARNING: Using this store is NOT recommended.  It will be far too slow for\nanything but the most trivial setups.  It should only be used with a high\nbuffer size (10+), and a low run_limit (< 10).\n\"\"\"\nimport operator\nimport os\nfrom collections import namedtuple\n\nfrom tron import yaml\nfrom tron.serialize import runstate\n\nYamlKey = namedtuple(\"YamlKey\", [\"type\", \"iden\"])\n\nTYPE_MAPPING = {\n    runstate.JOB_STATE: \"jobs\",\n}\n\n\nclass YamlStateStore:\n    def __init__(self, filename):\n        self.filename = filename\n        self.buffer = {}\n\n    def build_key(self, type, iden):\n        return YamlKey(TYPE_MAPPING[type], iden)\n\n    def restore(self, keys):\n        if not os.path.exists(self.filename):\n            return {}\n\n        with open(self.filename) as fh:\n            self.buffer = yaml.load(fh)\n\n        items = (self.buffer.get(key.type, {}).get(key.iden) for key in keys)\n        key_item_pairs = zip(keys, items)\n        return dict(filter(operator.itemgetter(1), key_item_pairs))\n\n    def save(self, key_value_pairs):\n        for key, state_data in key_value_pairs:\n            if state_data is None:\n                self._delete_from_buffer(key)\n            else:\n                self.buffer.setdefault(key.type, {})[key.iden] = state_data\n        self._write_buffer()\n\n    def _delete_from_buffer(self, key):\n        data_for_type = self.buffer.get(key.type, {})\n        if data_for_type.get(key.iden):\n            del data_for_type[key.iden]\n        if not data_for_type:  # No remaining data for this type\n            del self.buffer[key.type]\n\n    def _write_buffer(self):\n        with open(self.filename, \"w\") as fh:\n            yaml.dump(self.buffer, fh)\n\n    def cleanup(self):\n        pass\n\n    def __repr__(self):\n        return \"YamlStateStore('%s')\" % self.filename\n"
  },
  {
    "path": "tron/ssh.py",
    "content": "import logging\nimport struct\nimport warnings\n\nfrom cryptography.utils import CryptographyDeprecationWarning\nfrom twisted.conch.ssh import channel\nfrom twisted.conch.ssh import common\nfrom twisted.conch.ssh import connection\nfrom twisted.conch.ssh import keys\nfrom twisted.internet import defer\nfrom twisted.python import failure\n\n# Ignore CryptographyDeprecationWarning as we don't use `cryptography` directly and\n# the warnings are coming from one of our dependencies (Twisted) that does. There's\n# nothing we can do until they stop using the deprecated ciphers - so ignoring these warnings should be safe\nwarnings.filterwarnings(\"ignore\", category=CryptographyDeprecationWarning)\n# These need to be imported after filtering warnings\nfrom twisted.conch.client import default  # noqa: E402\nfrom twisted.conch.ssh import transport  # noqa: E402\n\n\nlog = logging.getLogger(\"tron.ssh\")\n\n\nclass Error(Exception):\n    pass\n\n\nclass ChannelClosedEarlyError(Error):\n    \"\"\"Indicates the SSH Channel has closed before we were done handling the\n    command\"\"\"\n\n    pass\n\n\nclass SSHAuthOptions:\n    \"\"\"An options class which can be used by NoPasswordAuthClient. This supports\n    the interface provided by: twisted.conch.client.options.ConchOptions.\n    \"\"\"\n\n    def __init__(self, identitys, use_agent):\n        self.use_agent = use_agent\n        self.identitys = identitys\n\n    @classmethod\n    def from_config(cls, ssh_config):\n        return cls(ssh_config.identities, ssh_config.agent)\n\n    def __getitem__(self, item):\n        if item != \"noagent\":\n            raise KeyError(item)\n        return not self.use_agent\n\n    def __eq__(self, other):\n        return other and (self.use_agent == other.use_agent and self.identitys == other.identitys)\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __str__(self):\n        context = self.__class__.__name__, self.identitys, self.use_agent\n        return \"%s(%s, %s)\" % context\n\n\nclass NoPasswordAuthClient(default.SSHUserAuthClient):\n    \"\"\"Only support passwordless auth.\"\"\"\n\n    preferredOrder = [\"publickey\", \"keyboard-interactive\"]  # type: ignore\n    auth_password = None\n\n    def getGenericAnswers(self, name, instruction, prompts):\n        # We really only need to get input from the user if there is actually a prompt\n        # This works around an issue where some PAM modules have \"keyboard-interactive\"\n        # but don't actually require any input from the end-user\n        if prompts:\n            return super().getGenericAnswers(name, instruction, prompts)\n\n        # Otherwise, just return an empty defer.succeed to satisfy the contract\n        return defer.succeed([])\n\n\nclass ClientTransport(transport.SSHClientTransport):\n\n    connection_defer = None\n\n    def __init__(self, username, options, expected_pub_key):\n        self.username = username\n        self.options = options\n        self.expected_pub_key = expected_pub_key\n\n    def verifyHostKey(self, public_key, fingerprint):\n        if not self.expected_pub_key:\n            return defer.succeed(1)\n\n        if self.expected_pub_key == keys.Key.fromString(public_key):\n            return defer.succeed(2)\n\n        msg = f\"Public key mismatch got {fingerprint} expected {self.expected_pub_key.fingerprint()}\"\n        log.error(msg)\n        return defer.fail(ValueError(msg))\n\n    def connectionSecure(self):\n        conn = ClientConnection()\n        # 
TODO: this should be initialized by the ClientConnection constructor\n        conn.service_defer = defer.Deferred()\n        # TODO: this should be initialized by the constructor\n        self.connection_defer.callback(conn)\n\n        auth_service = NoPasswordAuthClient(self.username, self.options, conn)\n        self.requestService(auth_service)\n\n\nclass ClientConnection(connection.SSHConnection):\n\n    service_start_defer = None\n    service_stop_defer = None\n\n    def setToFromStrings(self):\n        try:\n            self.from_string = f\"{self.transport.getHost().address.host}:{self.transport.getHost().address.port}\"\n        except Exception as e:\n            log.debug(e)\n            self.from_string = \"(unknown tcp source)\"\n        try:\n            self.to_string = f\"{self.transport.getPeer().address.host}:{self.transport.getPeer().address.port}\"\n        except Exception as e:\n            log.debug(e)\n            self.to_string = \"(unknown tcp destination)\"\n\n    def serviceStarted(self):\n        connection.SSHConnection.serviceStarted(self)\n        self.setToFromStrings()\n        log.info(f\"Started SSH connection from {self.from_string} to {self.to_string}\")\n        if not self.service_stop_defer.called:\n            self.service_start_defer.callback(self)\n\n    def serviceStopped(self):\n        connection.SSHConnection.serviceStopped(self)\n        log.info(f\"Stopped SSH connection from {self.from_string} to {self.to_string}\")\n        if not self.service_stop_defer.called:\n            self.service_stop_defer.callback(self)\n\n    def channelClosed(self, channel):\n        if not channel.conn:\n            log.warning(\"Channel %r failed to open\", channel.id)\n            # Channel has no connection, so we were still trying to open it. The\n            # normal error handling won't notify us since the channel never\n            # successfully opened.\n            channel.openFailed(None)\n\n        connection.SSHConnection.channelClosed(self, channel)\n        if channel.id in self.deferreds:\n            del self.deferreds[channel.id]\n\n    def ssh_CHANNEL_REQUEST(self, packet):\n        \"\"\"\n        The other side is sending a request to a channel.  
Payload::\n            uint32  local channel number\n            string  request name\n            bool    want reply\n            <request specific data>\n\n        Handles missing local channel.\n        \"\"\"\n        localChannel = struct.unpack(\">L\", packet[:4])[0]\n        if localChannel not in self.channels:\n            requestType, _ = common.getNS(packet[4:])\n            host = self.transport.transport.getPeer()\n            msg = \"Missing channel: %s, request_type: %s, host: %s\"\n            log.warn(msg, localChannel, requestType, host)\n            return\n        connection.SSHConnection.ssh_CHANNEL_REQUEST(self, packet)\n\n\nclass ExecChannel(channel.SSHChannel):\n\n    name = b\"session\"\n    exit_defer = None\n    start_defer = None\n\n    command = None\n    exit_status = None\n    running = False\n\n    def __init__(self, *args, **kwargs):\n        channel.SSHChannel.__init__(self, *args, **kwargs)\n        self.output_callbacks = []\n        self.end_callbacks = []\n        self.error_callbacks = []\n        self.data = []\n\n    def channelOpen(self, data):\n        self.data = []\n        self.running = True\n\n        if self.start_defer:\n            log.debug(\"Channel %s is open, calling deferred\", self.id)\n            self.start_defer.callback(self)\n\n            self.command = self.command.encode(\"utf-8\")\n\n            req = self.conn.sendRequest(\n                self,\n                b\"exec\",\n                common.NS(self.command),\n                wantReply=True,\n            )\n            req.addCallback(self._cbExecSendRequest)\n        else:\n            # A missing start defer means that we are no longer expected to do\n            # anything when the channel opens It probably means we gave up on\n            # this connection and failed the job, but later the channel opened\n            # up correctly.\n            log.warning(\"Channel open delayed, giving up and closing\")\n            self.loseConnection()\n\n    def addOutputCallback(self, output_callback):\n        self.output_callbacks.append(output_callback)\n\n    def addErrorCallback(self, error_callback):\n        self.error_callbacks.append(error_callback)\n\n    def addEndCallback(self, end_callback):\n        self.end_callbacks.append(end_callback)\n\n    def openFailed(self, reason):\n        log.error(\"Open failed due to %r\", reason)\n        if self.start_defer:\n            self.start_defer.errback(self)\n\n    def _cbExecSendRequest(self, ignored):\n        self.conn.sendEOF(self)\n\n    def request_exit_status(self, data):\n        # exit status is a 32-bit unsigned int in network byte format\n        status = struct.unpack_from(b\">L\", data, 0)[0]\n\n        log.debug(\"Received exit status request: %d\", status)\n        self.exit_status = status\n        self.exit_defer.callback(self)\n        self.running = False\n        return True\n\n    def dataReceived(self, data):\n        self.data = [data]\n        for callback in self.output_callbacks:\n            callback(data)\n\n    def extReceived(self, dataType, data):\n        self.data = [data]\n        for callback in self.error_callbacks:\n            callback(data)\n\n    def getStdout(self):\n        return \"\".join(self.data)\n\n    def closed(self):\n        if self.exit_status is None and self.running and self.exit_defer and not self.exit_defer.called:\n            log.warning(\n                \"Channel has been closed without receiving an exit\" \" status\",\n            )\n            f = 
failure.Failure(exc_value=ChannelClosedEarlyError())\n            self.exit_defer.errback(f)\n\n        for callback in self.end_callbacks:\n            callback()\n        # TODO: this is triggered by loseConnection, we shouldn't need to call it\n        # again here\n        self.loseConnection()\n"
  },
  {
    "path": "tron/trondaemon.py",
    "content": "\"\"\"\n Daemonize trond.\n\"\"\"\nimport contextlib\nimport logging.config\nimport os\nimport signal\nimport threading\nimport time\n\nimport ipdb  # type: ignore[import-untyped] # no stubs or py.typed marker; maybe move to pdb?\nimport pkg_resources\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\nfrom twisted.python import log as twisted_log\n\nimport tron\nfrom tron.kubernetes import KubernetesClusterRepository\nfrom tron.manhole import make_manhole\nfrom tron.mesos import MesosClusterRepository\nfrom tron.utils import chdir\nfrom tron.utils import flock\nfrom tron.utils import signals\n\nlog = logging.getLogger(__name__)\n\n\ndef setup_logging(options):\n    default = pkg_resources.resource_filename(tron.__name__, \"logging.conf\")\n    logfile = options.log_conf or default\n\n    level = twist_level = None\n    if options.verbose > 0:\n        level = logging.INFO\n        twist_level = logging.WARNING\n    if options.verbose > 1:\n        level = logging.DEBUG\n        twist_level = logging.INFO\n    if options.verbose > 2:\n        twist_level = logging.DEBUG\n\n    tron_logger = logging.getLogger(\"tron\")\n    twisted_logger = logging.getLogger(\"twisted\")\n\n    logging.config.fileConfig(logfile)\n    if level is not None:\n        tron_logger.setLevel(level)\n    if twist_level is not None:\n        twisted_logger.setLevel(twist_level)\n\n    # Hookup twisted to standard logging\n    twisted_log.PythonLoggingObserver().start()\n\n    # Show stack traces for errors in twisted deferreds.\n    if options.debug:\n        defer.setDebugging(True)\n\n\n@contextlib.contextmanager\ndef no_daemon_context(workdir, lockfile=None, signal_map={}):\n    with chdir(workdir), flock(lockfile), signals(signal_map):\n        yield\n\n\nclass TronDaemon:\n    \"\"\"Daemonize and run the tron daemon.\"\"\"\n\n    def __init__(self, options):\n        self.options = options\n        setup_logging(self.options)\n\n        self.mcp = None\n        self.lock_file = self.options.lock_file\n        self.working_dir = self.options.working_dir\n        self.signals = {signal.SIGINT: signal.default_int_handler}\n        self.manhole_sock = f\"{self.options.working_dir}/manhole.sock\"\n\n    def run(self, boot_time):\n        with no_daemon_context(self.working_dir, self.lock_file, self.signals):\n            signal_map = {\n                signal.SIGHUP: self._handle_reconfigure,\n                signal.SIGINT: self._handle_shutdown,\n                signal.SIGTERM: self._handle_shutdown,\n                signal.SIGQUIT: self._handle_shutdown,\n                signal.SIGUSR1: self._handle_debug,\n            }\n            signal.pthread_sigmask(signal.SIG_BLOCK, signal_map.keys())\n            log.info(\"Starting setup processes...\")\n            self._run_mcp(boot_time=boot_time)\n            log.info(\n                f\"Master Control Program (MCP) setup complete. Time elapsed since Tron started: {time.time() - boot_time}s\"\n            )\n            self._run_www_api()\n            log.info(f\"Tron API setup complete. Time elapsed since Tron started: {time.time() - boot_time}s\")\n            self._run_manhole()\n            log.info(f\"Manhole setup complete. Time elapsed since Tron started: {time.time() - boot_time}s\")\n            self._run_reactor()\n            log.info(\n                f\"Twisted reactor has started. The Tron API should be up and ready now to receive requests. 
Time elapsed since Tron started: {time.time() - boot_time}s\"\n            )\n            log.info(\"Setup complete!\")\n\n            while True:\n                signum = signal.sigwait(list(signal_map.keys()))\n                if signum in signal_map:\n                    logging.info(f\"Got signal {str(signum)}\")\n                    signal_map[signum](signum, None)\n\n    def _run_manhole(self):\n        # This condition is made with the assumption that no existing daemon\n        # is running. If there is one, the following code could potentially\n        # cause problems for the other daemon by removing its socket.\n        if os.path.exists(self.manhole_sock):\n            log.info(\"Removing orphaned manhole socket\")\n            os.remove(self.manhole_sock)\n\n        self.manhole = make_manhole(dict(trond=self, mcp=self.mcp))\n        reactor.listenUNIX(self.manhole_sock, self.manhole)\n        log.info(f\"manhole started on {self.manhole_sock}\")\n\n    def _run_www_api(self):\n        # Local import required because of reactor import in server and www\n        from tron.api import resource\n\n        site = resource.TronSite.create(self.mcp, self.options.web_path)\n        port = self.options.listen_port\n        reactor.listenTCP(port, site, interface=self.options.listen_host)\n\n    def _run_mcp(self, boot_time=None):\n        # Local import required because of reactor import in mcp\n        from tron import mcp\n\n        working_dir = self.options.working_dir\n        config_path = self.options.config_path\n        self.mcp = mcp.MasterControlProgram(working_dir, config_path, boot_time)\n\n        try:\n            self.mcp.initial_setup()\n        except Exception as e:\n            msg = \"Error in configuration %s: %s\"\n            log.exception(msg % (config_path, e))\n            raise\n\n    def _run_reactor(self):\n        \"\"\"Run the twisted reactor.\"\"\"\n        # This is what actually starts the Tron server by starting the Twisted event loop\n        threading.Thread(\n            target=reactor.run,\n            daemon=True,\n            kwargs=dict(installSignalHandlers=0),\n        ).start()\n\n    def _handle_shutdown(self, sig_num, stack_frame):\n        log.info(f\"Shutdown requested via {str(sig_num)}\")\n        reactor.callLater(0, reactor.stop)\n        waited = 0\n        while reactor.running:\n            if waited > 5:\n                log.error(\"timed out waiting for reactor shutdown\")\n                break\n            time.sleep(0.1)\n            waited += 0.1\n        if self.mcp:\n            self.mcp.shutdown()\n        MesosClusterRepository.shutdown()\n        KubernetesClusterRepository.shutdown()\n        raise SystemExit(f\"Terminating on signal {str(sig_num)}\")\n\n    def _handle_reconfigure(self, _signal_number, _stack_frame):\n        log.info(\"Reconfigure requested by SIGHUP.\")\n        reactor.callLater(0, self.mcp.reconfigure)\n\n    def _handle_debug(self, _signal_number, _stack_frame):\n        ipdb.set_trace()\n"
  },
  {
    "path": "tron/utils/__init__.py",
    "content": "import contextlib\nimport fcntl\nimport logging\nimport os\nimport signal\n\nlog = logging.getLogger(__name__)\n\n\n# TODO: TRON-2293 maybe_decode is a relic of Python2->Python3 migration. Remove it.\ndef maybe_decode(maybe_string):\n    if type(maybe_string) is bytes:\n        return maybe_string.decode()\n    return maybe_string\n\n\n# TODO: TRON-2293 maybe_encode is a relic of Python2->Python3 migration. Remove it.\ndef maybe_encode(maybe_bytes):\n    if type(maybe_bytes) is not bytes:\n        return maybe_bytes.encode()\n    return maybe_bytes\n\n\ndef next_or_none(iterable):\n    try:\n        return next(iterable)\n    except StopIteration:\n        pass\n\n\n@contextlib.contextmanager\ndef flock(fd):\n    close = False\n    if isinstance(fd, str):\n        fd = open(fd, \"a\")\n        close = True\n\n    try:\n        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n    except BlockingIOError as e:  # locked by someone else\n        log.debug(f\"Locked by another process: {fd}\")\n        raise e\n\n    try:\n        yield\n    finally:\n        fcntl.lockf(fd, fcntl.LOCK_UN)\n        if close:\n            fd.close()\n\n\n@contextlib.contextmanager\ndef chdir(path):\n    cwd = os.getcwd()\n    os.chdir(path)\n    try:\n        yield\n    finally:\n        os.chdir(cwd)\n\n\n@contextlib.contextmanager\ndef signals(signal_map):\n    orig_map = {}\n    for signum, handler in signal_map.items():\n        orig_map[signum] = signal.signal(signum, handler)\n\n    try:\n        yield\n    finally:\n        for signum, handler in orig_map.items():\n            signal.signal(signum, handler)\n"
  },
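  {
    "path": "examples/utils_flock_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nexercises the flock() and chdir() context managers from tron.utils. The lock file\nname and the temporary working directory below are made up for this example.\"\"\"\nimport tempfile\n\nfrom tron.utils import chdir\nfrom tron.utils import flock\n\n\ndef main() -> None:\n    with tempfile.TemporaryDirectory() as workdir:\n        # flock() accepts an open file object or a path string; given a path it\n        # opens (and later closes) the lock file itself. A second process trying\n        # to take the same lock would get a BlockingIOError.\n        with chdir(workdir), flock(\"trond.lock\"):\n            print(\"holding trond.lock inside\", workdir)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },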
  {
    "path": "tron/utils/collections.py",
    "content": "\"\"\"Utilities for working with collections.\"\"\"\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass MappingCollection(dict):\n    \"\"\"Dictionary like object for managing collections of items. Item is\n    expected to support the following interface, and should be hashable.\n\n    class Item(object):\n\n        def get_name(self): ...\n\n        def restore_state(self, state_data): ...\n\n        def disable(self): ...\n\n        def __eq__(self, other): ...\n\n    \"\"\"\n\n    def __init__(self, item_name):\n        dict.__init__(self)\n        self.item_name = item_name\n\n    def filter_by_name(self, names):\n        for name in set(self) - set(names):\n            self.remove(name)\n\n    def remove(self, name):\n        if name not in self:\n            raise ValueError(f\"{self.item_name} {name} unknown\")\n\n        log.info(\"Removing %s %s\", self.item_name, name)\n        self.pop(name).disable()\n\n    def contains_item(self, item, handle_update_func):\n        if item == self.get(item.get_name()):\n            return True\n\n        return handle_update_func(item) if item.get_name() in self else False\n\n    def add(self, item, update_func):\n        if self.contains_item(item, update_func):\n            return False\n\n        log.info(\"Adding new %s\" % item)\n        self[item.get_name()] = item\n        return True\n\n    def replace(self, item):\n        return self.add(item, self.remove_item)\n\n    def remove_item(self, item):\n        return self.remove(item.get_name())\n"
  },
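  {
    "path": "examples/mapping_collection_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nshows the item interface that tron.utils.collections.MappingCollection expects.\nFakeJob and update_in_place are made-up names used only for this example.\"\"\"\nfrom tron.utils.collections import MappingCollection\n\n\nclass FakeJob:\n    def __init__(self, name, command):\n        self.name = name\n        self.command = command\n\n    def get_name(self):\n        return self.name\n\n    def disable(self):\n        print(f\"disabling {self.name}\")\n\n    def __eq__(self, other):\n        return isinstance(other, FakeJob) and (self.name, self.command) == (other.name, other.command)\n\n    def __hash__(self):\n        return hash(self.name)\n\n\ndef update_in_place(new_item):\n    # handle_update_func: invoked by add() when an item with the same name exists\n    # but compares unequal; returning True means the update was handled here.\n    print(f\"updating {new_item.get_name()} in place\")\n    return True\n\n\ncollection = MappingCollection(\"job\")\ncollection.add(FakeJob(\"backup\", \"rsync -a /data /backup\"), update_in_place)  # new item, added\ncollection.add(FakeJob(\"backup\", \"rsync -az /data /backup\"), update_in_place)  # same name, handled by update_in_place\ncollection.replace(FakeJob(\"backup\", \"tar czf backup.tgz /data\"))  # disables the old item, stores the new one\ncollection.filter_by_name([])  # disables and drops everything not in the given name list\n"
  },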
  {
    "path": "tron/utils/crontab.py",
    "content": "\"\"\"Parse a crontab entry and return a dictionary.\"\"\"\nimport calendar\nimport itertools\nimport re\n\nPREDEFINED_SCHEDULE = {\n    \"@yearly\": \"0 0 1 1 *\",\n    \"@annually\": \"0 0 1 1 *\",\n    \"@monthly\": \"0 0 1 * *\",\n    \"@weekly\": \"0 0 * * 0\",\n    \"@daily\": \"0 0 * * *\",\n    \"@midnight\": \"0 0 * * *\",\n    \"@hourly\": \"0 * * * *\",\n}\n\n\ndef convert_predefined(line: str) -> str:\n    if not line.startswith(\"@\"):\n        return line\n\n    if line not in PREDEFINED_SCHEDULE:\n        raise ValueError(\"Unknown predefine: %s\" % line)\n    return PREDEFINED_SCHEDULE[line]\n\n\n# TODO: TRON-1761 - Fix cron validation. The pattern is not working as expected.\nclass FieldParser:\n    \"\"\"Parse and validate a field in a crontab entry.\"\"\"\n\n    name: str = \"\"\n    bounds: tuple[int, int] = (0, 0)\n    range_pattern = re.compile(\n        r\"\"\"\n        (?P<min>\\d+|\\*)         # Initial value\n        (?:-(?P<max>\\d+))?      # Optional max upper bound\n        (?:/(?P<step>\\d+))?     # Optional step increment\n        \"\"\",\n        re.VERBOSE,\n    )\n\n    def normalize(self, source: str) -> str:\n        return source.strip()\n\n    def get_groups(self, source: str) -> list[str]:\n        return source.split(\",\")\n\n    def parse(self, source: str) -> list[int] | list[int | str] | None:\n        if source == \"*\":\n            return None\n\n        groups: set[int | str] = set(\n            itertools.chain.from_iterable(self.get_values(group) for group in self.get_groups(source))\n        )\n        has_last = \"LAST\" in groups\n        if has_last:\n            groups.remove(\"LAST\")\n        sorted_groups: list[int | str] = sorted(groups, key=lambda x: (isinstance(x, str), x))\n        if has_last:\n            sorted_groups.append(\"LAST\")\n\n        return sorted_groups\n\n    def get_match_groups(self, source: str) -> dict:\n        match = self.range_pattern.match(source)\n        if not match:\n            raise ValueError(\"Unknown expression: %s\" % source)\n        return match.groupdict()\n\n    def get_values(self, source: str) -> list[int | str]:\n        source = self.normalize(source)\n        match_groups = self.get_match_groups(source)\n        step = 1\n        min_value, max_value = self.get_value_range(match_groups)\n\n        if match_groups[\"step\"]:\n            step = self.validate_bounds(match_groups[\"step\"])\n        return self.get_range(min_value, max_value, step)\n\n    def get_value_range(self, match_groups: dict) -> tuple[int, int]:\n        if match_groups[\"min\"] == \"*\":\n            return self.bounds\n\n        min_value = self.validate_bounds(match_groups[\"min\"])\n        if match_groups[\"max\"]:\n            # Cron expressions are inclusive, range is exclusive on upper bound\n            max_value = self.validate_bounds(match_groups[\"max\"]) + 1\n            return min_value, max_value\n\n        return min_value, min_value + 1\n\n    def get_range(self, min_value: int, max_value: int, step: int) -> list[int | str]:\n        if min_value < max_value:\n            return list(range(min_value, max_value, step))\n\n        min_bound, max_bound = self.bounds\n        diff = (max_bound - min_value) + (max_value - min_bound)\n        return [(min_value + i) % max_bound for i in list(range(0, diff, step))]\n\n    def validate_bounds(self, value: str) -> int:\n        min_value, max_value = self.bounds\n        int_value = int(value)\n        if not min_value <= int_value < 
max_value:\n            raise ValueError(f\"{self.name} value out of range: {int_value}\")\n        return int_value\n\n\nclass MinuteFieldParser(FieldParser):\n    name = \"minutes\"\n    bounds = (0, 60)\n\n\nclass HourFieldParser(FieldParser):\n    name = \"hours\"\n    bounds = (0, 24)\n\n\nclass MonthdayFieldParser(FieldParser):\n    name = \"monthdays\"\n    bounds = (1, 32)\n\n    def get_values(self, source: str) -> list[int | str]:\n        # Handle special case for last day of month\n        source = self.normalize(source)\n        if source == \"L\":\n            return [\"LAST\"]\n\n        return super().get_values(source)\n\n\nclass MonthFieldParser(FieldParser):\n    name = \"months\"\n    bounds = (1, 13)\n    month_names = calendar.month_abbr[1:]\n\n    def normalize(self, month: str) -> str:\n        month = super().normalize(month)\n        month = month.lower()\n        for month_num, month_name in enumerate(self.month_names, start=1):\n            month = month.replace(month_name.lower(), str(month_num))\n        return month\n\n\nclass WeekdayFieldParser(FieldParser):\n    name = \"weekdays\"\n    bounds = (0, 7)\n    day_names = [\"sun\", \"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\"]\n\n    def normalize(self, day_of_week: str) -> str:\n        day_of_week = super().normalize(day_of_week)\n        day_of_week = day_of_week.lower()\n        for dow_num, dow_name in enumerate(self.day_names):\n            day_of_week = day_of_week.replace(dow_name, str(dow_num))\n        return day_of_week.replace(\"7\", \"0\").replace(\"?\", \"*\")\n\n\nminute_parser = MinuteFieldParser()\nhour_parser = HourFieldParser()\nmonthday_parser = MonthdayFieldParser()\nmonth_parser = MonthFieldParser()\nweekday_parser = WeekdayFieldParser()\n\n\n# TODO: support L (for dow), W, #\ndef parse_crontab(line: str) -> dict:\n    line = convert_predefined(line)\n    minutes, hours, dom, months, dow = line.split(None, 4)\n\n    return {\n        \"minutes\": minute_parser.parse(minutes),\n        \"hours\": hour_parser.parse(hours),\n        \"monthdays\": monthday_parser.parse(dom),\n        \"months\": month_parser.parse(months),\n        \"weekdays\": weekday_parser.parse(dow),\n        \"ordinals\": None,\n    }\n"
  },
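  {
    "path": "examples/crontab_parse_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nfeeds a couple of entries through tron.utils.crontab.parse_crontab to show the\ndict it returns.\"\"\"\nfrom tron.utils.crontab import parse_crontab\n\n# Predefined schedules are expanded first, so \"@daily\" is parsed as \"0 0 * * *\".\nprint(parse_crontab(\"@daily\"))\n# {'minutes': [0], 'hours': [0], 'monthdays': None, 'months': None,\n#  'weekdays': None, 'ordinals': None}\n\n# \"*\" fields come back as None (meaning every value); ranges, steps, names and \"L\" expand.\nprint(parse_crontab(\"*/15 8-10 1,L * mon-fri\"))\n# minutes [0, 15, 30, 45], hours [8, 9, 10], monthdays [1, 'LAST'],\n# months None, weekdays [1, 2, 3, 4, 5]\n"
  },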
  {
    "path": "tron/utils/exitcode.py",
    "content": "# TRON-1826\nEXIT_INVALID_COMMAND = -1\nEXIT_NODE_ERROR = -2\nEXIT_STOP_KILL = -3\nEXIT_TRIGGER_TIMEOUT = -4\nEXIT_MESOS_DISABLED = -5\nEXIT_KUBERNETES_DISABLED = -6\nEXIT_KUBERNETES_NOT_CONFIGURED = -7\nEXIT_KUBERNETES_TASK_INVALID = -8\nEXIT_KUBERNETES_ABNORMAL = -9\nEXIT_KUBERNETES_SPOT_INTERRUPTION = -10\nEXIT_KUBERNETES_NODE_SCALEDOWN = -11\nEXIT_KUBERNETES_TASK_LOST = -12\nEXIT_KUBERNETES_EPHEMERAL_STORAGE_EVICTION = -13\n\nEXIT_REASONS = {\n    EXIT_INVALID_COMMAND: \"Invalid command\",\n    EXIT_NODE_ERROR: \"Node error\",\n    EXIT_STOP_KILL: \"Stopped or killed\",\n    EXIT_TRIGGER_TIMEOUT: \"Timed out waiting for trigger\",\n    EXIT_MESOS_DISABLED: \"Mesos disabled\",\n    EXIT_KUBERNETES_DISABLED: \"Kubernetes disabled\",\n    EXIT_KUBERNETES_NOT_CONFIGURED: \"Kubernetes enabled, but not configured\",\n    EXIT_KUBERNETES_TASK_INVALID: \"Kubernetes task was not valid\",\n    EXIT_KUBERNETES_ABNORMAL: \"Kubernetes task failed in an unexpected manner\",\n    EXIT_KUBERNETES_SPOT_INTERRUPTION: \"Kubernetes task failed due to spot interruption\",\n    EXIT_KUBERNETES_NODE_SCALEDOWN: \"Kubernetes task failed due to the autoscaler scaling down a node\",\n    EXIT_KUBERNETES_TASK_LOST: \"Kubernetes task is lost and the final outcome unknown\",\n    EXIT_KUBERNETES_EPHEMERAL_STORAGE_EVICTION: \"Kubernetes task failed due to exceeding disk-space usage limits\",\n}\n"
  },
  {
    "path": "tron/utils/logreader.py",
    "content": "import datetime\nimport json\nimport logging\nimport operator\nfrom collections.abc import Iterator\nfrom functools import lru_cache\n\nimport staticconf\nimport yaml\n\nfrom tron.config.static_config import get_config_watcher\nfrom tron.config.static_config import NAMESPACE\n\n# NOTE: this is an internal-only package, so we won't be able to typecheck against it with mypy\n# without these hacky inlined stubs\ntry:\n    from logreader.readers import S3LogsReader  # type: ignore[import-not-found,import-untyped,unused-ignore]  # internal-only package, need py3.10 for typed version\n\n    s3reader_available = True\nexcept ImportError:\n    s3reader_available = False\n\n    class S3LogsReader:  # type: ignore[no-redef]  # stub class for internal-only package\n        def __init__(self, superregion: str) -> None:\n            raise ImportError(\"logreader (internal Yelp package) is not available - unable to display logs.\")\n\n        def get_log_reader(\n            self, log_name: str, start_datetime: datetime.datetime, end_datetime: datetime.datetime\n        ) -> Iterator[str]:\n            raise NotImplementedError(\"logreader (internal Yelp package) is not available - unable to display logs.\")\n\n\nlog = logging.getLogger(__name__)\nUSE_SRV_CONFIGS = -1\n\n\n@lru_cache(maxsize=1)\ndef get_superregion() -> str:\n    \"\"\"\n    Discover what region we're running in by reading this information from on-disk facts.\n\n    Yelpers: for more information, see y/habitat\n    \"\"\"\n    with open(\"/nail/etc/superregion\") as f:\n        return f.read().strip()\n\n\ndef decompose_action_id(action_run_id: str, paasta_cluster: str) -> tuple[str, str, str, str]:\n    namespace, job_name, run_num, action = action_run_id.split(\".\")\n    # NOTE: some services use an unfortunate feature that allows an action to use another service's\n    # image - thus we need to read from soaconfigs to determine the \"real\" service name since we're\n    # not passing this information down to tron atm\n    try:\n        with open(f\"/nail/etc/services/{namespace}/tron-{paasta_cluster}.yaml\") as f:\n            config = yaml.load(f, Loader=yaml.CSafeLoader)\n            service: str | None = config.get(job_name, {}).get(\"actions\", {}).get(action, {}).get(\"service\", None)\n            if service:\n                return service, job_name, run_num, action\n    except FileNotFoundError:\n        # afaict, this should only be possible if the service is getting deleted and we haven't run setup_tron_namespace yet\n        log.warning(f\"yelp-soaconfig file tron-{paasta_cluster}.yaml not found for action_run_id {action_run_id}.\")\n    except yaml.YAMLError:\n        log.exception(\n            f\"Error parsing YAML file tron-{paasta_cluster}.yaml for {action_run_id} - will default to using current namespace:\"\n        )\n    except Exception:\n        log.exception(\n            f\"Error reading service for {action_run_id} from file tron-{paasta_cluster}.yaml - will default to using current namespace:\"\n        )\n\n    return namespace, job_name, run_num, action\n\n\nclass PaaSTALogs:\n    def __init__(self, component: str, paasta_cluster: str, action_run_id: str) -> None:\n        self.component = component\n        self.paasta_cluster = paasta_cluster\n        self.action_run_id = action_run_id\n        namespace, job_name, run_num, action = decompose_action_id(action_run_id, paasta_cluster)\n        # in our logging infra, things are logged to per-instance streams - but\n        # since Tron 
PaaSTA instances are of the form `job_name.action`, we need\n        # to escape the period since some parts of our infra will reject streams\n        # containing them - thus, the \"weird\" __ separator\n        self.stream_name = f\"stream_paasta_app_output_{namespace}_{job_name}__{action}\"\n        self.run_num = int(run_num)\n        self.num_lines = 0\n        self.malformed_lines = 0\n        self.output: list[tuple[str, str]] = []\n        self.truncated_output = False\n\n    def fetch(self, stream: Iterator[str], max_lines: int | None) -> None:\n        for line in stream:\n            if max_lines is not None and self.num_lines == max_lines:\n                self.truncated_output = True\n                break\n            # it's possible for jobs to run multiple times a day and have obscenely large amounts of output\n            # so we can't just truncate after seeing X number of lines for the run number in question - we\n            # need to count how many total lines we've seen and bail out early to preserve tron's uptime\n            self.num_lines += 1\n\n            try:\n                payload = json.loads(line)\n            except json.decoder.JSONDecodeError:\n                log.error(\n                    f\"Unable to decode log line from stream ({self.stream_name}) for {self.action_run_id}: {line}\"\n                )\n                self.malformed_lines += 1\n                continue\n\n            if (\n                int(payload.get(\"tron_run_number\", -1)) == self.run_num\n                and payload.get(\"component\") == self.component\n                and payload.get(\"message\") is not None\n                and payload.get(\"timestamp\") is not None\n                and payload.get(\"cluster\") == self.paasta_cluster\n            ):\n                self.output.append((payload[\"timestamp\"], payload[\"message\"]))\n\n    def sorted_lines(self) -> list[str]:\n        self.output.sort(key=operator.itemgetter(0))\n        return [line for _, line in self.output]\n\n\ndef read_log_stream_for_action_run(\n    action_run_id: str,\n    component: str,\n    min_date: datetime.datetime | None,\n    max_date: datetime.datetime | None,\n    paasta_cluster: str | None,\n    max_lines: int | None = USE_SRV_CONFIGS,\n) -> list[str]:\n    if min_date is None:\n        return [f\"{action_run_id} has not started yet.\"]\n\n    if not s3reader_available:\n        return [\"logreader (internal Yelp package) is not available - unable to display logs.\"]\n\n    if max_lines == USE_SRV_CONFIGS:\n        config_watcher = get_config_watcher()\n        config_watcher.reload_if_changed()\n        max_lines = staticconf.read(\"logging.max_lines_to_display\", namespace=NAMESPACE)  # type: ignore[attr-defined]  # TODO: why can't mypy see that read() exists?\n\n    try:\n        superregion = get_superregion()\n    except OSError:\n        log.warning(\"Unable to read location mapping files from disk (/nail/etc/)\")\n        return [\n            \"Unable to determine where Tron is located. 
If you're seeing this inside Yelp, report this to #compute-infra\"\n        ]\n\n    if paasta_cluster is None:\n        paasta_cluster = superregion\n\n    paasta_logs = PaaSTALogs(component, paasta_cluster, action_run_id)\n    stream_name = paasta_logs.stream_name\n    end_date: datetime.date | None\n\n    # S3 reader accepts datetime objects and respects timezone information\n    # if min_date and max_date timezone is missing, astimezone() will assume local timezone and convert it to UTC\n    start_datetime = min_date.astimezone(datetime.timezone.utc)\n    end_datetime = (\n        max_date.astimezone(datetime.timezone.utc)\n        if max_date\n        else datetime.datetime.now().astimezone(datetime.timezone.utc)\n    )\n\n    log.debug(\"Using S3LogsReader to retrieve logs\")\n    s3_reader = S3LogsReader(superregion).get_log_reader(\n        log_name=stream_name, start_datetime=start_datetime, end_datetime=end_datetime\n    )\n    paasta_logs.fetch(s3_reader, max_lines)\n\n    # S3LogsReader does not guarantee order of logs in the output - so we'll sort based on log timestamp set by producer.\n    lines = paasta_logs.sorted_lines()\n    malformed = (\n        [f\"{paasta_logs.malformed_lines} encountered while retrieving logs\"] if paasta_logs.malformed_lines else []\n    )\n\n    truncation_message = (\n        [\n            f\"This output is truncated. Use this command to view all lines: logreader -s {superregion} {stream_name} --min-date {min_date.date()} --max-date {max_date.date()} | jq --raw-output 'select(.tron_run_number=={int(paasta_logs.run_num)} and .component == \\\"{component}\\\") | .message'\"\n        ]\n        if max_date\n        else [\n            f\"This output is truncated. Use this command to view all lines: logreader -s {superregion} {stream_name} --min-date {min_date.date()} | jq --raw-output 'select(.tron_run_number=={int(paasta_logs.run_num)} and .component == \\\"{component}\\\") | .message'\"\n        ]\n    )\n    truncated = truncation_message if paasta_logs.truncated_output else []\n\n    return lines + malformed + truncated\n"
  },
  {
    "path": "tron/utils/observer.py",
    "content": "\"\"\"Implements the Observer/Observable pattern,\"\"\"\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass Observable:\n    \"\"\"An Observable in the Observer/Observable pattern. It stores\n    specifications and Observers which can be notified of changes by calling\n    notify.\n    \"\"\"\n\n    def __init__(self):\n        self._observers = dict()\n\n    def attach(self, watch_spec, observer):\n        \"\"\"Attach another observer to the listen_spec.\n\n        Listener Spec matches on:\n            True                    Matches everything\n            <string>                Matches only that event\n            <sequence of strings>   Matches any of the events in the sequence\n        \"\"\"\n        if isinstance(watch_spec, (str, bool)):\n            self._observers.setdefault(watch_spec, []).append(observer)\n            return\n\n        for spec in watch_spec:\n            self._observers.setdefault(spec, []).append(observer)\n\n    def clear_observers(self, watch_spec=None):\n        \"\"\"Remove all observers for a given watch_spec. Removes all\n        observers if listen_spec is None\n        \"\"\"\n        if watch_spec is None or watch_spec is True:\n            self._observers.clear()\n            return\n\n        del self._observers[watch_spec]\n\n    def remove_observer(self, observer):\n        \"\"\"Remove an observer from all watch_specs.\"\"\"\n        for observers in self._observers.values():\n            if observer in observers:\n                observers.remove(observer)\n\n    def _get_handlers_for_event(self, event):\n        \"\"\"Returns the complete list of handlers for the event.\"\"\"\n        return self._observers.get(True, []) + self._observers.get(event, [])\n\n    def notify(self, event, event_data=None):\n        \"\"\"Notify all observers of the event.\"\"\"\n        handlers = self._get_handlers_for_event(event)\n        log.debug(\n            f\"Notifying {len(handlers)} listeners for new event {event!r}\",\n        )\n        for handler in handlers:\n            handler.handler(self, event, event_data)\n\n\nclass Observer:\n    \"\"\"An observer in the Observer/Observable pattern.  Given an observable\n    object will watch for notify calls.  Override handler to act on those\n    notifications.\n    \"\"\"\n\n    def watch(self, observable, event=True):\n        \"\"\"Adds this Observer as a watcher of the observable.\"\"\"\n        observable.attach(event, self)\n\n    def watch_all(self, observables, event=True):\n        for observable in observables:\n            self.watch(observable, event)\n\n    def handler(self, observable, event):\n        \"\"\"Override this method to call a method to handle events.\"\"\"\n        pass\n\n    def stop_watching(self, observable):\n        observable.remove_observer(self)\n"
  },
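  {
    "path": "examples/observer_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nwires an Observer up to an Observable. Note that Observable.notify() invokes\nhandler(observable, event, event_data), so overridden handlers should take all three.\nJobState and StateLogger are made-up names for this example.\"\"\"\nfrom tron.utils.observer import Observable\nfrom tron.utils.observer import Observer\n\n\nclass JobState(Observable):\n    pass\n\n\nclass StateLogger(Observer):\n    def handler(self, observable, event, event_data=None):\n        print(f\"saw {event!r} with data {event_data!r}\")\n\n\njob_state = JobState()\nstate_logger = StateLogger()\n\n# Watch a single event name; a list of names or True (match everything) also works.\nstate_logger.watch(job_state, \"succeeded\")\nstate_logger.watch(job_state, [\"failed\", \"cancelled\"])\n\njob_state.notify(\"succeeded\", {\"run\": 3})  # reaches StateLogger.handler\njob_state.notify(\"queued\")  # nothing is attached to this event, so it is dropped\njob_state.clear_observers()  # detach everything\n"
  },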
  {
    "path": "tron/utils/persistable.py",
    "content": "from abc import ABC\nfrom abc import abstractmethod\nfrom typing import Any\n\n\nclass Persistable(ABC):\n    @staticmethod\n    @abstractmethod\n    def to_json(state_data: dict[Any, Any]) -> str:\n        pass\n\n    @staticmethod\n    @abstractmethod\n    def from_json(state_data: str) -> dict[str, Any]:\n        # This method is called on because it is intended to handle the deserialization of JSON data into a\n        # dictionary representation of the state. This allows the method to be used in a more flexible and generic way,\n        # enabling different classes to implement their own specific logic for converting the dictionary into an instance of the\n        # class. By returning a dictionary, the method provides a common interface for deserialization, while allowing subclasses\n        # to define how the dictionary should be used to restore the state of the object.\n        pass\n"
  },
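  {
    "path": "examples/persistable_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\na minimal Persistable implementation backed by the stdlib json module. JobRunState\nis a made-up class used only for this example.\"\"\"\nimport json\nfrom typing import Any\n\nfrom tron.utils.persistable import Persistable\n\n\nclass JobRunState(Persistable):\n    @staticmethod\n    def to_json(state_data: dict[Any, Any]) -> str:\n        return json.dumps(state_data)\n\n    @staticmethod\n    def from_json(state_data: str) -> dict[str, Any]:\n        # The base class intentionally deserializes to a plain dict; callers decide\n        # how to rebuild real objects from it.\n        return json.loads(state_data)\n\n\nblob = JobRunState.to_json({\"run_num\": 7, \"state\": \"succeeded\"})\nprint(JobRunState.from_json(blob))  # {'run_num': 7, 'state': 'succeeded'}\n"
  },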
  {
    "path": "tron/utils/proxy.py",
    "content": "\"\"\"Utilities for creating classes that proxy function calls.\"\"\"\n\n\nclass CollectionProxy:\n    \"\"\"Proxy attribute lookups to a sequence of objects.\"\"\"\n\n    def __init__(self, obj_list_getter, definition_list=None):\n        \"\"\"See add() for a description of proxy definitions.\"\"\"\n        self.obj_list_getter = obj_list_getter\n        self._defs = {}\n        for definition in definition_list or []:\n            self.add(*definition)\n\n    def add(self, attribute_name, aggregate_func, is_callable):\n        \"\"\"Add attributes to proxy, the aggregate function to use on the\n        sequence of returned values, and a boolean identifying if this\n        attribute is a callable or not.\n\n            attribute_name - the name of the attribute to proxy\n            aggregate_func - a function that takes a sequence as its only argument\n            callable       - if this attribute is a callable on every object in\n                             the obj_list (boolean)\n        \"\"\"\n        self._defs[attribute_name] = (aggregate_func, is_callable)\n\n    def perform(self, name):\n        \"\"\"Attempt to perform the proxied lookup.  Raises AttributeError if\n        the name is not defined.\n        \"\"\"\n        if name not in self._defs:\n            raise AttributeError(name)\n\n        obj_list = self.obj_list_getter\n        aggregate_func, is_callable = self._defs[name]\n\n        if not is_callable:\n            return aggregate_func(getattr(i, name) for i in obj_list())\n\n        def func(*args, **kwargs):\n            return aggregate_func(getattr(item, name)(*args, **kwargs) for item in obj_list())\n\n        return func\n\n\ndef func_proxy(name, func):\n    return name, func, True\n\n\ndef attr_proxy(name, func):\n    return name, func, False\n\n\nclass AttributeProxy:\n    \"\"\"Proxy attribute lookups to another object.\"\"\"\n\n    def __init__(self, dest_obj, attribute_list=None):\n        self._attributes = set(attribute_list or [])\n        self.dest_obj = dest_obj\n\n    def add(self, attribute_name):\n        self._attributes.add(attribute_name)\n\n    def perform(self, attribute_name):\n        if attribute_name not in self._attributes:\n            raise AttributeError(attribute_name)\n\n        return getattr(self.dest_obj, attribute_name)\n"
  },
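  {
    "path": "examples/proxy_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\naggregates lookups over a list of objects with tron.utils.proxy.CollectionProxy.\nFakeNode is a made-up class for this example.\"\"\"\nfrom tron.utils.proxy import attr_proxy\nfrom tron.utils.proxy import CollectionProxy\nfrom tron.utils.proxy import func_proxy\n\n\nclass FakeNode:\n    def __init__(self, hostname, healthy):\n        self.hostname = hostname\n        self.healthy = healthy\n\n    def ping(self):\n        return self.healthy\n\n\nnodes = [FakeNode(\"node1\", True), FakeNode(\"node2\", False)]\n\nproxy = CollectionProxy(\n    lambda: nodes,\n    [\n        func_proxy(\"ping\", all),  # callable attribute: all(node.ping() for node in nodes)\n        attr_proxy(\"hostname\", list),  # plain attribute: list of every node.hostname\n    ],\n)\n\nprint(proxy.perform(\"hostname\"))  # ['node1', 'node2']\nprint(proxy.perform(\"ping\")())  # False; perform() returns a callable for func proxies\n"
  },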
  {
    "path": "tron/utils/queue.py",
    "content": "import queue\n\nfrom twisted.internet import defer\n\n\nclass PyDeferredQueue(defer.DeferredQueue):\n    \"\"\"\n    Implements the stdlib queue.Queue get/put interface with a DeferredQueue.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def put(self, item, block=None, timeout=None):\n        # Call from reactor thread so callbacks from get() will be executed\n        # on the reactor thread, even if this is called from another thread.\n        from twisted.internet import reactor\n\n        try:\n            reactor.callFromThread(super().put, item)\n        except defer.QueueOverflow:\n            raise queue.Full\n\n    def get(self, block=None, timeout=None):\n        try:\n            return super().get()\n        except defer.QueueUnderflow:\n            raise queue.Empty\n"
  },
  {
    "path": "tron/utils/state.py",
    "content": "import logging\nfrom collections import defaultdict\nfrom collections.abc import Mapping\n\nlog = logging.getLogger(__name__)\n\n\nclass Machine:\n    @staticmethod\n    def from_machine(machine, initial=None, state=None):\n        if initial is None:\n            initial = machine.initial\n        if state is None:\n            state = initial\n        new_machine = Machine(initial, **machine.transitions)\n        new_machine.state = state\n        assert machine.transitions == new_machine.transitions\n        assert machine.states == new_machine.states\n        return new_machine\n\n    def __init__(self, initial: str, **transitions: Mapping[str, str]) -> None:\n        super().__init__()\n        self.transitions = defaultdict(dict, transitions)\n        self.transition_names = {\n            transition_name\n            for (_, transitions) in self.transitions.items()\n            for (transition_name, _) in (transitions or {}).items()\n        }\n        self.states = set(transitions.keys()).union(\n            state for (_, dst) in transitions.items() for (_, state) in (dst or {}).items()\n        )\n        if initial not in self.states:\n            raise RuntimeError(\n                f\"invalid machine: {initial} not in {self.states}\",\n            )\n        self.state = initial\n        self.initial = initial\n\n    def set_state(self, state):\n        if state not in self.states:\n            raise RuntimeError(f\"invalid state: {state} not in {self.states}\")\n        self.state = state\n\n    def reset(self):\n        self.state = self.initial\n\n    def check(self, transition):\n        \"\"\"Check if the state can be transitioned via `transition`. Returns the\n        destination state.\n        \"\"\"\n        next_state = self.transitions[self.state].get(transition, None)\n        return next_state\n\n    def transition(self, transition):\n        \"\"\"Checks if machine can be transitioned from current state using\n        provided transition name. Returns True if transition has taken place.\n        Listeners for this change will also be notified before returning.\n        \"\"\"\n        next_state = self.check(transition)\n        if next_state is None:\n            return False\n\n        log.debug(f\"transitioning from {self.state} to {next_state}\")\n        self.state = next_state\n        return True\n\n    def __repr__(self):\n        return f\"<Machine S={self.state} T=({self.transitions!r})>\"\n"
  },
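  {
    "path": "examples/state_machine_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nbuilds a small tron.utils.state.Machine and walks it through a couple of\ntransitions. The state and transition names are made up for this example.\"\"\"\nfrom tron.utils.state import Machine\n\n# Each keyword maps a state to {transition_name: destination_state}.\nmachine = Machine(\n    \"scheduled\",\n    scheduled={\"start\": \"running\", \"cancel\": \"cancelled\"},\n    running={\"succeed\": \"succeeded\", \"fail\": \"failed\"},\n    cancelled={},\n    succeeded={},\n    failed={},\n)\n\nprint(machine.check(\"start\"))  # 'running': allowed from 'scheduled', state unchanged\nprint(machine.transition(\"start\"))  # True: now in 'running'\nprint(machine.transition(\"cancel\"))  # False: 'cancel' is not valid from 'running'\nprint(machine.state)  # 'running'\n\ncopy = Machine.from_machine(machine, state=\"failed\")\nprint(copy.state)  # 'failed'\nmachine.reset()  # back to the initial 'scheduled' state\n"
  },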
  {
    "path": "tron/utils/timeutils.py",
    "content": "\"\"\"Functions for working with dates and timestamps.\"\"\"\nimport calendar\nimport datetime\nimport re\n\n\ndef current_time(tz=None):\n    \"\"\"Return the current datetime.\"\"\"\n    return datetime.datetime.now(tz=tz)\n\n\ndef current_timestamp():\n    \"\"\"Return the current time as a timestamp.\"\"\"\n    return current_time().timestamp()\n\n\ndef delta_total_seconds(td):\n    \"\"\"Equivalent to timedelta.total_seconds() available in Python 2.7.\"\"\"\n    microseconds, seconds, days = td.microseconds, td.seconds, td.days\n    return (microseconds + (seconds + days * 24 * 3600) * 10**6) / 10**6\n\n\ndef macro_timedelta(start_date, years=0, months=0, days=0, hours=0, minutes=0):\n    \"\"\"Since datetime doesn't provide timedeltas at the year or month level,\n    this function generates timedeltas of the appropriate sizes.\n    \"\"\"\n    delta = datetime.timedelta(days=days, hours=hours, minutes=minutes)\n\n    new_month = start_date.month + months\n    while new_month > 12:\n        new_month -= 12\n        years += 1\n    while new_month < 1:\n        new_month += 12\n        years -= 1\n    new_year = start_date.year + years\n\n    # TRON-1045: round day to the max days in a given month if the day doesn't\n    # exist for that month. (e.g. Feb 30 rounds to Feb 28 in a non-leap year)\n    _, days_in_month = calendar.monthrange(new_year, new_month)\n    new_day = min(start_date.day, days_in_month)\n\n    end_date = datetime.datetime(\n        new_year,\n        new_month,\n        new_day,\n        start_date.hour,\n        start_date.minute,\n    )\n    month_and_year_delta = end_date - start_date.replace(tzinfo=None)\n    delta += month_and_year_delta\n\n    return delta\n\n\ndef duration(start_time, end_time=None):\n    \"\"\"Get a timedelta between end_time and start_time, where end_time defaults\n    to now().\n\n    WARNING: mixing tz-aware and naive datetimes in start_time and end_time\n    will cause an error.\n    \"\"\"\n    if not start_time:\n        return None\n    last_time = end_time if end_time else current_time()\n    return last_time - start_time\n\n\nclass DateArithmetic:\n    \"\"\"Parses a string which contains a date arithmetic pattern and returns\n    a date with the delta added or subtracted.\n    \"\"\"\n\n    DATE_TYPE_PATTERN = re.compile(r\"(\\w+)([+-]\\d+)?\")\n\n    DATE_FORMATS = {\n        \"year\": \"%Y\",\n        \"month\": \"%m\",\n        \"day\": \"%d\",\n        \"hour\": \"%H\",\n        \"shortdate\": \"%Y-%m-%d\",\n        \"ym\": \"%Y-%m\",\n        \"ymd\": \"%Y-%m-%d\",\n        \"ymdh\": \"%Y-%m-%dT%H\",\n        \"ymdhm\": \"%Y-%m-%dT%H:%M\",\n    }\n\n    @classmethod\n    def parse(cls, date_str, dt=None):\n        \"\"\"Parse a date arithmetic pattern (Ex: 'shortdate-1'). Supports\n        date strings: shortdate, year, month, day, unixtime, daynumber.\n        Supports subtraction and addition operations of integers. 
Time unit is\n        based on date format (Ex: seconds for unixtime, days for day).\n        \"\"\"\n        dt = dt or current_time()\n        date_str = date_str.replace(\" \", \"\")\n        match = cls.DATE_TYPE_PATTERN.match(date_str)\n        if not match:\n            return\n        attr, value = match.groups()\n        delta = int(value) if value else 0\n\n        if attr in (\"shortdate\", \"year\", \"month\", \"day\", \"hour\"):\n            if delta:\n                kwargs = {\"days\" if attr == \"shortdate\" else attr + \"s\": delta}\n                dt += macro_timedelta(dt, **kwargs)\n            return dt.strftime(cls.DATE_FORMATS[attr])\n\n        if attr in (\"ym\", \"ymd\", \"ymdh\", \"ymdhm\"):\n            args = [0] * len(attr)\n            args[-1] = delta\n            dt += macro_timedelta(dt, *args)\n            return dt.strftime(cls.DATE_FORMATS[attr])\n\n        if attr == \"unixtime\":\n            return int(dt.timestamp()) + delta\n\n        if attr == \"daynumber\":\n            return dt.toordinal() + delta\n"
  },
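  {
    "path": "examples/date_arithmetic_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nevaluates a few date-arithmetic patterns with tron.utils.timeutils.DateArithmetic.parse,\npinning dt so the string results are deterministic.\"\"\"\nimport datetime\n\nfrom tron.utils.timeutils import DateArithmetic\n\ndt = datetime.datetime(2024, 3, 1, 12, 30)\n\nprint(DateArithmetic.parse(\"shortdate\", dt))  # '2024-03-01'\nprint(DateArithmetic.parse(\"shortdate-1\", dt))  # '2024-02-29'; the delta is in days\nprint(DateArithmetic.parse(\"month+1\", dt))  # '04'\nprint(DateArithmetic.parse(\"ymdh\", dt))  # '2024-03-01T12'\nprint(DateArithmetic.parse(\"daynumber\", dt))  # the proleptic-Gregorian ordinal of the date, as an int\nprint(DateArithmetic.parse(\"unixtime+60\", dt))  # epoch seconds (naive dt read as local time) plus 60\n"
  },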
  {
    "path": "tron/utils/trontimespec.py",
    "content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A complete time specification based on the Google App Engine GROC spec.\"\"\"\nimport calendar\nimport datetime\n\nimport pytz\n\n\ndef get_timezone(timezone_string):\n    \"\"\"Converts a timezone string to a pytz timezone object.\n\n    Arguments:\n      timezone_string: a string representing a timezone, or None\n\n    Returns:\n      a pytz timezone object, or None if the input timezone_string is None\n\n    Raises:\n      ValueError: if timezone_string is not None and the pytz module could not be\n          loaded\n    \"\"\"\n    if timezone_string:\n        return pytz.timezone(timezone_string)\n    else:\n        return None\n\n\ndef to_timezone(t, tzinfo):\n    \"\"\"Converts 't' to the time zone 'tzinfo'.\n\n    Arguments:\n      t: a datetime object.  It may be in any pytz time zone, or it may be\n          timezone-naive (interpreted as UTC).\n      tzinfo: a pytz timezone object, or None (interpreted as UTC).\n\n    Returns:\n      a datetime object in the time zone 'tzinfo'\n    \"\"\"\n    if tzinfo:\n        if not t.tzinfo:\n            # Ensure we have a default timezone set (UTC) if no tzinfo was given\n            t = pytz.utc.localize(t)\n        # if tzinfo is provided, then the datetime object is converted to the given timezone\n        # and normalized to adjust for discrepancies that might arise from daylight savings\n        # time or other irregularities in the timezone.\n        return tzinfo.normalize(t.astimezone(tzinfo))\n    elif t.tzinfo:\n        # handles the case where tzinfo is not provided but t is timezone-aware\n        # then it is converted to UTC then normalized to adjust for discrepancies\n        # then lastly removing timezone info making t a timezone-naive datetime object\n        return pytz.utc.normalize(t.astimezone(pytz.utc)).replace(tzinfo=None)\n    else:\n        # handles the case where tzinfo is not provided and t is timezone-naive\n        return t\n\n\ndef naive_as_timezone(t, tzinfo):\n    \"\"\"Interprets the naive datetime with the given time zone.\"\"\"\n    try:\n        result = tzinfo.localize(t, is_dst=None)\n    except pytz.AmbiguousTimeError:\n        # We are in the infamous 1 AM block which happens twice on\n        # fall-back. Pretend like it's the first time, every time.\n        result = tzinfo.localize(t, is_dst=True)\n    except pytz.NonExistentTimeError:\n        # We are in the infamous 2:xx AM block which does not\n        # exist. 
Pretend like it's the later time, every time.\n        result = tzinfo.localize(t, is_dst=False)\n    return result\n\n\ndef get_time(time_string):\n    \"\"\"Converts a string to a datetime.time object.\n\n    Arguments:\n      time_string: a string representing a time ('hours:minutes')\n\n    Returns:\n      a datetime.time object\n    \"\"\"\n    try:\n        return datetime.datetime.strptime(time_string, \"%H:%M\").time()\n    except ValueError:\n        return None\n\n\nTOKEN_LAST = \"LAST\"\n\nordinal_range = range(1, 6)\nweekday_range = range(0, 7)\nmonth_range = range(1, 13)\nmonthday_range = range(1, 32)\nhour_range = range(0, 24)\nminute_range = second_range = range(0, 60)\n\n\ndef validate_spec(source, value_range, type, default=None, allow_last=False):\n    default = default if default is not None else value_range\n    if not source:\n        return default\n\n    has_last = False\n    source_wo_last = []\n    for item in source:\n        if allow_last and item == TOKEN_LAST:\n            has_last = True\n            continue\n        if item not in value_range:\n            raise ValueError(f\"{type} not in range {value_range}\")\n        source_wo_last.append(item)\n\n    sorted_source = sorted(source_wo_last)\n    if has_last:\n        sorted_source.append(TOKEN_LAST)\n\n    return sorted_source\n\n\nclass TimeSpecification:\n    \"\"\"TimeSpecification determines the next time which matches the\n    configured pattern.\n    \"\"\"\n\n    def __init__(\n        self,\n        ordinals=None,\n        weekdays=None,\n        months=None,\n        monthdays=None,\n        timestr=None,\n        timezone=None,\n        minutes=None,\n        hours=None,\n        seconds=None,\n    ):\n\n        if weekdays and monthdays:\n            raise ValueError(\"cannot supply both monthdays and weekdays\")\n\n        if timestr and (minutes or hours or seconds):\n            raise ValueError(\"cannot supply both timestr and h/m/s\")\n\n        if not any((timestr, minutes, hours, seconds)):\n            timestr = \"00:00\"\n\n        if timestr:\n            time = get_time(timestr)\n            hours = [time.hour]\n            minutes = [time.minute]\n            seconds = [0]\n\n        self.hours = validate_spec(hours, hour_range, \"hour\")\n        self.minutes = validate_spec(minutes, minute_range, \"minute\")\n        self.seconds = validate_spec(seconds, second_range, \"second\")\n        self.ordinals = validate_spec(ordinals, ordinal_range, \"ordinal\")\n        self.weekdays = validate_spec(\n            weekdays,\n            weekday_range,\n            \"weekdays\",\n            allow_last=True,\n        )\n        self.months = validate_spec(months, month_range, \"month\")\n        self.monthdays = validate_spec(\n            monthdays,\n            monthday_range,\n            \"monthdays\",\n            [],\n            True,\n        )\n        self.timezone = get_timezone(timezone)\n\n    def next_day(self, first_day, year, month):\n        \"\"\"Returns matching days for the given year and month.\"\"\"\n        first_day_of_month, last_day_of_month = calendar.monthrange(\n            year,\n            month,\n        )\n\n        def map_last(day):\n            return last_day_of_month if day == TOKEN_LAST else day\n\n        def day_filter(day):\n            return first_day <= day <= last_day_of_month\n\n        def sort_days(days):\n            return sorted(filter(day_filter, days))\n\n        if self.monthdays:\n            return sort_days(map_last(day) for day 
in self.monthdays)\n\n        start_day = (first_day_of_month + 1) % 7\n\n        def days_from_weekdays():\n            for ordinal in self.ordinals:\n                week = (ordinal - 1) * 7\n                for weekday in self.weekdays:\n                    yield ((weekday - start_day) % 7) + week + 1\n\n        return sort_days(days_from_weekdays())\n\n    def next_month(self, start_date):\n        \"\"\"Create a generator which yields valid months after the start month.\"\"\"\n        current = start_date.month\n        potential = [m for m in self.months if m >= current]\n        year_wraps = 0\n\n        while True:\n            if not potential:\n                year_wraps += 1\n                potential = list(self.months)\n\n            yield potential.pop(0), start_date.year + year_wraps\n\n    def next_time(self, start_date, is_start_day):\n        \"\"\"Return the next valid time.\"\"\"\n        start_hour = start_date.time().hour\n\n        def hour_filter(hour):\n            return not is_start_day or hour >= start_hour\n\n        for hour in filter(hour_filter, self.hours):\n            for minute in self.minutes:\n                for second in self.seconds:\n                    candidate = datetime.time(hour, minute, second)\n\n                    if is_start_day and start_date.time() >= candidate:\n                        continue\n\n                    return candidate\n\n    def get_match(self, start):\n        \"\"\"Returns the next datetime match after start.\"\"\"\n        start_date = to_timezone(start, self.timezone).replace(tzinfo=None)\n\n        def get_first_day(month, year):\n            if (month, year) != (start_date.month, start_date.year):\n                return 1\n            return start_date.day\n\n        for month, year in self.next_month(start_date):\n            first_day = get_first_day(month, year)\n\n            for day in self.next_day(first_day, year, month):\n                is_start_day = start_date.timetuple()[:3] == (year, month, day)\n\n                time = self.next_time(start_date, is_start_day)\n                if time is None:\n                    continue\n\n                candidate = start_date.replace(\n                    year,\n                    month,\n                    day,\n                    time.hour,\n                    time.minute,\n                    second=time.second,\n                    microsecond=0,\n                )\n                candidate = self.handle_timezone(candidate, start.tzinfo)\n                if not candidate:\n                    continue\n                return candidate\n\n    # TODO: test\n    def handle_timezone(self, out, tzinfo):\n        if self.timezone:\n            out = naive_as_timezone(out, self.timezone)\n        return to_timezone(out, tzinfo)\n\n    def __eq__(self, other):\n        attrs = [\n            \"hours\",\n            \"minutes\",\n            \"seconds\",\n            \"ordinals\",\n            \"weekdays\",\n            \"months\",\n            \"monthdays\",\n            \"timezone\",\n        ]\n        return all(getattr(other, attr, None) == getattr(self, attr, None) for attr in attrs)\n\n    def __ne__(self, other):\n        return not self == other\n"
  },
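  {
    "path": "examples/timespec_example.py",
    "content": "\"\"\"Illustrative usage sketch (hypothetical example file, not part of tron itself):\nasks a tron.utils.trontimespec.TimeSpecification for the next matching datetime. The\nvalues mirror what the crontab parser would hand over (Sunday is weekday 0 here).\"\"\"\nimport datetime\n\nimport pytz\n\nfrom tron.utils.trontimespec import TimeSpecification\n\nspec = TimeSpecification(\n    hours=[4],\n    minutes=[30],\n    weekdays=[1, 2, 3, 4, 5],  # Monday through Friday\n    timezone=\"US/Pacific\",\n)\n\n# get_match() returns the first matching time strictly after the given start.\n# Naive datetimes are treated as UTC, so an aware pytz datetime is used here.\nstart = datetime.datetime(2024, 3, 8, 12, 0, tzinfo=pytz.utc)\nprint(spec.get_match(start))  # 2024-03-08 12:30:00+00:00, i.e. 04:30 Pacific that Friday\n"
  },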
  {
    "path": "tron/utils/twistedutils.py",
    "content": "from twisted.internet import defer\nfrom twisted.internet import reactor\nfrom twisted.python import failure\n\n\nclass Error(Exception):\n    pass\n\n\ndef _cancel(deferred):\n    \"\"\"Re-implementing what's available in newer twisted in a crappy, but\n    workable way.\"\"\"\n\n    if not deferred.called:\n        deferred.errback(failure.Failure(Error()))\n    elif isinstance(deferred.result, defer.Deferred):\n        _cancel(deferred.result)\n\n\ndef defer_timeout(deferred, timeout):\n    try:\n        reactor.callLater(timeout, deferred.cancel)\n    except AttributeError:\n        reactor.callLater(timeout, lambda: _cancel(deferred))\n"
  },
  {
    "path": "tron/yaml.py",
    "content": "import yaml\n\n\ndef dump(*args, **kwargs):\n    kwargs[\"Dumper\"] = yaml.CSafeDumper\n    return yaml.dump(*args, **kwargs)\n\n\ndef load(*args, **kwargs):\n    kwargs[\"Loader\"] = yaml.CSafeLoader\n    return yaml.load(*args, **kwargs)\n\n\ndef load_all(*args, **kwargs):\n    kwargs[\"Loader\"] = yaml.CSafeLoader\n    return yaml.load_all(*args, **kwargs)\n\n\nsafe_dump = dump\nsafe_load = load\nsafe_load_all = load_all\n"
  },
  {
    "path": "tronweb/coffee/actionrun.coffee",
    "content": "window.modules = window.modules || {}\nwindow.modules.actionrun = module = {}\n\n\nclass module.ActionRun extends Backbone.Model\n    initialize: (options) =>\n        super options\n        options = options || {}\n        @refreshModel = options.refreshModel\n\n    idAttribute: \"action_name\"\n\n    urlRoot: ->\n        \"/jobs/#{ @get('job_name') }/#{ @get('run_num') }/\"\n\n    urlArgs: \"?include_stdout=1&include_stderr=1&include_meta=1&num_lines=0\"\n\n    url: =>\n        super() + @urlArgs\n\n    parse: (resp, options) =>\n        resp['job_url'] = \"#job/#{ @get('job_name') }\"\n        resp['job_run_url'] = \"#{ resp['job_url'] }/#{ @get('run_num') }\"\n        resp['url'] = \"#{ resp['job_run_url'] }/#{ @get('action_name') }\"\n        resp\n\n\nclass module.ActionRunHistoryEntry extends module.ActionRun\n    idAttribute: \"id\"\n\n    parse: (resp, options) =>\n        resp\n\n\nclass module.ActionRunHistory extends Backbone.Collection\n    initialize: (models, options) =>\n        options = options || {}\n        @job_name = options.job_name\n        @action_name = options.action_name\n\n    model: module.ActionRunHistoryEntry\n\n    url: =>\n        \"/jobs/#{ @job_name }/#{ @action_name }/\"\n\n    parse: (resp, options) =>\n        resp\n\n    reset: (models, options) =>\n        super models, options\n\n    add: (models, options) =>\n        super models, options\n\n\nclass module.ActionRunHistoryListEntryView extends ClickableListEntry\n    tagName: \"tr\"\n\n    template: _.template \"\"\"\n        <td>\n            <a href=\"#job/<%= job_name %>/<%= run_num %>/<%= action_name %>\">\n            <%= run_num %></a></td>\n        <td><%= formatState(state) %></td>\n        <td><%= displayNode(node) %></td>\n        <td><%= modules.actionrun.formatExit(exit_status) %></td>\n        <td><%= dateFromNow(start_time, \"None\") %></td>\n        <td><%= dateFromNow(end_time, \"\") %></td>\n        <td><%= duration %></td>\n    \"\"\"\n\n    render: ->\n        @$el.html @template(@model.attributes)\n        makeTooltips(@$el)\n        @\n\n\nclass module.ActionRunTimelineEntry\n    constructor: (@actionRun, @maxDate) ->\n\n    toString: =>\n        @actionRun.action_name\n\n    getYAxisLink: =>\n        \"#job/#{@actionRun.job_name}/#{@actionRun.run_num}/#{@actionRun.action_name}\"\n\n    getYAxisText: =>\n        @actionRun.action_name\n\n    getBarClass: =>\n        @actionRun.state\n\n    getStart: =>\n        @getDate(@actionRun.start_time)\n\n    getEnd: =>\n        @getDate(@actionRun.end_time)\n\n    getDate: (date) ->\n        if date then new Date(date) else @maxDate\n\n\nclass module.ActionRunListEntryView extends ClickableListEntry\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n\n    tagName: \"tr\"\n\n    template: _.template \"\"\"\n        <td>\n            <a href=\"#job/<%= job_name %>/<%= run_num %>/<%= action_name %>\">\n            <%= formatName(action_name) %></a></td>\n        <td><%= formatState(state) %></td>\n        <td><code class=\"command\"><%= command || raw_command %></code></td>\n        <td><%= displayNode(node) %></td>\n        <td><%= dateFromNow(start_time, \"None\") %></td>\n        <td><%= dateFromNow(end_time, \"\") %></td>\n        <td><%= duration %></td>\n        \"\"\"\n\n    render: ->\n        @$el.html @template(@model.attributes)\n        makeTooltips(@$el)\n        @\n\n\nmodule.formatExit = (exit) ->\n    return '' if not exit? 
or exit == ''\n    template = _.template \"\"\"\n        <span class=\"badge badge-<%= type %>\"><%= exit %></span>\n    \"\"\"\n    template(exit: exit, type: if not exit then \"success\" else \"important\")\n\n\nclass module.ActionRunView extends Backbone.View\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n        @refreshView = new RefreshToggleView(model: @model.refreshModel)\n        historyCollection = options.history\n        @historyView = new module.ActionRunHistoryView(model: historyCollection)\n        @listenTo(@refreshView, 'refreshView', => @model.fetch())\n        @listenTo(@refreshView, 'refreshView', => historyCollection.fetch())\n\n    tagName: \"div\"\n\n    template: _.template \"\"\"\n            <div class=\"span12\">\n                <h1>\n                    <small>Action Run</small>\n                    <a href=\"<%= job_url %>\"><%= formatName(job_name) %></a>.<a href=\"<%= job_run_url %>\"><%= run_num %></a>.<%= formatName(action_name) %>\n                    <span id=\"refresh\"></span>\n                </h1>\n            </div>\n            <div class=\"span12 outline-block\">\n                <h2>Details</h2>\n                <div>\n                <table class=\"table details\">\n                    <tbody>\n                    <tr><td class=\"span2\">State</td>\n                        <td><%= formatState(state) %><%= formatDelay(in_delay) %></td></tr>\n                    <tr><td>Node</td>\n                        <td><%= displayNode(node) %></td></tr>\n                    <tr><td>Raw original command</td>\n                        <td><code class=\"command\"><%- original_command %></code></td></tr>\n                    <tr><td>Config command</td>\n                        <td><code class=\"command\"><%- raw_command %></code></td></tr>\n                    <% if (command) { %>\n                    <tr><td>Last run command</td>\n                        <td><code class=\"command\"><%- command %></code></td></tr>\n                    <% } %>\n                    <tr><td>Exit codes</td>\n                        <td>\n                            <%= modules.actionrun.formatExit(exit_status) %>\n                            <% if (exit_statuses) { %>\n                                <small>\n                                    (exits of all attempts:\n                                    <%= _.map(\n                                            _.sortBy(\n                                                exit_statuses,\n                                                function(val, key) {\n                                                    return -key;\n                                                }\n                                            ),\n                                            modules.actionrun.formatExit\n                                        ).join(\", \") %>)\n                                </small>\n                            <% } %>\n                        </td>\n                    </tr>\n                    <tr><td>Start time</td>\n                        <td><% print(dateFromNow(start_time, ''))  %></td></tr>\n                    <tr><td>End time</td>\n                        <td><%= dateFromNow(end_time, 'Unknown') %></td></tr>\n                    <tr><td>Duration</td>\n                        <td><%= duration %></td></tr>\n                    <tr><td>Waits for triggers</td>\n                        <td><%= triggered_by %></td></tr>\n                    <tr><td>Publishes triggers</td>\n                       
 <td><%= trigger_downstreams %></td></tr>\n                    </tbody>\n                </table>\n                </div>\n            </div>\n            <div class=\"span12 outline-block\">\n                <h2>meta</h2>\n                <pre class=\"meta\" style=\"display: none;\"><%- meta.join('\\\\n') %></pre>\n            </div>\n            <div class=\"span12 outline-block\">\n                <h2>stdout</h2>\n                <pre class=\"stdout\"><%- stdout.join('\\\\n') %></pre>\n            </div>\n            <div class=\"span12 outline-block\">\n                <h2>stderr</h2>\n                <pre class=\"stderr\"><%- stderr.join('\\\\n') %></pre>\n            </div>\n\n            <div id=\"action-run-history\">\n            </div>\n        \"\"\"\n\n    render: ->\n        @$el.html @template(@model.attributes)\n        @$('#refresh').html(@refreshView.render().el)\n        @$('#action-run-history').html(@historyView.render().el)\n        makeTooltips(@$el)\n        modules.views.makeHeaderToggle(@$el)\n        @\n\n\nclass ActionRunHistorySliderModel\n    constructor: (@model) ->\n\n    length: =>\n        @model.models.length\n\n\nclass module.ActionRunHistoryView extends Backbone.View\n    initialize: (options) =>\n        @listenTo(@model, \"sync\", @render)\n        sliderModel = new ActionRunHistorySliderModel(@model)\n        @sliderView = new modules.views.SliderView(model: sliderModel)\n        @listenTo(@sliderView, \"slider:change\", @renderList)\n\n    tagName: \"div\"\n\n    className: \"span12 outline-block\"\n\n    template: _.template \"\"\"\n          <h2>History</h2>\n          <div>\n          <div id=\"slider\"></div>\n          <table class=\"table table-hover table-outline table-striped\">\n            <thead class=\"sub-header\">\n              <tr>\n                <th class=\"span1\">Run</th>\n                <th>State</th>\n                <th>Node</th>\n                <th>Exit</th>\n                <th>Start</th>\n                <th>End</th>\n                <th>Duration</th>\n              </tr>\n            </thead>\n            <tbody>\n            </tbody>\n          </table>\n          </div>\n       \"\"\"\n\n    renderList: =>\n        view = (model) ->\n            new module.ActionRunHistoryListEntryView(model: model).render().el\n        models = @model.models[...@sliderView.displayCount]\n        @$('tbody').html(view(model) for model in models)\n\n    render: =>\n        @$el.html @template()\n        @renderList()\n        @$('#slider').html @sliderView.render().el if @model.models.length\n        modules.views.makeHeaderToggle(@$el.parent())\n        @\n"
  },
  {
    "path": "tronweb/coffee/config.coffee",
    "content": "class window.NamespaceList extends Backbone.Model\n    url: \"/\"\n\n\nclass window.Config extends Backbone.Model\n    url: =>\n        \"/config?name=\" + @get('name')\n\n\nclass NamespaceListEntryView extends ClickableListEntry\n    tagName: \"tr\"\n\n    template: _.template \"\"\"\n        <td>\n            <a href=\"#config/<%= name %>\">\n                <span class=\"label label-inverse\"><%= name %></span>\n            </a>\n        </td>\n        \"\"\"\n\n    render: ->\n        @$el.html @template\n            name: @model\n        @\n\n\nclass window.NamespaceListView extends Backbone.View\n    initialize: (options) =>\n        @listenTo(@model, \"sync\", @render)\n\n    tagName: \"div\"\n\n    className: \"span12\"\n\n    template: _.template \"\"\"\n        <h1>\n            <i class=\"icon-wrench icon-white\"></i>\n            Configuration Namespaces\n        </h1>\n        <div class=\"outline-block\">\n        <table class=\"table table-hover table-outline\">\n          <thead class=\"header\">\n            <tr>\n              <th>Name</th>\n            </tr>\n          </thead>\n          <tbody>\n          </tbody>\n        </table>\n        </div>\n        \"\"\"\n\n\n    render: =>\n        @$el.html @template()\n        entry = (name) -> new NamespaceListEntryView(model: name).render().el\n        @$('tbody').append(entry(name) for name in @model.get('namespaces'))\n        @\n\n\nclass window.ConfigView extends Backbone.View\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n\n    tagName: \"div\"\n\n    className: \"span12\"\n\n    template: _.template \"\"\"\n        <h1><small>Config</small> <%= name %></h1>\n        <div class=\"outline-block\"><div class=\"border-top\">\n            <textarea class=\"config-block\"><%= config %></textarea>\n        </div></div>\n        \"\"\"\n\n    render: =>\n        @$el.html @template(@model.attributes)\n        CodeMirror.fromTextArea(@$('textarea').get(0), readOnly: true)\n        @\n"
  },
  {
    "path": "tronweb/coffee/dashboard.coffee",
    "content": "window.modules = window.modules || {}\nwindow.modules.dashboard = module = {}\n\n\nclass window.Dashboard extends Backbone.Model\n    initialize: (options)->\n        options = options || {}\n        @refreshModel = new RefreshModel(interval: 30)\n        @filterModel = options.filterModel\n        @jobList = new JobCollection()\n        @listenTo(@jobList, \"sync\", @change)\n\n    fetch: =>\n        @jobList.fetch()\n\n    change: (args) ->\n        @trigger(\"change\", args)\n\n    models: =>\n        @jobList.models\n\n    sorted: =>\n        _.sortBy(@models(), (item) -> item.get('name'))\n\n    filter: (filter) =>\n        _.filter(@sorted(), filter)\n\n\nmatchType = (item, query) ->\n    switch query\n        when 'job' then true if item instanceof Job\n\n\nclass window.DashboardFilterModel extends FilterModel\n    filterTypes:\n        name:       buildMatcher(fieldGetter('name'), matchAny)\n        type:       buildMatcher(_.identity, matchType)\n\n\nclass window.DashboardFilterView extends FilterView\n    createtype: _.template \"\"\"\n        <div class=\"input-prepend\">\n           <i class=\"icon-markerright icon-grey\"></i>\n           <div class=\"filter-select\">\n             <select id=\"filter-<%= filterName %>\"\n                  class=\"span3\"\n                  data-filter-name=\"<%= filterName %>Filter\">\n              <option value=\"\">All</option>\n              <option <%= isSelected(defaultValue, 'job') %>\n                  value=\"job\">Scheduled Jobs</option>\n            </select>\n          </div>\n        </div>\n    \"\"\"\n\nclass window.DashboardView extends Backbone.View\n    initialize: (options) =>\n        @refreshView = new RefreshToggleView(model: @model.refreshModel)\n        @filterView = new DashboardFilterView(model: @model.filterModel)\n        @listenTo(@model, \"change\", @render)\n        @listenTo(@refreshView, 'refreshView', => @model.fetch())\n        @listenTo(@filterView, \"filter:change\", @renderBoxes)\n\n    tagName: \"div\"\n\n    className: \"span12 dashboard-view\"\n\n    template: _.template \"\"\"\n        <h1>\n            <i class=\"icon-th icon-white\"></i>\n            <small>Tron</small>\n            <a href=\"#dashboard\">Dashboard</a>\n            <span id=\"refresh\"></span>\n        </h1>\n        <div id=\"filter-bar\"></div>\n        <div id=\"status-boxes\">\n        </div>\n        \"\"\"\n\n    makeView: (model) =>\n        switch model.constructor.name\n            when Job.name then new module.JobStatusBoxView(model: model)\n\n    renderRefresh: ->\n        @$('#refresh').html(@refreshView.render().el)\n\n    renderBoxes: =>\n        models = @model.filter(@model.filterModel.createFilter())\n        views = (@makeView(model) for model in models)\n        @$('#status-boxes').html(item.render().el for item in views)\n\n    render: ->\n        @$el.html @template()\n        @$('#filter-bar').html(@filterView.render().el)\n        @renderBoxes()\n        @renderRefresh()\n        @\n\n\nclass window.StatusBoxView extends ClickableListEntry\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n\n    tagName: \"div\"\n\n    # TODO: TRON-2386 - We don't really use these status=-box classes outside of overriding the yellow on disabled\n    # jobs for...whatever reason. 
We should just remove them.\n    className: =>\n        \"span2 clickable status-box #{@getState()}\"\n\n    template: _.template \"\"\"\n        <div class=\"status-header\">\n            <a href=\"<%= url %>\">\n            <%= name %></a>\n        </div>\n        <span class=\"count\">\n          <i class=\"<%= icon %> icon-white\"></i><%= count %>\n        </span>\n        \"\"\"\n\n    render: =>\n        context = _.extend {},\n            url: @buildUrl()\n            icon: @icon\n            count: @count()\n            name: formatName(@model.attributes.name)\n        @$el.html @template(context)\n        @\n\nclass module.JobStatusBoxView extends StatusBoxView\n    buildUrl: =>\n        \"#job/#{@model.get('name')}\"\n\n    icon: \"icon-time\"\n\n    # TODO: get state of last run if enabled\n    getState: =>\n        @model.get('status')\n\n    count: =>\n        if _.isEmpty(@model.get('runs')) then 0 else _.first(@model.get('runs')).run_num\n"
  },
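A short, illustrative sketch of how the dashboard filter pieces above fit together (the filter values "mail" and "job" are invented): route params such as #dashboard;nameFilter=mail;typeFilter=job become attributes on DashboardFilterModel, and Dashboard.filter() applies the combined predicate built by FilterModel.createFilter() (defined in models.coffee) to the job list.

# Hypothetical values, for illustration only.
filterModel = new window.DashboardFilterModel(nameFilter: "mail", typeFilter: "job")
dashboard = new window.Dashboard(filterModel: filterModel)
isShown = filterModel.createFilter()
visible = dashboard.filter(isShown)   # sorted Jobs whose name contains "mail"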
  {
    "path": "tronweb/coffee/graph.coffee",
    "content": "# Action Graph\n# This file implements the visualization of Job action_graphs as node-edge diagrams.\n#\n# Flow:\n# 1. Views in job.coffee create GraphView instances with action_graph data\n# 2. GraphView renders the directed graph showing actions and their dependencies\n# 3. GraphModalView adds full screen functionality\nwindow.modules = window.modules || {}\nwindow.modules.graph = module = {}\n\nmodule.GraphUtils = {\n    getDefaultLayout: ->\n        {\n            # Dagre is a layout algorithm that positions nodes in a directed graph.\n            name: 'dagre'\n            # LR (left-to-right) is our default direction, but we can change this to TB (top-to-bottom). I found that LR\n            # works a bit better since modern screens are wider than they are tall. That said, some graphs may look better\n            # in TB. I think it'd be cool to add a toggle that lets users switch between these two options.\n            rankDir: 'LR'\n            padding: 30\n            fit: true\n            nodeSep: 50\n            rankSep: 80\n        }\n\n    getNodeWidth: (node) ->\n        # Use canvas to measure text width accurately. We need to do all this because Cytoscape no longer\n        # supports setting the node width as 'label'. See https://github.com/cytoscape/cytoscape.js/issues/2713\n        ctx = document.createElement('canvas').getContext(\"2d\")\n        fStyle = node.pstyle('font-style').strValue\n        size = node.pstyle('font-size').pfValue + 'px'\n        family = node.pstyle('font-family').strValue\n        weight = node.pstyle('font-weight').strValue\n        ctx.font = \"#{fStyle} #{weight} #{size} #{family}\"\n\n        return ctx.measureText(node.data('name')).width + 20\n\n    # Search based on node name. Highlights matching nodes and their connected edges, dims the rest.\n    applySearch: (cy, searchText) ->\n        if searchText\n            cy.nodes().forEach (node) ->\n                nodeName = node.data('name').toLowerCase()\n                if nodeName.includes(searchText)\n                    node.style('opacity', 1)\n                else\n                    node.style('opacity', 0.2)\n\n            cy.edges().style('opacity', 0.1)\n\n            matchingNodes = cy.nodes().filter (node) ->\n                node.data('name').toLowerCase().includes(searchText)\n\n            matchingNodes.connectedEdges().style('opacity', 0.8)\n        else\n            # Reset all styles when search is cleared\n            cy.nodes().style('opacity', 1)\n            cy.edges().style('opacity', 1)\n\n    # Reset the graph to its initial state, including layout and node positions\n    resetGraph: (cy) ->\n        cy.stop()\n        cy.nodes().style('opacity', 1)\n        cy.edges().style('opacity', 1)\n\n        # Reset node positions before re-layout\n        cy.nodes().positions (node) ->\n            return { x: 0, y: 0 }\n\n        layout = cy.layout(module.GraphUtils.getDefaultLayout())\n        layout.run()\n\n        # Fit all elements (if no elements are specified it fits all, so we pass undefined) with 25px padding\n        cy.fit(undefined, 25)\n        cy.center()\n\n        cy.nodes().forEach (node) ->\n            node.data 'manuallyPositioned', false\n\n    defaultStylesheet: [\n        {\n            selector: 'node',\n            style: {\n                'label': 'data(name)'\n                'text-valign': 'center'\n                'text-halign': 'center'\n                'background-color': '#F8F8F8'\n                'color': '#000'\n       
         'font-size': '16px'\n                'font-weight': 'bold'\n                'shape': 'roundrectangle'\n                'width': (node) -> module.GraphUtils.getNodeWidth(node)\n                'height': 25\n                'padding-top': '5px'\n                'padding-bottom': '5px'\n                'padding-left': '10px'\n                'padding-right': '10px'\n                'text-wrap': 'none'\n                'border-width': 3\n                'border-color': '#999'\n            }\n        },\n        {\n            # TODO: TRON-2387 - Bezier doesn't produce the best looking graph as nodes can overlap edges a bit, but there are some weird issues with a taxi curve-style\n            # that I can't figure out. Would be nice to fix this in the future.\n            selector: 'edge',\n            style: {\n                'curve-style': 'bezier'\n                'target-arrow-shape': 'triangle'\n                'target-arrow-color': '#999'\n                'line-color': '#999'\n                'width': 2\n            }\n        },\n        # This is unfortunately necessary. For consistency, this duplicates some of the colours and styles defined in our LESS.\n        # The LESS classes (.succeeded, .running, etc.) apply to HTML elements, while the styles here apply to the graph's\n        # Cytoscape nodes (SVG canvas elements). Those LESS variables only exist during LESS compilation, so we can't use them here.\n        #\n        # If you change a colour here, you should also change it in the LESS, unless you're trying to be more specific about states\n        # in the graph than the state mixins allow.\n        #\n        # Success states (.success mixin in LESS)\n        {\n            selector: '.succeeded',\n            style: {\n                'border-color': '#218E0B'      # @green from LESS\n                'background-color': '#F0FFE0'  # Light green\n            }\n        },\n        # Info states (.info mixin in LESS)\n        {\n            selector: '.running, .starting',\n            style: {\n                'border-color': '#2F47B8'      # @blue\n                'background-color': '#F0F5FF'  # Light blue\n            }\n        },\n        # Warning states (.warning mixin in LESS)\n        {\n            selector: '.cancelled, .skipped',\n            style: {\n                'border-color': '#A6790D'      # @yellow\n                'background-color': '#FFFBF0'  # Light yellow\n            }\n        },\n        # Error states (.error mixin in LESS)\n        {\n            selector: '.failed, .unknown',\n            style: {\n                'border-color': '#BA434F'      # @red\n                'background-color': '#FFF0F0'  # Light red\n            }\n        },\n        # Pending states (.pending mixin in LESS)\n        {\n            selector: '.scheduled, .waiting, .queued',\n            style: {\n                'border-color': '#999999'      # @medium-grey\n                'background-color': '#F9F9F9'  # Light grey\n            }\n        },\n        # Unknown state\n        {\n            selector: '.unknown',\n            style: {\n                'border-color': '#D66600'      # @orange\n                'background-color': '#FFDBBB'  # Light orange\n            }\n        },\n    ]\n}\n\nmodule.tooltips = {\n    # Shared tooltip template for graph nodes\n    nodeTooltipTemplate: _.template \"\"\"\n        <div class=\"tooltip-header\">\n            <h4><%= name || id %></h4>\n            <% if (typeof state !== 'undefined') { %>\n                
<span class=\"state-badge\"><%= formatState(state) %></span>\n            <% } %>\n        </div>\n        <div class=\"tooltip-content\">\n            <code class=\"command tooltip-command\"><%= command || raw_command || \"No command available\" %></code>\n        </div>\n    \"\"\"\n\n    buildTooltipContent: (data, options = {}) ->\n        resultData = { formatState: window.formatState }\n\n        # If we have lookup data, use that as the primary data source\n        if options.actionLookup && data.name\n            fullData = options.actionLookup[data.name] || options.actionLookup[data.id]\n            if fullData\n                resultData = _.extend(resultData, fullData, {\n                    name: data.name || fullData.name || data.id,\n                    id: data.id || fullData.id\n                })\n                return @nodeTooltipTemplate(resultData)\n\n        # If no lookup data or match is found we should fall back to the provided data\n        return @nodeTooltipTemplate(_.extend(resultData, data))\n}\n\nclass window.GraphView extends Backbone.View\n    el: \"#action-graph\"\n    initialize: (options) =>\n        options = options || {}\n        @height = options.height || 250\n        @width = options.width || @$el.width()\n        @showZoom = if options.showZoom? then options.showZoom else true\n        @buildContent = options.buildContent\n        @nodeClass = options.nodeClass || \"node\"\n\n    # Create Cytoscape elements from graph data\n    formatGraphData: (data) =>\n        nodes = data.map (node) =>\n            {\n                data: {\n                    id: node.name\n                    name: node.name\n                    command: node.command\n                    nodeClass: if typeof @nodeClass is 'function' then @nodeClass(node) else @nodeClass\n                }\n            }\n\n        edges = []\n        for node in data\n            for dep in node.dependencies\n                edges.push {\n                    data: {\n                        id: \"#{dep}-#{node.name}\"\n                        source: dep\n                        target: node.name\n                    }\n                }\n\n        { nodes, edges }\n\n    buildCytoscape: (elements) =>\n        # We set an explicit container height so that the graph actually shows up\n        @$el.css('height', @height)\n\n        @cy = cytoscape({\n            container: @el\n            elements: {\n                nodes: elements.nodes,\n                edges: elements.edges\n            }\n            style: module.GraphUtils.defaultStylesheet\n            layout: module.GraphUtils.getDefaultLayout()\n            # We cap the zoom in and out. Turns out if you don't do this it is extremely easy to lose the\n            # graph. This + the reset button should make this a non-issue.\n            minZoom: 0.1\n            maxZoom: 3\n        })\n\n        @cy.ready =>\n            @cy.nodes().forEach (node) =>\n                nodeClass = node.data('nodeClass')\n                if nodeClass\n                    node.addClass(nodeClass)\n\n            # 50 ms is enough time for the graph to render before we resize and fit it. Is there a better way? 
Yeah, probably.\n            setTimeout(() =>\n                @cy.resize()\n                # Fit all elements (if no elements are specified it fits all, so we pass undefined) with 25px padding\n                @cy.fit(undefined, 25)\n            , 50)\n\n    # Set up the popovers (tooltip that displays the action command) on nodes\n    setupPopovers: =>\n        @$el.append('<div class=\"cy-tooltip\" style=\"display:none; position:absolute; z-index:999; background:white; padding:10px; border:1px solid #ccc; border-radius:4px; min-width:200px; max-width:400px; word-wrap:break-word; white-space:normal;\"></div>')\n        tooltip = @$('.cy-tooltip')\n\n        @cy.on 'mouseover', 'node', (e) =>\n            node = e.target\n            content = @buildContent(node.data())\n            tooltip.html(content)\n            tooltip.show()\n\n            @positionTooltip(e.originalEvent, tooltip)\n\n        @cy.on 'mouseout', 'node', =>\n            tooltip.hide()\n\n        @cy.on 'mousemove', 'node', (e) =>\n            @positionTooltip(e.originalEvent, tooltip)\n\n    # Relative positioning of the tooltip and some shenanigans to ensure it's actually visible on screen\n    positionTooltip: (event, tooltip) =>\n        containerOffset = @$el.offset()\n        containerWidth = @$el.width()\n        containerHeight = @$el.height()\n\n        # Relative position to the cursor\n        left = event.pageX - containerOffset.left - (tooltip.outerWidth() / 2)\n        top = event.pageY - containerOffset.top - tooltip.outerHeight() - 10\n\n        # Ensure tooltip is fully visible\n        if left < 0\n            left = 5\n        else if left + tooltip.outerWidth() > containerWidth\n            left = containerWidth - tooltip.outerWidth() - 5\n\n        # Show below cursor instead if it's too close to the top\n        if top < 0\n            top = event.pageY - containerOffset.top + 20\n\n        tooltip.css({\n            top: top + 'px'\n            left: left + 'px'\n        })\n\n    render: =>\n        @$el.html('')\n\n        elements = @formatGraphData(@model)\n        @buildCytoscape(elements)\n        @setupPopovers()\n\n        if @showZoom\n            new GraphModalView(el: @el, model: @model, graphOptions: this).render()\n\n        @\n\nclass GraphModalView extends Backbone.View\n    initialize: (options) =>\n        options = options || {}\n        @graphOptions = options.graphOptions\n\n    events:\n        'click #view-full-screen': 'toggleModal'\n\n    toggleModal: (event) ->\n        $('.modal').modal('toggle')\n\n    attachEvents: =>\n        @$('.modal').on('show', @showModal)\n        @$('.modal').on('hide', @removeGraph)\n\n        # Add controls to parent title block\n        title = @$el.closest('.outline-block').find('h2')\n        if title.length\n            # Only add if not already present\n            if title.find('.graph-search').length == 0\n                title.css({\n                    'display': 'flex',\n                    'justify-content': 'space-between',\n                    'align-items': 'center'\n                })\n\n                # Create container for controls and add search and reset fields\n                controlsHtml = '<div class=\"graph-controls\" style=\"display: inline-flex; align-items: center; margin-right: 45px;\">'\n                controlsHtml += '<span class=\"graph-search\" style=\"display: inline-flex; align-items: center;\"><input type=\"text\" placeholder=\"Search nodes...\" style=\"width:150px; padding:3px; margin: 0; height: 24px; 
font-weight: normal; vertical-align: middle;\"></span>'\n                controlsHtml += '<button class=\"reset-graph-btn btn btn-clear tt-enable\" title=\"Reset Graph\" data-placement=\"top\" style=\"margin-left: 10px;\"><i class=\"icon-refresh icon-white\"></i></button>'\n                controlsHtml += '</div>'\n                title.append(controlsHtml)\n\n                # Prevent control clicks from triggering section collapse (why do we have collapsing sections anyway?)\n                title.find('.graph-controls').on 'click', (e) ->\n                    e.stopPropagation()\n\n                # Set up reset button functionality\n                title.find('.reset-graph-btn').on 'click', (e) =>\n                    e.stopPropagation()\n                    return unless @graphOptions.cy\n                    title.find('.graph-search input').val('')\n                    module.GraphUtils.resetGraph(@graphOptions.cy)\n\n                # Set up search functionality\n                title.find('.graph-search input').on 'input', (e) =>\n                    e.stopPropagation() # Prevent event bubbling to title\n                    return unless @graphOptions.cy\n\n                    searchText = $(e.target).val().toLowerCase()\n                    module.GraphUtils.applySearch(@graphOptions.cy, searchText)\n\n    template: \"\"\"\n        <div class=\"top-right-corner\">\n        <button class=\"btn btn-clear tt-enable\"\n                title=\"Full view\"\n                data-placement=\"top\"\n                id=\"view-full-screen\"\n            >\n            <i class=\"icon-opennewwindow icon-white\"></i>\n        </button>\n        </div>\n        <div class=\"modal hide fade\">\n            <div class=\"modal-header\" style=\"display: flex; align-items: center;\">\n                <button class=\"btn btn-clear\"\n                    data-dismiss=\"modal\"\n                    style=\"margin-right: 10px;\">\n                    <i class=\"icon-circledown icon-white\"></i>\n                </button>\n                <h3 style=\"flex: 1; display: flex; align-items: center; justify-content: space-between; margin: 0;\">\n                    <div style=\"display: flex; align-items: center;\">\n                        <i class=\"icon-barchart icon-white\" style=\"margin-right: 5px;\"></i>\n                        <span>Action Graph</span>\n                    </div>\n                    <div class=\"graph-controls\" style=\"display: inline-flex; align-items: center;\">\n                        <span class=\"graph-search\" style=\"display: inline-flex; align-items: center;\">\n                            <input type=\"text\" placeholder=\"Search nodes...\" style=\"width:200px; padding:3px; margin: 0; height: 24px; font-weight: normal; vertical-align: middle;\">\n                        </span>\n                        <button class=\"reset-graph-btn btn btn-clear tt-enable\"\n                                title=\"Reset Graph\"\n                                data-placement=\"top\"\n                                style=\"margin-left: 10px;\">\n                            <i class=\"icon-refresh icon-white\"></i>\n                        </button>\n                    </div>\n                </h3>\n            </div>\n            <div class=\"modal-body graph job-view\">\n            </div>\n        </div>\n        \"\"\"\n\n    showModal: (event) =>\n        # If event.target isn't the modal, we want to return early without doing anything. 
This guards against\n        # responding to events from children of the modal.\n        return if event.target != $('.modal')[0]\n\n        # Get window dimensions and explicitly set the width on the modal body.\n        modalHeight = $(window).height() - 130\n        modalWidth = $(window).width() - 150\n        @$('.modal-body').css({\n            width: modalWidth + 'px',\n            height: modalHeight + 'px'\n        })\n\n        # Disable interactions on the main graph while the modal is open.\n        #\n        # This is necessary because Cytoscape graphs use canvas elements for rendering, and canvas\n        # events don't respect modal z-index in the same way as normal DOM elements. Without this,\n        # interactions in the full-screen modal would also affect the main graph when the mouse is\n        # over an area where they overlap.\n        if @graphOptions.cy\n            # Save current state to restore from when the modal is closed\n            @savedInteractionState = {\n                userPanningEnabled: @graphOptions.cy.userPanningEnabled(),\n                userZoomingEnabled: @graphOptions.cy.userZoomingEnabled(),\n                boxSelectionEnabled: @graphOptions.cy.boxSelectionEnabled(),\n                autoungrabify: @graphOptions.cy.autoungrabify(),\n                autounselectify: @graphOptions.cy.autounselectify()\n            }\n\n            # Disable all interactions\n            @graphOptions.cy.userPanningEnabled(false)\n            @graphOptions.cy.userZoomingEnabled(false)\n            @graphOptions.cy.boxSelectionEnabled(false)\n            @graphOptions.cy.autoungrabify(true)\n            @graphOptions.cy.autounselectify(true)\n\n        options = _.extend {},\n            @graphOptions,\n            model: @model\n            el: @$('.modal-body.graph').html('').get()\n            height: modalHeight\n            width: modalWidth\n            showZoom: false\n\n        modalGraph = new GraphView(options).render()\n\n        # Set up reset\n        @$('.modal-header .reset-graph-btn').off('click').on 'click', (e) =>\n            return unless modalGraph.cy\n            @$('.modal-header .graph-search input').val('')\n            module.GraphUtils.resetGraph(modalGraph.cy)\n\n        # Set up search\n        @$('.modal-header .graph-search input').off('input').on 'input', (e) =>\n            return unless modalGraph.cy\n            searchText = $(e.target).val().toLowerCase()\n            module.GraphUtils.applySearch(modalGraph.cy, searchText)\n\n        # Resize the graph to fit the modal. 
We do a longer timeout here because the modal takes a bit longer to render.\n        setTimeout(() =>\n            if modalGraph.cy\n                modalGraph.cy.resize()\n                # Fit all elements (if no elements are specified it fits all, so we pass undefined) with 25px padding\n                modalGraph.cy.fit(undefined, 25)\n        , 200)\n\n    removeGraph: (event) =>\n        return if event.target != $('.modal')[0]\n        @$('.modal-body.graph').empty()\n\n        # Restore all interaction capabilities on the main graph\n        if @graphOptions.cy && @savedInteractionState\n            @graphOptions.cy.userPanningEnabled(@savedInteractionState.userPanningEnabled)\n            @graphOptions.cy.userZoomingEnabled(@savedInteractionState.userZoomingEnabled)\n            @graphOptions.cy.boxSelectionEnabled(@savedInteractionState.boxSelectionEnabled)\n            @graphOptions.cy.autoungrabify(@savedInteractionState.autoungrabify)\n            @graphOptions.cy.autounselectify(@savedInteractionState.autounselectify)\n\n    render: =>\n        @$el.append(@template)\n        @attachEvents()\n        @delegateEvents()\n        @\n"
  },
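A small sketch of the data shape GraphView consumes (the two actions here are made up): each entry in a job's action_graph carries a name, command, and dependency list, and formatGraphData turns that into the Cytoscape nodes and edges rendered above.

# Made-up action graph: "cleanup" depends on "process".
actionGraph = [
    {name: "process", command: "do_work.sh", dependencies: []}
    {name: "cleanup", command: "rm -rf tmp/", dependencies: ["process"]}
]
buildContent = (d) -> window.modules.graph.tooltips.buildTooltipContent(d)
view = new window.GraphView(model: actionGraph, buildContent: buildContent)
elements = view.formatGraphData(actionGraph)
# elements.nodes -> [{data: {id: "process", name: "process", command: "do_work.sh", nodeClass: "node"}}, ...]
# elements.edges -> [{data: {id: "process-cleanup", source: "process", target: "cleanup"}}]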
  {
    "path": "tronweb/coffee/job.coffee",
    "content": "# Jobs\n# This file defines the models and views for Jobs and JobRuns.\n#\n# Flow:\n# 1. Routes in routes.coffee match URLs like #job/name or #job/name/run\n# 2. The router creates models from this file (Job, JobRun) and triggers API calls\n# 3. When data returns, the views defined here render the Job information\nwindow.modules = window.modules || {}\nwindow.modules.job = module = {}\n\n\nclass window.Job extends Backbone.Model\n\n    initialize: (options) =>\n        super options\n        options = options || {}\n        @refreshModel = options.refreshModel\n\n    idAttribute: \"name\"\n\n    urlRoot: \"/jobs\"\n\n    url: ->\n        super() + \"?include_action_graph=1\"\n\n\nclass window.JobCollection extends Backbone.Collection\n\n    initialize: (models, options) =>\n        super options\n        options = options || {}\n        @refreshModel = options.refreshModel\n        @filterModel = options.filterModel\n\n    model: Job\n\n    url: \"/jobs?include_job_runs=1\"\n\n    parse: (resp, options) =>\n        resp['jobs']\n\n    comparator: (job) =>\n        job.get('name')\n\n\nclass window.JobRun extends Backbone.Model\n\n    initialize: (options) =>\n        super options\n        options = options || {}\n        @refreshModel = options.refreshModel\n\n    idAttribute: \"run_num\"\n\n    urlRoot: ->\n        \"/jobs/\" + @get('name')\n\n    url: =>\n        super() + \"?include_action_graph=1&include_action_runs=1\"\n\n    parse: (resp, options) =>\n        resp['job_url'] = \"#job/\" + resp['job_name']\n        resp\n\n\nclass window.JobListFilterModel extends FilterModel\n\n    filterTypes:\n        name:       buildMatcher(fieldGetter('name'), matchAny)\n        status:     buildMatcher(fieldGetter('status'), _.str.startsWith)\n        node_pool:  buildMatcher(nestedName('node_pool'), _.str.startsWith)\n\n\nclass window.JobListView extends Backbone.View\n\n    initialize: (options) =>\n        @listenTo(@model, \"sync\", @render)\n        @refreshView = new RefreshToggleView(model: @model.refreshModel)\n        @filterView = new FilterView(model: @model.filterModel)\n        @listenTo(@refreshView, 'refreshView', => @model.fetch())\n        @listenTo(@filterView, \"filter:change\", @renderList)\n\n    tagName: \"div\"\n\n    className: \"span12\"\n\n    template: _.template \"\"\"\n        <h1>\n            <i class=\"icon-time icon-white\"></i> Scheduled Jobs\n            <span id=\"refresh\"></span>\n        </h1>\n        <div id=\"filter-bar\"></div>\n        <div class=\"outline-block\">\n        <table class=\"table table-hover table-outline table-striped\">\n            <thead class=\"header\">\n                <tr>\n                    <th class=\"span4\">Name</th>\n                    <th>Status</th>\n                    <th>Schedule</th>\n                    <th>Node Pool</th>\n                    <th>Last Success</th>\n                    <th>Next Run</th>\n                </tr>\n            </thead>\n            <tbody>\n            </tbody>\n        </table>\n        </div>\n        \"\"\"\n\n    render: ->\n        @$el.html @template()\n        @renderFilter()\n        @$('#refresh').html(@refreshView.render().el)\n        @renderList()\n        @\n\n    renderList: =>\n        models = @model.filter(@model.filterModel.createFilter())\n        entry = (model) -> new JobListEntryView(model: model).render().el\n        @$('tbody').html(entry(model) for model in models)\n\n    renderFilter: =>\n        
@$('#filter-bar').html(@filterView.render().el)\n\n\nclass JobListEntryView extends ClickableListEntry\n\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n\n    tagName: \"tr\"\n\n    className: \"clickable\"\n\n    template: _.template \"\"\"\n        <td><a href=\"#job/<%= name %>\"><%= formatName(name) %></a></td>\n        <td><%= formatState(status) %></td>\n        <td><%= formatScheduler(scheduler) %></td>\n        <td><%= displayNodePool(node_pool) %></td>\n        <td><%= dateFromNow(last_success, 'never') %></td>\n        <td><%= dateFromNow(next_run, 'none') %></td>\n        \"\"\"\n\n    render: ->\n        @$el.html @template(@model.attributes)\n        makeTooltips(@$el)\n        @\n\n\nclass JobRunTimelineEntry\n\n    constructor: (@jobRun, @maxDate) ->\n\n    toString: =>\n        @jobRun.run_num\n\n    getYAxisLink: =>\n        \"#job/#{@jobRun.job_name}/#{@jobRun.run_num}\"\n\n    getYAxisText: =>\n        @jobRun.run_num\n\n    getBarClass: =>\n        @jobRun.state\n\n    getStart: =>\n        new Date(@jobRun.start_time || @jobRun.run_time)\n\n    getEnd: =>\n        return @maxDate if @jobRun.state == 'running'\n        new Date(@jobRun.end_time || @jobRun.start_time || @jobRun.run_time)\n\n\nclass window.JobView extends Backbone.View\n\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n        @refreshView = new RefreshToggleView(model: @model.refreshModel)\n        @jobRunListView = new module.JobRunListView(model: @model)\n        @listenTo(@refreshView, 'refreshView', => @model.fetch())\n        sliderModel = new JobRunListSliderModel(@model)\n        @sliderView = new modules.views.SliderView(model: sliderModel)\n        @listenTo(@sliderView, \"slider:change\", @renderTimeline)\n        @currentDate = new Date()\n\n    tagName: \"div\"\n\n    className: \"span12\"\n\n    template: _.template \"\"\"\n        <div class=\"row\">\n            <div class=\"span12\">\n                <h1>\n                    <small>Job</small>\n                    <%= formatName(name) %>\n                    <span id=\"refresh\"></span>\n                </h1>\n            </div>\n            <div class=\"span4 outline-block\">\n                <h2>Details</h2>\n                <div>\n                <table class=\"table details\">\n                    <tbody>\n                    <tr><td>Status</td>\n                        <td><%= formatState(status) %></td></tr>\n                    <tr><td>Node pool</td>\n                        <td><%= displayNodePool(node_pool) %></td></tr>\n                    <tr><td>Schedule</td>\n                        <td><%= formatScheduler(scheduler) %></td></tr>\n                    <tr><td>Settings</td>\n                        <td><%= settings %></td></tr>\n                    <tr><td>Last success</td>\n                        <td><%= dateFromNow(last_success) %></td></tr>\n                    <tr><td>Next run</td>\n                        <td><%= dateFromNow( next_run) %></td></tr>\n                    </tbody>\n                </table>\n                </div>\n            </div>\n            <div class=\"span8 outline-block\">\n                <h2>Action Graph</h2>\n                <div id=\"action-graph\" class=\"graph job-view\"></div>\n            </div>\n\n            <div class=\"span12 outline-block\">\n              <h2>Timeline</h2>\n              <div>\n                <div id=\"slider-chart\"></div>\n                <div id=\"timeline-graph\"></div>\n              </div>\n            
</div>\n\n            <div id=\"job-runs\"></div>\n        </div>\n        \"\"\"\n\n\n    renderGraph: =>\n        new GraphView(\n            model: @model.get('action_graph')\n            buildContent: (d) ->\n                window.modules.graph.tooltips.buildTooltipContent(d)\n            height: @$('table.details').height()\n        ).render()\n\n\n    renderTimeline: =>\n        jobRuns = @model.get('runs')[...@sliderView.displayCount]\n        jobRuns = (new JobRunTimelineEntry(run, @currentDate) for run in jobRuns)\n        new modules.timeline.TimelineView(model: jobRuns).render()\n\n    formatSettings: (attrs) =>\n        template = _.template \"\"\"\n            <span class=\"label-icon tt-enable\" title=\"<%= title %>\">\n                <i class=\"icon-<%= icon %>\"></i>\n            </span>\n            \"\"\"\n\n        [icon, title] = if attrs.allow_overlap\n            ['layers', \"Allow overlapping runs\"]\n        else if attrs.queueing\n            ['circlepauseempty', \"Queue overlapping runs\"]\n        else\n            ['remove-circle', \"Cancel overlapping runs\"]\n\n        content = if attrs.all_nodes\n            template(icon: 'treediagram', title: \"Run on all nodes\")\n        else\n            \"\"\n        template(icon: icon, title: title) + content\n\n    render: ->\n        @$el.html @template _.extend {},\n            @model.attributes,\n            settings: @formatSettings(@model.attributes)\n\n        @$('#job-runs').html(@jobRunListView.render().el)\n        @$('#refresh').html(@refreshView.render().el)\n        @renderGraph()\n        @renderTimeline()\n        @$('#slider-chart').html @sliderView.render().el\n        makeTooltips(@$el)\n        modules.views.makeHeaderToggle(@$el)\n        @\n\n\nclass JobRunListSliderModel\n\n    constructor: (@model) ->\n\n    length: =>\n        @model.get('runs').length\n\n\nclass module.JobRunListView extends Backbone.View\n\n    initialize: (options) =>\n        sliderModel = new JobRunListSliderModel(@model)\n        @sliderView = new modules.views.SliderView(model: sliderModel)\n        @listenTo(@sliderView, \"slider:change\", @renderList)\n\n    tagName: \"div\"\n\n    className: \"span12 outline-block\"\n\n    template: _.template \"\"\"\n        <h2>Job Runs</h2>\n        <div>\n        <div id=\"slider-table\"></div>\n        <table class=\"table table-hover table-outline table-striped\">\n            <thead class=\"sub-header\">\n                <tr>\n                    <th>Id</th>\n                    <th>State</th>\n                    <th>Node</th>\n                    <th>Start</th>\n                    <th>End</th>\n                    <th>Duration</th>\n                </tr>\n            </thead>\n            <tbody class=\"jobruns\">\n            </tbody>\n        </table>\n        </div>\n        \"\"\"\n\n    renderList: =>\n        entry = (jobrun) ->\n            new JobRunListEntryView(model:new JobRun(jobrun)).render().el\n        models = @model.get('runs')[...@sliderView.displayCount]\n        @$('tbody').html(entry(model) for model in models)\n\n    render: =>\n        @$el.html @template(@model.attributes)\n        @$('#slider-table').html @sliderView.render().el\n        @renderList()\n        @\n\nmodule.formatManualRun = (manual) ->\n    if ! 
manual then \"\" else \"\"\"\n        <span class=\"label label-manual\">\n            <i class=\"icon-hand-down icon-white tt-enable\" title=\"Manual run\"></i>\n        </span>\n    \"\"\"\n\nformatInterval = (interval) ->\n    humanized = getDuration(interval).humanize()\n    \"\"\"\n        <span class=\"tt-enable\" title=\"#{interval}\">\n         #{humanized}\n        </span>\n    \"\"\"\n\n# TODO: TRON-1761 - Fix cron validation. Would be nice to not have all these...\nwindow.formatScheduler = (scheduler) ->\n    [icon, value] = switch scheduler.type\n        when 'interval' then ['icon-time', formatInterval(scheduler.value)]\n        when 'groc'     then ['icon-calendarthree', scheduler.value]\n        when 'daily'    then ['icon-notestasks', scheduler.value]\n        when 'cron'     then ['icon-calendaralt-cronjobs', scheduler.value]\n\n    _.template(\"\"\"\n            <i class=\"<%= icon %> tt-enable\"\n                title=\"<%= type %> scheduler\"></i>\n        <span class=\"scheduler label label-clear\">\n            <%= value %>\n        </span>\n        <% if (jitter) { %>\n            <i class=\"icon-random tt-enable\" title=\"Jitter<%= jitter %>\"></i>\n        <% } %>\n    \"\"\")(\n         icon: icon\n         type: scheduler.type\n         value: value\n         jitter: scheduler.jitter)\n\n\nclass JobRunListEntryView extends ClickableListEntry\n\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n\n    tagName: \"tr\"\n\n    className: \"clickable\"\n\n    template: _.template \"\"\"\n        <td>\n            <a href=\"#job/<%= job_name %>/<%= run_num %>\"><%= run_num %></a>\n            <%= modules.job.formatManualRun(manual) %>\n        </td>\n        <td><%= formatState(state) %></td>\n        <td><%= displayNode(node) %></td>\n        <td><%= dateFromNow(start_time || run_time, \"Unknown\") %></td>\n        <td><%= dateFromNow(end_time, \"\") %></td>\n        <td><%= duration %></td>\n        \"\"\"\n\n    render: ->\n        @$el.html @template(@model.attributes)\n        makeTooltips(@$el)\n        @\n\n\nclass window.JobRunView extends Backbone.View\n\n    initialize: (options) =>\n        @listenTo(@model, \"change\", @render)\n        @refreshView = new RefreshToggleView(model: @model.refreshModel)\n        @listenTo(@refreshView, 'refreshView', => @model.fetch())\n\n    tagName: \"div\"\n\n    className: \"span12\"\n\n    template: _.template \"\"\"\n         <div class=\"row\">\n            <div class=\"span12\">\n                <h1>\n                    <small>Job Run</small>\n                    <a href=\"<%= job_url %>\">\n                        <%= formatName(job_name) %></a>.<%= run_num %>\n                    <span id=\"filter\"</span>\n                </h1>\n\n            </div>\n            <div class=\"span4 outline-block\">\n                <h2>Details</h2>\n                <div>\n                <table class=\"table details\">\n                    <tr><td class=\"span2\">State</td>\n                        <td><%= formatState(state) %></td></tr>\n                    <tr><td>Node</td>\n                        <td><%= displayNode(node) %></td></tr>\n                    <tr><td>Scheduled</td>\n                        <td>\n                            <%= modules.job.formatManualRun(manual) %>\n                            <span class=\"label label-clear\"><%= run_time %></span>\n                        </td></tr>\n                    <tr><td>Start</td>\n                        <td><%= dateFromNow(start_time, '') 
%></td>\n                    </tr>\n                    <tr><td>End</td>\n                        <td><%= dateFromNow(end_time, '') %></td>\n                    </tr>\n                    <tr><td>Duration</td>\n                        <td><%= duration %></td>\n                    </tr>\n                </table>\n                </div>\n            </div>\n            <div class=\"span8 outline-block\">\n                <h2>Action Graph</h2>\n                <div id=\"action-graph\" class=\"graph job-view\"></div>\n            </div>\n\n            <div class=\"span12 outline-block\">\n              <h2>Timeline</h2>\n              <div>\n                <div id=\"slider-chart\"></div>\n                <div id=\"timeline-graph\"></div>\n              </div>\n            </div>\n\n            <div class=\"span12 outline-block\">\n                <h2>Action Runs</h2>\n                <div>\n                <table class=\"table table-hover table-outline\">\n                    <thead class=\"sub-header\">\n                        <tr>\n                            <th>Name</th>\n                            <th>State</th>\n                            <th class=\"span3\">Command</th>\n                            <th>Node</th>\n                            <th>Start</th>\n                            <th>End</th>\n                            <th>Duration</th>\n                        </tr>\n                    </thead>\n                    <tbody class=\"actionruns\">\n                    </tbody>\n                </table>\n                </div>\n            </div>\n        </div>\n        \"\"\"\n\n    renderList: (actionRuns) =>\n        entry = (run) =>\n            run['job_name'] = @model.get('job_name')\n            run['run_num'] =  @model.get('run_num')\n            model = new modules.actionrun.ActionRun(run)\n            new modules.actionrun.ActionRunListEntryView(model: model).render().el\n        @$('tbody.actionruns').html(entry(model) for model in actionRuns)\n\n    getMaxDate: =>\n        actionRuns = @model.get('runs')\n        dates = (r.end_time || r.start_time for r in actionRuns)\n        dates = (new Date(date) for date in dates when date?)\n        dates.push(new Date(@model.get('run_time')))\n        _.max(dates)\n\n    renderTimeline: (actionRuns) =>\n        maxDate = @getMaxDate()\n        actionRuns = for actionRun in actionRuns\n            new modules.actionrun.ActionRunTimelineEntry(actionRun, maxDate)\n\n        new modules.timeline.TimelineView(\n            model: actionRuns\n            margins:\n                left: 150\n        ).render()\n\n    popupTemplate: _.template \"\"\"\n        <div class=\"tooltip-header\">\n            <h4><%= name %></h4>\n            <span class=\"state-badge\"><%= formatState(state) %></span>\n        </div>\n        <div class=\"tooltip-content\">\n            <code class=\"command\"><%= command || raw_command %></code>\n        </div>\n        \"\"\"\n\n    renderGraph: =>\n        @originalActionGraph = @model.get('action_graph')\n\n        @actionLookup = {}\n        for action in @originalActionGraph\n            @actionLookup[action.name] = action\n\n        new GraphView(\n            model: @originalActionGraph\n            # Look up the full action data before passing to template. 
This allows us to show action state on the popup tooltip.\n            buildContent: (nodeData) =>\n                window.modules.graph.tooltips.buildTooltipContent(nodeData, {actionLookup: @actionLookup})\n            nodeClass: (d) ->\n                \"node #{d.state || 'unknown'}\"\n            height: @$('table.details').height()\n        ).render()\n\n    sortActionRuns: =>\n        maxDate = @getMaxDate()\n        getStart = (item) ->\n            if item.start_time then new Date(item.start_time) else maxDate\n        _.sortBy @model.get('runs'), getStart\n\n    render: =>\n        @$el.html @template(@model.attributes)\n        @$('#filter').html(@refreshView.render().el)\n        actionRuns = @sortActionRuns()\n        @renderList(actionRuns)\n        @renderGraph()\n        @renderTimeline(actionRuns)\n        makeTooltips(@$el)\n        modules.views.makeHeaderToggle(@$el)\n        @\n"
  },
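A minimal sketch of how a JobRun fetch URL is assembled from the pieces above (the job name "MASTER.foo" and run number 42 are invented): Backbone joins urlRoot with the run_num id, the url method appends its query string, and the sync override in models.coffee prepends /api.

# Hypothetical identifiers, for illustration only.
run = new window.JobRun(name: "MASTER.foo", run_num: 42)
console.log '/api' + run.url()
# => "/api/jobs/MASTER.foo/42?include_action_graph=1&include_action_runs=1"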
  {
    "path": "tronweb/coffee/models.coffee",
    "content": "# Models\n# This file provides core model functionality and sets up global Backbone behaviour\nwindow.modules = window.modules || {}\nwindow.modules.models = module = {}\n\n# Override Backbone's sync to prepend '/api' to all API URLs\nbackboneSync = Backbone.sync\nBackbone.sync = (method, model, options) ->\n    options.url = '/api' + _.result(model, 'url')\n    backboneSync(method, model, options)\n\n\nclass window.RefreshModel extends Backbone.Model\n    initialize: (options) =>\n        options = options || {}\n        @interval = (options.interval || 5) * 1000\n        @enabled = false\n        @timeout = null\n\n    toggle: (event) =>\n        if not @enabled\n            @enableRefresh()\n            @trigger('toggle:on')\n        else\n            @disableRefresh()\n            @trigger('toggle:off')\n\n    enableRefresh: =>\n        if not @enabled\n            @enabled = true\n            @scheduleRefresh()\n\n    disableRefresh: =>\n        @enabled = false\n        @clear()\n\n    clear: =>\n        clearTimeout(@timeout)\n        @timeout = null\n\n    doRefresh: =>\n        @clear()\n        if @enabled\n            @trigger('refresh')\n            @scheduleRefresh()\n\n    scheduleRefresh: =>\n        if not @timeout\n            @timeout = setTimeout(@doRefresh, @interval)\n\n\nclass window.StatusModel extends Backbone.Model\n    urlRoot: ->\n        \"/status/\"\n\n    parse: (resp) =>\n        booted = moment.unix(resp['boot_time']).format('YYYY-MM-DD HH:mm ZZ')\n        uptime = moment.duration(moment() - booted).minutes()\n        $('#version').html('<b>Tron:</b> v' + resp['version'] + ' <b>Boot:</b> ' + booted + '</b>')\n        resp\n\nwindow.matchAny = (item, query) ->\n    ~item.toLowerCase().indexOf(query.toLowerCase())\n\nwindow.buildMatcher = (getter, matcher) ->\n    (item, query) -> matcher(getter(item), query)\n\nwindow.fieldGetter = (name) ->\n    (item) -> item.get(name)\n\nwindow.nestedName = (field) ->\n    (item) -> item.get(field)['name']\n\n\nclass window.FilterModel extends Backbone.Model\n    filterTypes:\n        name:       buildMatcher(fieldGetter('name'), matchAny)\n        state:      buildMatcher(fieldGetter('state'), _.str.startsWith)\n        node_pool:  buildMatcher(nestedName('node_pool'), _.str.startsWith)\n\n    createFilter: =>\n        filterFuncs = for type, func of @filterTypes\n            do (type, func) =>\n                query = @get(\"#{type}Filter\")\n                if query\n                    (item) -> func(item, query)\n                else\n                    (item) -> true\n\n        (item) -> _.every(filterFuncs, (func) -> func(item))\n\n\nclass IndexEntry\n    constructor: (@name) ->\n\n    toLowerCase: =>\n        @name.toLowerCase()\n\n    replace: (args...) =>\n        @name.replace(args...)\n\n    indexOf: (args...) 
=>\n        @name.indexOf(args...)\n\n    toString: =>\n        \"#{@type} #{@name}\"\n\n\nclass JobIndexEntry extends IndexEntry\n    type: \"Job\"\n\n    getUrl: =>\n        \"#job/#{@name}\"\n\n\nclass ConfigIndexEntry extends IndexEntry\n    type: \"Config\"\n\n    getUrl: =>\n        \"#config/#{@name}\"\n\n\nclass CommandIndexEntry extends IndexEntry\n    constructor: (@name, @job_name, @action_name) ->\n\n    type: \"command\"\n\n    getUrl: =>\n        \"#job/#{@job_name}/-1/#{@action_name}\"\n\n\nclass module.QuickFindModel extends Backbone.Model\n    url: \"/\"\n\n    getJobEntries: (jobs) =>\n        buildActions = (actions) ->\n            for action in actions\n                new CommandIndexEntry(action.command, name, action.name)\n\n        nested = for name, actions of jobs\n            [new JobIndexEntry(name), buildActions(actions)]\n        _.flatten(nested)\n\n    parse: (resp, options) =>\n        index = [].concat(\n            @getJobEntries(resp['jobs']),\n            new ConfigIndexEntry name for name in resp['namespaces'])\n\n        _.mash([entry.name, entry] for entry in index)\n"
  },
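A brief sketch of the matcher helpers defined above (the model attributes are invented): buildMatcher pairs a getter with a match function, and FilterModel.createFilter ANDs one matcher per <type>Filter attribute that has a value, passing items through for filters that are unset.

# Invented attributes; only stateFilter is set, so the other matchers pass everything.
item = new Backbone.Model(name: "MASTER.cleanup", state: "running", node_pool: {name: "default"})
nameMatches = buildMatcher(fieldGetter('name'), matchAny)
console.log Boolean(nameMatches(item, "clean"))   # true: substring match on name
filter = new FilterModel(stateFilter: "run")
console.log filter.createFilter()(item)           # true: "running" starts with "run"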
  {
    "path": "tronweb/coffee/navbar.coffee",
    "content": "window.modules = window.modules || {}\nwindow.modules.navbar = module = {}\n\n\nclass module.NavView extends Backbone.View\n\n    initialize: (options) ->\n\n    tagName: \"div\"\n\n    className: \"navbar navbar-static-top\"\n\n    attributes:\n        id: \"menu\"\n\n    events:\n        \".search-query click\":  \"handleClick\"\n\n    handleClick: (event) ->\n       console.log(event)\n\n    template: \"\"\"\n          <div class=\"navbar-inner\">\n            <div class=\"container\">\n            <ul class=\"nav\">\n              <li class=\"brand\">tron<span>web</span></li>\n              <li><a href=\"#home\">\n                <i class=\"icon-th\"></i>Dashboard</a>\n              </li>\n              <li><a href=\"#jobs\">\n                <i class=\"icon-time\"></i>Scheduled Jobs</a>\n              </li>\n              <li><a href=\"#configs\">\n                <i class=\"icon-wrench\"></i>Config</a>\n              </li>\n            </ul>\n\n            <form class=\"navbar-search pull-right\">\n            </form>\n\n            </div>\n            <div id=\"version\"></div>\n          </div>\n    \"\"\"\n\n    typeaheadTemplate: \"\"\"\n        <input type=\"text\" class=\"input-medium search-query typeahead\"\n            placeholder=\"Search\"\n            autocomplete=\"off\"\n            data-provide=\"typeahead\">\n        <div class=\"icon-search\"></div>\n    \"\"\"\n\n    render: =>\n        @$el.html @template\n        @renderTypeahead()\n        # fetch tron version and uptime\n        new window.StatusModel().fetch()\n        @\n\n    updater: (item) =>\n        entry = @model.get(item)\n        routes.navigate(entry.getUrl(), trigger: true)\n        entry.name\n\n    source: (query, process) =>\n        (entry.name for _, entry of @model.attributes)\n\n    highlighter: (item) =>\n        # Also formats the item for display\n        typeahead = @$('.typeahead').data().typeahead\n        name = module.typeahead_hl.call(typeahead, item)\n        entry = @model.get(item)\n        \"<small>#{entry.type}</small> #{name}\"\n\n    sorter: (items) ->\n        [startsWithQuery, containsQuery] = [[], []]\n        query = @query.toLowerCase()\n        for item in items\n            uncasedItem = item.toLowerCase()\n            if _.str.startsWith(uncasedItem, query) then startsWithQuery.push item\n            else if _.str.include(uncasedItem, query) then containsQuery.push item\n\n        lengthSort = (item) -> item.length\n        _.sortBy(startsWithQuery, lengthSort)\n            .concat(_.sortBy(containsQuery, lengthSort))\n\n    renderTypeahead: =>\n        @$('.navbar-search').html @typeaheadTemplate\n        @$('.typeahead').typeahead\n            source: @source,\n            updater: @updater\n            highlighter: @highlighter,\n            sorter: @sorter\n        @\n\n    setActive: =>\n        @$('li').removeClass 'active'\n        [path, params] = modules.routes.getLocationParams()\n        path = path.split('/')[0]\n        @$(\"a[href=#{path}]\").parent('li').addClass 'active'\n\nTypeahead = $.fn.typeahead.Constructor.prototype\n\nTypeahead.show = ->\n    top = @$element.position().top + @$element[0].offsetHeight + 1\n    @$menu.insertAfter(@$element).css(top: top).show()\n    @shown = true\n    @\n\nmodule.typeahead_hl = $.fn.typeahead.Constructor.prototype.highlighter\n"
  },
  {
    "path": "tronweb/coffee/nodes.coffee",
    "content": "class NodeModel extends Backbone.Model\n\n\nclass NodePoolModel extends Backbone.Model\n\n\nclass NodeInlineView extends Backbone.View\n    tagName: \"span\"\n\n    template: _.template \"\"\"\n        <span class=\"tt-enable\" title=\"<%= username %>@<%= hostname %>:<%= port %>\">\n            <%= name %>\n        </span>\n    \"\"\"\n\n    render: =>\n        @$el.html @template(@model.attributes)\n        @\n\n\nclass NodePoolInlineView extends Backbone.View\n    tagName: \"span\"\n\n    template: _.template \"\"\"\n        <span class=\"tt-enable\" title=\"<%= nodes.length %> node(s)\">\n            <%= name %>\n        </span>\n    \"\"\"\n\n    render: =>\n        @$el.html @template(@model.attributes)\n        @\n\n\nwindow.displayNode = (node) ->\n    new NodeInlineView(model: new NodeModel(node)).render().$el.html()\n\n\nwindow.displayNodePool = (pool) ->\n    new NodePoolInlineView(model: new NodePoolModel(pool)).render().$el.html()\n"
  },
  {
    "path": "tronweb/coffee/routes.coffee",
    "content": "# Routes\n# This file defines the URL structure and navigation flow for Tronweb.\n#\n# Flow:\n# 1. When a URL changes (e.g., #job/name), Backbone matches it to a route handler\n# 2. The route handler creates appropriate models (i.e. from job.coffee, etc.) and views\n# 3. The route handler fetches data from the API\n# 4. Returned data is rendered in the view\nwindow.modules = window.modules || {}\nwindow.modules.routes = module = {}\n\n# Backbone router that handles URL navigation and view management\n# Routes follow the pattern: \"url/:parameter\": \"handlerMethod\".\n# E.g. navigating to .../#job/my-job-name will call the job method\n# with name = \"my-job-name\", and this feeds into updateMainView.\nclass module.TronRoutes extends Backbone.Router\n\n    routes:\n        \"\":                         \"index\"\n        \"home(;*params)\":           \"home\"\n        \"dashboard(;*params)\":      \"dashboard\"\n        \"jobs(;*params)\":           \"jobs\"\n        \"job/:name\":                \"job\"\n        \"job/:job_name/:run_num\":   \"jobrun\"\n        \"job/:name/:run/:action\":   \"actionrun\"\n        \"configs\":                  \"configs\"\n        \"config/:name\":             \"config\"\n\n    # Create a view based on the provided model and viewType. Then, trigger\n    # the model's API call and update the main view with returned content.\n    updateMainView: (model, viewType) ->\n        view = new viewType(model: model)\n        model.fetch()\n        mainView.updateMain(view)\n\n    index: ->\n        @navigate('home', trigger: true)\n\n    home: (params) ->\n        model = new Dashboard\n            filterModel: new DashboardFilterModel(module.getParamsMap(params))\n        document.title = \"home\"\n        @updateMainView(model, DashboardView)\n\n    dashboard: (params) ->\n        mainView.close()\n        model = new Dashboard\n            filterModel: new DashboardFilterModel(module.getParamsMap(params))\n        dashboard = new DashboardView(model: model)\n        model.fetch()\n        document.title = \"dashboard\"\n        mainView.updateFullView dashboard.render()\n\n    configs: ->\n        document.title = \"configs\"\n        @updateMainView(new NamespaceList(), NamespaceListView)\n\n    config: (name) ->\n        document.title = \"config #{name}\"\n        @updateMainView(new Config(name: name), ConfigView)\n\n    jobs: (params) ->\n        collection = new JobCollection([],\n            refreshModel: new RefreshModel(),\n            filterModel: new JobListFilterModel(module.getParamsMap(params)))\n        document.title = \"jobs\"\n        @updateMainView(collection, JobListView)\n\n    job: (name) ->\n        refreshModel = new RefreshModel()\n        document.title = \"#{name}\"\n        @updateMainView(new Job(name: name, refreshModel: refreshModel), JobView)\n\n    jobrun: (name, run) ->\n        model = new JobRun(\n            name: name, run_num: run, refreshModel: new RefreshModel())\n        document.title = \"#{name}.#{run}\"\n        @updateMainView(model, JobRunView)\n\n    actionrun: (name, run, action) ->\n        model = new modules.actionrun.ActionRun(\n            job_name: name\n            run_num: run\n            action_name: action\n            refreshModel: new RefreshModel())\n        historyCollection = new modules.actionrun.ActionRunHistory([],\n            job_name: name\n            action_name: action)\n        view = new modules.actionrun.ActionRunView(\n            model: model\n            history: 
historyCollection)\n        model.fetch()\n        historyCollection.fetch()\n        document.title = \"#{name}.#{run}.#{action}\"\n        mainView.updateMain(view)\n\n# Main view that manages our overall page structure\nclass MainView extends Backbone.View\n\n    initialize: (options) ->\n       @navView = new modules.navbar.NavView(model: @model)\n\n    el: $(\"#all-view\")\n\n    template: \"\"\"\n        <div id=\"nav\"></div>\n        <div class=\"container\">\n            <div id=\"main\" class=\"row\">\n            </div>\n        </div>\n        \"\"\"\n\n    updateMain: (view) =>\n        @close()\n        @renderNav() if @$('#nav').html() == ''\n        @navView.setActive()\n        @$('#main').html view.el\n\n    updateFullView: (view) =>\n        @$('#nav').html ''\n        @$('#main').html view.el\n\n    render: =>\n        @$el.html @template\n        @renderNav()\n        @\n\n    renderNav: =>\n        @$('#nav').html @navView.render().el\n\n    close: =>\n        @trigger('closeView')\n\n\n# URL parameter helpers\nmodule.splitKeyValuePairs = (pairs) ->\n    _.mash(param.split('=') for param in pairs)\n\nmodule.getParamsMap = (paramString) ->\n    paramString = paramString || \"\"\n    module.splitKeyValuePairs(paramString.split(';'))\n\nmodule.getLocationHash = ->\n    document.location.hash\n\nmodule.getLocationParams = ->\n    parts = module.getLocationHash().split(';')\n    [parts[0], module.splitKeyValuePairs(parts[1..])]\n\n\nmodule.buildLocationString = (base, params) ->\n    params = (pair.join('=') for pair in _.pairs(params) when pair[1]).join(';')\n    \"#{ base };#{ params }\"\n\n\nmodule.updateLocationParam = (name, value) ->\n    [base, params] = module.getLocationParams()\n    params[name] = value\n    routes.navigate(module.buildLocationString(base, params))\n\n\nwindow.attachRouter = () ->\n    $(document).ready ->\n\n        window.routes = new modules.routes.TronRoutes()\n        model = modules.models = new modules.models.QuickFindModel()\n        window.mainView = new MainView(model: model).render()\n        model.fetch()\n        Backbone.history.start(root: \"/web/\")\n"
  },
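A short sketch of the ";key=value" parameter scheme the routes above rely on (the filter values are made up): routes like "jobs(;*params)" hand the raw params string to getParamsMap, while buildLocationString and updateLocationParam go the other way when a filter changes.

# Made-up filter values, for illustration only.
params = modules.routes.getParamsMap("nameFilter=mail;statusFilter=running")
# => {nameFilter: "mail", statusFilter: "running"}
console.log modules.routes.buildLocationString("jobs", params)
# => "jobs;nameFilter=mail;statusFilter=running"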
  {
    "path": "tronweb/coffee/timeline.coffee",
    "content": "# Timeline\n# This file creates the D3-based timeline visualization for JobRuns.\nwindow.modules = window.modules || {}\nwindow.modules.timeline = module = {}\n\n\n# We pad the right bound of the timeline by {padding}% to ensure the latest bar is within the bounds of the chart.\n# E.g. If we have a 4 hour timeline => 14,400,000 ms * 1.02 = 14,688,000 ms => ~4 hours and 5 minutes are shown.\nmodule.padMaxDate = (dateRange, padding) ->\n    [minDate, maxDate] = (moment.tz(date, 'America/Los_Angeles') for date in dateRange)\n    delta = maxDate.diff(minDate)\n    maxDate.add('ms', delta * padding)\n    [minDate.toDate(), maxDate.toDate()]\n\n# Higher-order function that creates accessors for object methods.\n# We use this throughout the timeline to extract data from timeline entries (e.g. start, end, etc.).\ncall = (field) -> (item) -> item[field]()\n\n\n# The actual rendering of the timeline visualization. There is a decent amount of styling happening here, but I\n# think that's okay because it's all quite specific to the timeline and we don't really reuse this code elsewhere.\nclass module.TimelineView extends Backbone.View\n\n    el: \"#timeline-graph\"\n\n    initialize: (options) =>\n        @margins = _.extend({ top: 30, right: 40, bottom: 20, left: 60 }, options.margins)\n        verticalMargins = @margins.top + @margins.bottom\n        dataHeight = @model?.length * 30 || 0\n        @height = options.height || Math.max(dataHeight + verticalMargins, 100)\n        @width = options.width || @$el.innerWidth()\n        @minBarWidth = options.minBarWidth || 5\n\n    innerHeight: =>\n        @height - @margins.bottom - @margins.top\n\n    innerWidth: =>\n        @width - @margins.left - @margins.right\n\n    # Build the x-axis scale based on the start and end times of the data.\n    buildX: (data) =>\n        minDate = d3.min(data, call('getStart'))\n        maxDate = d3.max(data, call('getEnd'))\n\n        if minDate and maxDate\n            if maxDate - minDate < 300000  # When the time range is less than 5 minutes (300000 ms) we expand it. This just looks better.\n                maxDate = new Date(minDate.getTime() + 300000)\n        else\n            # This is a fallback in case we don't have any data. 
We show a 5 minute window regardless.\n            now = new Date()\n            minDate = new Date(now.getTime() - 300000)\n            maxDate = now\n\n        domain = [minDate, maxDate]\n        domain = module.padMaxDate(domain, 0.02)  # Hardcoded padding of 2%\n\n        d3.scaleTime()\n            .domain(domain)\n            .range([0, @innerWidth()])\n\n    # Build the y-axis scale based on the number of runs.\n    buildY: (data) =>\n        d3.scaleBand()\n            .domain(data)\n            .range([0, @innerHeight()])\n            .padding(0.1)\n\n    # Building the horizontal grid lines so that we can actually know which bar belongs to which run when looking at more than a few runs.\n    buildGrid: (svg, x, y) =>\n        svg.append(\"g\")\n            .attr(\"class\", \"grid-lines\")\n            .selectAll('.horizontal-grid')\n            .data(y.domain())\n            .join('line')\n            .filter((d, i) -> i % 2 is 0)  # Only show every other line to avoid visual clutter.\n            .attr('class', 'horizontal-grid')\n            .attr('x1', 0)\n            .attr('x2', @innerWidth())\n            .attr('y1', (d) -> y(d) + y.bandwidth() / 2)\n            .attr('y2', (d) -> y(d) + y.bandwidth() / 2)\n            .style('stroke', '#e0e0e0')\n            .style('stroke-dasharray', '3,3')\n\n\n    buildAxis: (x, y) =>\n        # TODO: Test this with a larger run history window. It looks better with fewer ticks, but I've been testing with frequent runs.\n        # In the original version of this function we could get into a situation where the tick labels would overlap and become illegible.\n        # One solution could be to set custom intervals based on the span of the data:\n        #   d3.timeFormat(\"%H:%M\") Hours + Minutes for short spans\n        #   d3.timeFormat(\"%a %d\") Day + Date for short-medium spans\n        #   d3.timeFormat(\"%b %d\") Month + Date for medium-long spans\n        #   d3.timeFormat(\"%b %Y\") Month + Year for looong spans\n        xAxis = d3.axisTop(x)\n            .ticks(5)\n            .tickSize(-@innerHeight())\n            .tickPadding(5)\n\n        yAxis = d3.axisLeft(y)\n            .tickSize(0)\n            .tickPadding(5)\n\n        { xAxis, yAxis }\n\n\n    buildSvg: =>\n        d3.select(@el).append(\"svg\")\n            .attr(\"height\", @height)\n            .attr(\"width\", @width)\n            .attr(\"class\", \"timeline-chart\")\n            .append(\"g\")\n            .attr(\"transform\", \"translate(#{@margins.left}, #{@margins.top})\")\n\n\n    buildSvgAxis: (svg, xAxis, yAxis) =>\n        svg.append(\"g\")\n            .attr(\"class\", \"x axis\")\n            .call(xAxis)\n            .selectAll('line')\n            .style('stroke', '#e0e0e0')\n            .style('stroke-opacity', 0.7)\n\n        yAxisGroup = svg.append(\"g\")\n            .attr(\"class\", \"y axis\")\n            .call(yAxis)\n\n        yAxisGroup.selectAll('.tick')\n            .each (d) ->\n                tick = d3.select(this)\n                tick.select('text').remove()\n                tick.append('a')\n                    .attr('href', d.getYAxisLink())\n                    .append('text')\n                    .attr(\"x\", -5)\n                    .attr(\"y\", 0)\n                    .attr(\"dy\", \".32em\")\n                    .text(d.getYAxisText())\n                    .style(\"fill\", \"#333\")\n                    .style(\"font-size\", \"12px\")\n                    .style(\"text-anchor\", \"end\")\n\n\n    buildSvgBars: (svg, data, x, 
y) =>\n        # Calculate the width of the bar, ensuring that even short runs have a visible bar.\n        getWidth = (d) =>\n            _.max([@minBarWidth, x(d.getEnd()) - x(d.getStart())])\n\n        svg.selectAll('.bar')\n            .data(data)\n            .join('rect')\n            .attr(\"class\", (d) -> \"bar #{d.getBarClass()}\")\n            .attr(\"x\", (d) -> x(d.getStart()))\n            .attr(\"width\", getWidth)\n            .attr(\"y\", (d) -> y(d))\n            .attr(\"height\", (d) -> y.bandwidth())\n\n\n    render: =>\n        @$el.html('')\n        data = @model\n\n        x = @buildX(data)\n        y = @buildY(data)\n\n        {xAxis, yAxis} = @buildAxis(x, y)\n\n        svg = @buildSvg()\n\n        @buildGrid(svg, x, y)\n\n        @buildSvgAxis(svg, xAxis, yAxis)\n        @buildSvgBars(svg, data, x, y)\n        @\n"
  },
  {
    "path": "tronweb/coffee/views.coffee",
    "content": "# Views\n# This file provides reusable view components and UI utilities used across Tronweb (e.g. consistent\n# date formatting, tooltips, filtering, and interactive elements).\nwindow.modules = window.modules || {}\nwindow.modules.views = module = {}\n\n\n# Note about subview\n# Subviews need to re-delegate events, because they are lost\n# when superviews re-render\n\n# Print the date as a string describing the elapsed time\nwindow.dateFromNow = (string, defaultString='never') ->\n    template = _.template \"\"\"\n        <span title=\"<%= formatted %>\" class=\"tt-enable\" data-placement=\"top\">\n            <%= delta %>\n        </span>\n        \"\"\"\n\n    label_template = _.template \"\"\"\n        <span class=\"label label-<%= type %>\"><%= delta %></span>\n        \"\"\"\n\n    if string\n        date = moment.tz string, 'America/Los_Angeles'\n        formatted = date.format('MMM, Do YYYY, h:mm:ss a')\n        delta = label_template\n            delta: date.fromNow()\n            type: \"clear\"\n    else\n        formatted = defaultString\n        delta = label_template\n            delta: defaultString\n            type: \"important\"\n    template(formatted: formatted, delta: delta)\n\n\nwindow.getDuration = (time) ->\n    days = '0'\n    if time.indexOf(\"day\") != -1\n        [dayStr, time] = time.split(',')\n        [days, day]= dayStr.split(' ')\n    [time, ms] = time.split('.')\n    [hours, minutes, seconds] = time.split(':')\n    moment.duration\n        days: parseInt(days)\n        hours: parseInt(hours)\n        minutes: parseInt(minutes)\n        seconds: parseInt(seconds)\n\n\nwindow.formatDuration = (duration) ->\n    template = _.template \"\"\"\n        <span class=\"label label-clear tt-enable\" title=\"<%= duration %>\">\n          <%= humanized %>\n        </span>\n    \"\"\"\n    humanize = getDuration(duration).humanize()\n    template(duration: duration, humanized: humanize)\n\n\n# If params match, return \"selected\". 
Used for select boxes\nwindow.isSelected = (current, value) ->\n    if current == value then \"selected\" else \"\"\n\nwindow.makeTooltips = (root) ->\n    root.find('.tt-enable').tooltip({\n        # There is some strange behaviour with the tooltip animation in Bootstrap 2\n        # that causes the object to get into a bad state and not appear as expected.\n        # Disabling animation is a workaround until we upgrade to Bootstrap 3 (or 4...or 5)\n        animation: false\n    })\n\n\nwindow.formatName = (name) =>\n    name.replace(/\\./g, '.<wbr/>').replace(/_/g, '_<wbr/>')\n\n\nwindow.formatState = (state) =>\n    \"\"\"<span class=\"label #{state}\">#{state}</span>\"\"\"\n\n\nwindow.formatDelay = (delay) ->\n    if delay\n        \"\"\"<small> (retry delayed for #{Math.round(delay)}s)</small>\"\"\"\n    else\n        \"\"\n\nmodule.makeHeaderToggle = (root) ->\n    headers = root.find('.outline-block h2')\n    headers.click (event) -> $(event.target).nextAll().slideToggle()\n    headers.addClass('clickable')\n\n\nclass window.FilterView extends Backbone.View\n\n    tagName: \"div\"\n\n    className: \"\"\n\n    defaultIcon: \"icon-filter\"\n\n    filterIcons:\n        name:       \"icon-filter\"\n        node_pool:  \"icon-connected\"\n        state:      \"icon-switchon\"\n        status:     \"icon-switchon\"\n\n    filterTemplate: _.template \"\"\"\n        <div class=\"input-prepend\">\n          <input type=\"text\" id=\"filter-<%= filterName %>\"\n                 value=\"<%= defaultValue %>\"\n                 class=\"input-medium\"\n                 autocomplete=\"off\"\n                 placeholder=\"<%= _.str.humanize(filterName) %>\"\n                 data-filter-name=\"<%= filterName %>Filter\">\n          <i class=\"<%= icon %> icon-grey\"></i>\n        </div>\n    \"\"\"\n\n    template: _.template \"\"\"\n        <form class=\"filter-form\">\n          <div class=\"control-group outline-block\">\n            <div class=\"controls\">\n            <div class=\"span1 toggle-header\"\n                title=\"Toggle Filters\">Filters</div>\n                <%= filters.join('') %>\n            </div>\n          </div>\n        </form>\n        \"\"\"\n\n    getFilterTemplate: (filterName) =>\n        createName = \"create#{filterName}\"\n        if @[createName] then @[createName] else @filterTemplate\n\n    renderFilters: =>\n        createFilter = (filterName) =>\n            template = @getFilterTemplate(filterName)\n            template\n                defaultValue: @model.get(\"#{filterName}Filter\")\n                filterName: filterName\n                icon: @filterIcons[filterName] || @defaultIcon\n\n        filters = _.map((k for k of @model.filterTypes), createFilter)\n        @$el.html @template(filters: filters)\n\n    render: =>\n        @renderFilters()\n        @delegateEvents()\n        makeTooltips(@$el)\n        @\n\n    events:\n        \"keyup input\":   \"filterChange\"\n        \"submit\":        \"submit\"\n        \"change input\":  \"filterDone\"\n        \"change select\": \"selectFilterChange\"\n\n    getFilterFromEvent: (event) =>\n        filterEle = $(event.target)\n        [filterEle.data('filterName'), filterEle.val()]\n\n    filterChange: (event) =>\n        [filterName, filterValue] = @getFilterFromEvent(event)\n        @model.set(filterName, filterValue)\n        @trigger('filter:change', filterName, filterValue)\n\n    filterDone: (event) ->\n        [filterName, filterValue] = @getFilterFromEvent(event)\n        @trigger('filter:done', 
filterName, filterValue)\n        window.modules.routes.updateLocationParam(filterName, filterValue)\n\n    selectFilterChange: (event) =>\n        @filterChange(event)\n        @filterDone(event)\n\n    submit: (event) ->\n        event.preventDefault()\n\n\nclass window.RefreshToggleView extends Backbone.View\n\n    initialize: ->\n        @listenTo(mainView, 'closeView', @model.disableRefresh)\n        @listenTo(@model, 'refresh', @triggerRefresh)\n\n    tagName: \"div\"\n\n    className: \"refresh-view pull-right\"\n\n    attributes:\n        \"type\":             \"button\"\n        \"data-toggle\":      \"button\"\n\n    template: _.template \"\"\"\n        <span class=\"muted\"><%= text %></span>\n        <button class=\"btn btn-clear tt-enable <%= active %>\"\n            title=\"Toggle Refresh\"\n            data-placement=\"top\">\n            <i class=\"icon-refresh icon-white\"></i>\n        </button>\n        \"\"\"\n\n    render: =>\n        if @model.enabled\n            text = \"Refresh #{ @model.interval / 1000 }s\"\n            active = \"active\"\n        else\n            text = active = \"\"\n        @$el.html @template(text: text, active: active)\n        # See note about subview\n        @delegateEvents()\n        makeTooltips(@$el)\n        @\n\n    events:\n        \"click button\":        \"toggle\"\n\n    toggle: (event) =>\n        @model.toggle(event)\n        @render()\n\n    triggerRefresh: =>\n        @trigger('refreshView')\n\n\nclass window.ClickableListEntry extends Backbone.View\n\n    className: ->\n        \"clickable\"\n\n    events:\n        \"click\":    \"propagateClick\"\n\n    propagateClick: (event) =>\n        if event.button == 0\n            document.location = @$('a').first().attr('href')\n\n\nmodule.makeSlider = (root, options) ->\n    root.find('.slider-bar').slider(options)\n\n\nclass module.SliderView extends Backbone.View\n\n    initialize: (options) ->\n        options = options || {}\n        # NOTE: 50 is somewhat arbitrarily picked as the default value - that should be the most common run_limit\n        @displayCount = options.displayCount || 50\n\n    tagName: \"div\"\n\n    className: \"list-controls controls-row\"\n\n    template: \"\"\"\n            <div class=\"span1\">\n              <span id=\"display-count\" class=\"label label-inverse\"></span>\n            </div>\n            <div class=\"slider-bar span10\"></div>\n        \"\"\"\n\n    handleSliderMove: (event, ui) =>\n        @updateDisplayCount(ui.value)\n        @trigger('slider:change', ui.value)\n\n    updateDisplayCount: (count) =>\n        @displayCount = count\n        content = \"\"\"#{count} / #{@model.length()}\"\"\"\n        @$('#display-count').html(content)\n\n    render: ->\n        @$el.html @template\n        @updateDisplayCount(_.min([@model.length(), @displayCount]))\n        module.makeSlider @$el,\n            max: @model.length()\n            min: 0\n            range: 'min'\n            value: @displayCount\n            slide: @handleSliderMove\n        @\n"
  },
  {
    "path": "tronweb/css/codemirror.css",
    "content": "/* BASICS */\n\n.CodeMirror {\n  /* Set height, width, borders, and global font properties here */\n  font-family: monospace;\n  height: auto;\n}\n.CodeMirror-scroll {\n  /* Set scrolling behaviour here */\n  overflow: auto;\n}\n\n/* PADDING */\n\n.CodeMirror-lines {\n  padding: 4px 0; /* Vertical padding around content */\n}\n.CodeMirror pre {\n  padding: 0 4px; /* Horizontal padding of content */\n}\n\n.CodeMirror-scrollbar-filler {\n  background-color: white; /* The little square between H and V scrollbars */\n}\n\n/* GUTTER */\n\n.CodeMirror-gutters {\n  border-right: 1px solid #ddd;\n  background-color: #f7f7f7;\n}\n.CodeMirror-linenumbers {}\n.CodeMirror-linenumber {\n  padding: 0 3px 0 5px;\n  min-width: 20px;\n  text-align: right;\n  color: #999;\n}\n\n/* CURSOR */\n\n.CodeMirror div.CodeMirror-cursor {\n  border-left: 1px solid black;\n  z-index: 3;\n}\n/* Shown when moving in bi-directional text */\n.CodeMirror div.CodeMirror-secondarycursor {\n  border-left: 1px solid silver;\n}\n.CodeMirror.cm-keymap-fat-cursor div.CodeMirror-cursor {\n  width: auto;\n  border: 0;\n  background: #7e7;\n  z-index: 1;\n}\n/* Can style cursor different in overwrite (non-insert) mode */\n.CodeMirror div.CodeMirror-cursor.CodeMirror-overwrite {}\n\n.cm-tab { display: inline-block; }\n\n/* DEFAULT THEME */\n\n.cm-s-default .cm-keyword {color: #708;}\n.cm-s-default .cm-atom {color: #219;}\n.cm-s-default .cm-number {color: #164;}\n.cm-s-default .cm-def {color: #00f;}\n.cm-s-default .cm-variable {color: black;}\n.cm-s-default .cm-variable-2 {color: #05a;}\n.cm-s-default .cm-variable-3 {color: #085;}\n.cm-s-default .cm-property {color: black;}\n.cm-s-default .cm-operator {color: black;}\n.cm-s-default .cm-comment {color: #a50;}\n.cm-s-default .cm-string {color: #a11;}\n.cm-s-default .cm-string-2 {color: #f50;}\n.cm-s-default .cm-meta {color: #555;}\n.cm-s-default .cm-error {color: #f00;}\n.cm-s-default .cm-qualifier {color: #555;}\n.cm-s-default .cm-builtin {color: #30a;}\n.cm-s-default .cm-bracket {color: #997;}\n.cm-s-default .cm-tag {color: #170;}\n.cm-s-default .cm-attribute {color: #00c;}\n.cm-s-default .cm-header {color: blue;}\n.cm-s-default .cm-quote {color: #090;}\n.cm-s-default .cm-hr {color: #999;}\n.cm-s-default .cm-link {color: #00c;}\n\n.cm-negative {color: #d44;}\n.cm-positive {color: #292;}\n.cm-header, .cm-strong {font-weight: bold;}\n.cm-em {font-style: italic;}\n.cm-link {text-decoration: underline;}\n\n.cm-invalidchar {color: #f00;}\n\ndiv.CodeMirror span.CodeMirror-matchingbracket {color: #0f0;}\ndiv.CodeMirror span.CodeMirror-nonmatchingbracket {color: #f22;}\n\n/* STOP */\n\n/* The rest of this file contains styles related to the mechanics of\n   the editor. You probably shouldn't touch them. */\n\n.CodeMirror {\n  line-height: 1;\n  position: relative;\n  overflow: hidden;\n  background: white;\n  color: black;\n}\n\n.CodeMirror-scroll {\n  /* 30px is the magic margin used to hide the element's real scrollbars */\n  /* See overflow: hidden in .CodeMirror, and the paddings in .CodeMirror-sizer */\n  margin-bottom: -30px; margin-right: -30px;\n  padding-bottom: 30px; padding-right: 30px;\n  height: 100%;\n  outline: none; /* Prevent dragging from highlighting the element */\n  position: relative;\n}\n.CodeMirror-sizer {\n  position: relative;\n}\n\n/* The fake, visible scrollbars. Used to force redraw during scrolling\n   before actuall scrolling happens, thus preventing shaking and\n   flickering artifacts. 
*/\n.CodeMirror-vscrollbar, .CodeMirror-hscrollbar, .CodeMirror-scrollbar-filler {\n  position: absolute;\n  z-index: 6;\n  display: none;\n}\n.CodeMirror-vscrollbar {\n  right: 0; top: 0;\n  overflow-x: hidden;\n  overflow-y: scroll;\n}\n.CodeMirror-hscrollbar {\n  bottom: 0; left: 0;\n  overflow-y: hidden;\n  overflow-x: scroll;\n}\n.CodeMirror-scrollbar-filler {\n  right: 0; bottom: 0;\n  z-index: 6;\n}\n\n.CodeMirror-gutters {\n  position: absolute; left: 0; top: 0;\n  height: 100%;\n  padding-bottom: 30px;\n  z-index: 3;\n}\n.CodeMirror-gutter {\n  height: 100%;\n  padding-bottom: 30px;\n  margin-bottom: -32px;\n  display: inline-block;\n  /* Hack to make IE7 behave */\n  *zoom:1;\n  *display:inline;\n}\n.CodeMirror-gutter-elt {\n  position: absolute;\n  cursor: default;\n  z-index: 4;\n}\n\n.CodeMirror-lines {\n  cursor: text;\n}\n.CodeMirror pre {\n  /* Reset some styles that the rest of the page might have set */\n  -moz-border-radius: 0; -webkit-border-radius: 0; border-radius: 0;\n  border-width: 0;\n  background: transparent;\n  font-family: inherit;\n  font-size: inherit;\n  margin: 0;\n  white-space: pre;\n  word-wrap: normal;\n  line-height: inherit;\n  color: inherit;\n  z-index: 2;\n  position: relative;\n  overflow: visible;\n}\n.CodeMirror-wrap pre {\n  word-wrap: break-word;\n  white-space: pre-wrap;\n  word-break: normal;\n}\n.CodeMirror-linebackground {\n  position: absolute;\n  left: 0; right: 0; top: 0; bottom: 0;\n  z-index: 0;\n}\n\n.CodeMirror-linewidget {\n  position: relative;\n  z-index: 2;\n  overflow: auto;\n}\n\n.CodeMirror-widget {\n  display: inline-block;\n}\n\n.CodeMirror-wrap .CodeMirror-scroll {\n  overflow-x: hidden;\n}\n\n.CodeMirror-measure {\n  position: absolute;\n  width: 100%; height: 0px;\n  overflow: hidden;\n  visibility: hidden;\n}\n.CodeMirror-measure pre { position: static; }\n\n.CodeMirror div.CodeMirror-cursor {\n  position: absolute;\n  visibility: hidden;\n  border-right: none;\n  width: 0;\n}\n.CodeMirror-focused div.CodeMirror-cursor {\n  visibility: visible;\n}\n\n.CodeMirror-selected { background: #d9d9d9; }\n.CodeMirror-focused .CodeMirror-selected { background: #d7d4f0; }\n\n.cm-searching {\n  background: #ffa;\n  background: rgba(255, 255, 0, .4);\n}\n\n/* IE7 hack to prevent it from returning funny offsetTops on the spans */\n.CodeMirror span { *vertical-align: text-bottom; }\n\n@media print {\n  /* Hide the cursor when printing */\n  .CodeMirror div.CodeMirror-cursor {\n    visibility: hidden;\n  }\n}\n"
  },
  {
    "path": "tronweb/css/tronweb.less",
    "content": "\n@import url(//fonts.googleapis.com/css?family=Open+Sans:300,400,700,800);\n@import url(//fonts.googleapis.com/css?family=Droid+Sans+Mono);\n\n@font-face {\n}\n\n/******************************************************************************\n *  Global\n *****************************************************************************/\nbody {\n    background: #f0f0f0;\n}\n\n@text: #222;\n\n.clear-border {\n    .round-corners()\n}\n\n.round-corners (@top-left: 0, @top-right: 0, @bottom-right: 0, @bottom-left: 0) {\n  -webkit-border-radius: @top-left @top-right @bottom-right @bottom-left;\n  -moz-border-radius:    @top-left @top-right @bottom-right @bottom-left;\n  border-radius:         @top-left @top-right @bottom-right @bottom-left;\n}\n\n.clear-box-shadow {\n    -webkit-box-shadow: none;\n    -moz-box-shadow: none;\n    box-shadow: none;\n}\n.clear-text-shadow {\n    text-shadow: none;\n}\n\n.mono-font {\n    font-family: 'Droid Sans Mono', monospace;\n}\n\ncode {\n    .clear-border;\n    padding: 10px;\n    color: #000;\n    .mono-font;\n}\n\ncode.command {\n    display: block;\n    white-space: pre-wrap;\n    background: #f0f0f0;\n    color: #000;\n    border: 0;\n}\n\n.clickable {\n    cursor: pointer;\n}\n\n.table-condensed td {\n    border: 0\n}\n\nh1 {\n    font-size: 20px;\n    line-height: 40px;\n    padding: 10px 20px;\n    .header;\n    font-variant: small-caps;\n    margin: 20px 0;\n\n    .pull-right {\n        margin-top: -1px;\n    }\n    small {\n        font-size: 20px;\n        color: #ddd;\n    }\n    a {\n        color: inherit;\n    }\n    a:hover {\n        text-decoration: underline;\n        color: inherit;\n    }\n    > i[class^=\"icon-\"] {\n        margin: 8px 5px 0 0;\n    }\n}\n\n.header {\n    background-image: url(../img/ui-bg_diagonals-small_10_555_40x40.png);\n    color: #fff;\n    border: #555 1px solid;\n    font-weight: bold;\n}\n\nh2 {\n    text-align: left;\n    padding: 0 0 0 10px;\n    font-size: 16px;\n    font-weight: bold;\n    font-variant: small-caps;\n    .header;\n}\n\n.block-header {\n    .header;\n\n    h3, h2 {\n        font-size: 16px;\n        font-weight: bold;\n        font-variant: small-caps;\n    }\n}\n\n.outline-border {\n    border: @medium-grey 1px solid;\n}\n\n.outline-block {\n\n    h2 {\n        margin: 0;\n        text-align: left;\n        padding: 0 0 0 10px;\n        font-size: 16px;\n        font-weight: bold;\n        font-variant: small-caps;\n    }\n\n    > table tbody,\n    > div\n    {\n        .outline-border;\n        border-top: 0;\n        margin-bottom: 0;\n       background: #fff;\n    }\n    .border-top {\n        border-top: @medium-grey 1px solid;\n    }\n\n    table td {\n       background: #fff;\n    }\n    margin-bottom: 20px;\n}\n\n.table td {\n    border-top: 0;\n}\n\ninput,\n.input\n{\n    outline: 0;\n    .clear-box-shadow;\n    .round-corners(3px, 3px, 3px, 3px);\n    border: 1px solid transparent;\n    background: none;\n}\n\ninput:focus,\ninput.focused,\n.input-focus\n{\n    background: white;\n    border-color: #aaa;\n}\n\n#version {\n    text-align: center;\n    color: white;\n    margin-bottom: 5px;\n}\n\n\n/******************************************************************************\n *  Colors\n *****************************************************************************/\n@green: #218E0B;\n@blue: #2F47B8;\n@yellow: #A6790D;\n@orange: #D66600;\n@red: #BA434F;\n\n@hl-text: #FFF;\n@dark-blue: #0F2277;\n@dark-yellow: #FAA732;\n@dark-red: #A10C1A;\n\n@light-blue: 
#6177DB;\n@light-yellow: #FFDC88;\n\n@medium-grey: #999;\n\n\n\n/******************************************************************************\n *  Icons\n *****************************************************************************/\n.label-icon {\n    margin-right: 5px;\n    i {\n        font-size: 16px;\n    }\n}\n\ni[class^=\"icon-\"] {\n    margin-top: 2px;\n    font-style: normal;\n    color: #333;\n}\n\ni[class^=\"icon-\"].icon-white {\n    color: white;\n    .clear-text-shadow;\n}\ni[class^=\"icon-\"].icon-grey {\n    color: #aaa;\n    .clear-text-shadow;\n}\n\n.icon-circlepauseempty:before,\n.icon-layers:before,\n.icon-treediagram:before {\n    color: @dark-blue;\n}\n.icon-remove-circle:before {\n    color: @dark-red;\n}\n.icon-repeatone:before {\n    color: #333;\n}\n\n/******************************************************************************\n *  Navbar\n *****************************************************************************/\n.navbar {\n    .brand {\n        color: white;\n        font-weight: 800;\n        .clear-text-shadow;\n        span {\n            color: @light-yellow;\n        }\n    }\n\n    .navbar-inner {\n        .clear-box-shadow;\n        background: @medium-grey;\n        border: 0;\n    }\n\n    .nav > li.active {\n        > a,\n        > a:hover,\n        > a:active {\n            .clear-box-shadow;\n            .clear-text-shadow;\n            background: @light-yellow;\n            color: white;\n        }\n    }\n    .nav > li {\n\n        > a {\n            color: white;\n            .clear-text-shadow;\n            font-weight: bold;\n            i {\n                color: white;\n                .clear-text-shadow;\n                font-weight: normal;\n                margin-right: 7px;\n            }\n        }\n\n        > a:hover {\n            color: white;\n            .clear-text-shadow;\n        }\n    }\n}\n\n\n.search-placeholder {\n    color: #ddd;\n}\n\n.navbar-search {\n    position: relative;\n\n    .search-query,\n    .search-query:focus,\n    .search-query.focused {\n        padding: 2px 10px 2px 27px;\n        margin-top: 2px;\n        line-height: 20px;\n        border: 1px solid #eee;\n        .input;\n    }\n    .search-query {\n        color: #eee;\n    }\n    .search-query:focus,\n    .search-query.focused {\n        color: #333;\n        .input-focus;\n        ~ .icon-search {\n            color: #333;\n        }\n    }\n\n    .icon-search {\n        position: absolute;\n        top: 6px;\n        left: 8px;\n        color: white;\n    }\n    input:-moz-placeholder {\n        .search-placeholder;\n    }\n\n    input::-webkit-input-placeholder {\n        .search-placeholder\n    }\n\n}\n\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus\n{\n    background: @light-yellow;\n    color: #000;\n}\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus\n{\n    background: white;\n    color: #333;\n}\n\n.typeahead.dropdown-menu {\n    .clear-box-shadow;\n    border: 1px solid #333;\n    max-width: 600px;\n    right: -1px;\n\n    a {\n        padding: 3px 10px;\n        text-align: right;\n        overflow: hidden;\n    }\n}\n\n\n/******************************************************************************\n *  Button\n *****************************************************************************/\n.btn {\n    padding: 0;\n    line-height: 30px;\n    width: 32px;\n    border: 0;\n}\n\n.btn-inverse {\n    background: #333;\n    
.clear-box-shadow;\n}\n\n.btn-inverse.active {\n    background: #080808;\n}\n\n.modal-header {\n    h3 > i[class^=\"icon-\"] {\n        font-weight: normal;\n        margin: 4px 5px 0 0;\n    }\n    button.btn-clear {\n        float: right;\n    }\n}\n\n.modal-header button.close:hover {\n    background: #222;\n}\n\n.btn-clear {\n    background: none;\n    .clear-box-shadow;\n}\n\n.btn-clear:hover {\n    background: #222;\n}\n\n.btn-clear.active {\n    background: #222;\n}\n\n\n/******************************************************************************\n *  Filters\n *****************************************************************************/\n\n.filter-form .controls .input-prepend {\n    margin: 0 20px 0 0;\n\n    i[class^=\"icon-\"] {\n        font-size: 12px;\n    }\n}\n\n.filter-form .controls {\n    background: none;\n    border: 0;\n    padding: 0;\n}\n\n.placeholder {\n    color: #999;\n    font-size: 13px;\n}\n\nform.filter-form {\n    .input-prepend {\n        position: relative;\n        .round-corners(3px, 3px, 3px, 3px)\n    }\n\n    input,\n    select\n    {\n        .input;\n        padding: 3px 10px 3px 26px;\n    }\n\n    input:focus,\n    select:focus,\n    input:active,\n    select:active\n    {\n        .input-focus;\n    }\n\n    .filter-select {\n        select {\n            background: transparent;\n            -webkit-appearance: none;\n            padding: 0 0 0 20px;\n            color: #999;\n            font-size: 13px;\n            line-height: 13px;\n\n            option {\n                color: #333;\n            }\n        }\n    }\n\n    input:-moz-placeholder {\n        .placeholder;\n    }\n\n    input::-webkit-input-placeholder {\n        .placeholder;\n    }\n\n    i {\n        position: absolute;\n        top: 6px;\n        left: 7px;\n        z-index: 3;\n    }\n\n}\n\n.filter-form .toggle-header {\n    margin: 0 40px 0 0 ;\n    padding: 4px 0 4px 5px;\n    display: inline-block;\n    font-weight: bold;\n}\n\n/******************************************************************************\n *  Slider\n *****************************************************************************/\n.list-controls {\n    padding: 12px 0;\n}\n\n.ui-slider {\n    position: relative;\n    background-color: @medium-grey;\n    margin-top: 8px;\n}\n.ui-slider-handle {\n    position: absolute;\n    z-index: 2;\n    padding-left: 1px;\n    background-color: @dark-yellow;\n    border: @yellow 1px solid;\n}\n.ui-slider-handle:hover {\n}\n.ui-slider-handle.ui-state-focus {\n}\n.ui-slider-horizontal {\n    height: 5px;\n}\n.ui-slider-horizontal .ui-slider-handle {\n    width: 17px;\n    height: 14px;\n    top: -5px;\n    margin-left: -8px;\n}\n\n.ui-slider-range {\n      background: @dark-yellow;\n      height: 100%;\n}\n\n/******************************************************************************\n *  Refresh widget\n *****************************************************************************/\ndiv.refresh-view span {\n    font-size: 14px;\n    vertical-align: middle;\n    margin-right: 4px;\n    font-weight: normal;\n}\n\n\n/******************************************************************************\n *  Outlined tables\n *****************************************************************************/\n.table.table-outline {\n    margin-bottom: 0;\n}\n.table.table-outline td a {\n    color: @text;\n}\n\n.table.table-outline tbody tr > td\n{\n    background: #fff;\n    color: @text;\n}\n\n.table.table-outline tbody tr:hover > td\n{\n    background: 
@light-yellow;\n}\n\n.table.table-outline thead.sub-header,\n.sub-header {\n    background-color: @medium-grey;\n    color: @hl-text;\n}\n\n.table-striped tbody tr:nth-child(even) > td {\n    background-color: rgba(0, 0, 0, 0.06);\n}\n\n/******************************************************************************\n *  Details tables\n *****************************************************************************/\n.table.details tbody tr {\n\n    td {\n       color: @text;\n       border-top: 0;\n    }\n\n    td:first-child {\n        font-weight: bold;\n    }\n}\n\n#event-list,\n.table.details {\n    margin-bottom: 0;\n}\n\n/******************************************************************************\n *  Popover / Tooltips\n *****************************************************************************/\n\n.cy-tooltip {\n    padding: 0 !important;\n    border-radius: 4px;\n    box-shadow: 0 2px 8px rgba(0,0,0,0.2);\n    overflow: hidden;\n}\n\n.tooltip-header {\n    display: flex;\n    justify-content: space-between;\n    align-items: center;\n    padding: 8px 12px;\n    background-color: #f5f5f5;\n    border-bottom: 1px solid #ddd;\n}\n\n.tooltip-header h4 {\n    margin: 0;\n    font-size: 14px;\n    font-weight: bold;\n    overflow: hidden;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n    max-width: 300px;\n}\n\n.tooltip-content {\n    padding: 0;\n}\n\n.cy-tooltip code.command,\n.tooltip-content code.command {\n    display: block;\n    white-space: pre-wrap;\n    word-break: break-all;\n    margin: 0;\n    font-family: monospace;\n    font-size: 12px;\n    background-color: #2d2d2d !important;\n    color: #f8f8f2 !important;\n    padding: 10px 12px;\n    border-radius: 0 0 4px 4px;\n    border: 0;\n}\n\n/******************************************************************************\n *  Modal\n *****************************************************************************/\n.modal {\n    .clear-border;\n}\n\n.modal-header {\n    .block-header;\n}\n\n#action-graph .modal.fade.in {\n    top: 3%;\n    width: auto;\n    margin-left: 75px;\n    left: 0;\n}\n\n.modal-body {\n    max-height: none;\n}\n\n.top-right-corner {\n   position: absolute;\n   top: -35px;\n   right: 10px;\n}\n\n/******************************************************************************\n *  Dashboard\n *****************************************************************************/\n\n.status-box {\n    padding: 2.2em 20px 20px;\n    margin: 0 10px 10px 0;\n    height: 100px;\n    width: 117px;\n    font-size: 12px;\n    font-weight: bold;\n    position: relative;\n}\n\n.status-box a {\n    color: @hl-text;\n}\n\n.status-box .count {\n    display: inline-block !important;\n    position: absolute;\n    top: 0;\n    right: 0;\n    padding: 3px 7px;\n    background: #333;\n    color: @hl-text;\n}\n\n.status-box i[class^=\"icon-\"] {\n    margin: 1px 5px 0 0;\n}\n\n// TODO: TRON-2386 - We don't use most of these for colour as we have the base styles below (like .success or .info). 
Only disabled makes it to the page.\n.status-box.disabled {\n    background: @dark-yellow;\n}\n.status-box.unknown,\n.status-box.failed,\n.status-box.degraded\n{\n    background: @dark-red;\n}\n\n.success\n{\n    background: @green;\n    color: @hl-text;\n}\n\n.warning\n{\n    background: @yellow;\n    color: @hl-text;\n}\n\n.error\n{\n    background: @dark-red;\n    color: @hl-text;\n}\n\n.info\n{\n    background: @blue;\n    color: @hl-text;\n}\n\n.pending {\n    background: @medium-grey;\n    color: @hl-text;\n}\n\n.unknown\n{\n    background: @orange;\n    color: @hl-text;\n}\n\n#all-view > .dashboard-view {\n    width: auto;\n    margin-right: 20px;\n}\n\n\n/*****************************************************************************\n *  States\n *****************************************************************************/\n// TODO: TRON-2386 - Remove some of these? Degraded and up seem to be old states that are no longer used.\n.enabled,\n.up,\n.succeeded\n{\n    .success;\n}\n\n.running,\n.starting\n{\n    .info;\n}\n\n.cancelled,\n.disabled,\n.skipped\n{\n    .warning;\n}\n\n.failed,\n.degraded,\n.label-important\n{\n    .error;\n}\n\n.scheduled,\n.waiting,\n.queued\n{\n    .pending;\n}\n\n// Not pictured above is the .unknown state which is the only consumer of @orange for now\n\n/******************************************************************************\n *  Label\n *****************************************************************************/\n\n.label-manual {\n    background: @blue;\n    padding: 2px 4px;\n\n    i {\n        margin-top: 0;\n    }\n}\n\n.jobruns .label-manual {\n    margin-left: 10px;\n}\n\n.label.label-clear {\n    background: none;\n    color: #333;\n    .clear-text-shadow;\n}\n\n.badge.badge-success {\n    .success\n}\n\n.badge.badge-important {\n    .error\n}\n\n\n/******************************************************************************\n *  Misc\n *****************************************************************************/\n\n.scheduler {\n    .mono-font;\n}\n\n\n/******************************************************************************\n *  Stdout / Stderr\n *****************************************************************************/\n\npre.stdout, pre.stderr {\n    margin-bottom: 0;\n}\n\n\n/******************************************************************************\n * Timeline\n *****************************************************************************/\n\n#timeline-graph {\n\n    .axis path,\n    .axis line {\n        fill: none;\n        stroke: #000;\n        shape-rendering: crispEdges;\n    }\n\n    .axis.x .tick.major line {\n        stroke-width: 0;\n\n    }\n    .axis.x g.tick.major:nth-child(2n+2) line {\n        stroke: #ddd;\n        stroke-width: 1px;\n        shape-rendering: crispEdges;\n    }\n\n    .axis text {\n        font-weight: bold;\n        font-size: 12px;\n    }\n\n    .axis.y text {\n        text-anchor: end;\n    }\n\n    .axis.y line,\n    .axis.y > g > text\n    {\n        display: none;\n    }\n\n\n    .bar {\n      stroke-width: 0;\n      fill: @blue;\n      shape-rendering: crispEdges;\n    }\n\n    .bar.failed {\n        fill: @dark-red;\n    }\n\n    .bar.succeeded {\n        fill: @green;\n    }\n\n    .bar.queued,\n    .bar.scheduled {\n        fill: #ccc;\n    }\n\n    .bar.cancelled {\n        fill: @yellow;\n    }\n\n\n}\n"
  },
  {
    "path": "tronweb/css/whhg.css",
    "content": "@font-face {\n    font-family: 'WebHostingHub-Glyphs';\n    src: url(../fonts/webhostinghub-glyphs.ttf) format('truetype');\n    font-weight: normal;\n    font-style: normal;\n    -moz-font-feature-settings: \"calt=0,liga=0\";\n}\n[class^=\"icon-\"], [class*=\" icon-\"] {\n    font-family:'WebHostingHub-Glyphs';\n    background:none;\n    width:auto;\n    height:auto;\n    font-style:normal}\n.icon-aaabattery:before{content:'\\f413'}\n.icon-abacus:before{content:'\\f261'}\n.icon-accountfilter:before{content:'\\f05e'}\n.icon-acsource:before{content:'\\f3ea'}\n.icon-addfriend:before{content:'\\f3da'}\n.icon-address:before{content:'\\f08f'}\n.icon-addshape:before{content:'\\f1fd'}\n.icon-addtocart:before{content:'\\f394'}\n.icon-addtolist:before{content:'\\f2ac'}\n.icon-adjust:before{content:'\\f484'}\n.icon-adobe:before{content:'\\f1c9'}\n.icon-ads-bilboard:before{content:'\\f082'}\n.icon-affiliate:before{content:'\\f01e'}\n.icon-ajax:before{content:'\\f06f'}\n.icon-alarm:before{content:'\\f233'}\n.icon-alarmalt:before{content:'\\f23d'}\n.icon-album-cover:before{content:'\\f19f'}\n.icon-alertalt:before{content:'\\f2b4'}\n.icon-alertpay:before{content:'\\f269'}\n.icon-algorhythm:before{content:'\\f0b8'}\n.icon-alienship:before{content:'\\f41f'}\n.icon-alienware:before{content:'\\f3be'}\n.icon-align-center:before{content:'\\f1d9'}\n.icon-align-justify:before{content:'\\f1da'}\n.icon-align-left:before{content:'\\f1d7'}\n.icon-align-right:before{content:'\\f1d8'}\n.icon-alignbottomedge:before{content:'\\f1d3'}\n.icon-alignhorizontalcenter:before{content:'\\f1d2'}\n.icon-alignleftedge:before{content:'\\f1d6'}\n.icon-alignrightedge:before{content:'\\f1d5'}\n.icon-aligntopedge:before{content:'\\f1d4'}\n.icon-alignverticalcenter:before{content:'\\f1d1'}\n.icon-amd:before{content:'\\f020'}\n.icon-analogdown:before{content:'\\f2cb'}\n.icon-analogleft:before{content:'\\f2c8'}\n.icon-analogright:before{content:'\\f2c9'}\n.icon-analogup:before{content:'\\f2ca'}\n.icon-analytics-piechart:before{content:'\\f000'}\n.icon-analyticsalt-piechartalt:before{content:'\\f001'}\n.icon-anchor-port:before{content:'\\f21d'}\n.icon-android:before{content:'\\f12a'}\n.icon-angrybirds:before{content:'\\f3c1'}\n.icon-antenna:before{content:'\\f3ec'}\n.icon-apache-feather:before{content:'\\f056'}\n.icon-aperture:before{content:'\\f356'}\n.icon-appointment-agenda:before{content:'\\f26c'}\n.icon-archive:before{content:'\\f171'}\n.icon-arrow-down:before{content:'\\f2fe'}\n.icon-arrow-left:before{content:'\\f305'}\n.icon-arrow-right:before{content:'\\f304'}\n.icon-arrow-up:before{content:'\\f301'}\n.icon-asterisk:before{content:'\\f317'}\n.icon-asteriskalt:before{content:'\\002a'}\n.icon-at:before{content:'\\40'}\n.icon-atari:before{content:'\\f3b9'}\n.icon-authentication-keyalt:before{content:'\\f051'}\n.icon-automobile-car:before{content:'\\f239'}\n.icon-autorespond:before{content:'\\f08e'}\n.icon-avatar:before{content:'\\f15a'}\n.icon-avataralt:before{content:'\\f161'}\n.icon-avengers:before{content:'\\f342'}\n.icon-awstats:before{content:'\\f04c'}\n.icon-axe:before{content:'\\f2ef'}\n.icon-backup-vault:before{content:'\\f004'}\n.icon-backupalt-vaultalt:before{content:'\\f005'}\n.icon-backupwizard:before{content:'\\f05f'}\n.icon-backward:before{content:'\\f183'}\n.icon-bag:before{content:'\\f234'}\n.icon-baloon:before{content:'\\f405'}\n.icon-ban-circle:before{content:'\\f313'}\n.icon-banana:before{content:'\\f3f4'}\n.icon-bandwidth:before{content:'\\f006'}\n.icon-bank:before{content:'\\f262'}\n.icon-barchart:
before{content:'\\f02f'}\n.icon-barchartalt:before{content:'\\f07d'}\n.icon-barcode:before{content:'\\f276'}\n.icon-basecamp:before{content:'\\f160'}\n.icon-basketball:before{content:'\\f2e9'}\n.icon-bat:before{content:'\\f3d3'}\n.icon-batman:before{content:'\\f348'}\n.icon-batteryaltcharging:before{content:'\\f104'}\n.icon-batteryaltfull:before{content:'\\f101'}\n.icon-batteryaltsixty:before{content:'\\f102'}\n.icon-batteryaltthird:before{content:'\\f103'}\n.icon-batterycharged:before{content:'\\f0f4'}\n.icon-batterycharging:before{content:'\\f0f3'}\n.icon-batteryeighty:before{content:'\\f0f9'}\n.icon-batteryempty:before{content:'\\f0f5'}\n.icon-batteryforty:before{content:'\\f0f7'}\n.icon-batteryfull:before{content:'\\f0fa'}\n.icon-batterysixty:before{content:'\\f0f8'}\n.icon-batterytwenty:before{content:'\\f0f6'}\n.icon-bed:before{content:'\\f2b9'}\n.icon-beer:before{content:'\\f244'}\n.icon-bell:before{content:'\\2407'}\n.icon-bigger:before{content:'\\f30a'}\n.icon-bill:before{content:'\\f278'}\n.icon-binary:before{content:'\\f087'}\n.icon-binoculars-searchalt:before{content:'\\f2a0'}\n.icon-birdhouse:before{content:'\\f390'}\n.icon-birthday:before{content:'\\f36b'}\n.icon-bishop:before{content:'\\f2f9'}\n.icon-blackberry:before{content:'\\f421'}\n.icon-blankstare:before{content:'\\f13e'}\n.icon-blogger-blog:before{content:'\\f167'}\n.icon-bluetooth:before{content:'\\f12b'}\n.icon-bluetoothconnected:before{content:'\\f386'}\n.icon-boardgame:before{content:'\\f2d9'}\n.icon-boat:before{content:'\\f21a'}\n.icon-bold:before{content:'\\f1f4'}\n.icon-bomb:before{content:'\\f2dc'}\n.icon-bone:before{content:'\\f35f'}\n.icon-book:before{content:'\\f1ba'}\n.icon-bookmark:before{content:'\\f143'}\n.icon-boombox:before{content:'\\f195'}\n.icon-bottle:before{content:'\\f361'}\n.icon-bow:before{content:'\\f2ee'}\n.icon-bowling:before{content:'\\f2f3'}\n.icon-bowlingpins:before{content:'\\f3d2'}\n.icon-bowtie:before{content:'\\f37f'}\n.icon-boxtrapper-mousetrap:before{content:'\\f046'}\n.icon-braces:before{content:'\\f0b4'}\n.icon-braille0:before{content:'\\f44b'}\n.icon-braille1:before{content:'\\f44c'}\n.icon-braille2:before{content:'\\f44d'}\n.icon-braille3:before{content:'\\f44e'}\n.icon-braille4:before{content:'\\f44f'}\n.icon-braille5:before{content:'\\f450'}\n.icon-braille6:before{content:'\\f451'}\n.icon-braille7:before{content:'\\f452'}\n.icon-braille8:before{content:'\\f453'}\n.icon-braille9:before{content:'\\f454'}\n.icon-braillea:before{content:'\\f431'}\n.icon-brailleb:before{content:'\\f432'}\n.icon-braillec:before{content:'\\f433'}\n.icon-brailled:before{content:'\\f434'}\n.icon-braillee:before{content:'\\f435'}\n.icon-braillef:before{content:'\\f436'}\n.icon-brailleg:before{content:'\\f437'}\n.icon-brailleh:before{content:'\\f438'}\n.icon-braillei:before{content:'\\f439'}\n.icon-braillej:before{content:'\\f43a'}\n.icon-braillek:before{content:'\\f43b'}\n.icon-braillel:before{content:'\\f43c'}\n.icon-braillem:before{content:'\\f43d'}\n.icon-braillen:before{content:'\\f43e'}\n.icon-brailleo:before{content:'\\f43f'}\n.icon-braillep:before{content:'\\f440'}\n.icon-brailleq:before{content:'\\f441'}\n.icon-brailler:before{content:'\\f442'}\n.icon-brailles:before{content:'\\f443'}\n.icon-braillespace:before{content:'\\f455'}\n.icon-braillet:before{content:'\\f444'}\n.icon-brailleu:before{content:'\\f445'}\n.icon-braillev:before{content:'\\f446'}\n.icon-braillew:before{content:'\\f447'}\n.icon-braillex:before{content:'\\f448'}\n.icon-brailley:before{content:'\\f449'}\n.icon-braillez:before{co
ntent:'\\f44a'}\n.icon-brain:before{content:'\\f3e3'}\n.icon-bread:before{content:'\\f42f'}\n.icon-breakable:before{content:'\\f41c'}\n.icon-briefcase:before{content:'\\f25e'}\n.icon-briefcasethree:before{content:'\\f25f'}\n.icon-briefcasetwo:before{content:'\\f0a2'}\n.icon-brightness:before{content:'\\f10a'}\n.icon-brightnessfull:before{content:'\\f10b'}\n.icon-brightnesshalf:before{content:'\\f10c'}\n.icon-broom:before{content:'\\f40a'}\n.icon-browser:before{content:'\\f159'}\n.icon-brush:before{content:'\\f1b8'}\n.icon-bucket:before{content:'\\f1b5'}\n.icon-bug:before{content:'\\f0a7'}\n.icon-bullhorn:before{content:'\\f287'}\n.icon-bus:before{content:'\\f241'}\n.icon-businesscardalt:before{content:'\\f137'}\n.icon-buttona:before{content:'\\f2bf'}\n.icon-buttonb:before{content:'\\f2c0'}\n.icon-buttonx:before{content:'\\f2c1'}\n.icon-buttony:before{content:'\\f2c2'}\n.icon-cactus-desert:before{content:'\\f22c'}\n.icon-calculator:before{content:'\\f258'}\n.icon-calculatoralt:before{content:'\\f265'}\n.icon-calendar:before{content:'\\f20f'}\n.icon-calendaralt-cronjobs:before{content:'\\f0a1'}\n.icon-camera:before{content:'\\f19b'}\n.icon-candle:before{content:'\\f29a'}\n.icon-candy:before{content:'\\f42d'}\n.icon-candycane:before{content:'\\f37d'}\n.icon-cannon:before{content:'\\f401'}\n.icon-canvas:before{content:'\\f1c8'}\n.icon-canvasrulers:before{content:'\\f205'}\n.icon-capacitator:before{content:'\\f3e8'}\n.icon-capslock:before{content:'\\21ea'}\n.icon-captainamerica:before{content:'\\f341'}\n.icon-carrot:before{content:'\\f3f2'}\n.icon-cashregister:before{content:'\\f26e'}\n.icon-cassette:before{content:'\\f377'}\n.icon-cd-dvd:before{content:'\\f0cd'}\n.icon-certificate:before{content:'\\f277'}\n.icon-certificatealt:before{content:'\\f058'}\n.icon-certificatethree:before{content:'\\f059'}\n.icon-cgi:before{content:'\\f086'}\n.icon-cgicenter:before{content:'\\f079'}\n.icon-chair:before{content:'\\2441'}\n.icon-chat:before{content:'\\f162'}\n.icon-check:before{content:'\\f310'}\n.icon-checkboxalt:before{content:'\\f311'}\n.icon-checkin:before{content:'\\f223'}\n.icon-checkinalt:before{content:'\\f227'}\n.icon-chef:before{content:'\\f3ce'}\n.icon-cherry:before{content:'\\f35d'}\n.icon-chevron-down:before{content:'\\f48b'}\n.icon-chevron-left:before{content:'\\f489'}\n.icon-chevron-right:before{content:'\\f488'}\n.icon-chevron-up:before{content:'\\f48a'}\n.icon-chevrons:before{content:'\\f0b5'}\n.icon-chicken:before{content:'\\f359'}\n.icon-chocolate:before{content:'\\f367'}\n.icon-christiancross:before{content:'\\f40f'}\n.icon-christmastree:before{content:'\\f37b'}\n.icon-chrome:before{content:'\\f14e'}\n.icon-cigarette:before{content:'\\f229'}\n.icon-circle-arrow-down:before{content:'\\f475'}\n.icon-circle-arrow-left:before{content:'\\f472'}\n.icon-circle-arrow-right:before{content:'\\f473'}\n.icon-circle-arrow-up:before{content:'\\f474'}\n.icon-circleadd:before{content:'\\f0d1'}\n.icon-circledelete:before{content:'\\f0d2'}\n.icon-circledown:before{content:'\\f3c7'}\n.icon-circleleft:before{content:'\\f3c6'}\n.icon-circleright:before{content:'\\f3c9'}\n.icon-circleselect:before{content:'\\f0d3'}\n.icon-circleselection:before{content:'\\f1b1'}\n.icon-circleup:before{content:'\\f3c8'}\n.icon-clearformatting:before{content:'\\f1e7'}\n.icon-clipboard-paste:before{content:'\\f0cb'}\n.icon-clockalt-timealt:before{content:'\\f22b'}\n.icon-closetab:before{content:'\\f170'}\n.icon-closewindow:before{content:'\\f16e'}\n.icon-cloud:before{content:'\\f0b9'}\n.icon-clouddownload:before{content:'\\f
0bb'}\n.icon-cloudhosting:before{content:'\\f007'}\n.icon-cloudsync:before{content:'\\f0bc'}\n.icon-cloudupload:before{content:'\\f0ba'}\n.icon-clubs:before{content:'\\f2f6'}\n.icon-cmd:before{content:'\\f33a'}\n.icon-cms:before{content:'\\f036'}\n.icon-cmsmadesimple:before{content:'\\f0b0'}\n.icon-codeigniter:before{content:'\\f077'}\n.icon-coffee:before{content:'\\f235'}\n.icon-coffeebean:before{content:'\\f366'}\n.icon-cog:before{content:'\\f00f'}\n.icon-colocation:before{content:'\\f024'}\n.icon-colocationalt:before{content:'\\f023'}\n.icon-colors:before{content:'\\f1e6'}\n.icon-comment:before{content:'\\f12c'}\n.icon-commentout:before{content:'\\f080'}\n.icon-commentround:before{content:'\\f155'}\n.icon-commentroundempty:before{content:'\\f156'}\n.icon-commentroundtyping:before{content:'\\f157'}\n.icon-commentroundtypingempty:before{content:'\\f158'}\n.icon-commenttyping:before{content:'\\f12d'}\n.icon-compass:before{content:'\\263c'}\n.icon-concretefive:before{content:'\\f0af'}\n.icon-contact-businesscard:before{content:'\\f040'}\n.icon-controllernes:before{content:'\\f2d2'}\n.icon-controllerps:before{content:'\\f2d1'}\n.icon-controllersnes:before{content:'\\f2d3'}\n.icon-controlpanel:before{content:'\\f008'}\n.icon-controlpanelalt:before{content:'\\f009'}\n.icon-cooling:before{content:'\\f00a'}\n.icon-coppermine:before{content:'\\f0a4'}\n.icon-copy:before{content:'\\f0c9'}\n.icon-copyright:before{content:'\\00a9'}\n.icon-coupon:before{content:'\\f254'}\n.icon-cpanel:before{content:'\\f072'}\n.icon-cplusplus:before{content:'\\f0b1'}\n.icon-cpu-processor:before{content:'\\f002'}\n.icon-cpualt-processoralt:before{content:'\\f003'}\n.icon-crayon:before{content:'\\f383'}\n.icon-createfile:before{content:'\\f0c6'}\n.icon-createfolder:before{content:'\\f0da'}\n.icon-creativecommons:before{content:'\\f1fc'}\n.icon-creditcard:before{content:'\\f279'}\n.icon-cricket:before{content:'\\f418'}\n.icon-croisant:before{content:'\\f29f'}\n.icon-crop:before{content:'\\f1af'}\n.icon-crown:before{content:'\\f28f'}\n.icon-csharp:before{content:'\\f0b2'}\n.icon-cssthree:before{content:'\\f06a'}\n.icon-cup-coffeealt:before{content:'\\f24b'}\n.icon-cupcake:before{content:'\\f35b'}\n.icon-curling:before{content:'\\f3d7'}\n.icon-cursor:before{content:'\\f0dc'}\n.icon-cut-scissors:before{content:'\\f0ca'}\n.icon-dagger:before{content:'\\2020'}\n.icon-danger:before{content:'\\f415'}\n.icon-dart:before{content:'\\f3d4'}\n.icon-darthvader:before{content:'\\f34a'}\n.icon-database:before{content:'\\f00b'}\n.icon-databaseadd:before{content:'\\f00c'}\n.icon-databasedelete:before{content:'\\f00d'}\n.icon-davidstar:before{content:'\\f40e'}\n.icon-dcsource:before{content:'\\f3e9'}\n.icon-dedicatedserver:before{content:'\\f00e'}\n.icon-deletefile:before{content:'\\f0c7'}\n.icon-deletefolder:before{content:'\\f0db'}\n.icon-delicious:before{content:'\\f152'}\n.icon-designcontest:before{content:'\\f351'}\n.icon-desklamp:before{content:'\\f412'}\n.icon-dialpad:before{content:'\\f399'}\n.icon-diamond:before{content:'\\2666'}\n.icon-diamonds:before{content:'\\f2f7'}\n.icon-die-dice:before{content:'\\f2d8'}\n.icon-diefive:before{content:'\\f3fb'}\n.icon-diefour:before{content:'\\f3fa'}\n.icon-dieone:before{content:'\\f3f7'}\n.icon-diesix:before{content:'\\f3fc'}\n.icon-diethree:before{content:'\\f3f9'}\n.icon-dietwo:before{content:'\\f3f8'}\n.icon-diode:before{content:'\\f3e7'}\n.icon-director:before{content:'\\f2ae'}\n.icon-diskspace:before{content:'\\f096'}\n.icon-distributehorizontalcenters:before{content:'\\f1dc'}\n.icon-di
stributeverticalcenters:before{content:'\\f1db'}\n.icon-divide:before{content:'\\00f7'}\n.icon-dna:before{content:'\\f409'}\n.icon-dnszone:before{content:'\\f07f'}\n.icon-document:before{content:'\\f0c2'}\n.icon-doghouse:before{content:'\\f38f'}\n.icon-dollar:before{content:'\\24'}\n.icon-dollaralt:before{content:'\\f259'}\n.icon-dolphinsoftware:before{content:'\\f064'}\n.icon-domain:before{content:'\\f01d'}\n.icon-domainaddon:before{content:'\\f053'}\n.icon-domino:before{content:'\\f3d5'}\n.icon-donut:before{content:'\\f3ca'}\n.icon-downleft:before{content:'\\f2ff'}\n.icon-download:before{content:'\\f47b'}\n.icon-download-alt:before{content:'\\f11a'}\n.icon-downright:before{content:'\\f300'}\n.icon-draft:before{content:'\\f172'}\n.icon-dreamweaver:before{content:'\\f1d0'}\n.icon-dribbble:before{content:'\\f14c'}\n.icon-dropmenu:before{content:'\\f0a5'}\n.icon-drupal:before{content:'\\f075'}\n.icon-drwho:before{content:'\\f3c0'}\n.icon-edit:before{content:'\\f47c'}\n.icon-editalt:before{content:'\\f0f2'}\n.icon-egg:before{content:'\\f407'}\n.icon-eightball:before{content:'\\f36e'}\n.icon-eject:before{content:'\\f199'}\n.icon-elipse:before{content:'\\f1bc'}\n.icon-emailalt:before{content:'\\f136'}\n.icon-emailexport:before{content:'\\f176'}\n.icon-emailforward:before{content:'\\f175'}\n.icon-emailforwarders:before{content:'\\f049'}\n.icon-emailimport:before{content:'\\f177'}\n.icon-emailrefresh:before{content:'\\f174'}\n.icon-emailtrace:before{content:'\\f091'}\n.icon-emergency:before{content:'\\f246'}\n.icon-emptycart:before{content:'\\f395'}\n.icon-enter:before{content:'\\f323'}\n.icon-envelope:before{content:'\\f028'}\n.icon-equalizer:before{content:'\\f18e'}\n.icon-equalizeralt:before{content:'\\f18f'}\n.icon-equals:before{content:'\\f30c'}\n.icon-eraser:before{content:'\\f1f1'}\n.icon-erroralt:before{content:'\\f05a'}\n.icon-euro:before{content:'\\20ac'}\n.icon-euroalt:before{content:'\\f25a'}\n.icon-evernote:before{content:'\\f17c'}\n.icon-exchange-currency:before{content:'\\f26b'}\n.icon-exclamation-sign:before{content:'\\f04a'}\n.icon-excludeshape:before{content:'\\f200'}\n.icon-exit:before{content:'\\f324'}\n.icon-explorerwindow:before{content:'\\f0d9'}\n.icon-exportfile:before{content:'\\f32f'}\n.icon-exposure:before{content:'\\f1de'}\n.icon-extinguisher:before{content:'\\f2b7'}\n.icon-eye-close:before{content:'\\f481'}\n.icon-eye-open:before{content:'\\f2b5'}\n.icon-eye-view:before{content:'\\f280'}\n.icon-eyedropper:before{content:'\\f1ad'}\n.icon-facebook:before{content:'\\f140'}\n.icon-facebookalt:before{content:'\\f14b'}\n.icon-facetime-video:before{content:'\\f19c'}\n.icon-factory:before{content:'\\f27a'}\n.icon-fantastico:before{content:'\\f0ae'}\n.icon-faq:before{content:'\\f099'}\n.icon-fast-backward:before{content:'\\f47e'}\n.icon-fast-forward:before{content:'\\f47f'}\n.icon-fastdown:before{content:'\\f31d'}\n.icon-fastleft:before{content:'\\f31a'}\n.icon-fastright:before{content:'\\f31b'}\n.icon-fastup:before{content:'\\f31c'}\n.icon-favoritefile:before{content:'\\f381'}\n.icon-favoritefolder:before{content:'\\f382'}\n.icon-featheralt-write:before{content:'\\f1c5'}\n.icon-fedora:before{content:'\\f3f1'}\n.icon-fence:before{content:'\\f2af'}\n.icon-file:before{content:'\\f0d6'}\n.icon-film:before{content:'\\f19d'}\n.icon-filmstrip:before{content:'\\f3ed'}\n.icon-filter:before{content:'\\f05c'}\n.icon-finder:before{content:'\\f398'}\n.icon-fire:before{content:'\\f27f'}\n.icon-firefox:before{content:'\\f420'}\n.icon-firewall:before{content:'\\f021'}\n.icon-firewire:before{c
ontent:'\\f0fc'}\n.icon-firstaid:before{content:'\\f2ba'}\n.icon-fish:before{content:'\\f35a'}\n.icon-fishbone:before{content:'\\f42b'}\n.icon-flag:before{content:'\\f487'}\n.icon-flagalt:before{content:'\\f232'}\n.icon-flagtriangle:before{content:'\\f20b'}\n.icon-flash:before{content:'\\f1cf'}\n.icon-flashlight:before{content:'\\f299'}\n.icon-flashplayer:before{content:'\\f070'}\n.icon-flaskfull:before{content:'\\f27e'}\n.icon-flickr:before{content:'\\f146'}\n.icon-flower:before{content:'\\f2a5'}\n.icon-flowernew:before{content:'\\f3a8'}\n.icon-folder-close:before{content:'\\f094'}\n.icon-folder-open:before{content:'\\f483'}\n.icon-foldertree:before{content:'\\f0f0'}\n.icon-font:before{content:'\\f1ae'}\n.icon-foodtray:before{content:'\\f3d0'}\n.icon-football-soccer:before{content:'\\f2eb'}\n.icon-forbiddenalt:before{content:'\\f314'}\n.icon-forest-tree:before{content:'\\f217'}\n.icon-forestalt-treealt:before{content:'\\f21c'}\n.icon-fork:before{content:'\\22d4'}\n.icon-forklift:before{content:'\\f29b'}\n.icon-form:before{content:'\\f08c'}\n.icon-forrst:before{content:'\\f14d'}\n.icon-fort:before{content:'\\f400'}\n.icon-forward:before{content:'\\f182'}\n.icon-fourohfour:before{content:'\\f09d'}\n.icon-foursquare:before{content:'\\f42a'}\n.icon-freeway:before{content:'\\f24a'}\n.icon-fridge:before{content:'\\f40d'}\n.icon-fries:before{content:'\\f36a'}\n.icon-ftp:before{content:'\\f029'}\n.icon-ftpaccounts:before{content:'\\f07b'}\n.icon-ftpsession:before{content:'\\f07c'}\n.icon-fullscreen:before{content:'\\f485'}\n.icon-gameboy:before{content:'\\f403'}\n.icon-gamecursor:before{content:'\\f2d0'}\n.icon-gasstation:before{content:'\\f216'}\n.icon-gearfour:before{content:'\\f3a7'}\n.icon-ghost:before{content:'\\f2da'}\n.icon-gift:before{content:'\\f260'}\n.icon-github:before{content:'\\f081'}\n.icon-glass:before{content:'\\f236'}\n.icon-glasses:before{content:'\\f295'}\n.icon-glassesalt:before{content:'\\f39d'}\n.icon-globe:before{content:'\\f01b'}\n.icon-globealt:before{content:'\\f36c'}\n.icon-glue:before{content:'\\f36d'}\n.icon-gmail:before{content:'\\f150'}\n.icon-golf:before{content:'\\f2f1'}\n.icon-googledrive:before{content:'\\f163'}\n.icon-googleplus:before{content:'\\f165'}\n.icon-googlewallet:before{content:'\\f270'}\n.icon-gpsoff-gps:before{content:'\\f21e'}\n.icon-gpson:before{content:'\\f21f'}\n.icon-gpu-graphicscard:before{content:'\\f108'}\n.icon-gradient:before{content:'\\2207'}\n.icon-grails:before{content:'\\f085'}\n.icon-greenlantern:before{content:'\\f340'}\n.icon-greenlightbulb:before{content:'\\f406'}\n.icon-grooveshark:before{content:'\\f3a2'}\n.icon-groups-friends:before{content:'\\f134'}\n.icon-guitar:before{content:'\\f19a'}\n.icon-halflife:before{content:'\\f3ba'}\n.icon-halo:before{content:'\\f3bb'}\n.icon-hamburger:before{content:'\\f2b3'}\n.icon-hammer:before{content:'\\f291'}\n.icon-hand-down:before{content:'\\f387'}\n.icon-hand-left:before{content:'\\f389'}\n.icon-hand-right:before{content:'\\f388'}\n.icon-hand-up:before{content:'\\f0dd'}\n.icon-handcuffs:before{content:'\\f393'}\n.icon-handdrag:before{content:'\\f0de'}\n.icon-handtwofingers:before{content:'\\f0df'}\n.icon-hanger:before{content:'\\f2ab'}\n.icon-happy:before{content:'\\f13c'}\n.icon-harrypotter:before{content:'\\f38b'}\n.icon-hdd:before{content:'\\f02a'}\n.icon-hdtv:before{content:'\\f1a0'}\n.icon-headphones:before{content:'\\f180'}\n.icon-headphonesalt:before{content:'\\f1a3'}\n.icon-heart:before{content:'\\f131'}\n.icon-heartempty-love:before{content:'\\f132'}\n.icon-hearts:before{content:'\
\f2f4'}\n.icon-helicopter:before{content:'\\f3e4'}\n.icon-hexagon-polygon:before{content:'\\f1be'}\n.icon-hockey:before{content:'\\f3d9'}\n.icon-home:before{content:'\\21b8'}\n.icon-homealt:before{content:'\\f02b'}\n.icon-hospital:before{content:'\\f247'}\n.icon-hotdog:before{content:'\\f3cc'}\n.icon-hotlinkprotection:before{content:'\\f050'}\n.icon-hourglassalt:before{content:'\\f122'}\n.icon-html:before{content:'\\f068'}\n.icon-htmlfive:before{content:'\\f069'}\n.icon-hydrant:before{content:'\\f3ff'}\n.icon-icecream:before{content:'\\f2a4'}\n.icon-icecreamalt:before{content:'\\f289'}\n.icon-illustrator:before{content:'\\f1ce'}\n.icon-imac:before{content:'\\f0fb'}\n.icon-images-gallery:before{content:'\\f09f'}\n.icon-importcontacts:before{content:'\\f092'}\n.icon-importfile:before{content:'\\f32e'}\n.icon-inbox:before{content:'\\f17a'}\n.icon-inboxalt:before{content:'\\f178'}\n.icon-incomingcall:before{content:'\\f15d'}\n.icon-indent-left:before{content:'\\f1f2'}\n.icon-indent-right:before{content:'\\f1f3'}\n.icon-indexmanager:before{content:'\\f09e'}\n.icon-infinity:before{content:'\\221e'}\n.icon-info-sign:before{content:'\\f315'}\n.icon-infographic:before{content:'\\f336'}\n.icon-ink:before{content:'\\f3f6'}\n.icon-inkpen:before{content:'\\f1ac'}\n.icon-insertbarchart:before{content:'\\f1e5'}\n.icon-insertpicture:before{content:'\\f1e0'}\n.icon-insertpicturecenter:before{content:'\\f1e3'}\n.icon-insertpictureleft:before{content:'\\f1e1'}\n.icon-insertpictureright:before{content:'\\f1e2'}\n.icon-insertpiechart:before{content:'\\f1e4'}\n.icon-instagram:before{content:'\\f14a'}\n.icon-install:before{content:'\\f128'}\n.icon-intel:before{content:'\\f01f'}\n.icon-intersection:before{content:'\\2229'}\n.icon-intersectshape:before{content:'\\f1ff'}\n.icon-invert:before{content:'\\f1df'}\n.icon-invoice:before{content:'\\f3e5'}\n.icon-ipcontrol:before{content:'\\f08b'}\n.icon-iphone:before{content:'\\f0e6'}\n.icon-ipod:before{content:'\\f190'}\n.icon-ironman:before{content:'\\f349'}\n.icon-islam:before{content:'\\f410'}\n.icon-island:before{content:'\\f392'}\n.icon-italic:before{content:'\\f1f5'}\n.icon-jar:before{content:'\\f2b6'}\n.icon-jason:before{content:'\\f38c'}\n.icon-java:before{content:'\\f083'}\n.icon-joomla:before{content:'\\f073'}\n.icon-joystickarcade:before{content:'\\f2d4'}\n.icon-joystickatari:before{content:'\\f2d5'}\n.icon-jquery:before{content:'\\f06b'}\n.icon-jqueryui:before{content:'\\f06c'}\n.icon-kerning:before{content:'\\f1e9'}\n.icon-key:before{content:'\\f093'}\n.icon-keyboard:before{content:'\\f119'}\n.icon-keyboardalt:before{content:'\\f105'}\n.icon-keyboarddelete:before{content:'\\f3a6'}\n.icon-kidney:before{content:'\\f3e0'}\n.icon-king:before{content:'\\f2fc'}\n.icon-knife:before{content:'\\f214'}\n.icon-knight:before{content:'\\f2fb'}\n.icon-knob:before{content:'\\f376'}\n.icon-lab-flask:before{content:'\\f27d'}\n.icon-lamp:before{content:'\\f2b1'}\n.icon-lan:before{content:'\\f0ee'}\n.icon-language:before{content:'\\f042'}\n.icon-laptop:before{content:'\\f0d8'}\n.icon-lasso:before{content:'\\f396'}\n.icon-lastfm:before{content:'\\f3a3'}\n.icon-laugh:before{content:'\\f13f'}\n.icon-law:before{content:'\\f263'}\n.icon-layers:before{content:'\\f1ca'}\n.icon-layersalt:before{content:'\\f1cb'}\n.icon-leaf:before{content:'\\f039'}\n.icon-leechprotect:before{content:'\\f07e'}\n.icon-legacyfilemanager:before{content:'\\f095'}\n.icon-lego:before{content:'\\f370'}\n.icon-lifeempty:before{content:'\\f2e1'}\n.icon-lifefull:before{content:'\\f2e3'}\n.icon-lifehacker:before{c
ontent:'\\f380'}\n.icon-lifehalf:before{content:'\\f2e2'}\n.icon-lifepreserver:before{content:'\\f015'}\n.icon-lightbulb-idea:before{content:'\\f338'}\n.icon-lighthouse:before{content:'\\f3e6'}\n.icon-lightning:before{content:'\\f231'}\n.icon-lightningalt:before{content:'\\f2a8'}\n.icon-line:before{content:'\\f1bf'}\n.icon-lineheight:before{content:'\\f1c0'}\n.icon-link:before{content:'\\f022'}\n.icon-linkalt:before{content:'\\f333'}\n.icon-linkedin:before{content:'\\f166'}\n.icon-linux:before{content:'\\f01a'}\n.icon-list:before{content:'\\f111'}\n.icon-list-alt:before{content:'\\f480'}\n.icon-liver:before{content:'\\f3e2'}\n.icon-loading-hourglass:before{content:'\\f123'}\n.icon-loadingalt:before{content:'\\f339'}\n.icon-lock:before{content:'\\f0be'}\n.icon-lock:before{content:'\\f0be'}\n.icon-lockalt-keyhole:before{content:'\\f0eb'}\n.icon-lollypop:before{content:'\\f3ee'}\n.icon-lungs:before{content:'\\f3df'}\n.icon-macpro:before{content:'\\f3a5'}\n.icon-macro-plant:before{content:'\\f1c6'}\n.icon-magazine:before{content:'\\f1ec'}\n.icon-magento:before{content:'\\f06e'}\n.icon-magnet:before{content:'\\f281'}\n.icon-mailbox:before{content:'\\f044'}\n.icon-mailinglists:before{content:'\\f090'}\n.icon-man-male:before{content:'\\f2a1'}\n.icon-managedhosting:before{content:'\\f038'}\n.icon-map:before{content:'\\f209'}\n.icon-map-marker:before{content:'\\f220'}\n.icon-marker:before{content:'\\f204'}\n.icon-marvin:before{content:'\\f3dd'}\n.icon-mastercard:before{content:'\\f266'}\n.icon-maximize:before{content:'\\f30f'}\n.icon-medal:before{content:'\\f2e5'}\n.icon-medalbronze:before{content:'\\f2e8'}\n.icon-medalgold:before{content:'\\f2e6'}\n.icon-medalsilver:before{content:'\\f2e7'}\n.icon-mediarepeat:before{content:'\\f187'}\n.icon-men:before{content:'\\f24c'}\n.icon-menu:before{content:'\\f127'}\n.icon-merge:before{content:'\\f334'}\n.icon-mergecells:before{content:'\\f327'}\n.icon-mergeshapes:before{content:'\\f201'}\n.icon-metro-subway:before{content:'\\f24f'}\n.icon-metronome:before{content:'\\f374'}\n.icon-mickeymouse:before{content:'\\f37a'}\n.icon-microphone:before{content:'\\f191'}\n.icon-microscope:before{content:'\\f283'}\n.icon-microsd:before{content:'\\f107'}\n.icon-microwave:before{content:'\\f42e'}\n.icon-mimetype:before{content:'\\f057'}\n.icon-minimize:before{content:'\\f30e'}\n.icon-minus:before{content:'\\2212'}\n.icon-minus-sign:before{content:'\\f477'}\n.icon-missedcall:before{content:'\\f15c'}\n.icon-mobile:before{content:'\\f0e8'}\n.icon-moleskine:before{content:'\\f1f0'}\n.icon-money-cash:before{content:'\\f27b'}\n.icon-moneybag:before{content:'\\f271'}\n.icon-monitor:before{content:'\\f0d5'}\n.icon-monstersinc:before{content:'\\f3bd'}\n.icon-moon-night:before{content:'\\f207'}\n.icon-mouse:before{content:'\\f0d4'}\n.icon-mousealt:before{content:'\\f126'}\n.icon-move:before{content:'\\f322'}\n.icon-movieclapper:before{content:'\\f193'}\n.icon-moviereel:before{content:'\\f17f'}\n.icon-muffin:before{content:'\\f363'}\n.icon-mug:before{content:'\\f24e'}\n.icon-mushroom:before{content:'\\f35e'}\n.icon-music:before{content:'\\f181'}\n.icon-musicalt:before{content:'\\f18d'}\n.icon-mutealt:before{content:'\\f0e5'}\n.icon-mxentry:before{content:'\\f07a'}\n.icon-mybb:before{content:'\\f065'}\n.icon-myspace:before{content:'\\f153'}\n.icon-mysql-dolphin:before{content:'\\f076'}\n.icon-nail:before{content:'\\f428'}\n.icon-navigation:before{content:'\\f23a'}\n.icon-network:before{content:'\\f0a6'}\n.icon-networksignal:before{content:'\\f3a9'}\n.icon-news:before{content:'\\f256'}\
n.icon-newtab:before{content:'\\f16f'}\n.icon-newwindow:before{content:'\\f16d'}\n.icon-next:before{content:'\\f18a'}\n.icon-nexus:before{content:'\\f0e7'}\n.icon-nintendods:before{content:'\\f404'}\n.icon-nodejs:before{content:'\\f084'}\n.icon-notes:before{content:'\\f0d7'}\n.icon-notificationbottom:before{content:'\\f144'}\n.icon-notificationtop:before{content:'\\f145'}\n.icon-nut:before{content:'\\f427'}\n.icon-off:before{content:'\\f11d'}\n.icon-office-building:before{content:'\\f245'}\n.icon-officechair:before{content:'\\f26d'}\n.icon-ok:before{content:'\\2713'}\n.icon-ok-circle:before{content:'\\f471'}\n.icon-ok-sign:before{content:'\\f479'}\n.icon-oneup:before{content:'\\f3b7'}\n.icon-oneupalt:before{content:'\\f3b6'}\n.icon-opencart:before{content:'\\f060'}\n.icon-opennewwindow:before{content:'\\f332'}\n.icon-orange:before{content:'\\f29e'}\n.icon-outbox:before{content:'\\f179'}\n.icon-outgoingcall:before{content:'\\f15e'}\n.icon-oxwall:before{content:'\\f06d'}\n.icon-pacman:before{content:'\\f2db'}\n.icon-pageback:before{content:'\\f31e'}\n.icon-pagebreak:before{content:'\\f1cc'}\n.icon-pageforward:before{content:'\\f31f'}\n.icon-pagesetup:before{content:'\\f331'}\n.icon-paintbrush:before{content:'\\f1e8'}\n.icon-paintroll:before{content:'\\f1fa'}\n.icon-palette-painting:before{content:'\\f1b9'}\n.icon-paperclip:before{content:'\\f284'}\n.icon-paperclipalt:before{content:'\\f285'}\n.icon-paperclipvertical:before{content:'\\f286'}\n.icon-paperplane:before{content:'\\f296'}\n.icon-parentheses:before{content:'\\f3c4'}\n.icon-parkeddomain:before{content:'\\f055'}\n.icon-password:before{content:'\\f03e'}\n.icon-passwordalt:before{content:'\\f03f'}\n.icon-pasta:before{content:'\\f408'}\n.icon-patch:before{content:'\\f2a3'}\n.icon-path:before{content:'\\f169'}\n.icon-pause:before{content:'\\f186'}\n.icon-paw-pet:before{content:'\\f29d'}\n.icon-pawn:before{content:'\\f2f8'}\n.icon-paypal:before{content:'\\f267'}\n.icon-peace:before{content:'\\f2a7'}\n.icon-pen:before{content:'\\f1ee'}\n.icon-pencil:before{content:'\\f1b7'}\n.icon-pepperoni:before{content:'\\f364'}\n.icon-percent:before{content:'\\25'}\n.icon-perl-camel:before{content:'\\f0b6'}\n.icon-perlalt:before{content:'\\f0b7'}\n.icon-phone-call:before{content:'\\f14f'}\n.icon-phonealt:before{content:'\\f15b'}\n.icon-phonebook:before{content:'\\f149'}\n.icon-phonebookalt:before{content:'\\f135'}\n.icon-phonemic:before{content:'\\f391'}\n.icon-phoneold:before{content:'\\f148'}\n.icon-photoshop:before{content:'\\f1cd'}\n.icon-php:before{content:'\\f09c'}\n.icon-phpbb:before{content:'\\f063'}\n.icon-phppear:before{content:'\\f09b'}\n.icon-piano:before{content:'\\f19e'}\n.icon-picture:before{content:'\\22b7'}\n.icon-pictureframe:before{content:'\\f41e'}\n.icon-piggybank:before{content:'\\f257'}\n.icon-pigpena:before{content:'\\f456'}\n.icon-pigpenb:before{content:'\\f457'}\n.icon-pigpenc:before{content:'\\f458'}\n.icon-pigpend:before{content:'\\f459'}\n.icon-pigpene:before{content:'\\f45a'}\n.icon-pigpenf:before{content:'\\f45b'}\n.icon-pigpeng:before{content:'\\f45c'}\n.icon-pigpenh:before{content:'\\f45d'}\n.icon-pigpeni:before{content:'\\f45e'}\n.icon-pigpenj:before{content:'\\f45f'}\n.icon-pigpenk:before{content:'\\f460'}\n.icon-pigpenl:before{content:'\\f461'}\n.icon-pigpenm:before{content:'\\f462'}\n.icon-pigpenn:before{content:'\\f463'}\n.icon-pigpeno:before{content:'\\f464'}\n.icon-pigpenp:before{content:'\\f465'}\n.icon-pigpenq:before{content:'\\f466'}\n.icon-pigpenr:before{content:'\\f467'}\n.icon-pigpens:before{content:'\\f468'
}\n.icon-pigpent:before{content:'\\f469'}\n.icon-pigpenu:before{content:'\\f46a'}\n.icon-pigpenv:before{content:'\\f46b'}\n.icon-pigpenw:before{content:'\\f46c'}\n.icon-pigpenx:before{content:'\\f46d'}\n.icon-pigpeny:before{content:'\\f46e'}\n.icon-pigpenz:before{content:'\\f46f'}\n.icon-pilcrow:before{content:'\\00b6'}\n.icon-pill-antivirusalt:before{content:'\\f0aa'}\n.icon-pin:before{content:'\\f20a'}\n.icon-pipe:before{content:'\\01c0'}\n.icon-piwigo:before{content:'\\f0ad'}\n.icon-pizza:before{content:'\\f35c'}\n.icon-placeadd:before{content:'\\f221'}\n.icon-placealt:before{content:'\\f224'}\n.icon-placealtadd:before{content:'\\f225'}\n.icon-placealtdelete:before{content:'\\f226'}\n.icon-placedelete:before{content:'\\f222'}\n.icon-placeios:before{content:'\\f20c'}\n.icon-plane:before{content:'\\f23e'}\n.icon-plaque:before{content:'\\f2b8'}\n.icon-play:before{content:'\\f184'}\n.icon-play-circle:before{content:'\\f17e'}\n.icon-playstore:before{content:'\\f255'}\n.icon-playvideo:before{content:'\\f03d'}\n.icon-plug:before{content:'\\f0ea'}\n.icon-pluginalt:before{content:'\\f098'}\n.icon-plus:before{content:'\\002b'}\n.icon-plus-sign:before{content:'\\f476'}\n.icon-pocket:before{content:'\\f16b'}\n.icon-podcast:before{content:'\\f1a2'}\n.icon-podium-winner:before{content:'\\f2d6'}\n.icon-pokemon:before{content:'\\f354'}\n.icon-police:before{content:'\\f2aa'}\n.icon-polygonlasso:before{content:'\\f397'}\n.icon-post:before{content:'\\f12e'}\n.icon-postalt:before{content:'\\f130'}\n.icon-pound:before{content:'\\f25b'}\n.icon-poundalt:before{content:'\\f25c'}\n.icon-powerjack:before{content:'\\f0fd'}\n.icon-powerplug:before{content:'\\f0ed'}\n.icon-powerplugeu:before{content:'\\f28b'}\n.icon-powerplugus:before{content:'\\f28c'}\n.icon-presentation:before{content:'\\f0c4'}\n.icon-prestashop:before{content:'\\f061'}\n.icon-pretzel:before{content:'\\f3cf'}\n.icon-preview:before{content:'\\f330'}\n.icon-previous:before{content:'\\f18b'}\n.icon-print:before{content:'\\f125'}\n.icon-protecteddirectory:before{content:'\\f04d'}\n.icon-pscircle:before{content:'\\f2bb'}\n.icon-pscursor:before{content:'\\f2c3'}\n.icon-psdown:before{content:'\\f2c6'}\n.icon-psleft:before{content:'\\f2c7'}\n.icon-pslone:before{content:'\\f2cc'}\n.icon-psltwo:before{content:'\\f2cd'}\n.icon-psright:before{content:'\\f2c5'}\n.icon-psrone:before{content:'\\f2ce'}\n.icon-psrtwo:before{content:'\\f2cf'}\n.icon-pssquare:before{content:'\\f2bc'}\n.icon-pstriangle:before{content:'\\f2bd'}\n.icon-psup:before{content:'\\f2c4'}\n.icon-psx:before{content:'\\f2be'}\n.icon-pull:before{content:'\\f089'}\n.icon-punisher:before{content:'\\f343'}\n.icon-push:before{content:'\\f088'}\n.icon-puzzle-plugin:before{content:'\\f0a0'}\n.icon-python:before{content:'\\f071'}\n.icon-qrcode:before{content:'\\f275'}\n.icon-quake:before{content:'\\f355'}\n.icon-queen:before{content:'\\f2fd'}\n.icon-query:before{content:'\\f08a'}\n.icon-question-sign:before{content:'\\f0a3'}\n.icon-quote:before{content:'\\f12f'}\n.icon-quotedown:before{content:'\\f329'}\n.icon-quoteup:before{content:'\\f328'}\n.icon-raceflag:before{content:'\\f38e'}\n.icon-racquet:before{content:'\\f2f2'}\n.icon-radio:before{content:'\\f1a1'}\n.icon-radioactive:before{content:'\\f282'}\n.icon-radiobutton:before{content:'\\f312'}\n.icon-railroad:before{content:'\\f248'}\n.icon-rain:before{content:'\\f22f'}\n.icon-ram:before{content:'\\f02c'}\n.icon-random:before{content:'\\f188'}\n.icon-rar:before{content:'\\f117'}\n.icon-raspberry:before{content:'\\f368'}\n.icon-raspberrypi:before{cont
ent:'\\f369'}\n.icon-rawaccesslogs:before{content:'\\f0c1'}\n.icon-razor:before{content:'\\f416'}\n.icon-reademail:before{content:'\\f173'}\n.icon-record:before{content:'\\f189'}\n.icon-rectangle:before{content:'\\25ad'}\n.icon-recycle:before{content:'\\f297'}\n.icon-reddit:before{content:'\\f154'}\n.icon-redirect:before{content:'\\f054'}\n.icon-refresh:before{content:'\\f078'}\n.icon-reliability:before{content:'\\f016'}\n.icon-remote:before{content:'\\f298'}\n.icon-remove:before{content:'\\00d7'}\n.icon-remove-circle:before{content:'\\f470'}\n.icon-remove-sign:before{content:'\\f478'}\n.icon-removefriend:before{content:'\\f3db'}\n.icon-repeat:before{content:'\\f32b'}\n.icon-repeatone:before{content:'\\f196'}\n.icon-resellerhosting:before{content:'\\f03a'}\n.icon-residentevil:before{content:'\\f350'}\n.icon-resistor:before{content:'\\f3eb'}\n.icon-resize:before{content:'\\f1ed'}\n.icon-resize-full:before{content:'\\f325'}\n.icon-resize-horizontal:before{content:'\\f318'}\n.icon-resize-small:before{content:'\\f326'}\n.icon-resize-vertical:before{content:'\\f319'}\n.icon-restart:before{content:'\\f11f'}\n.icon-restaurantmenu:before{content:'\\f362'}\n.icon-restore:before{content:'\\f30d'}\n.icon-restricted:before{content:'\\f0ab'}\n.icon-retweet:before{content:'\\f486'}\n.icon-rim:before{content:'\\f36f'}\n.icon-ring:before{content:'\\02da'}\n.icon-road:before{content:'\\f249'}\n.icon-roadsign-roadsignright:before{content:'\\f21b'}\n.icon-roadsignleft:before{content:'\\f240'}\n.icon-robocop:before{content:'\\f357'}\n.icon-rocket-launch:before{content:'\\f29c'}\n.icon-rook:before{content:'\\f2fa'}\n.icon-root:before{content:'\\f33c'}\n.icon-rorschach:before{content:'\\f358'}\n.icon-rotateclockwise:before{content:'\\f202'}\n.icon-rotatecounterclockwise:before{content:'\\f203'}\n.icon-roundrectangle:before{content:'\\f1bd'}\n.icon-route:before{content:'\\f402'}\n.icon-router:before{content:'\\f0e9'}\n.icon-rss:before{content:'\\f17b'}\n.icon-rubberstamp:before{content:'\\f274'}\n.icon-ruby:before{content:'\\f067'}\n.icon-ruler:before{content:'\\f1ef'}\n.icon-sad:before{content:'\\f13d'}\n.icon-safetypin:before{content:'\\f417'}\n.icon-satellite:before{content:'\\f38a'}\n.icon-satellitedish-remotemysql:before{content:'\\f0c0'}\n.icon-save-floppy:before{content:'\\f0c8'}\n.icon-scales:before{content:'\\f3fd'}\n.icon-science-atom:before{content:'\\f2b0'}\n.icon-scope-scan:before{content:'\\f212'}\n.icon-scopealt:before{content:'\\f237'}\n.icon-screenshot:before{content:'\\f109'}\n.icon-screw:before{content:'\\f426'}\n.icon-screwdriver:before{content:'\\f292'}\n.icon-screwdriveralt:before{content:'\\f293'}\n.icon-script:before{content:'\\f08d'}\n.icon-sd:before{content:'\\f106'}\n.icon-search:before{content:'\\f0c5'}\n.icon-searchdocument:before{content:'\\f419'}\n.icon-searchfolder:before{content:'\\f41a'}\n.icon-security-shield:before{content:'\\f02d'}\n.icon-securityalt-shieldalt:before{content:'\\f02e'}\n.icon-selection-rectangleselection:before{content:'\\f1b0'}\n.icon-selectionadd:before{content:'\\f1b2'}\n.icon-selectionintersect:before{content:'\\f1b4'}\n.icon-selectionremove:before{content:'\\f1b3'}\n.icon-seo:before{content:'\\f030'}\n.icon-server:before{content:'\\f026'}\n.icon-servers:before{content:'\\f027'}\n.icon-settingsandroid:before{content:'\\f309'}\n.icon-settingsfour-gearsalt:before{content:'\\f306'}\n.icon-settingsthree-gears:before{content:'\\f307'}\n.icon-settingstwo-gearalt:before{content:'\\f308'}\n.icon-shades-sunglasses:before{content:'\\f294'}\n.icon-shapes:before{conten
t:'\\f1dd'}\n.icon-share:before{content:'\\f47d'}\n.icon-share-alt:before{content:'\\f16c'}\n.icon-sharealt:before{content:'\\f147'}\n.icon-sharedfile:before{content:'\\f0ef'}\n.icon-sharedhosting:before{content:'\\f037'}\n.icon-sharethree:before{content:'\\f414'}\n.icon-sheriff:before{content:'\\f2a9'}\n.icon-shipping:before{content:'\\f23f'}\n.icon-shopping:before{content:'\\f010'}\n.icon-shopping-cart:before{content:'\\f035'}\n.icon-shoppingbag:before{content:'\\f273'}\n.icon-shortcut:before{content:'\\f043'}\n.icon-shovel:before{content:'\\f290'}\n.icon-shredder:before{content:'\\f27c'}\n.icon-shutdown:before{content:'\\f11e'}\n.icon-sidebar:before{content:'\\f124'}\n.icon-signal:before{content:'\\f100'}\n.icon-sim:before{content:'\\f0e1'}\n.icon-simalt:before{content:'\\f121'}\n.icon-skrill:before{content:'\\f268'}\n.icon-skull:before{content:'\\f38d'}\n.icon-skype:before{content:'\\f141'}\n.icon-skypeaway:before{content:'\\f39f'}\n.icon-skypebusy:before{content:'\\f3a0'}\n.icon-skypeoffline:before{content:'\\f3a1'}\n.icon-skypeonline:before{content:'\\f39e'}\n.icon-smaller:before{content:'\\f30b'}\n.icon-smf:before{content:'\\f062'}\n.icon-smile:before{content:'\\263a'}\n.icon-snow:before{content:'\\f22e'}\n.icon-snowman:before{content:'\\f37c'}\n.icon-socialnetwork:before{content:'\\f03b'}\n.icon-software:before{content:'\\f09a'}\n.icon-sortbynameascending-atoz:before{content:'\\f1c2'}\n.icon-sortbynamedescending-ztoa:before{content:'\\f1c1'}\n.icon-sortbysizeascending:before{content:'\\f1c3'}\n.icon-sortbysizedescending:before{content:'\\f1c4'}\n.icon-soundwave:before{content:'\\f194'}\n.icon-soup:before{content:'\\f3d1'}\n.icon-spaceinvaders:before{content:'\\f352'}\n.icon-spades:before{content:'\\f2f5'}\n.icon-spam:before{content:'\\f047'}\n.icon-spamalt:before{content:'\\f048'}\n.icon-spawn:before{content:'\\f344'}\n.icon-speaker:before{content:'\\f372'}\n.icon-speed:before{content:'\\f40b'}\n.icon-spider:before{content:'\\f346'}\n.icon-spiderman:before{content:'\\f347'}\n.icon-split:before{content:'\\f335'}\n.icon-spoon:before{content:'\\f213'}\n.icon-spray:before{content:'\\f1c7'}\n.icon-spreadsheet:before{content:'\\f0c3'}\n.icon-squareapp:before{content:'\\f26f'}\n.icon-squarebrackets:before{content:'\\f0b3'}\n.icon-ssh:before{content:'\\f04e'}\n.icon-sslmanager:before{content:'\\f04f'}\n.icon-stadium:before{content:'\\f3d6'}\n.icon-stamp:before{content:'\\f242'}\n.icon-stampalt:before{content:'\\f243'}\n.icon-star:before{content:'\\f13a'}\n.icon-star-empty:before{content:'\\f13b'}\n.icon-starempty:before{content:'\\f2de'}\n.icon-starfull:before{content:'\\f2e0'}\n.icon-starhalf:before{content:'\\f2df'}\n.icon-steak:before{content:'\\f360'}\n.icon-steam:before{content:'\\f2dd'}\n.icon-step-backward:before{content:'\\f198'}\n.icon-step-forward:before{content:'\\f197'}\n.icon-sticker:before{content:'\\f3f5'}\n.icon-stiletto:before{content:'\\f429'}\n.icon-stockdown:before{content:'\\f252'}\n.icon-stocks:before{content:'\\f250'}\n.icon-stockup:before{content:'\\f251'}\n.icon-stomach:before{content:'\\f3e1'}\n.icon-stop:before{content:'\\f185'}\n.icon-stopwatch:before{content:'\\f219'}\n.icon-storage-box:before{content:'\\f011'}\n.icon-storagealt-drawer:before{content:'\\f012'}\n.icon-store:before{content:'\\f272'}\n.icon-storm:before{content:'\\f230'}\n.icon-stove:before{content:'\\f371'}\n.icon-strawberry:before{content:'\\f3f3'}\n.icon-strikethrough:before{content:'\\f1f7'}\n.icon-student-school:before{content:'\\f288'}\n.icon-stumbleupon:before{content:'\\f40c'}\n.icon-subdom
ain:before{content:'\\f052'}\n.icon-submarine:before{content:'\\f373'}\n.icon-subscript:before{content:'\\f1ea'}\n.icon-subtractshape:before{content:'\\f1fe'}\n.icon-sum:before{content:'\\f33b'}\n.icon-sun-day:before{content:'\\f206'}\n.icon-sunnysideup:before{content:'\\f365'}\n.icon-superman:before{content:'\\f33f'}\n.icon-superscript:before{content:'\\f1eb'}\n.icon-support:before{content:'\\f013'}\n.icon-supportalt:before{content:'\\f014'}\n.icon-switch:before{content:'\\f28a'}\n.icon-switchoff:before{content:'\\f32d'}\n.icon-switchoffalt:before{content:'\\f28e'}\n.icon-switchon:before{content:'\\f32c'}\n.icon-switchonalt:before{content:'\\f28d'}\n.icon-sword:before{content:'\\f2ed'}\n.icon-sync:before{content:'\\f0bd'}\n.icon-syncalt:before{content:'\\f11c'}\n.icon-synckeeplocal:before{content:'\\f33e'}\n.icon-synckeepserver:before{content:'\\f33d'}\n.icon-syringe-antivirus:before{content:'\\f0a9'}\n.icon-tablet:before{content:'\\f118'}\n.icon-tabletennis-pingpong:before{content:'\\f2f0'}\n.icon-taco:before{content:'\\f3cd'}\n.icon-tag:before{content:'\\f032'}\n.icon-tagalt-pricealt:before{content:'\\f264'}\n.icon-tags:before{content:'\\f482'}\n.icon-tagvertical:before{content:'\\f15f'}\n.icon-tank:before{content:'\\f423'}\n.icon-target:before{content:'\\f2a6'}\n.icon-taskmanager-logprograms:before{content:'\\f04b'}\n.icon-tasks:before{content:'\\f0e0'}\n.icon-taxi:before{content:'\\f3a4'}\n.icon-tea:before{content:'\\f3cb'}\n.icon-teapot:before{content:'\\f42c'}\n.icon-telescope:before{content:'\\f3ef'}\n.icon-temperature-thermometer:before{content:'\\f20d'}\n.icon-temperaturealt-thermometeralt:before{content:'\\f20e'}\n.icon-tennis:before{content:'\\f2ea'}\n.icon-tent-camping:before{content:'\\f215'}\n.icon-terminal:before{content:'\\f114'}\n.icon-tethering:before{content:'\\f0f1'}\n.icon-tetrisone:before{content:'\\f34b'}\n.icon-tetristhree:before{content:'\\f34d'}\n.icon-tetristwo:before{content:'\\f34c'}\n.icon-text-height:before{content:'\\f1f8'}\n.icon-text-width:before{content:'\\f1f9'}\n.icon-th:before{content:'\\f110'}\n.icon-th-large:before{content:'\\f112'}\n.icon-th-list:before{content:'\\f113'}\n.icon-theather:before{content:'\\f39c'}\n.icon-theme-style:before{content:'\\f041'}\n.icon-thissideup:before{content:'\\f41d'}\n.icon-threecolumns:before{content:'\\f1ab'}\n.icon-thumbs-down:before{content:'\\f139'}\n.icon-thumbs-up:before{content:'\\f138'}\n.icon-ticket:before{content:'\\f3dc'}\n.icon-tictactoe:before{content:'\\f39a'}\n.icon-tie-business:before{content:'\\2040'}\n.icon-time:before{content:'\\f210'}\n.icon-timeline:before{content:'\\f253'}\n.icon-tint:before{content:'\\f208'}\n.icon-toast:before{content:'\\f2ad'}\n.icon-toiletpaper:before{content:'\\f384'}\n.icon-tooth:before{content:'\\f3de'}\n.icon-toothbrush:before{content:'\\f385'}\n.icon-tophat:before{content:'\\f3f0'}\n.icon-torigate:before{content:'\\f411'}\n.icon-touchpad:before{content:'\\f115'}\n.icon-trafficlight:before{content:'\\f22a'}\n.icon-transform:before{content:'\\f1a6'}\n.icon-trash:before{content:'\\f0ce'}\n.icon-trashempty:before{content:'\\f0cf'}\n.icon-trashfull:before{content:'\\f0d0'}\n.icon-travel:before{content:'\\f422'}\n.icon-treediagram:before{content:'\\f0ec'}\n.icon-treeornament:before{content:'\\f37e'}\n.icon-triangle:before{content:'\\25b3'}\n.icon-tron:before{content:'\\f34f'}\n.icon-trophy:before{content:'\\f2d7'}\n.icon-truck:before{content:'\\f211'}\n.icon-trumpet:before{content:'\\f375'}\n.icon-tumblr:before{content:'\\f164'}\n.icon-tv:before{content:'\\f1a4'}\n.icon-twitter
:before{content:'\\f16a'}\n.icon-twocolumnsleft:before{content:'\\f1a9'}\n.icon-twocolumnsleftalt:before{content:'\\f1aa'}\n.icon-twocolumnsright:before{content:'\\f1a7'}\n.icon-twocolumnsrightalt:before{content:'\\f1a8'}\n.icon-ubuntu:before{content:'\\f120'}\n.icon-umbrella:before{content:'\\f218'}\n.icon-underline:before{content:'\\f1f6'}\n.icon-undo:before{content:'\\f32a'}\n.icon-unlock:before{content:'\\f0bf'}\n.icon-upleft:before{content:'\\f302'}\n.icon-upload:before{content:'\\f47a'}\n.icon-uploadalt:before{content:'\\f11b'}\n.icon-upright:before{content:'\\f303'}\n.icon-uptime:before{content:'\\f017'}\n.icon-usb:before{content:'\\f10d'}\n.icon-usbalt:before{content:'\\f10e'}\n.icon-usbplug:before{content:'\\f10f'}\n.icon-user:before{content:'\\f133'}\n.icon-userfilter:before{content:'\\f05d'}\n.icon-usfootball:before{content:'\\f2ec'}\n.icon-value-coins:before{content:'\\f018'}\n.icon-vector:before{content:'\\f1b6'}\n.icon-vendetta:before{content:'\\f3c5'}\n.icon-video:before{content:'\\f17d'}\n.icon-viking:before{content:'\\f379'}\n.icon-vimeo:before{content:'\\f168'}\n.icon-vinyl:before{content:'\\f0cc'}\n.icon-violin:before{content:'\\f1a5'}\n.icon-virus:before{content:'\\f0a8'}\n.icon-visa:before{content:'\\f3c2'}\n.icon-visitor:before{content:'\\f097'}\n.icon-vlc-cone:before{content:'\\f192'}\n.icon-voice:before{content:'\\f18c'}\n.icon-volume-down:before{content:'\\f0e3'}\n.icon-volume-off:before{content:'\\f0e4'}\n.icon-volume-up:before{content:'\\f0e2'}\n.icon-vps:before{content:'\\f025'}\n.icon-wacom:before{content:'\\f1bb'}\n.icon-walle:before{content:'\\f3bc'}\n.icon-wallet:before{content:'\\e000'}\n.icon-warcraft:before{content:'\\f3bf'}\n.icon-warmedal:before{content:'\\f2e4'}\n.icon-warning-sign:before{content:'\\f316'}\n.icon-washer:before{content:'\\f39b'}\n.icon-watch:before{content:'\\f378'}\n.icon-watertap-plumbing:before{content:'\\f22d'}\n.icon-wave-sea:before{content:'\\f23c'}\n.icon-wavealt-seaalt:before{content:'\\f23b'}\n.icon-webcam:before{content:'\\f0fe'}\n.icon-webcamalt:before{content:'\\f129'}\n.icon-webhostinghub:before{content:'\\f031'}\n.icon-webmail:before{content:'\\f045'}\n.icon-webpage:before{content:'\\f033'}\n.icon-webplatform:before{content:'\\f3c3'}\n.icon-websitealt:before{content:'\\f01c'}\n.icon-websitebuilder:before{content:'\\f034'}\n.icon-weight:before{content:'\\f430'}\n.icon-westernunion:before{content:'\\f26a'}\n.icon-wheel:before{content:'\\f228'}\n.icon-wheelchair:before{content:'\\f3fe'}\n.icon-whistle:before{content:'\\f3d8'}\n.icon-whmcs:before{content:'\\f066'}\n.icon-wifi:before{content:'\\f0ff'}\n.icon-wind:before{content:'\\f41b'}\n.icon-windleft:before{content:'\\f424'}\n.icon-windows:before{content:'\\f019'}\n.icon-windright:before{content:'\\f425'}\n.icon-wine:before{content:'\\f238'}\n.icon-wizard:before{content:'\\f03c'}\n.icon-wizardalt:before{content:'\\f1fb'}\n.icon-wizardhat:before{content:'\\f337'}\n.icon-woman-female:before{content:'\\f2a2'}\n.icon-women:before{content:'\\f24d'}\n.icon-wordpress:before{content:'\\f074'}\n.icon-wrench:before{content:'\\f05b'}\n.icon-wrenchalt:before{content:'\\f2b2'}\n.icon-xbox:before{content:'\\f353'}\n.icon-xmen:before{content:'\\f345'}\n.icon-yahoo:before{content:'\\f151'}\n.icon-yen:before{content:'\\00a5'}\n.icon-yenalt:before{content:'\\f25d'}\n.icon-yinyang:before{content:'\\262f'}\n.icon-youtube:before{content:'\\f142'}\n.icon-zelda:before{content:'\\f3b8'}\n.icon-zikula:before{content:'\\f0ac'}\n.icon-zip:before{content:'\\f116'}\n.icon-zodiac-aquarius:before{content:'
\\f3b4'}\n.icon-zodiac-aries:before{content:'\\f3aa'}\n.icon-zodiac-cancer:before{content:'\\f3ad'}\n.icon-zodiac-capricorn:before{content:'\\f3b3'}\n.icon-zodiac-gemini:before{content:'\\f3ac'}\n.icon-zodiac-leo:before{content:'\\f3ae'}\n.icon-zodiac-libra:before{content:'\\f3b0'}\n.icon-zodiac-pisces:before{content:'\\f3b5'}\n.icon-zodiac-sagitarius:before{content:'\\f3b2'}\n.icon-zodiac-scorpio:before{content:'\\f3b1'}\n.icon-zodiac-taurus:before{content:'\\f3ab'}\n.icon-zodiac-virgo:before{content:'\\f3af'}\n.icon-zoom-in:before{content:'\\f320'}\n.icon-zoom-out:before{content:'\\f321'}\n.icon-vk:before{content:'\\f34e'}\n.icon-bitcoin:before{content:'\\f584'}\n.icon-rouble:before{content:'\\f4ca'}\n.icon-phpnuke:before{content:'\\f48c'}\n.icon-modx:before{content:'\\f48d'}\n.icon-eoneohseven:before{content:'\\f48e'}\n.icon-subrion:before{content:'\\f48f'}\n.icon-typothree:before{content:'\\f490'}\n.icon-tikiwiki:before{content:'\\f491'}\n.icon-pligg:before{content:'\\f492'}\n.icon-pyrocms:before{content:'\\f493'}\n.icon-mambo:before{content:'\\f494'}\n.icon-contao:before{content:'\\f495'}\n.icon-crackedegg:before{content:'\\f496'}\n.icon-coffeecupalt:before{content:'\\f497'}\n.icon-reademailalt:before{content:'\\f498'}\n.icon-train:before{content:'\\f499'}\n.icon-shoebox:before{content:'\\f49a'}\n.icon-bathtub:before{content:'\\f49b'}\n.icon-ninegag:before{content:'\\f49c'}\n.icon-pebble:before{content:'\\f49d'}\n.icon-musicthree:before{content:'\\f49e'}\n.icon-stairsup:before{content:'\\f49f'}\n.icon-stairsdown:before{content:'\\f4a0'}\n.icon-bookalt:before{content:'\\f4a1'}\n.icon-programclose:before{content:'\\f4a2'}\n.icon-programok:before{content:'\\f4a3'}\n.icon-splitalt:before{content:'\\f4a4'}\n.icon-solarsystem:before{content:'\\f4a5'}\n.icon-honeycomb:before{content:'\\f4a6'}\n.icon-tools:before{content:'\\f4a7'}\n.icon-xoops:before{content:'\\f4a8'}\n.icon-pixie:before{content:'\\f4a9'}\n.icon-dotclear:before{content:'\\f4aa'}\n.icon-impresscms:before{content:'\\f4ab'}\n.icon-saurus:before{content:'\\f4ac'}\n.icon-impresspages:before{content:'\\f4ad'}\n.icon-monstra:before{content:'\\f4ae'}\n.icon-snews:before{content:'\\f4af'}\n.icon-jcore:before{content:'\\f4b0'}\n.icon-silverstripe:before{content:'\\f4b1'}\n.icon-btwoevolution:before{content:'\\f4b2'}\n.icon-nucleus:before{content:'\\f4b3'}\n.icon-symphony:before{content:'\\f4b5'}\n.icon-vanillacms:before{content:'\\f4b6'}\n.icon-bbpress:before{content:'\\f4b7'}\n.icon-phpbbalt:before{content:'\\f4b8'}\n.icon-chyrp:before{content:'\\f4b9'}\n.icon-pivotx:before{content:'\\f4ba'}\n.icon-pagecookery:before{content:'\\f4bb'}\n.icon-moviereelalt:before{content:'\\f4bc'}\n.icon-cassettealt:before{content:'\\f4bd'}\n.icon-photobucket:before{content:'\\f4be'}\n.icon-technorati:before{content:'\\f4bf'}\n.icon-theverge:before{content:'\\f4c0'}\n.icon-stacks:before{content:'\\f4c1'}\n.icon-dotlist:before{content:'\\f4c2'}\n.icon-numberlist:before{content:'\\f4c3'}\n.icon-indentleft:before{content:'\\f4c4'}\n.icon-indentright:before{content:'\\f4c5'}\n.icon-fblike:before{content:'\\f4c6'}\n.icon-fbdislike:before{content:'\\f4c7'}\n.icon-sale:before{content:'\\f4c8'}\n.icon-sharetronix:before{content:'\\f4c9'}\n.icon-markerdown:before{content:'\\f4cb'}\n.icon-markerup:before{content:'\\f4cc'}\n.icon-markerleft:before{content:'\\f4cd'}\n.icon-markerright:before{content:'\\f4ce'}\n.icon-bookmarkalt:before{content:'\\f4cf'}\n.icon-calendarthree:before{content:'\\f4d0'}\n.icon-wineglass:before{content:'\\f4d1'}\n.icon-slidersoff:before{co
ntent:'\\f4d2'}\n.icon-slidersmiddle:before{content:'\\f4d3'}\n.icon-slidersfull:before{content:'\\f4d4'}\n.icon-slidersdesc:before{content:'\\f4d5'}\n.icon-slidersasc:before{content:'\\f4d6'}\n.icon-slideronefull:before{content:'\\f4d7'}\n.icon-slidertwofull:before{content:'\\f4d8'}\n.icon-sliderthreefull:before{content:'\\f4d9'}\n.icon-noborders:before{content:'\\f4da'}\n.icon-bottomborder:before{content:'\\f4db'}\n.icon-topborder:before{content:'\\f4dc'}\n.icon-leftborder:before{content:'\\f4dd'}\n.icon-rightborder:before{content:'\\f4de'}\n.icon-horizontalborder:before{content:'\\f4df'}\n.icon-verticalborder:before{content:'\\f4e0'}\n.icon-outerborders:before{content:'\\f4e1'}\n.icon-innerborders:before{content:'\\f4e2'}\n.icon-fullborders:before{content:'\\f4e3'}\n.icon-networksignalalt:before{content:'\\f4e4'}\n.icon-resizeverticalalt:before{content:'\\f4e5'}\n.icon-resizehorizontalalt:before{content:'\\f4e6'}\n.icon-moneyalt:before{content:'\\f4e7'}\n.icon-fontcase:before{content:'\\f4e8'}\n.icon-playstation:before{content:'\\f4e9'}\n.icon-cube:before{content:'\\f4ea'}\n.icon-sphere:before{content:'\\f4eb'}\n.icon-ceilinglight:before{content:'\\f4ec'}\n.icon-chandelier:before{content:'\\f4ed'}\n.icon-details:before{content:'\\f4ee'}\n.icon-detailsalt:before{content:'\\f4ef'}\n.icon-bullet:before{content:'\\f4f0'}\n.icon-gun:before{content:'\\f4f1'}\n.icon-processorthree:before{content:'\\f4f2'}\n.icon-world:before{content:'\\f4f3'}\n.icon-statistics:before{content:'\\f4f4'}\n.icon-shoppingcartalt:before{content:'\\f4f5'}\n.icon-microphonealt:before{content:'\\f4f6'}\n.icon-routeralt:before{content:'\\f4f7'}\n.icon-shell:before{content:'\\f4f8'}\n.icon-squareplay:before{content:'\\f4f9'}\n.icon-squarestop:before{content:'\\f4fa'}\n.icon-squarepause:before{content:'\\f4fb'}\n.icon-squarerecord:before{content:'\\f4fc'}\n.icon-squareforward:before{content:'\\f4fd'}\n.icon-squareback:before{content:'\\f4fe'}\n.icon-squarenext:before{content:'\\f4ff'}\n.icon-squareprevious:before{content:'\\f500'}\n.icon-mega:before{content:'\\f501'}\n.icon-charliechaplin:before{content:'\\f502'}\n.icon-popcorn:before{content:'\\f503'}\n.icon-fatarrowright:before{content:'\\f504'}\n.icon-fatarrowleft:before{content:'\\f505'}\n.icon-fatarrowdown:before{content:'\\f506'}\n.icon-fatarrowup:before{content:'\\f507'}\n.icon-shirtbutton:before{content:'\\f508'}\n.icon-shirtbuttonalt:before{content:'\\f509'}\n.icon-cuckooclock:before{content:'\\f50a'}\n.icon-lens:before{content:'\\f50b'}\n.icon-voltage:before{content:'\\f50c'}\n.icon-planealt:before{content:'\\f50d'}\n.icon-busalt:before{content:'\\f50e'}\n.icon-lipstick:before{content:'\\f50f'}\n.icon-plantalt:before{content:'\\f510'}\n.icon-paperboat:before{content:'\\f511'}\n.icon-texture:before{content:'\\f512'}\n.icon-dominoone:before{content:'\\f513'}\n.icon-dominotwo:before{content:'\\f514'}\n.icon-dominothree:before{content:'\\f515'}\n.icon-dominofour:before{content:'\\f516'}\n.icon-dominofive:before{content:'\\f517'}\n.icon-dominosix:before{content:'\\f518'}\n.icon-dominoseven:before{content:'\\f519'}\n.icon-dominoeight:before{content:'\\f51a'}\n.icon-dominonine:before{content:'\\f51b'}\n.icon-connected:before{content:'\\f51c'}\n.icon-connectedpc:before{content:'\\f51d'}\n.icon-musicsheet:before{content:'\\f51e'}\n.icon-rdio:before{content:'\\f51f'}\n.icon-spotify:before{content:'\\f520'}\n.icon-deviantart:before{content:'\\f521'}\n.icon-yelp:before{content:'\\f522'}\n.icon-behance:before{content:'\\f523'}\n.icon-nfc:before{content:'\\f524'}\n.icon-earbud
salt:before{content:'\\f525'}\n.icon-earbuds:before{content:'\\f526'}\n.icon-amazon:before{content:'\\f527'}\n.icon-openid:before{content:'\\f528'}\n.icon-digg:before{content:'\\f529'}\n.icon-retweet:before{content:'\\f52a'}\n.icon-moonnew:before{content:'\\f52b'}\n.icon-moonwaxingcrescent:before{content:'\\f52c'}\n.icon-moonfirstquarter:before{content:'\\f52d'}\n.icon-moonwaxinggibbous:before{content:'\\f52e'}\n.icon-moonfull:before{content:'\\f52f'}\n.icon-moonwaninggibbous:before{content:'\\f530'}\n.icon-moonthirdquarter:before{content:'\\f531'}\n.icon-moonwaningcrescent:before{content:'\\f532'}\n.icon-planet:before{content:'\\f533'}\n.icon-sodacup:before{content:'\\f534'}\n.icon-cocktail:before{content:'\\f535'}\n.icon-church:before{content:'\\f536'}\n.icon-mosque:before{content:'\\f537'}\n.icon-comedy:before{content:'\\f538'}\n.icon-tragedy:before{content:'\\f539'}\n.icon-bacon:before{content:'\\f53a'}\n.icon-trailor:before{content:'\\f53b'}\n.icon-tshirt:before{content:'\\f53c'}\n.icon-design:before{content:'\\f53d'}\n.icon-spiderweb:before{content:'\\f53e'}\n.icon-fireplace:before{content:'\\f53f'}\n.icon-tallglass:before{content:'\\f540'}\n.icon-grapes:before{content:'\\f541'}\n.icon-biohazard:before{content:'\\f542'}\n.icon-directions:before{content:'\\f543'}\n.icon-equalizerthree:before{content:'\\f544'}\n.icon-mountains:before{content:'\\f545'}\n.icon-bing:before{content:'\\f546'}\n.icon-windowseight:before{content:'\\f547'}\n.icon-microsoftoffice:before{content:'\\f548'}\n.icon-salealt:before{content:'\\f549'}\n.icon-purse:before{content:'\\f54a'}\n.icon-chickenalt:before{content:'\\f54b'}\n.icon-podium:before{content:'\\f54c'}\n.icon-findfriends:before{content:'\\f54d'}\n.icon-microphonethree:before{content:'\\f54e'}\n.icon-workshirt:before{content:'\\f54f'}\n.icon-donotdisturb:before{content:'\\f550'}\n.icon-addtags:before{content:'\\f551'}\n.icon-removetags:before{content:'\\f556'}\n.icon-carbattery:before{content:'\\f557'}\n.icon-debug:before{content:'\\f554'}\n.icon-trojan:before{content:'\\f555'}\n.icon-molecule:before{content:'\\f556'}\n.icon-safetygoggles:before{content:'\\f557'}\n.icon-leather:before{content:'\\f558'}\n.icon-teddybear:before{content:'\\f559'}\n.icon-stroller:before{content:'\\f55a'}\n.icon-circleplay:before{content:'\\f55b'}\n.icon-circlestop:before{content:'\\f55c'}\n.icon-circlepause:before{content:'\\f55d'}\n.icon-circlerecord:before{content:'\\f55e'}\n.icon-circleforward:before{content:'\\f55f'}\n.icon-circlebackward:before{content:'\\f560'}\n.icon-circlenext:before{content:'\\f561'}\n.icon-circleprevious:before{content:'\\f562'}\n.icon-circleplayempty:before{content:'\\f563'}\n.icon-circlestopempty:before{content:'\\f564'}\n.icon-circlepauseempty:before{content:'\\f565'}\n.icon-circlerecordempty:before{content:'\\f566'}\n.icon-circleforwardempty:before{content:'\\f567'}\n.icon-circlebackwardempty:before{content:'\\f568'}\n.icon-circlenextempty:before{content:'\\f569'}\n.icon-circlepreviousempty:before{content:'\\f56a'}\n.icon-belt:before{content:'\\f56b'}\n.icon-bait:before{content:'\\f56c'}\n.icon-manalt:before{content:'\\f56d'}\n.icon-womanalt:before{content:'\\f56e'}\n.icon-clover:before{content:'\\f56f'}\n.icon-pacifier:before{content:'\\f570'}\n.icon-calcplus:before{content:'\\f571'}\n.icon-calcminus:before{content:'\\f572'}\n.icon-calcmultiply:before{content:'\\f573'}\n.icon-calcdivide:before{content:'\\f574'}\n.icon-calcequals:before{content:'\\f575'}\n.icon-city:before{content:'\\f576'}\n.icon-hdvideo:before{content:'\\f577'}\n.icon-horizon
talexpand:before{content:'\\f578'}\n.icon-horizontalcontract:before{content:'\\f579'}\n.icon-radar:before{content:'\\f57a'}\n.icon-threed:before{content:'\\f57b'}\n.icon-flickralt:before{content:'\\f57c'}\n.icon-pattern:before{content:'\\f57d'}\n.icon-elevator:before{content:'\\f57e'}\n.icon-escalator:before{content:'\\f57f'}\n.icon-portrait:before{content:'\\f580'}\n.icon-cigar:before{content:'\\f581'}\n.icon-dropbox:before{content:'\\f582'}\n.icon-origami:before{content:'\\f583'}\n.icon-opensource:before{content:'\\f585'}\n.icon-redaxscript:before{content:'\\f586'}\n.icon-mahara:before{content:'\\f587'}\n.icon-forkcms:before{content:'\\f588'}\n.icon-pimcore:before{content:'\\f589'}\n.icon-bigace:before{content:'\\f58a'}\n.icon-aef:before{content:'\\f58b'}\n.icon-punbb:before{content:'\\f58c'}\n.icon-phorum:before{content:'\\f58d'}\n.icon-fluxbb:before{content:'\\f58e'}\n.icon-minibb:before{content:'\\f58f'}\n.icon-zenphoto:before{content:'\\f590'}\n.icon-fourimages:before{content:'\\f591'}\n.icon-plogger:before{content:'\\f592'}\n.icon-jcow:before{content:'\\f593'}\n.icon-elgg:before{content:'\\f594'}\n.icon-etano:before{content:'\\f595'}\n.icon-openclassifieds:before{content:'\\f596'}\n.icon-osclass:before{content:'\\f597'}\n.icon-openx:before{content:'\\f598'}\n.icon-phplist:before{content:'\\f599'}\n.icon-roundcube:before{content:'\\f59a'}\n.icon-pommo:before{content:'\\f59b'}\n.icon-webinsta:before{content:'\\f59c'}\n.icon-limesurvey:before{content:'\\f59d'}\n.icon-fengoffice:before{content:'\\f59e'}\n.icon-eyeos:before{content:'\\f59f'}\n.icon-dotproject:before{content:'\\f5a0'}\n.icon-collabtive:before{content:'\\f5a1'}\n.icon-projectpier:before{content:'\\f5a2'}\n.icon-taskfreak:before{content:'\\f5a3'}\n.icon-eventum:before{content:'\\f5a4'}\n.icon-traq:before{content:'\\f5a5'}\n.icon-mantisbugtracker:before{content:'\\f5a6'}\n.icon-oscommerce:before{content:'\\f5a7'}\n.icon-zencart:before{content:'\\f5a8'}\n.icon-tomatocart:before{content:'\\f5a9'}\n.icon-boxbilling:before{content:'\\f5aa'}\n.icon-zurmo:before{content:'\\f5ab'}\n.icon-orangehrm:before{content:'\\f5ac'}\n.icon-vtiger:before{content:'\\f5ad'}\n.icon-mibew:before{content:'\\f5ae'}\n.icon-phpmyfaq:before{content:'\\f5af'}\n.icon-yiiframework:before{content:'\\f5b0'}\n.icon-zendframework:before{content:'\\f5b1'}\n.icon-fuelphp:before{content:'\\f5b2'}\n.icon-kohana:before{content:'\\f5b3'}\n.icon-smarty:before{content:'\\f5b4'}\n.icon-sidu:before{content:'\\f5b5'}\n.icon-simplepie:before{content:'\\f5b6'}\n.icon-projectsend:before{content:'\\f5b7'}\n.icon-extjs:before{content:'\\f5b8'}\n.icon-raphael:before{content:'\\f5b9'}\n.icon-sizzle:before{content:'\\f5ba'}\n.icon-yui:before{content:'\\f5bb'}\n.icon-scissorsalt:before{content:'\\f5bc'}\n.icon-cuthere:before{content:'\\f5bd'}\n.icon-coinsalt:before{content:'\\f5be'}\n.icon-parkingmeter:before{content:'\\f5bf'}\n.icon-treethree:before{content:'\\f5c0'}\n.icon-packarchive:before{content:'\\f5c1'}\n.icon-unpackarchive:before{content:'\\f5c2'}\n.icon-terminalalt:before{content:'\\f5c3'}\n.icon-jersey:before{content:'\\f5c4'}\n.icon-vial:before{content:'\\f5c5'}\n.icon-noteslist:before{content:'\\f5c6'}\n.icon-notestasks:before{content:'\\f5c7'}\n.icon-notesdate:before{content:'\\f5c8'}\n.icon-noteslocation:before{content:'\\f5c9'}\n.icon-noteslistalt:before{content:'\\f5ca'}\n.icon-notestasksalt:before{content:'\\f5cb'}\n.icon-notesdatealt:before{content:'\\f5cc'}\n.icon-noteslocationalt:before{content:'\\f5cd'}\n.icon-useralt:before{content:'\\f5ce'}\n.icon-addusera
lt:before{content:'\\f5cf'}\n.icon-removeuseralt:before{content:'\\f5d0'}\n.icon-banuseralt:before{content:'\\f5d1'}\n.icon-banuser:before{content:'\\f5d2'}\n.icon-paintrollalt:before{content:'\\f5d3'}\n.icon-textcursor:before{content:'\\f5d4'}\n.icon-textfield:before{content:'\\f5d5'}\n.icon-precisecursor:before{content:'\\f5d6'}\n.icon-brokenlink:before{content:'\\f5d7'}\n.icon-bookmarkthree:before{content:'\\f5d8'}\n.icon-bookmarkfour:before{content:'\\f5d9'}\n.icon-warmedalalt:before{content:'\\f5da'}\n.icon-thinking:before{content:'\\f5db'}\n.icon-commentlove:before{content:'\\f5dc'}\n.icon-commentsmiley:before{content:'\\f5dd'}\n"
  },
  {
    "path": "tronweb/fonts/SIL OFL Font License WebHostingHub Glyphs.txt",
    "content": "Copyright (c) 2012 by Web Hosting Hub (webhostinghub.com),\nwith Reserved Font Name &amp;quot;Webhostinghub Glyphs&amp;quot;\n\nThis Font Software is licensed under the SIL Open Font License, Version 1.1.\nThis license is copied below, and is also available with a FAQ at:\nhttp://scripts.sil.org/OFL\n\n\n-----------------------------------------------------------\nSIL OPEN FONT LICENSE Version 1.1 - 26 February 2007\n-----------------------------------------------------------\nPREAMBLE\n\nThe goals of the Open Font License (OFL) are to stimulate worldwide\ndevelopment of collaborative font projects, to support the font creation\nefforts of academic and linguistic communities, and to provide a free and\nopen framework in which fonts may be shared and improved in partnership\nwith others.\n\nThe OFL allows the licensed fonts to be used, studied, modified and\nredistributed freely as long as they are not sold by themselves. The\nfonts, including any derivative works, can be bundled, embedded,\nredistributed and/or sold with any software provided that any reserved\nnames are not used by derivative works. The fonts and derivatives,\nhowever, cannot be released under any other type of license. The\nrequirement for fonts to remain under this license does not apply\nto any document created using the fonts or their derivatives.\nDEFINITIONS\n\n&amp;quot;Font Software&amp;quot; refers to the set of files released by the Copyright\nHolder(s) under this license and clearly marked as such. This may\ninclude source files, build scripts and documentation.\n\n&amp;quot;Reserved Font Name&amp;quot; refers to any names specified as such after the\ncopyright statement(s).\n\n&amp;quot;Original Version&amp;quot; refers to the collection of Font Software components as\ndistributed by the Copyright Holder(s).\n\n&amp;quot;Modified Version&amp;quot; refers to any derivative made by adding to, deleting,\nor substituting — in part or in whole — any of the components of the\nOriginal Version, by changing formats or by porting the Font Software to a\nnew environment.\n\n&amp;quot;Author&amp;quot; refers to any designer, engineer, programmer, technical\nwriter or other person who contributed to the Font Software.\nPERMISSION &amp;amp; CONDITIONS\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of the Font Software, to use, study, copy, merge, embed, modify,\nredistribute, and sell modified and unmodified copies of the Font\nSoftware, subject to the following conditions:\n\n1) Neither the Font Software nor any of its individual components,\nin Original or Modified Versions, may be sold by itself.\n\n2) Original or Modified Versions of the Font Software may be bundled,\nredistributed and/or sold with any software, provided that each copy\ncontains the above copyright notice and this license. These can be\nincluded either as stand-alone text files, human-readable headers or\nin the appropriate machine-readable metadata fields within text or\nbinary files as long as those fields can be easily viewed by the user.\n\n3) No Modified Version of the Font Software may use the Reserved Font\nName(s) unless explicit written permission is granted by the corresponding\nCopyright Holder. 
This restriction only applies to the primary font name as\npresented to the users.\n\n4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font\nSoftware shall not be used to promote, endorse or advertise any\nModified Version, except to acknowledge the contribution(s) of the\nCopyright Holder(s) and the Author(s) or with their explicit written\npermission.\n\n5) The Font Software, modified or unmodified, in part or in whole,\nmust be distributed entirely under this license, and must not be\ndistributed under any other license. The requirement for fonts to\nremain under this license does not apply to any document created\nusing the Font Software.\nTERMINATION\n\nThis license becomes null and void if any of the above conditions are\nnot met.\nDISCLAIMER\n\nTHE FONT SOFTWARE IS PROVIDED &amp;quot;AS IS&amp;quot;, WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT\nOF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE\nCOPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nINCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL\nDAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM\nOTHER DEALINGS IN THE FONT SOFTWARE.\n"
  },
  {
    "path": "tronweb/index.html",
    "content": "<!DOCTYPE html>\n\n<head>\n    <meta charset=\"utf-8\">\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge,chrome=1\">\n    <title>tronweb</title>\n    <meta name=\"description\" content=\"\">\n    <meta name=\"viewport\" content=\"width=device-width\">\n\n    <!-- Place favicon.ico -->\n    <link rel=\"shortcut icon\" href=\"tronweb.ico\" />\n\n    <link rel=\"stylesheet\" href=\"css/bootstrap.min.css\">\n    <link rel=\"stylesheet\" href=\"css/bootstrap-responsive.min.css\">\n    <link rel=\"stylesheet\" href=\"css/codemirror.css\">\n    <link rel=\"stylesheet\" href=\"css/whhg.css\">\n\n    <link rel=\"stylesheet/less\" href=\"css/tronweb.less\">\n    <!-- TODO: pre-compile LESS instead of client-side compilation -->\n    <script>\n        less = {\n          env: \"production\"\n        };\n      </script>\n    <script src=\"js/less-1.3.3.min.js\"></script>\n</head>\n\n<body>\n    <div id=\"all-view\">\n    </div>\n    <!-- Order of imports matters -->\n    <script src=\"//ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js\"></script>\n    <script src=\"//ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js\"></script>\n    <script src=\"js/plugins.js\"></script>\n    <script src=\"js/underscore-min.js\"></script>\n    <script src=\"js/underscore.string.js\"></script>\n    <script src=\"js/underscore.extra.js\"></script>\n    <script src=\"js/backbone-min.js\"></script>\n    <script src=\"js/bootstrap.min.js\"></script>\n    <script src=\"js/codemirror.js\"></script>\n    <script src=\"js/yaml.js\"></script>\n    <script src=\"js/moment.min.js\"></script>\n    <script src=\"js/moment-timezone-with-data-10-year-range.min.js\"></script>\n    <script src=\"js/d3.v7.min.js\"></script>\n    <script src=\"js/dagre.min.js\"></script>\n    <script src=\"js/cytoscape.min.js\"></script>\n    <script src=\"js/cytoscape-dagre.min.js\"></script>\n\n\n    <script src=\"js/cs/models.js\"></script>\n    <script src=\"js/cs/views.js\"></script>\n    <script src=\"js/cs/nodes.js\"></script>\n    <script src=\"js/cs/graph.js\"></script>\n    <script src=\"js/cs/config.js\"></script>\n    <script src=\"js/cs/actionrun.js\"></script>\n    <script src=\"js/cs/timeline.js\"></script>\n    <script src=\"js/cs/job.js\"></script>\n    <script src=\"js/cs/dashboard.js\"></script>\n    <script src=\"js/cs/navbar.js\"></script>\n    <script src=\"js/cs/routes.js\"></script>\n    <script>attachRouter()</script>\n</body>\n\n</html>\n"
  },
  {
    "path": "tronweb/js/backbone-min.js",
    "content": "// Backbone.js 0.9.10\n\n// (c) 2010-2012 Jeremy Ashkenas, DocumentCloud Inc.\n// Backbone may be freely distributed under the MIT license.\n// For all details and documentation:\n// http://backbonejs.org\n(function(){var n=this,B=n.Backbone,h=[],C=h.push,u=h.slice,D=h.splice,g;g=\"undefined\"!==typeof exports?exports:n.Backbone={};g.VERSION=\"0.9.10\";var f=n._;!f&&\"undefined\"!==typeof require&&(f=require(\"underscore\"));g.$=n.jQuery||n.Zepto||n.ender;g.noConflict=function(){n.Backbone=B;return this};g.emulateHTTP=!1;g.emulateJSON=!1;var v=/\\s+/,q=function(a,b,c,d){if(!c)return!0;if(\"object\"===typeof c)for(var e in c)a[b].apply(a,[e,c[e]].concat(d));else if(v.test(c)){c=c.split(v);e=0;for(var f=c.length;e<\nf;e++)a[b].apply(a,[c[e]].concat(d))}else return!0},w=function(a,b){var c,d=-1,e=a.length;switch(b.length){case 0:for(;++d<e;)(c=a[d]).callback.call(c.ctx);break;case 1:for(;++d<e;)(c=a[d]).callback.call(c.ctx,b[0]);break;case 2:for(;++d<e;)(c=a[d]).callback.call(c.ctx,b[0],b[1]);break;case 3:for(;++d<e;)(c=a[d]).callback.call(c.ctx,b[0],b[1],b[2]);break;default:for(;++d<e;)(c=a[d]).callback.apply(c.ctx,b)}},h=g.Events={on:function(a,b,c){if(!q(this,\"on\",a,[b,c])||!b)return this;this._events||(this._events=\n{});(this._events[a]||(this._events[a]=[])).push({callback:b,context:c,ctx:c||this});return this},once:function(a,b,c){if(!q(this,\"once\",a,[b,c])||!b)return this;var d=this,e=f.once(function(){d.off(a,e);b.apply(this,arguments)});e._callback=b;this.on(a,e,c);return this},off:function(a,b,c){var d,e,t,g,j,l,k,h;if(!this._events||!q(this,\"off\",a,[b,c]))return this;if(!a&&!b&&!c)return this._events={},this;g=a?[a]:f.keys(this._events);j=0;for(l=g.length;j<l;j++)if(a=g[j],d=this._events[a]){t=[];if(b||\nc){k=0;for(h=d.length;k<h;k++)e=d[k],(b&&b!==e.callback&&b!==e.callback._callback||c&&c!==e.context)&&t.push(e)}this._events[a]=t}return this},trigger:function(a){if(!this._events)return this;var b=u.call(arguments,1);if(!q(this,\"trigger\",a,b))return this;var c=this._events[a],d=this._events.all;c&&w(c,b);d&&w(d,arguments);return this},listenTo:function(a,b,c){var d=this._listeners||(this._listeners={}),e=a._listenerId||(a._listenerId=f.uniqueId(\"l\"));d[e]=a;a.on(b,\"object\"===typeof b?this:c,this);\nreturn this},stopListening:function(a,b,c){var d=this._listeners;if(d){if(a)a.off(b,\"object\"===typeof b?this:c,this),!b&&!c&&delete d[a._listenerId];else{\"object\"===typeof b&&(c=this);for(var e in d)d[e].off(b,c,this);this._listeners={}}return this}}};h.bind=h.on;h.unbind=h.off;f.extend(g,h);var r=g.Model=function(a,b){var c,d=a||{};this.cid=f.uniqueId(\"c\");this.attributes={};b&&b.collection&&(this.collection=b.collection);b&&b.parse&&(d=this.parse(d,b)||{});if(c=f.result(this,\"defaults\"))d=f.defaults({},\nd,c);this.set(d,b);this.changed={};this.initialize.apply(this,arguments)};f.extend(r.prototype,h,{changed:null,idAttribute:\"id\",initialize:function(){},toJSON:function(){return f.clone(this.attributes)},sync:function(){return g.sync.apply(this,arguments)},get:function(a){return this.attributes[a]},escape:function(a){return f.escape(this.get(a))},has:function(a){return null!=this.get(a)},set:function(a,b,c){var d,e,g,p,j,l,k;if(null==a)return this;\"object\"===typeof a?(e=a,c=b):(e={})[a]=b;c||(c={});\nif(!this._validate(e,c))return!1;g=c.unset;p=c.silent;a=[];j=this._changing;this._changing=!0;j||(this._previousAttributes=f.clone(this.attributes),this.changed={});k=this.attributes;l=this._previousAttributes;this.idAttribute in 
e&&(this.id=e[this.idAttribute]);for(d in e)b=e[d],f.isEqual(k[d],b)||a.push(d),f.isEqual(l[d],b)?delete this.changed[d]:this.changed[d]=b,g?delete k[d]:k[d]=b;if(!p){a.length&&(this._pending=!0);b=0;for(d=a.length;b<d;b++)this.trigger(\"change:\"+a[b],this,k[a[b]],c)}if(j)return this;\nif(!p)for(;this._pending;)this._pending=!1,this.trigger(\"change\",this,c);this._changing=this._pending=!1;return this},unset:function(a,b){return this.set(a,void 0,f.extend({},b,{unset:!0}))},clear:function(a){var b={},c;for(c in this.attributes)b[c]=void 0;return this.set(b,f.extend({},a,{unset:!0}))},hasChanged:function(a){return null==a?!f.isEmpty(this.changed):f.has(this.changed,a)},changedAttributes:function(a){if(!a)return this.hasChanged()?f.clone(this.changed):!1;var b,c=!1,d=this._changing?\nthis._previousAttributes:this.attributes,e;for(e in a)if(!f.isEqual(d[e],b=a[e]))(c||(c={}))[e]=b;return c},previous:function(a){return null==a||!this._previousAttributes?null:this._previousAttributes[a]},previousAttributes:function(){return f.clone(this._previousAttributes)},fetch:function(a){a=a?f.clone(a):{};void 0===a.parse&&(a.parse=!0);var b=a.success;a.success=function(a,d,e){if(!a.set(a.parse(d,e),e))return!1;b&&b(a,d,e)};return this.sync(\"read\",this,a)},save:function(a,b,c){var d,e,g=this.attributes;\nnull==a||\"object\"===typeof a?(d=a,c=b):(d={})[a]=b;if(d&&(!c||!c.wait)&&!this.set(d,c))return!1;c=f.extend({validate:!0},c);if(!this._validate(d,c))return!1;d&&c.wait&&(this.attributes=f.extend({},g,d));void 0===c.parse&&(c.parse=!0);e=c.success;c.success=function(a,b,c){a.attributes=g;var k=a.parse(b,c);c.wait&&(k=f.extend(d||{},k));if(f.isObject(k)&&!a.set(k,c))return!1;e&&e(a,b,c)};a=this.isNew()?\"create\":c.patch?\"patch\":\"update\";\"patch\"===a&&(c.attrs=d);a=this.sync(a,this,c);d&&c.wait&&(this.attributes=\ng);return a},destroy:function(a){a=a?f.clone(a):{};var b=this,c=a.success,d=function(){b.trigger(\"destroy\",b,b.collection,a)};a.success=function(a,b,e){(e.wait||a.isNew())&&d();c&&c(a,b,e)};if(this.isNew())return a.success(this,null,a),!1;var e=this.sync(\"delete\",this,a);a.wait||d();return e},url:function(){var a=f.result(this,\"urlRoot\")||f.result(this.collection,\"url\")||x();return this.isNew()?a:a+(\"/\"===a.charAt(a.length-1)?\"\":\"/\")+encodeURIComponent(this.id)},parse:function(a){return a},clone:function(){return new this.constructor(this.attributes)},\nisNew:function(){return null==this.id},isValid:function(a){return!this.validate||!this.validate(this.attributes,a)},_validate:function(a,b){if(!b.validate||!this.validate)return!0;a=f.extend({},this.attributes,a);var c=this.validationError=this.validate(a,b)||null;if(!c)return!0;this.trigger(\"invalid\",this,c,b||{});return!1}});var s=g.Collection=function(a,b){b||(b={});b.model&&(this.model=b.model);void 0!==b.comparator&&(this.comparator=b.comparator);this.models=[];this._reset();this.initialize.apply(this,\narguments);a&&this.reset(a,f.extend({silent:!0},b))};f.extend(s.prototype,h,{model:r,initialize:function(){},toJSON:function(a){return this.map(function(b){return b.toJSON(a)})},sync:function(){return g.sync.apply(this,arguments)},add:function(a,b){a=f.isArray(a)?a.slice():[a];b||(b={});var 
c,d,e,g,p,j,l,k,h,m;l=[];k=b.at;h=this.comparator&&null==k&&!1!=b.sort;m=f.isString(this.comparator)?this.comparator:null;c=0;for(d=a.length;c<d;c++)(e=this._prepareModel(g=a[c],b))?(p=this.get(e))?b.merge&&(p.set(g===\ne?e.attributes:g,b),h&&(!j&&p.hasChanged(m))&&(j=!0)):(l.push(e),e.on(\"all\",this._onModelEvent,this),this._byId[e.cid]=e,null!=e.id&&(this._byId[e.id]=e)):this.trigger(\"invalid\",this,g,b);l.length&&(h&&(j=!0),this.length+=l.length,null!=k?D.apply(this.models,[k,0].concat(l)):C.apply(this.models,l));j&&this.sort({silent:!0});if(b.silent)return this;c=0;for(d=l.length;c<d;c++)(e=l[c]).trigger(\"add\",e,this,b);j&&this.trigger(\"sort\",this,b);return this},remove:function(a,b){a=f.isArray(a)?a.slice():[a];\nb||(b={});var c,d,e,g;c=0;for(d=a.length;c<d;c++)if(g=this.get(a[c]))delete this._byId[g.id],delete this._byId[g.cid],e=this.indexOf(g),this.models.splice(e,1),this.length--,b.silent||(b.index=e,g.trigger(\"remove\",g,this,b)),this._removeReference(g);return this},push:function(a,b){a=this._prepareModel(a,b);this.add(a,f.extend({at:this.length},b));return a},pop:function(a){var b=this.at(this.length-1);this.remove(b,a);return b},unshift:function(a,b){a=this._prepareModel(a,b);this.add(a,f.extend({at:0},\nb));return a},shift:function(a){var b=this.at(0);this.remove(b,a);return b},slice:function(a,b){return this.models.slice(a,b)},get:function(a){if(null!=a)return this._idAttr||(this._idAttr=this.model.prototype.idAttribute),this._byId[a.id||a.cid||a[this._idAttr]||a]},at:function(a){return this.models[a]},where:function(a){return f.isEmpty(a)?[]:this.filter(function(b){for(var c in a)if(a[c]!==b.get(c))return!1;return!0})},sort:function(a){if(!this.comparator)throw Error(\"Cannot sort a set without a comparator\");\na||(a={});f.isString(this.comparator)||1===this.comparator.length?this.models=this.sortBy(this.comparator,this):this.models.sort(f.bind(this.comparator,this));a.silent||this.trigger(\"sort\",this,a);return this},pluck:function(a){return f.invoke(this.models,\"get\",a)},update:function(a,b){b=f.extend({add:!0,merge:!0,remove:!0},b);b.parse&&(a=this.parse(a,b));var c,d,e,g,h=[],j=[],l={};f.isArray(a)||(a=a?[a]:[]);if(b.add&&!b.remove)return this.add(a,b);d=0;for(e=a.length;d<e;d++)c=a[d],g=this.get(c),\nb.remove&&g&&(l[g.cid]=!0),(b.add&&!g||b.merge&&g)&&h.push(c);if(b.remove){d=0;for(e=this.models.length;d<e;d++)c=this.models[d],l[c.cid]||j.push(c)}j.length&&this.remove(j,b);h.length&&this.add(h,b);return this},reset:function(a,b){b||(b={});b.parse&&(a=this.parse(a,b));for(var c=0,d=this.models.length;c<d;c++)this._removeReference(this.models[c]);b.previousModels=this.models.slice();this._reset();a&&this.add(a,f.extend({silent:!0},b));b.silent||this.trigger(\"reset\",this,b);return this},fetch:function(a){a=\na?f.clone(a):{};void 0===a.parse&&(a.parse=!0);var b=a.success;a.success=function(a,d,e){a[e.update?\"update\":\"reset\"](d,e);b&&b(a,d,e)};return this.sync(\"read\",this,a)},create:function(a,b){b=b?f.clone(b):{};if(!(a=this._prepareModel(a,b)))return!1;b.wait||this.add(a,b);var c=this,d=b.success;b.success=function(a,b,f){f.wait&&c.add(a,f);d&&d(a,b,f)};a.save(null,b);return a},parse:function(a){return a},clone:function(){return new this.constructor(this.models)},_reset:function(){this.length=0;this.models.length=\n0;this._byId={}},_prepareModel:function(a,b){if(a instanceof r)return a.collection||(a.collection=this),a;b||(b={});b.collection=this;var c=new 
this.model(a,b);return!c._validate(a,b)?!1:c},_removeReference:function(a){this===a.collection&&delete a.collection;a.off(\"all\",this._onModelEvent,this)},_onModelEvent:function(a,b,c,d){(\"add\"===a||\"remove\"===a)&&c!==this||(\"destroy\"===a&&this.remove(b,d),b&&a===\"change:\"+b.idAttribute&&(delete this._byId[b.previous(b.idAttribute)],null!=b.id&&(this._byId[b.id]=\nb)),this.trigger.apply(this,arguments))},sortedIndex:function(a,b,c){b||(b=this.comparator);var d=f.isFunction(b)?b:function(a){return a.get(b)};return f.sortedIndex(this.models,a,d,c)}});f.each(\"forEach each map collect reduce foldl inject reduceRight foldr find detect filter select reject every all some any include contains invoke max min toArray size first head take initial rest tail drop last without indexOf shuffle lastIndexOf isEmpty chain\".split(\" \"),function(a){s.prototype[a]=function(){var b=\nu.call(arguments);b.unshift(this.models);return f[a].apply(f,b)}});f.each([\"groupBy\",\"countBy\",\"sortBy\"],function(a){s.prototype[a]=function(b,c){var d=f.isFunction(b)?b:function(a){return a.get(b)};return f[a](this.models,d,c)}});var y=g.Router=function(a){a||(a={});a.routes&&(this.routes=a.routes);this._bindRoutes();this.initialize.apply(this,arguments)},E=/\\((.*?)\\)/g,F=/(\\(\\?)?:\\w+/g,G=/\\*\\w+/g,H=/[\\-{}\\[\\]+?.,\\\\\\^$|#\\s]/g;f.extend(y.prototype,h,{initialize:function(){},route:function(a,b,c){f.isRegExp(a)||\n(a=this._routeToRegExp(a));c||(c=this[b]);g.history.route(a,f.bind(function(d){d=this._extractParameters(a,d);c&&c.apply(this,d);this.trigger.apply(this,[\"route:\"+b].concat(d));this.trigger(\"route\",b,d);g.history.trigger(\"route\",this,b,d)},this));return this},navigate:function(a,b){g.history.navigate(a,b);return this},_bindRoutes:function(){if(this.routes)for(var a,b=f.keys(this.routes);null!=(a=b.pop());)this.route(a,this.routes[a])},_routeToRegExp:function(a){a=a.replace(H,\"\\\\$&\").replace(E,\"(?:$1)?\").replace(F,\nfunction(a,c){return c?a:\"([^/]+)\"}).replace(G,\"(.*?)\");return RegExp(\"^\"+a+\"$\")},_extractParameters:function(a,b){return a.exec(b).slice(1)}});var m=g.History=function(){this.handlers=[];f.bindAll(this,\"checkUrl\");\"undefined\"!==typeof window&&(this.location=window.location,this.history=window.history)},z=/^[#\\/]|\\s+$/g,I=/^\\/+|\\/+$/g,J=/msie [\\w.]+/,K=/\\/$/;m.started=!1;f.extend(m.prototype,h,{interval:50,getHash:function(a){return(a=(a||this).location.href.match(/#(.*)$/))?a[1]:\"\"},getFragment:function(a,\nb){if(null==a)if(this._hasPushState||!this._wantsHashChange||b){a=this.location.pathname;var c=this.root.replace(K,\"\");a.indexOf(c)||(a=a.substr(c.length))}else a=this.getHash();return a.replace(z,\"\")},start:function(a){if(m.started)throw Error(\"Backbone.history has already been started\");m.started=!0;this.options=f.extend({},{root:\"/\"},this.options,a);this.root=this.options.root;this._wantsHashChange=!1!==this.options.hashChange;this._wantsPushState=!!this.options.pushState;this._hasPushState=!(!this.options.pushState||\n!this.history||!this.history.pushState);a=this.getFragment();var b=document.documentMode,b=J.exec(navigator.userAgent.toLowerCase())&&(!b||7>=b);this.root=(\"/\"+this.root+\"/\").replace(I,\"/\");b&&this._wantsHashChange&&(this.iframe=g.$('<iframe src=\"javascript:0\" tabindex=\"-1\" />').hide().appendTo(\"body\")[0].contentWindow,this.navigate(a));if(this._hasPushState)g.$(window).on(\"popstate\",this.checkUrl);else if(this._wantsHashChange&&\"onhashchange\"in 
window&&!b)g.$(window).on(\"hashchange\",this.checkUrl);\nelse this._wantsHashChange&&(this._checkUrlInterval=setInterval(this.checkUrl,this.interval));this.fragment=a;a=this.location;b=a.pathname.replace(/[^\\/]$/,\"$&/\")===this.root;if(this._wantsHashChange&&this._wantsPushState&&!this._hasPushState&&!b)return this.fragment=this.getFragment(null,!0),this.location.replace(this.root+this.location.search+\"#\"+this.fragment),!0;this._wantsPushState&&(this._hasPushState&&b&&a.hash)&&(this.fragment=this.getHash().replace(z,\"\"),this.history.replaceState({},document.title,\nthis.root+this.fragment+a.search));if(!this.options.silent)return this.loadUrl()},stop:function(){g.$(window).off(\"popstate\",this.checkUrl).off(\"hashchange\",this.checkUrl);clearInterval(this._checkUrlInterval);m.started=!1},route:function(a,b){this.handlers.unshift({route:a,callback:b})},checkUrl:function(){var a=this.getFragment();a===this.fragment&&this.iframe&&(a=this.getFragment(this.getHash(this.iframe)));if(a===this.fragment)return!1;this.iframe&&this.navigate(a);this.loadUrl()||this.loadUrl(this.getHash())},\nloadUrl:function(a){var b=this.fragment=this.getFragment(a);return f.any(this.handlers,function(a){if(a.route.test(b))return a.callback(b),!0})},navigate:function(a,b){if(!m.started)return!1;if(!b||!0===b)b={trigger:b};a=this.getFragment(a||\"\");if(this.fragment!==a){this.fragment=a;var c=this.root+a;if(this._hasPushState)this.history[b.replace?\"replaceState\":\"pushState\"]({},document.title,c);else if(this._wantsHashChange)this._updateHash(this.location,a,b.replace),this.iframe&&a!==this.getFragment(this.getHash(this.iframe))&&\n(b.replace||this.iframe.document.open().close(),this._updateHash(this.iframe.location,a,b.replace));else return this.location.assign(c);b.trigger&&this.loadUrl(a)}},_updateHash:function(a,b,c){c?(c=a.href.replace(/(javascript:|#).*$/,\"\"),a.replace(c+\"#\"+b)):a.hash=\"#\"+b}});g.history=new m;var A=g.View=function(a){this.cid=f.uniqueId(\"view\");this._configure(a||{});this._ensureElement();this.initialize.apply(this,arguments);this.delegateEvents()},L=/^(\\S+)\\s*(.*)$/,M=\"model collection el id attributes className tagName events\".split(\" \");\nf.extend(A.prototype,h,{tagName:\"div\",$:function(a){return this.$el.find(a)},initialize:function(){},render:function(){return this},remove:function(){this.$el.remove();this.stopListening();return this},setElement:function(a,b){this.$el&&this.undelegateEvents();this.$el=a instanceof g.$?a:g.$(a);this.el=this.$el[0];!1!==b&&this.delegateEvents();return this},delegateEvents:function(a){if(a||(a=f.result(this,\"events\"))){this.undelegateEvents();for(var b in a){var c=a[b];f.isFunction(c)||(c=this[a[b]]);\nif(!c)throw Error('Method \"'+a[b]+'\" does not exist');var d=b.match(L),e=d[1],d=d[2],c=f.bind(c,this),e=e+(\".delegateEvents\"+this.cid);if(\"\"===d)this.$el.on(e,c);else this.$el.on(e,d,c)}}},undelegateEvents:function(){this.$el.off(\".delegateEvents\"+this.cid)},_configure:function(a){this.options&&(a=f.extend({},f.result(this,\"options\"),a));f.extend(this,f.pick(a,M));this.options=a},_ensureElement:function(){if(this.el)this.setElement(f.result(this,\"el\"),!1);else{var a=f.extend({},f.result(this,\"attributes\"));\nthis.id&&(a.id=f.result(this,\"id\"));this.className&&(a[\"class\"]=f.result(this,\"className\"));a=g.$(\"<\"+f.result(this,\"tagName\")+\">\").attr(a);this.setElement(a,!1)}}});var N={create:\"POST\",update:\"PUT\",patch:\"PATCH\",\"delete\":\"DELETE\",read:\"GET\"};g.sync=function(a,b,c){var 
d=N[a];f.defaults(c||(c={}),{emulateHTTP:g.emulateHTTP,emulateJSON:g.emulateJSON});var e={type:d,dataType:\"json\"};c.url||(e.url=f.result(b,\"url\")||x());if(null==c.data&&b&&(\"create\"===a||\"update\"===a||\"patch\"===a))e.contentType=\"application/json\",\ne.data=JSON.stringify(c.attrs||b.toJSON(c));c.emulateJSON&&(e.contentType=\"application/x-www-form-urlencoded\",e.data=e.data?{model:e.data}:{});if(c.emulateHTTP&&(\"PUT\"===d||\"DELETE\"===d||\"PATCH\"===d)){e.type=\"POST\";c.emulateJSON&&(e.data._method=d);var h=c.beforeSend;c.beforeSend=function(a){a.setRequestHeader(\"X-HTTP-Method-Override\",d);if(h)return h.apply(this,arguments)}}\"GET\"!==e.type&&!c.emulateJSON&&(e.processData=!1);var m=c.success;c.success=function(a){m&&m(b,a,c);b.trigger(\"sync\",b,a,c)};\nvar j=c.error;c.error=function(a){j&&j(b,a,c);b.trigger(\"error\",b,a,c)};a=c.xhr=g.ajax(f.extend(e,c));b.trigger(\"request\",b,a,c);return a};g.ajax=function(){return g.$.ajax.apply(g.$,arguments)};r.extend=s.extend=y.extend=A.extend=m.extend=function(a,b){var c=this,d;d=a&&f.has(a,\"constructor\")?a.constructor:function(){return c.apply(this,arguments)};f.extend(d,c,b);var e=function(){this.constructor=d};e.prototype=c.prototype;d.prototype=new e;a&&f.extend(d.prototype,a);d.__super__=c.prototype;return d};\nvar x=function(){throw Error('A \"url\" property or function must be specified');}}).call(this);\n"
  },
  {
    "path": "tronweb/js/codemirror.js",
    "content": "// CodeMirror is the only global var we claim\nwindow.CodeMirror = (function() {\n  \"use strict\";\n\n  // BROWSER SNIFFING\n\n  // Crude, but necessary to handle a number of hard-to-feature-detect\n  // bugs and behavior differences.\n  var gecko = /gecko\\/\\d/i.test(navigator.userAgent);\n  var ie = /MSIE \\d/.test(navigator.userAgent);\n  var ie_lt8 = ie && (document.documentMode == null || document.documentMode < 8);\n  var ie_lt9 = ie && (document.documentMode == null || document.documentMode < 9);\n  var webkit = /WebKit\\//.test(navigator.userAgent);\n  var qtwebkit = webkit && /Qt\\/\\d+\\.\\d+/.test(navigator.userAgent);\n  var chrome = /Chrome\\//.test(navigator.userAgent);\n  var opera = /Opera\\//.test(navigator.userAgent);\n  var safari = /Apple Computer/.test(navigator.vendor);\n  var khtml = /KHTML\\//.test(navigator.userAgent);\n  var mac_geLion = /Mac OS X 1\\d\\D([7-9]|\\d\\d)\\D/.test(navigator.userAgent);\n  var mac_geMountainLion = /Mac OS X 1\\d\\D([8-9]|\\d\\d)\\D/.test(navigator.userAgent);\n  var phantom = /PhantomJS/.test(navigator.userAgent);\n\n  var ios = /AppleWebKit/.test(navigator.userAgent) && /Mobile\\/\\w+/.test(navigator.userAgent);\n  // This is woefully incomplete. Suggestions for alternative methods welcome.\n  var mobile = ios || /Android|webOS|BlackBerry|Opera Mini|Opera Mobi|IEMobile/i.test(navigator.userAgent);\n  var mac = ios || /Mac/.test(navigator.platform);\n  var windows = /windows/i.test(navigator.platform);\n\n  var opera_version = opera && navigator.userAgent.match(/Version\\/(\\d*\\.\\d*)/);\n  if (opera_version) opera_version = Number(opera_version[1]);\n  // Some browsers use the wrong event properties to signal cmd/ctrl on OS X\n  var flipCtrlCmd = mac && (qtwebkit || opera && (opera_version == null || opera_version < 12.11));\n  var captureMiddleClick = gecko || (ie && !ie_lt9);\n\n  // Optimize some code when these features are not used\n  var sawReadOnlySpans = false, sawCollapsedSpans = false;\n\n  // CONSTRUCTOR\n\n  function CodeMirror(place, options) {\n    if (!(this instanceof CodeMirror)) return new CodeMirror(place, options);\n\n    this.options = options = options || {};\n    // Determine effective options based on given values and defaults.\n    for (var opt in defaults) if (!options.hasOwnProperty(opt) && defaults.hasOwnProperty(opt))\n      options[opt] = defaults[opt];\n    setGuttersForLineNumbers(options);\n\n    var docStart = typeof options.value == \"string\" ? 
0 : options.value.first;\n    var display = this.display = makeDisplay(place, docStart);\n    display.wrapper.CodeMirror = this;\n    updateGutters(this);\n    if (options.autofocus && !mobile) focusInput(this);\n\n    this.state = {keyMaps: [],\n                  overlays: [],\n                  modeGen: 0,\n                  overwrite: false, focused: false,\n                  suppressEdits: false, pasteIncoming: false,\n                  draggingText: false,\n                  highlight: new Delayed()};\n\n    themeChanged(this);\n    if (options.lineWrapping)\n      this.display.wrapper.className += \" CodeMirror-wrap\";\n\n    var doc = options.value;\n    if (typeof doc == \"string\") doc = new Doc(options.value, options.mode);\n    operation(this, attachDoc)(this, doc);\n\n    // Override magic textarea content restore that IE sometimes does\n    // on our hidden textarea on reload\n    if (ie) setTimeout(bind(resetInput, this, true), 20);\n\n    registerEventHandlers(this);\n    // IE throws unspecified error in certain cases, when\n    // trying to access activeElement before onload\n    var hasFocus; try { hasFocus = (document.activeElement == display.input); } catch(e) { }\n    if (hasFocus || (options.autofocus && !mobile)) setTimeout(bind(onFocus, this), 20);\n    else onBlur(this);\n\n    operation(this, function() {\n      for (var opt in optionHandlers)\n        if (optionHandlers.propertyIsEnumerable(opt))\n          optionHandlers[opt](this, options[opt], Init);\n      for (var i = 0; i < initHooks.length; ++i) initHooks[i](this);\n    })();\n  }\n\n  // DISPLAY CONSTRUCTOR\n\n  function makeDisplay(place, docStart) {\n    var d = {};\n\n    var input = d.input = elt(\"textarea\", null, null, \"position: absolute; padding: 0; width: 1px; height: 1em; outline: none;\");\n    if (webkit) input.style.width = \"1000px\";\n    else input.setAttribute(\"wrap\", \"off\");\n    // if border: 0; -- iOS fails to open keyboard (issue #1287)\n    if (ios) input.style.border = \"1px solid black\";\n    input.setAttribute(\"autocorrect\", \"off\"); input.setAttribute(\"autocapitalize\", \"off\");\n\n    // Wraps and hides input textarea\n    d.inputDiv = elt(\"div\", [input], null, \"overflow: hidden; position: relative; width: 3px; height: 0px;\");\n    // The actual fake scrollbars.\n    d.scrollbarH = elt(\"div\", [elt(\"div\", null, null, \"height: 1px\")], \"CodeMirror-hscrollbar\");\n    d.scrollbarV = elt(\"div\", [elt(\"div\", null, null, \"width: 1px\")], \"CodeMirror-vscrollbar\");\n    d.scrollbarFiller = elt(\"div\", null, \"CodeMirror-scrollbar-filler\");\n    // DIVs containing the selection and the actual code\n    d.lineDiv = elt(\"div\");\n    d.selectionDiv = elt(\"div\", null, null, \"position: relative; z-index: 1\");\n    // Blinky cursor, and element used to ensure cursor fits at the end of a line\n    d.cursor = elt(\"div\", \"\\u00a0\", \"CodeMirror-cursor\");\n    // Secondary cursor, shown when on a 'jump' in bi-directional text\n    d.otherCursor = elt(\"div\", \"\\u00a0\", \"CodeMirror-cursor CodeMirror-secondarycursor\");\n    // Used to measure text size\n    d.measure = elt(\"div\", null, \"CodeMirror-measure\");\n    // Wraps everything that needs to exist inside the vertically-padded coordinate system\n    d.lineSpace = elt(\"div\", [d.measure, d.selectionDiv, d.lineDiv, d.cursor, d.otherCursor],\n                         null, \"position: relative; outline: none\");\n    // Moved around its parent to cover visible view\n    d.mover = elt(\"div\", 
[elt(\"div\", [d.lineSpace], \"CodeMirror-lines\")], null, \"position: relative\");\n    // Set to the height of the text, causes scrolling\n    d.sizer = elt(\"div\", [d.mover], \"CodeMirror-sizer\");\n    // D is needed because behavior of elts with overflow: auto and padding is inconsistent across browsers\n    d.heightForcer = elt(\"div\", null, null, \"position: absolute; height: \" + scrollerCutOff + \"px; width: 1px;\");\n    // Will contain the gutters, if any\n    d.gutters = elt(\"div\", null, \"CodeMirror-gutters\");\n    d.lineGutter = null;\n    // Helper element to properly size the gutter backgrounds\n    var scrollerInner = elt(\"div\", [d.sizer, d.heightForcer, d.gutters], null, \"position: relative; min-height: 100%\");\n    // Provides scrolling\n    d.scroller = elt(\"div\", [scrollerInner], \"CodeMirror-scroll\");\n    d.scroller.setAttribute(\"tabIndex\", \"-1\");\n    // The element in which the editor lives.\n    d.wrapper = elt(\"div\", [d.inputDiv, d.scrollbarH, d.scrollbarV,\n                            d.scrollbarFiller, d.scroller], \"CodeMirror\");\n    // Work around IE7 z-index bug\n    if (ie_lt8) { d.gutters.style.zIndex = -1; d.scroller.style.paddingRight = 0; }\n    if (place.appendChild) place.appendChild(d.wrapper); else place(d.wrapper);\n\n    // Needed to hide big blue blinking cursor on Mobile Safari\n    if (ios) input.style.width = \"0px\";\n    if (!webkit) d.scroller.draggable = true;\n    // Needed to handle Tab key in KHTML\n    if (khtml) { d.inputDiv.style.height = \"1px\"; d.inputDiv.style.position = \"absolute\"; }\n    // Need to set a minimum width to see the scrollbar on IE7 (but must not set it on IE8).\n    else if (ie_lt8) d.scrollbarH.style.minWidth = d.scrollbarV.style.minWidth = \"18px\";\n\n    // Current visible range (may be bigger than the view window).\n    d.viewOffset = d.lastSizeC = 0;\n    d.showingFrom = d.showingTo = docStart;\n\n    // Used to only resize the line number gutter when necessary (when\n    // the amount of lines crosses a boundary that makes its width change)\n    d.lineNumWidth = d.lineNumInnerWidth = d.lineNumChars = null;\n    // See readInput and resetInput\n    d.prevInput = \"\";\n    // Set to true when a non-horizontal-scrolling widget is added. 
As\n    // an optimization, widget aligning is skipped when d is false.\n    d.alignWidgets = false;\n    // Flag that indicates whether we currently expect input to appear\n    // (after some event like 'keypress' or 'input') and are polling\n    // intensively.\n    d.pollingFast = false;\n    // Self-resetting timeout for the poller\n    d.poll = new Delayed();\n    // True when a drag from the editor is active\n    d.draggingText = false;\n\n    d.cachedCharWidth = d.cachedTextHeight = null;\n    d.measureLineCache = [];\n    d.measureLineCachePos = 0;\n\n    // Tracks when resetInput has punted to just putting a short\n    // string instead of the (large) selection.\n    d.inaccurateSelection = false;\n\n    // Tracks the maximum line length so that the horizontal scrollbar\n    // can be kept static when scrolling.\n    d.maxLine = null;\n    d.maxLineLength = 0;\n    d.maxLineChanged = false;\n\n    // Used for measuring wheel scrolling granularity\n    d.wheelDX = d.wheelDY = d.wheelStartX = d.wheelStartY = null;\n\n    return d;\n  }\n\n  // STATE UPDATES\n\n  // Used to get the editor into a consistent state again when options change.\n\n  function loadMode(cm) {\n    cm.doc.mode = CodeMirror.getMode(cm.options, cm.doc.modeOption);\n    cm.doc.iter(function(line) {\n      if (line.stateAfter) line.stateAfter = null;\n      if (line.styles) line.styles = null;\n    });\n    cm.doc.frontier = cm.doc.first;\n    startWorker(cm, 100);\n    cm.state.modeGen++;\n    if (cm.curOp) regChange(cm);\n  }\n\n  function wrappingChanged(cm) {\n    if (cm.options.lineWrapping) {\n      cm.display.wrapper.className += \" CodeMirror-wrap\";\n      cm.display.sizer.style.minWidth = \"\";\n    } else {\n      cm.display.wrapper.className = cm.display.wrapper.className.replace(\" CodeMirror-wrap\", \"\");\n      computeMaxLength(cm);\n    }\n    estimateLineHeights(cm);\n    regChange(cm);\n    clearCaches(cm);\n    setTimeout(function(){updateScrollbars(cm.display, cm.doc.height);}, 100);\n  }\n\n  function estimateHeight(cm) {\n    var th = textHeight(cm.display), wrapping = cm.options.lineWrapping;\n    var perLine = wrapping && Math.max(5, cm.display.scroller.clientWidth / charWidth(cm.display) - 3);\n    return function(line) {\n      if (lineIsHidden(cm.doc, line))\n        return 0;\n      else if (wrapping)\n        return (Math.ceil(line.text.length / perLine) || 1) * th;\n      else\n        return th;\n    };\n  }\n\n  function estimateLineHeights(cm) {\n    var doc = cm.doc, est = estimateHeight(cm);\n    doc.iter(function(line) {\n      var estHeight = est(line);\n      if (estHeight != line.height) updateLineHeight(line, estHeight);\n    });\n  }\n\n  function keyMapChanged(cm) {\n    var style = keyMap[cm.options.keyMap].style;\n    cm.display.wrapper.className = cm.display.wrapper.className.replace(/\\s*cm-keymap-\\S+/g, \"\") +\n      (style ? 
\" cm-keymap-\" + style : \"\");\n  }\n\n  function themeChanged(cm) {\n    cm.display.wrapper.className = cm.display.wrapper.className.replace(/\\s*cm-s-\\S+/g, \"\") +\n      cm.options.theme.replace(/(^|\\s)\\s*/g, \" cm-s-\");\n    clearCaches(cm);\n  }\n\n  function guttersChanged(cm) {\n    updateGutters(cm);\n    regChange(cm);\n  }\n\n  function updateGutters(cm) {\n    var gutters = cm.display.gutters, specs = cm.options.gutters;\n    removeChildren(gutters);\n    for (var i = 0; i < specs.length; ++i) {\n      var gutterClass = specs[i];\n      var gElt = gutters.appendChild(elt(\"div\", null, \"CodeMirror-gutter \" + gutterClass));\n      if (gutterClass == \"CodeMirror-linenumbers\") {\n        cm.display.lineGutter = gElt;\n        gElt.style.width = (cm.display.lineNumWidth || 1) + \"px\";\n      }\n    }\n    gutters.style.display = i ? \"\" : \"none\";\n  }\n\n  function lineLength(doc, line) {\n    if (line.height == 0) return 0;\n    var len = line.text.length, merged, cur = line;\n    while (merged = collapsedSpanAtStart(cur)) {\n      var found = merged.find();\n      cur = getLine(doc, found.from.line);\n      len += found.from.ch - found.to.ch;\n    }\n    cur = line;\n    while (merged = collapsedSpanAtEnd(cur)) {\n      var found = merged.find();\n      len -= cur.text.length - found.from.ch;\n      cur = getLine(doc, found.to.line);\n      len += cur.text.length - found.to.ch;\n    }\n    return len;\n  }\n\n  function computeMaxLength(cm) {\n    var d = cm.display, doc = cm.doc;\n    d.maxLine = getLine(doc, doc.first);\n    d.maxLineLength = lineLength(doc, d.maxLine);\n    d.maxLineChanged = true;\n    doc.iter(function(line) {\n      var len = lineLength(doc, line);\n      if (len > d.maxLineLength) {\n        d.maxLineLength = len;\n        d.maxLine = line;\n      }\n    });\n  }\n\n  // Make sure the gutters options contains the element\n  // \"CodeMirror-linenumbers\" when the lineNumbers option is true.\n  function setGuttersForLineNumbers(options) {\n    var found = false;\n    for (var i = 0; i < options.gutters.length; ++i) {\n      if (options.gutters[i] == \"CodeMirror-linenumbers\") {\n        if (options.lineNumbers) found = true;\n        else options.gutters.splice(i--, 1);\n      }\n    }\n    if (!found && options.lineNumbers)\n      options.gutters.push(\"CodeMirror-linenumbers\");\n  }\n\n  // SCROLLBARS\n\n  // Re-synchronize the fake scrollbars with the actual size of the\n  // content. Optionally force a scrollTop.\n  function updateScrollbars(d /* display */, docHeight) {\n    var totalHeight = docHeight + paddingVert(d);\n    d.sizer.style.minHeight = d.heightForcer.style.top = totalHeight + \"px\";\n    var scrollHeight = Math.max(totalHeight, d.scroller.scrollHeight);\n    var needsH = d.scroller.scrollWidth > d.scroller.clientWidth;\n    var needsV = scrollHeight > d.scroller.clientHeight;\n    if (needsV) {\n      d.scrollbarV.style.display = \"block\";\n      d.scrollbarV.style.bottom = needsH ? scrollbarWidth(d.measure) + \"px\" : \"0\";\n      d.scrollbarV.firstChild.style.height =\n        (scrollHeight - d.scroller.clientHeight + d.scrollbarV.clientHeight) + \"px\";\n    } else d.scrollbarV.style.display = \"\";\n    if (needsH) {\n      d.scrollbarH.style.display = \"block\";\n      d.scrollbarH.style.right = needsV ? 
scrollbarWidth(d.measure) + \"px\" : \"0\";\n      d.scrollbarH.firstChild.style.width =\n        (d.scroller.scrollWidth - d.scroller.clientWidth + d.scrollbarH.clientWidth) + \"px\";\n    } else d.scrollbarH.style.display = \"\";\n    if (needsH && needsV) {\n      d.scrollbarFiller.style.display = \"block\";\n      d.scrollbarFiller.style.height = d.scrollbarFiller.style.width = scrollbarWidth(d.measure) + \"px\";\n    } else d.scrollbarFiller.style.display = \"\";\n\n    if (mac_geLion && scrollbarWidth(d.measure) === 0)\n      d.scrollbarV.style.minWidth = d.scrollbarH.style.minHeight = mac_geMountainLion ? \"18px\" : \"12px\";\n  }\n\n  function visibleLines(display, doc, viewPort) {\n    var top = display.scroller.scrollTop, height = display.wrapper.clientHeight;\n    if (typeof viewPort == \"number\") top = viewPort;\n    else if (viewPort) {top = viewPort.top; height = viewPort.bottom - viewPort.top;}\n    top = Math.floor(top - paddingTop(display));\n    var bottom = Math.ceil(top + height);\n    return {from: lineAtHeight(doc, top), to: lineAtHeight(doc, bottom)};\n  }\n\n  // LINE NUMBERS\n\n  function alignHorizontally(cm) {\n    var display = cm.display;\n    if (!display.alignWidgets && (!display.gutters.firstChild || !cm.options.fixedGutter)) return;\n    var comp = compensateForHScroll(display) - display.scroller.scrollLeft + cm.doc.scrollLeft;\n    var gutterW = display.gutters.offsetWidth, l = comp + \"px\";\n    for (var n = display.lineDiv.firstChild; n; n = n.nextSibling) if (n.alignable) {\n      for (var i = 0, a = n.alignable; i < a.length; ++i) a[i].style.left = l;\n    }\n    if (cm.options.fixedGutter)\n      display.gutters.style.left = (comp + gutterW) + \"px\";\n  }\n\n  function maybeUpdateLineNumberWidth(cm) {\n    if (!cm.options.lineNumbers) return false;\n    var doc = cm.doc, last = lineNumberFor(cm.options, doc.first + doc.size - 1), display = cm.display;\n    if (last.length != display.lineNumChars) {\n      var test = display.measure.appendChild(elt(\"div\", [elt(\"div\", last)],\n                                                 \"CodeMirror-linenumber CodeMirror-gutter-elt\"));\n      var innerW = test.firstChild.offsetWidth, padding = test.offsetWidth - innerW;\n      display.lineGutter.style.width = \"\";\n      display.lineNumInnerWidth = Math.max(innerW, display.lineGutter.offsetWidth - padding);\n      display.lineNumWidth = display.lineNumInnerWidth + padding;\n      display.lineNumChars = display.lineNumInnerWidth ? 
last.length : -1;\n      display.lineGutter.style.width = display.lineNumWidth + \"px\";\n      return true;\n    }\n    return false;\n  }\n\n  function lineNumberFor(options, i) {\n    return String(options.lineNumberFormatter(i + options.firstLineNumber));\n  }\n  function compensateForHScroll(display) {\n    return getRect(display.scroller).left - getRect(display.sizer).left;\n  }\n\n  // DISPLAY DRAWING\n\n  function updateDisplay(cm, changes, viewPort) {\n    var oldFrom = cm.display.showingFrom, oldTo = cm.display.showingTo;\n    var updated = updateDisplayInner(cm, changes, viewPort);\n    if (updated) {\n      signalLater(cm, \"update\", cm);\n      if (cm.display.showingFrom != oldFrom || cm.display.showingTo != oldTo)\n        signalLater(cm, \"viewportChange\", cm, cm.display.showingFrom, cm.display.showingTo);\n    }\n    updateSelection(cm);\n    updateScrollbars(cm.display, cm.doc.height);\n\n    return updated;\n  }\n\n  // Uses a set of changes plus the current scroll position to\n  // determine which DOM updates have to be made, and makes the\n  // updates.\n  function updateDisplayInner(cm, changes, viewPort) {\n    var display = cm.display, doc = cm.doc;\n    if (!display.wrapper.clientWidth) {\n      display.showingFrom = display.showingTo = doc.first;\n      display.viewOffset = 0;\n      return;\n    }\n\n    // Compute the new visible window\n    // If scrollTop is specified, use that to determine which lines\n    // to render instead of the current scrollbar position.\n    var visible = visibleLines(display, doc, viewPort);\n    // Bail out if the visible area is already rendered and nothing changed.\n    if (changes.length == 0 &&\n        visible.from > display.showingFrom && visible.to < display.showingTo)\n      return;\n\n    if (maybeUpdateLineNumberWidth(cm))\n      changes = [{from: doc.first, to: doc.first + doc.size}];\n    var gutterW = display.sizer.style.marginLeft = display.gutters.offsetWidth + \"px\";\n    display.scrollbarH.style.left = cm.options.fixedGutter ? 
gutterW : \"0\";\n\n    // Used to determine which lines need their line numbers updated\n    var positionsChangedFrom = Infinity;\n    if (cm.options.lineNumbers)\n      for (var i = 0; i < changes.length; ++i)\n        if (changes[i].diff) { positionsChangedFrom = changes[i].from; break; }\n\n    var end = doc.first + doc.size;\n    var from = Math.max(visible.from - cm.options.viewportMargin, doc.first);\n    var to = Math.min(end, visible.to + cm.options.viewportMargin);\n    if (display.showingFrom < from && from - display.showingFrom < 20) from = Math.max(doc.first, display.showingFrom);\n    if (display.showingTo > to && display.showingTo - to < 20) to = Math.min(end, display.showingTo);\n    if (sawCollapsedSpans) {\n      from = lineNo(visualLine(doc, getLine(doc, from)));\n      while (to < end && lineIsHidden(doc, getLine(doc, to))) ++to;\n    }\n\n    // Create a range of theoretically intact lines, and punch holes\n    // in that using the change info.\n    var intact = [{from: Math.max(display.showingFrom, doc.first),\n                   to: Math.min(display.showingTo, end)}];\n    if (intact[0].from >= intact[0].to) intact = [];\n    else intact = computeIntact(intact, changes);\n    // When merged lines are present, we might have to reduce the\n    // intact ranges because changes in continued fragments of the\n    // intact lines do require the lines to be redrawn.\n    if (sawCollapsedSpans)\n      for (var i = 0; i < intact.length; ++i) {\n        var range = intact[i], merged;\n        while (merged = collapsedSpanAtEnd(getLine(doc, range.to - 1))) {\n          var newTo = merged.find().from.line;\n          if (newTo > range.from) range.to = newTo;\n          else { intact.splice(i--, 1); break; }\n        }\n      }\n\n    // Clip off the parts that won't be visible\n    var intactLines = 0;\n    for (var i = 0; i < intact.length; ++i) {\n      var range = intact[i];\n      if (range.from < from) range.from = from;\n      if (range.to > to) range.to = to;\n      if (range.from >= range.to) intact.splice(i--, 1);\n      else intactLines += range.to - range.from;\n    }\n    if (intactLines == to - from && from == display.showingFrom && to == display.showingTo) {\n      updateViewOffset(cm);\n      return;\n    }\n    intact.sort(function(a, b) {return a.from - b.from;});\n\n    var focused = document.activeElement;\n    if (intactLines < (to - from) * .7) display.lineDiv.style.display = \"none\";\n    patchDisplay(cm, from, to, intact, positionsChangedFrom);\n    display.lineDiv.style.display = \"\";\n    if (document.activeElement != focused && focused.offsetHeight) focused.focus();\n\n    var different = from != display.showingFrom || to != display.showingTo ||\n      display.lastSizeC != display.wrapper.clientHeight;\n    // This is just a bogus formula that detects when the editor is\n    // resized or the font size changes.\n    if (different) display.lastSizeC = display.wrapper.clientHeight;\n    display.showingFrom = from; display.showingTo = to;\n    startWorker(cm, 100);\n\n    var prevBottom = display.lineDiv.offsetTop;\n    for (var node = display.lineDiv.firstChild, height; node; node = node.nextSibling) if (node.lineObj) {\n      if (ie_lt8) {\n        var bot = node.offsetTop + node.offsetHeight;\n        height = bot - prevBottom;\n        prevBottom = bot;\n      } else {\n        var box = getRect(node);\n        height = box.bottom - box.top;\n      }\n      var diff = node.lineObj.height - height;\n      if (height < 2) height = 
textHeight(display);\n      if (diff > .001 || diff < -.001) {\n        updateLineHeight(node.lineObj, height);\n        var widgets = node.lineObj.widgets;\n        if (widgets) for (var i = 0; i < widgets.length; ++i)\n          widgets[i].height = widgets[i].node.offsetHeight;\n      }\n    }\n    updateViewOffset(cm);\n\n    if (visibleLines(display, doc, viewPort).to > to)\n      updateDisplayInner(cm, [], viewPort);\n    return true;\n  }\n\n  function updateViewOffset(cm) {\n    var off = cm.display.viewOffset = heightAtLine(cm, getLine(cm.doc, cm.display.showingFrom));\n    // Position the mover div to align with the current virtual scroll position\n    cm.display.mover.style.top = off + \"px\";\n  }\n\n  function computeIntact(intact, changes) {\n    for (var i = 0, l = changes.length || 0; i < l; ++i) {\n      var change = changes[i], intact2 = [], diff = change.diff || 0;\n      for (var j = 0, l2 = intact.length; j < l2; ++j) {\n        var range = intact[j];\n        if (change.to <= range.from && change.diff) {\n          intact2.push({from: range.from + diff, to: range.to + diff});\n        } else if (change.to <= range.from || change.from >= range.to) {\n          intact2.push(range);\n        } else {\n          if (change.from > range.from)\n            intact2.push({from: range.from, to: change.from});\n          if (change.to < range.to)\n            intact2.push({from: change.to + diff, to: range.to + diff});\n        }\n      }\n      intact = intact2;\n    }\n    return intact;\n  }\n\n  function getDimensions(cm) {\n    var d = cm.display, left = {}, width = {};\n    for (var n = d.gutters.firstChild, i = 0; n; n = n.nextSibling, ++i) {\n      left[cm.options.gutters[i]] = n.offsetLeft;\n      width[cm.options.gutters[i]] = n.offsetWidth;\n    }\n    return {fixedPos: compensateForHScroll(d),\n            gutterTotalWidth: d.gutters.offsetWidth,\n            gutterLeft: left,\n            gutterWidth: width,\n            wrapperWidth: d.wrapper.clientWidth};\n  }\n\n  function patchDisplay(cm, from, to, intact, updateNumbersFrom) {\n    var dims = getDimensions(cm);\n    var display = cm.display, lineNumbers = cm.options.lineNumbers;\n    if (!intact.length && (!webkit || !cm.display.currentWheelTarget))\n      removeChildren(display.lineDiv);\n    var container = display.lineDiv, cur = container.firstChild;\n\n    function rm(node) {\n      var next = node.nextSibling;\n      if (webkit && mac && cm.display.currentWheelTarget == node) {\n        node.style.display = \"none\";\n        node.lineObj = null;\n      } else {\n        node.parentNode.removeChild(node);\n      }\n      return next;\n    }\n\n    var nextIntact = intact.shift(), lineN = from;\n    cm.doc.iter(from, to, function(line) {\n      if (nextIntact && nextIntact.to == lineN) nextIntact = intact.shift();\n      if (lineIsHidden(cm.doc, line)) {\n        if (line.height != 0) updateLineHeight(line, 0);\n        if (line.widgets && cur.previousSibling) for (var i = 0; i < line.widgets.length; ++i)\n          if (line.widgets[i].showIfHidden) {\n            var prev = cur.previousSibling;\n            if (/pre/i.test(prev.nodeName)) {\n              var wrap = elt(\"div\", null, null, \"position: relative\");\n              prev.parentNode.replaceChild(wrap, prev);\n              wrap.appendChild(prev);\n              prev = wrap;\n            }\n            var wnode = prev.appendChild(elt(\"div\", [line.widgets[i].node], \"CodeMirror-linewidget\"));\n            positionLineWidget(line.widgets[i], 
wnode, prev, dims);\n          }\n      } else if (nextIntact && nextIntact.from <= lineN && nextIntact.to > lineN) {\n        // This line is intact. Skip to the actual node. Update its\n        // line number if needed.\n        while (cur.lineObj != line) cur = rm(cur);\n        if (lineNumbers && updateNumbersFrom <= lineN && cur.lineNumber)\n          setTextContent(cur.lineNumber, lineNumberFor(cm.options, lineN));\n        cur = cur.nextSibling;\n      } else {\n        // For lines with widgets, make an attempt to find and reuse\n        // the existing element, so that widgets aren't needlessly\n        // removed and re-inserted into the dom\n        if (line.widgets) for (var j = 0, search = cur, reuse; search && j < 20; ++j, search = search.nextSibling)\n          if (search.lineObj == line && /div/i.test(search.nodeName)) { reuse = search; break; }\n        // This line needs to be generated.\n        var lineNode = buildLineElement(cm, line, lineN, dims, reuse);\n        if (lineNode != reuse) {\n          container.insertBefore(lineNode, cur);\n        } else {\n          while (cur != reuse) cur = rm(cur);\n          cur = cur.nextSibling;\n        }\n\n        lineNode.lineObj = line;\n      }\n      ++lineN;\n    });\n    while (cur) cur = rm(cur);\n  }\n\n  function buildLineElement(cm, line, lineNo, dims, reuse) {\n    var lineElement = lineContent(cm, line);\n    var markers = line.gutterMarkers, display = cm.display, wrap;\n\n    if (!cm.options.lineNumbers && !markers && !line.bgClass && !line.wrapClass && !line.widgets)\n      return lineElement;\n\n    // Lines with gutter elements, widgets or a background class need\n    // to be wrapped again, and have the extra elements added to the\n    // wrapper div\n\n    if (reuse) {\n      reuse.alignable = null;\n      var isOk = true, widgetsSeen = 0;\n      for (var n = reuse.firstChild, next; n; n = next) {\n        next = n.nextSibling;\n        if (!/\\bCodeMirror-linewidget\\b/.test(n.className)) {\n          reuse.removeChild(n);\n        } else {\n          for (var i = 0, first = true; i < line.widgets.length; ++i) {\n            var widget = line.widgets[i], isFirst = false;\n            if (!widget.above) { isFirst = first; first = false; }\n            if (widget.node == n.firstChild) {\n              positionLineWidget(widget, n, reuse, dims);\n              ++widgetsSeen;\n              if (isFirst) reuse.insertBefore(lineElement, n);\n              break;\n            }\n          }\n          if (i == line.widgets.length) { isOk = false; break; }\n        }\n      }\n      if (isOk && widgetsSeen == line.widgets.length) {\n        wrap = reuse;\n        reuse.className = line.wrapClass || \"\";\n      }\n    }\n    if (!wrap) {\n      wrap = elt(\"div\", null, line.wrapClass, \"position: relative\");\n      wrap.appendChild(lineElement);\n    }\n    // Kludge to make sure the styled element lies behind the selection (by z-index)\n    if (line.bgClass)\n      wrap.insertBefore(elt(\"div\", null, line.bgClass + \" CodeMirror-linebackground\"), wrap.firstChild);\n    if (cm.options.lineNumbers || markers) {\n      var gutterWrap = wrap.insertBefore(elt(\"div\", null, null, \"position: absolute; left: \" +\n                                             (cm.options.fixedGutter ? 
dims.fixedPos : -dims.gutterTotalWidth) + \"px\"),\n                                         wrap.firstChild);\n      if (cm.options.fixedGutter) (wrap.alignable || (wrap.alignable = [])).push(gutterWrap);\n      if (cm.options.lineNumbers && (!markers || !markers[\"CodeMirror-linenumbers\"]))\n        wrap.lineNumber = gutterWrap.appendChild(\n          elt(\"div\", lineNumberFor(cm.options, lineNo),\n              \"CodeMirror-linenumber CodeMirror-gutter-elt\",\n              \"left: \" + dims.gutterLeft[\"CodeMirror-linenumbers\"] + \"px; width: \"\n              + display.lineNumInnerWidth + \"px\"));\n      if (markers)\n        for (var k = 0; k < cm.options.gutters.length; ++k) {\n          var id = cm.options.gutters[k], found = markers.hasOwnProperty(id) && markers[id];\n          if (found)\n            gutterWrap.appendChild(elt(\"div\", [found], \"CodeMirror-gutter-elt\", \"left: \" +\n                                       dims.gutterLeft[id] + \"px; width: \" + dims.gutterWidth[id] + \"px\"));\n        }\n    }\n    if (ie_lt8) wrap.style.zIndex = 2;\n    if (line.widgets && wrap != reuse) for (var i = 0, ws = line.widgets; i < ws.length; ++i) {\n      var widget = ws[i], node = elt(\"div\", [widget.node], \"CodeMirror-linewidget\");\n      positionLineWidget(widget, node, wrap, dims);\n      if (widget.above)\n        wrap.insertBefore(node, cm.options.lineNumbers && line.height != 0 ? gutterWrap : lineElement);\n      else\n        wrap.appendChild(node);\n      signalLater(widget, \"redraw\");\n    }\n    return wrap;\n  }\n\n  function positionLineWidget(widget, node, wrap, dims) {\n    if (widget.noHScroll) {\n      (wrap.alignable || (wrap.alignable = [])).push(node);\n      var width = dims.wrapperWidth;\n      node.style.left = dims.fixedPos + \"px\";\n      if (!widget.coverGutter) {\n        width -= dims.gutterTotalWidth;\n        node.style.paddingLeft = dims.gutterTotalWidth + \"px\";\n      }\n      node.style.width = width + \"px\";\n    }\n    if (widget.coverGutter) {\n      node.style.zIndex = 5;\n      node.style.position = \"relative\";\n      if (!widget.noHScroll) node.style.marginLeft = -dims.gutterTotalWidth + \"px\";\n    }\n  }\n\n  // SELECTION / CURSOR\n\n  function updateSelection(cm) {\n    var display = cm.display;\n    var collapsed = posEq(cm.doc.sel.from, cm.doc.sel.to);\n    if (collapsed || cm.options.showCursorWhenSelecting)\n      updateSelectionCursor(cm);\n    else\n      display.cursor.style.display = display.otherCursor.style.display = \"none\";\n    if (!collapsed)\n      updateSelectionRange(cm);\n    else\n      display.selectionDiv.style.display = \"none\";\n\n    // Move the hidden textarea near the cursor to prevent scrolling artifacts\n    var headPos = cursorCoords(cm, cm.doc.sel.head, \"div\");\n    var wrapOff = getRect(display.wrapper), lineOff = getRect(display.lineDiv);\n    display.inputDiv.style.top = Math.max(0, Math.min(display.wrapper.clientHeight - 10,\n                                                      headPos.top + lineOff.top - wrapOff.top)) + \"px\";\n    display.inputDiv.style.left = Math.max(0, Math.min(display.wrapper.clientWidth - 10,\n                                                       headPos.left + lineOff.left - wrapOff.left)) + \"px\";\n  }\n\n  // No selection, plain cursor\n  function updateSelectionCursor(cm) {\n    var display = cm.display, pos = cursorCoords(cm, cm.doc.sel.head, \"div\");\n    display.cursor.style.left = pos.left + \"px\";\n    display.cursor.style.top = pos.top + \"px\";\n  
  display.cursor.style.height = Math.max(0, pos.bottom - pos.top) * cm.options.cursorHeight + \"px\";\n    display.cursor.style.display = \"\";\n\n    if (pos.other) {\n      display.otherCursor.style.display = \"\";\n      display.otherCursor.style.left = pos.other.left + \"px\";\n      display.otherCursor.style.top = pos.other.top + \"px\";\n      display.otherCursor.style.height = (pos.other.bottom - pos.other.top) * .85 + \"px\";\n    } else { display.otherCursor.style.display = \"none\"; }\n  }\n\n  // Highlight selection\n  function updateSelectionRange(cm) {\n    var display = cm.display, doc = cm.doc, sel = cm.doc.sel;\n    var fragment = document.createDocumentFragment();\n    var clientWidth = display.lineSpace.offsetWidth, pl = paddingLeft(cm.display);\n\n    function add(left, top, width, bottom) {\n      if (top < 0) top = 0;\n      fragment.appendChild(elt(\"div\", null, \"CodeMirror-selected\", \"position: absolute; left: \" + left +\n                               \"px; top: \" + top + \"px; width: \" + (width == null ? clientWidth - left : width) +\n                               \"px; height: \" + (bottom - top) + \"px\"));\n    }\n\n    function drawForLine(line, fromArg, toArg, retTop) {\n      var lineObj = getLine(doc, line);\n      var lineLen = lineObj.text.length, rVal = retTop ? Infinity : -Infinity;\n      function coords(ch) {\n        return charCoords(cm, Pos(line, ch), \"div\", lineObj);\n      }\n\n      iterateBidiSections(getOrder(lineObj), fromArg || 0, toArg == null ? lineLen : toArg, function(from, to, dir) {\n        var leftPos = coords(dir == \"rtl\" ? to - 1 : from);\n        var rightPos = coords(dir == \"rtl\" ? from : to - 1);\n        var left = leftPos.left, right = rightPos.right;\n        if (rightPos.top - leftPos.top > 3) { // Different lines, draw top part\n          add(left, leftPos.top, null, leftPos.bottom);\n          left = pl;\n          if (leftPos.bottom < rightPos.top) add(left, leftPos.bottom, null, rightPos.top);\n        }\n        if (toArg == null && to == lineLen) right = clientWidth;\n        if (fromArg == null && from == 0) left = pl;\n        rVal = retTop ? 
Math.min(rightPos.top, rVal) : Math.max(rightPos.bottom, rVal);\n        if (left < pl + 1) left = pl;\n        add(left, rightPos.top, right - left, rightPos.bottom);\n      });\n      return rVal;\n    }\n\n    if (sel.from.line == sel.to.line) {\n      drawForLine(sel.from.line, sel.from.ch, sel.to.ch);\n    } else {\n      var fromObj = getLine(doc, sel.from.line);\n      var cur = fromObj, merged, path = [sel.from.line, sel.from.ch], singleLine;\n      while (merged = collapsedSpanAtEnd(cur)) {\n        var found = merged.find();\n        path.push(found.from.ch, found.to.line, found.to.ch);\n        if (found.to.line == sel.to.line) {\n          path.push(sel.to.ch);\n          singleLine = true;\n          break;\n        }\n        cur = getLine(doc, found.to.line);\n      }\n\n      // This is a single, merged line\n      if (singleLine) {\n        for (var i = 0; i < path.length; i += 3)\n          drawForLine(path[i], path[i+1], path[i+2]);\n      } else {\n        var middleTop, middleBot, toObj = getLine(doc, sel.to.line);\n        if (sel.from.ch)\n          // Draw the first line of selection.\n          middleTop = drawForLine(sel.from.line, sel.from.ch, null, false);\n        else\n          // Simply include it in the middle block.\n          middleTop = heightAtLine(cm, fromObj) - display.viewOffset;\n\n        if (!sel.to.ch)\n          middleBot = heightAtLine(cm, toObj) - display.viewOffset;\n        else\n          middleBot = drawForLine(sel.to.line, collapsedSpanAtStart(toObj) ? null : 0, sel.to.ch, true);\n\n        if (middleTop < middleBot) add(pl, middleTop, null, middleBot);\n      }\n    }\n\n    removeChildrenAndAdd(display.selectionDiv, fragment);\n    display.selectionDiv.style.display = \"\";\n  }\n\n  // Cursor-blinking\n  function restartBlink(cm) {\n    var display = cm.display;\n    clearInterval(display.blinker);\n    var on = true;\n    display.cursor.style.visibility = display.otherCursor.style.visibility = \"\";\n    display.blinker = setInterval(function() {\n      if (!display.cursor.offsetHeight) return;\n      display.cursor.style.visibility = display.otherCursor.style.visibility = (on = !on) ? 
\"\" : \"hidden\";\n    }, cm.options.cursorBlinkRate);\n  }\n\n  // HIGHLIGHT WORKER\n\n  function startWorker(cm, time) {\n    if (cm.doc.mode.startState && cm.doc.frontier < cm.display.showingTo)\n      cm.state.highlight.set(time, bind(highlightWorker, cm));\n  }\n\n  function highlightWorker(cm) {\n    var doc = cm.doc;\n    if (doc.frontier < doc.first) doc.frontier = doc.first;\n    if (doc.frontier >= cm.display.showingTo) return;\n    var end = +new Date + cm.options.workTime;\n    var state = copyState(doc.mode, getStateBefore(cm, doc.frontier));\n    var changed = [], prevChange;\n    doc.iter(doc.frontier, Math.min(doc.first + doc.size, cm.display.showingTo + 500), function(line) {\n      if (doc.frontier >= cm.display.showingFrom) { // Visible\n        var oldStyles = line.styles;\n        line.styles = highlightLine(cm, line, state);\n        var ischange = !oldStyles || oldStyles.length != line.styles.length;\n        for (var i = 0; !ischange && i < oldStyles.length; ++i) ischange = oldStyles[i] != line.styles[i];\n        if (ischange) {\n          if (prevChange && prevChange.end == doc.frontier) prevChange.end++;\n          else changed.push(prevChange = {start: doc.frontier, end: doc.frontier + 1});\n        }\n        line.stateAfter = copyState(doc.mode, state);\n      } else {\n        processLine(cm, line, state);\n        line.stateAfter = doc.frontier % 5 == 0 ? copyState(doc.mode, state) : null;\n      }\n      ++doc.frontier;\n      if (+new Date > end) {\n        startWorker(cm, cm.options.workDelay);\n        return true;\n      }\n    });\n    if (changed.length)\n      operation(cm, function() {\n        for (var i = 0; i < changed.length; ++i)\n          regChange(this, changed[i].start, changed[i].end);\n      })();\n  }\n\n  // Finds the line to start with when starting a parse. Tries to\n  // find a line with a stateAfter, so that it can start with a\n  // valid state. If that fails, it returns the line with the\n  // smallest indentation, which tends to need the least context to\n  // parse correctly.\n  function findStartLine(cm, n) {\n    var minindent, minline, doc = cm.doc;\n    for (var search = n, lim = n - 100; search > lim; --search) {\n      if (search <= doc.first) return doc.first;\n      var line = getLine(doc, search - 1);\n      if (line.stateAfter) return search;\n      var indented = countColumn(line.text, null, cm.options.tabSize);\n      if (minline == null || minindent > indented) {\n        minline = search - 1;\n        minindent = indented;\n      }\n    }\n    return minline;\n  }\n\n  function getStateBefore(cm, n) {\n    var doc = cm.doc, display = cm.display;\n      if (!doc.mode.startState) return true;\n    var pos = findStartLine(cm, n), state = pos > doc.first && getLine(doc, pos-1).stateAfter;\n    if (!state) state = startState(doc.mode);\n    else state = copyState(doc.mode, state);\n    doc.iter(pos, n, function(line) {\n      processLine(cm, line, state);\n      var save = pos == n - 1 || pos % 5 == 0 || pos >= display.showingFrom && pos < display.showingTo;\n      line.stateAfter = save ? 
copyState(doc.mode, state) : null;\n      ++pos;\n    });\n    return state;\n  }\n\n  // POSITION MEASUREMENT\n\n  function paddingTop(display) {return display.lineSpace.offsetTop;}\n  function paddingVert(display) {return display.mover.offsetHeight - display.lineSpace.offsetHeight;}\n  function paddingLeft(display) {\n    var e = removeChildrenAndAdd(display.measure, elt(\"pre\", null, null, \"text-align: left\")).appendChild(elt(\"span\", \"x\"));\n    return e.offsetLeft;\n  }\n\n  function measureChar(cm, line, ch, data) {\n    var dir = -1;\n    data = data || measureLine(cm, line);\n\n    for (var pos = ch;; pos += dir) {\n      var r = data[pos];\n      if (r) break;\n      if (dir < 0 && pos == 0) dir = 1;\n    }\n    return {left: pos < ch ? r.right : r.left,\n            right: pos > ch ? r.left : r.right,\n            top: r.top, bottom: r.bottom};\n  }\n\n  function findCachedMeasurement(cm, line) {\n    var cache = cm.display.measureLineCache;\n    for (var i = 0; i < cache.length; ++i) {\n      var memo = cache[i];\n      if (memo.text == line.text && memo.markedSpans == line.markedSpans &&\n          cm.display.scroller.clientWidth == memo.width &&\n          memo.classes == line.textClass + \"|\" + line.bgClass + \"|\" + line.wrapClass)\n        return memo.measure;\n    }\n  }\n\n  function measureLine(cm, line) {\n    // First look in the cache\n    var measure = findCachedMeasurement(cm, line);\n    if (!measure) {\n      // Failing that, recompute and store result in cache\n      measure = measureLineInner(cm, line);\n      var cache = cm.display.measureLineCache;\n      var memo = {text: line.text, width: cm.display.scroller.clientWidth,\n                  markedSpans: line.markedSpans, measure: measure,\n                  classes: line.textClass + \"|\" + line.bgClass + \"|\" + line.wrapClass};\n      if (cache.length == 16) cache[++cm.display.measureLineCachePos % 16] = memo;\n      else cache.push(memo);\n    }\n    return measure;\n  }\n\n  function measureLineInner(cm, line) {\n    var display = cm.display, measure = emptyArray(line.text.length);\n    var pre = lineContent(cm, line, measure);\n\n    // IE does not cache element positions of inline elements between\n    // calls to getBoundingClientRect. This makes the loop below,\n    // which gathers the positions of all the characters on the line,\n    // do an amount of layout work quadratic to the number of\n    // characters. When line wrapping is off, we try to improve things\n    // by first subdividing the line into a bunch of inline blocks, so\n    // that IE can reuse most of the layout information from caches\n    // for those blocks. 
This does interfere with line wrapping, so it\n    // doesn't work when wrapping is on, but in that case the\n    // situation is slightly better, since IE does cache line-wrapping\n    // information and only recomputes per-line.\n    if (ie && !ie_lt8 && !cm.options.lineWrapping && pre.childNodes.length > 100) {\n      var fragment = document.createDocumentFragment();\n      var chunk = 10, n = pre.childNodes.length;\n      for (var i = 0, chunks = Math.ceil(n / chunk); i < chunks; ++i) {\n        var wrap = elt(\"div\", null, null, \"display: inline-block\");\n        for (var j = 0; j < chunk && n; ++j) {\n          wrap.appendChild(pre.firstChild);\n          --n;\n        }\n        fragment.appendChild(wrap);\n      }\n      pre.appendChild(fragment);\n    }\n\n    removeChildrenAndAdd(display.measure, pre);\n\n    var outer = getRect(display.lineDiv);\n    var vranges = [], data = emptyArray(line.text.length), maxBot = pre.offsetHeight;\n    // Work around an IE7/8 bug where it will sometimes have randomly\n    // replaced our pre with a clone at this point.\n    if (ie_lt9 && display.measure.first != pre)\n      removeChildrenAndAdd(display.measure, pre);\n\n    for (var i = 0, cur; i < measure.length; ++i) if (cur = measure[i]) {\n      var size = getRect(cur);\n      var top = Math.max(0, size.top - outer.top), bot = Math.min(size.bottom - outer.top, maxBot);\n      for (var j = 0; j < vranges.length; j += 2) {\n        var rtop = vranges[j], rbot = vranges[j+1];\n        if (rtop > bot || rbot < top) continue;\n        if (rtop <= top && rbot >= bot ||\n            top <= rtop && bot >= rbot ||\n            Math.min(bot, rbot) - Math.max(top, rtop) >= (bot - top) >> 1) {\n          vranges[j] = Math.min(top, rtop);\n          vranges[j+1] = Math.max(bot, rbot);\n          break;\n        }\n      }\n      if (j == vranges.length) vranges.push(top, bot);\n      var right = size.right;\n      if (cur.measureRight) right = getRect(cur.measureRight).left;\n      data[i] = {left: size.left - outer.left, right: right - outer.left, top: j};\n    }\n    for (var i = 0, cur; i < data.length; ++i) if (cur = data[i]) {\n      var vr = cur.top;\n      cur.top = vranges[vr]; cur.bottom = vranges[vr+1];\n    }\n\n    return data;\n  }\n\n  function measureLineWidth(cm, line) {\n    var hasBadSpan = false;\n    if (line.markedSpans) for (var i = 0; i < line.markedSpans; ++i) {\n      var sp = line.markedSpans[i];\n      if (sp.collapsed && (sp.to == null || sp.to == line.text.length)) hasBadSpan = true;\n    }\n    var cached = !hasBadSpan && findCachedMeasurement(cm, line);\n    if (cached) return measureChar(cm, line, line.text.length, cached).right;\n\n    var pre = lineContent(cm, line);\n    var end = pre.appendChild(zeroWidthElement(cm.display.measure));\n    removeChildrenAndAdd(cm.display.measure, pre);\n    return getRect(end).right - getRect(cm.display.lineDiv).left;\n  }\n\n  function clearCaches(cm) {\n    cm.display.measureLineCache.length = cm.display.measureLineCachePos = 0;\n    cm.display.cachedCharWidth = cm.display.cachedTextHeight = null;\n    cm.display.maxLineChanged = true;\n    cm.display.lineNumChars = null;\n  }\n\n  // Context is one of \"line\", \"div\" (display.lineDiv), \"local\"/null (editor), or \"page\"\n  function intoCoordSystem(cm, lineObj, rect, context) {\n    if (lineObj.widgets) for (var i = 0; i < lineObj.widgets.length; ++i) if (lineObj.widgets[i].above) {\n      var size = widgetHeight(lineObj.widgets[i]);\n      rect.top += size; rect.bottom += 
size;\n    }\n    if (context == \"line\") return rect;\n    if (!context) context = \"local\";\n    var yOff = heightAtLine(cm, lineObj);\n    if (context != \"local\") yOff -= cm.display.viewOffset;\n    if (context == \"page\") {\n      var lOff = getRect(cm.display.lineSpace);\n      yOff += lOff.top + (window.pageYOffset || (document.documentElement || document.body).scrollTop);\n      var xOff = lOff.left + (window.pageXOffset || (document.documentElement || document.body).scrollLeft);\n      rect.left += xOff; rect.right += xOff;\n    }\n    rect.top += yOff; rect.bottom += yOff;\n    return rect;\n  }\n\n  // Context may be \"window\", \"page\", \"div\", or \"local\"/null\n  // Result is in local coords\n  function fromCoordSystem(cm, coords, context) {\n    if (context == \"div\") return coords;\n    var left = coords.left, top = coords.top;\n    if (context == \"page\") {\n      left -= window.pageXOffset || (document.documentElement || document.body).scrollLeft;\n      top -= window.pageYOffset || (document.documentElement || document.body).scrollTop;\n    }\n    var lineSpaceBox = getRect(cm.display.lineSpace);\n    left -= lineSpaceBox.left;\n    top -= lineSpaceBox.top;\n    if (context == \"local\" || !context) {\n      var editorBox = getRect(cm.display.wrapper);\n      left -= editorBox.left;\n      top -= editorBox.top;\n    }\n    return {left: left, top: top};\n  }\n\n  function charCoords(cm, pos, context, lineObj) {\n    if (!lineObj) lineObj = getLine(cm.doc, pos.line);\n    return intoCoordSystem(cm, lineObj, measureChar(cm, lineObj, pos.ch), context);\n  }\n\n  function cursorCoords(cm, pos, context, lineObj, measurement) {\n    lineObj = lineObj || getLine(cm.doc, pos.line);\n    if (!measurement) measurement = measureLine(cm, lineObj);\n    function get(ch, right) {\n      var m = measureChar(cm, lineObj, ch, measurement);\n      if (right) m.left = m.right; else m.right = m.left;\n      return intoCoordSystem(cm, lineObj, m, context);\n    }\n    var order = getOrder(lineObj), ch = pos.ch;\n    if (!order) return get(ch);\n    var main, other, linedir = order[0].level;\n    for (var i = 0; i < order.length; ++i) {\n      var part = order[i], rtl = part.level % 2, nb, here;\n      if (part.from < ch && part.to > ch) return get(ch, rtl);\n      var left = rtl ? part.to : part.from, right = rtl ? part.from : part.to;\n      if (left == ch) {\n        // IE returns bogus offsets and widths for edges where the\n        // direction flips, but only for the side with the lower\n        // level. So we try to use the side with the higher level.\n        if (i && part.level < (nb = order[i-1]).level) here = get(nb.level % 2 ? nb.from : nb.to - 1, true);\n        else here = get(rtl && part.from != part.to ? ch - 1 : ch);\n        if (rtl == linedir) main = here; else other = here;\n      } else if (right == ch) {\n        var nb = i < order.length - 1 && order[i+1];\n        if (!rtl && nb && nb.from == nb.to) continue;\n        if (nb && part.level < nb.level) here = get(nb.level % 2 ? nb.to - 1 : nb.from);\n        else here = get(rtl ? 
ch : ch - 1, true);\n        if (rtl == linedir) main = here; else other = here;\n      }\n    }\n    if (linedir && !ch) other = get(order[0].to - 1);\n    if (!main) return other;\n    if (other) main.other = other;\n    return main;\n  }\n\n  function PosMaybeOutside(line, ch, outside) {\n    var pos = new Pos(line, ch);\n    if (outside) pos.outside = true;\n    return pos;\n  }\n\n  // Coords must be lineSpace-local\n  function coordsChar(cm, x, y) {\n    var doc = cm.doc;\n    y += cm.display.viewOffset;\n    if (y < 0) return PosMaybeOutside(doc.first, 0, true);\n    var lineNo = lineAtHeight(doc, y), last = doc.first + doc.size - 1;\n    if (lineNo > last)\n      return PosMaybeOutside(doc.first + doc.size - 1, getLine(doc, last).text.length, true);\n    if (x < 0) x = 0;\n\n    for (;;) {\n      var lineObj = getLine(doc, lineNo);\n      var found = coordsCharInner(cm, lineObj, lineNo, x, y);\n      var merged = collapsedSpanAtEnd(lineObj);\n      var mergedPos = merged && merged.find();\n      if (merged && found.ch >= mergedPos.from.ch)\n        lineNo = mergedPos.to.line;\n      else\n        return found;\n    }\n  }\n\n  function coordsCharInner(cm, lineObj, lineNo, x, y) {\n    var innerOff = y - heightAtLine(cm, lineObj);\n    var wrongLine = false, adjust = 2 * cm.display.wrapper.clientWidth;\n    var measurement = measureLine(cm, lineObj);\n\n    function getX(ch) {\n      var sp = cursorCoords(cm, Pos(lineNo, ch), \"line\",\n                            lineObj, measurement);\n      wrongLine = true;\n      if (innerOff > sp.bottom) return sp.left - adjust;\n      else if (innerOff < sp.top) return sp.left + adjust;\n      else wrongLine = false;\n      return sp.left;\n    }\n\n    var bidi = getOrder(lineObj), dist = lineObj.text.length;\n    var from = lineLeft(lineObj), to = lineRight(lineObj);\n    var fromX = getX(from), fromOutside = wrongLine, toX = getX(to), toOutside = wrongLine;\n\n    if (x > toX) return PosMaybeOutside(lineNo, to, toOutside);\n    // Do a binary search between these bounds.\n    for (;;) {\n      if (bidi ? to == from || to == moveVisually(lineObj, from, 1) : to - from <= 1) {\n        var after = x - fromX < toX - x, ch = after ? from : to;\n        while (isExtendingChar.test(lineObj.text.charAt(ch))) ++ch;\n        var pos = PosMaybeOutside(lineNo, ch, after ? 
fromOutside : toOutside);\n        pos.after = after;\n        return pos;\n      }\n      var step = Math.ceil(dist / 2), middle = from + step;\n      if (bidi) {\n        middle = from;\n        for (var i = 0; i < step; ++i) middle = moveVisually(lineObj, middle, 1);\n      }\n      var middleX = getX(middle);\n      if (middleX > x) {to = middle; toX = middleX; if (toOutside = wrongLine) toX += 1000; dist -= step;}\n      else {from = middle; fromX = middleX; fromOutside = wrongLine; dist = step;}\n    }\n  }\n\n  var measureText;\n  function textHeight(display) {\n    if (display.cachedTextHeight != null) return display.cachedTextHeight;\n    if (measureText == null) {\n      measureText = elt(\"pre\");\n      // Measure a bunch of lines, for browsers that compute\n      // fractional heights.\n      for (var i = 0; i < 49; ++i) {\n        measureText.appendChild(document.createTextNode(\"x\"));\n        measureText.appendChild(elt(\"br\"));\n      }\n      measureText.appendChild(document.createTextNode(\"x\"));\n    }\n    removeChildrenAndAdd(display.measure, measureText);\n    var height = measureText.offsetHeight / 50;\n    if (height > 3) display.cachedTextHeight = height;\n    removeChildren(display.measure);\n    return height || 1;\n  }\n\n  function charWidth(display) {\n    if (display.cachedCharWidth != null) return display.cachedCharWidth;\n    var anchor = elt(\"span\", \"x\");\n    var pre = elt(\"pre\", [anchor]);\n    removeChildrenAndAdd(display.measure, pre);\n    var width = anchor.offsetWidth;\n    if (width > 2) display.cachedCharWidth = width;\n    return width || 10;\n  }\n\n  // OPERATIONS\n\n  // Operations are used to wrap changes in such a way that each\n  // change won't have to update the cursor and display (which would\n  // be awkward, slow, and error-prone), but instead updates are\n  // batched and then all combined and executed at once.\n\n  var nextOpId = 0;\n  function startOperation(cm) {\n    cm.curOp = {\n      // An array of ranges of lines that have to be updated. 
See\n      // updateDisplay.\n      changes: [],\n      updateInput: null,\n      userSelChange: null,\n      textChanged: null,\n      selectionChanged: false,\n      updateMaxLine: false,\n      updateScrollPos: false,\n      id: ++nextOpId\n    };\n    if (!delayedCallbackDepth++) delayedCallbacks = [];\n  }\n\n  function endOperation(cm) {\n    var op = cm.curOp, doc = cm.doc, display = cm.display;\n    cm.curOp = null;\n\n    if (op.updateMaxLine) computeMaxLength(cm);\n    if (display.maxLineChanged && !cm.options.lineWrapping) {\n      var width = measureLineWidth(cm, display.maxLine);\n      display.sizer.style.minWidth = Math.max(0, width + 3 + scrollerCutOff) + \"px\";\n      display.maxLineChanged = false;\n      var maxScrollLeft = Math.max(0, display.sizer.offsetLeft + display.sizer.offsetWidth - display.scroller.clientWidth);\n      if (maxScrollLeft < doc.scrollLeft && !op.updateScrollPos)\n        setScrollLeft(cm, Math.min(display.scroller.scrollLeft, maxScrollLeft), true);\n    }\n    var newScrollPos, updated;\n    if (op.updateScrollPos) {\n      newScrollPos = op.updateScrollPos;\n    } else if (op.selectionChanged && display.scroller.clientHeight) { // don't rescroll if not visible\n      var coords = cursorCoords(cm, doc.sel.head);\n      newScrollPos = calculateScrollPos(cm, coords.left, coords.top, coords.left, coords.bottom);\n    }\n    if (op.changes.length || newScrollPos && newScrollPos.scrollTop != null) {\n      updated = updateDisplay(cm, op.changes, newScrollPos && newScrollPos.scrollTop);\n      if (cm.display.scroller.offsetHeight) cm.doc.scrollTop = cm.display.scroller.scrollTop;\n    }\n    if (!updated && op.selectionChanged) updateSelection(cm);\n    if (op.updateScrollPos) {\n      display.scroller.scrollTop = display.scrollbarV.scrollTop = doc.scrollTop = newScrollPos.scrollTop;\n      display.scroller.scrollLeft = display.scrollbarH.scrollLeft = doc.scrollLeft = newScrollPos.scrollLeft;\n      alignHorizontally(cm);\n    } else if (newScrollPos) {\n      scrollCursorIntoView(cm);\n    }\n    if (op.selectionChanged) restartBlink(cm);\n\n    if (cm.state.focused && op.updateInput)\n      resetInput(cm, op.userSelChange);\n\n    var hidden = op.maybeHiddenMarkers, unhidden = op.maybeUnhiddenMarkers;\n    if (hidden) for (var i = 0; i < hidden.length; ++i)\n      if (!hidden[i].lines.length) signal(hidden[i], \"hide\");\n    if (unhidden) for (var i = 0; i < unhidden.length; ++i)\n      if (unhidden[i].lines.length) signal(unhidden[i], \"unhide\");\n\n    var delayed;\n    if (!--delayedCallbackDepth) {\n      delayed = delayedCallbacks;\n      delayedCallbacks = null;\n    }\n    if (op.textChanged)\n      signal(cm, \"change\", cm, op.textChanged);\n    if (op.selectionChanged) signal(cm, \"cursorActivity\", cm);\n    if (delayed) for (var i = 0; i < delayed.length; ++i) delayed[i]();\n  }\n\n  // Wraps a function in an operation. 
Returns the wrapped function.\n  function operation(cm1, f) {\n    return function() {\n      var cm = cm1 || this, withOp = !cm.curOp;\n      if (withOp) startOperation(cm);\n      try { var result = f.apply(cm, arguments); }\n      finally { if (withOp) endOperation(cm); }\n      return result;\n    };\n  }\n  function docOperation(f) {\n    return function() {\n      var withOp = this.cm && !this.cm.curOp, result;\n      if (withOp) startOperation(this.cm);\n      try { result = f.apply(this, arguments); }\n      finally { if (withOp) endOperation(this.cm); }\n      return result;\n    };\n  }\n  function runInOp(cm, f) {\n    var withOp = !cm.curOp, result;\n    if (withOp) startOperation(cm);\n    try { result = f(); }\n    finally { if (withOp) endOperation(cm); }\n    return result;\n  }\n\n  function regChange(cm, from, to, lendiff) {\n    if (from == null) from = cm.doc.first;\n    if (to == null) to = cm.doc.first + cm.doc.size;\n    cm.curOp.changes.push({from: from, to: to, diff: lendiff});\n  }\n\n  // INPUT HANDLING\n\n  function slowPoll(cm) {\n    if (cm.display.pollingFast) return;\n    cm.display.poll.set(cm.options.pollInterval, function() {\n      readInput(cm);\n      if (cm.state.focused) slowPoll(cm);\n    });\n  }\n\n  function fastPoll(cm) {\n    var missed = false;\n    cm.display.pollingFast = true;\n    function p() {\n      var changed = readInput(cm);\n      if (!changed && !missed) {missed = true; cm.display.poll.set(60, p);}\n      else {cm.display.pollingFast = false; slowPoll(cm);}\n    }\n    cm.display.poll.set(20, p);\n  }\n\n  // prevInput is a hack to work with IME. If we reset the textarea\n  // on every change, that breaks IME. So we look for changes\n  // compared to the previous content instead. (Modern browsers have\n  // events that indicate IME taking place, but these are not widely\n  // supported or compatible enough yet to rely on.)\n  function readInput(cm) {\n    var input = cm.display.input, prevInput = cm.display.prevInput, doc = cm.doc, sel = doc.sel;\n    if (!cm.state.focused || hasSelection(input) || isReadOnly(cm)) return false;\n    var text = input.value;\n    if (text == prevInput && posEq(sel.from, sel.to)) return false;\n    // IE enjoys randomly deselecting our input's text when\n    // re-focusing. If the selection is gone but the cursor is at the\n    // start of the input, that's probably what happened.\n    if (ie && text && input.selectionStart === 0) {\n      resetInput(cm, true);\n      return false;\n    }\n    var withOp = !cm.curOp;\n    if (withOp) startOperation(cm);\n    sel.shift = false;\n    var same = 0, l = Math.min(prevInput.length, text.length);\n    while (same < l && prevInput[same] == text[same]) ++same;\n    var from = sel.from, to = sel.to;\n    if (same < prevInput.length)\n      from = Pos(from.line, from.ch - (prevInput.length - same));\n    else if (cm.state.overwrite && posEq(from, to) && !cm.state.pasteIncoming)\n      to = Pos(to.line, Math.min(getLine(doc, to.line).text.length, to.ch + (text.length - same)));\n    var updateInput = cm.curOp.updateInput;\n    makeChange(cm.doc, {from: from, to: to, text: splitLines(text.slice(same)),\n                        origin: cm.state.pasteIncoming ? 
\"paste\" : \"+input\"}, \"end\");\n\n    cm.curOp.updateInput = updateInput;\n    if (text.length > 1000 || text.indexOf(\"\\n\") > -1) input.value = cm.display.prevInput = \"\";\n    else cm.display.prevInput = text;\n    if (withOp) endOperation(cm);\n    cm.state.pasteIncoming = false;\n    return true;\n  }\n\n  function resetInput(cm, user) {\n    var minimal, selected, doc = cm.doc;\n    if (!posEq(doc.sel.from, doc.sel.to)) {\n      cm.display.prevInput = \"\";\n      minimal = hasCopyEvent &&\n        (doc.sel.to.line - doc.sel.from.line > 100 || (selected = cm.getSelection()).length > 1000);\n      if (minimal) cm.display.input.value = \"-\";\n      else cm.display.input.value = selected || cm.getSelection();\n      if (cm.state.focused) selectInput(cm.display.input);\n    } else if (user) cm.display.prevInput = cm.display.input.value = \"\";\n    cm.display.inaccurateSelection = minimal;\n  }\n\n  function focusInput(cm) {\n    if (cm.options.readOnly != \"nocursor\" && (!mobile || document.activeElement != cm.display.input))\n      cm.display.input.focus();\n  }\n\n  function isReadOnly(cm) {\n    return cm.options.readOnly || cm.doc.cantEdit;\n  }\n\n  // EVENT HANDLERS\n\n  function registerEventHandlers(cm) {\n    var d = cm.display;\n    on(d.scroller, \"mousedown\", operation(cm, onMouseDown));\n    on(d.scroller, \"dblclick\", operation(cm, e_preventDefault));\n    on(d.lineSpace, \"selectstart\", function(e) {\n      if (!eventInWidget(d, e)) e_preventDefault(e);\n    });\n    // Gecko browsers fire contextmenu *after* opening the menu, at\n    // which point we can't mess with it anymore. Context menu is\n    // handled in onMouseDown for Gecko.\n    if (!captureMiddleClick) on(d.scroller, \"contextmenu\", function(e) {onContextMenu(cm, e);});\n\n    on(d.scroller, \"scroll\", function() {\n      if (d.scroller.clientHeight) {\n        setScrollTop(cm, d.scroller.scrollTop);\n        setScrollLeft(cm, d.scroller.scrollLeft, true);\n        signal(cm, \"scroll\", cm);\n      }\n    });\n    on(d.scrollbarV, \"scroll\", function() {\n      if (d.scroller.clientHeight) setScrollTop(cm, d.scrollbarV.scrollTop);\n    });\n    on(d.scrollbarH, \"scroll\", function() {\n      if (d.scroller.clientHeight) setScrollLeft(cm, d.scrollbarH.scrollLeft);\n    });\n\n    on(d.scroller, \"mousewheel\", function(e){onScrollWheel(cm, e);});\n    on(d.scroller, \"DOMMouseScroll\", function(e){onScrollWheel(cm, e);});\n\n    function reFocus() { if (cm.state.focused) setTimeout(bind(focusInput, cm), 0); }\n    on(d.scrollbarH, \"mousedown\", reFocus);\n    on(d.scrollbarV, \"mousedown\", reFocus);\n    // Prevent wrapper from ever scrolling\n    on(d.wrapper, \"scroll\", function() { d.wrapper.scrollTop = d.wrapper.scrollLeft = 0; });\n\n    function onResize() {\n      // Might be a text scaling operation, clear size caches.\n      d.cachedCharWidth = d.cachedTextHeight = null;\n      clearCaches(cm);\n      runInOp(cm, bind(regChange, cm));\n    }\n    on(window, \"resize\", onResize);\n    // Above handler holds on to the editor and its data structures.\n    // Here we poll to unregister it when the editor is no longer in\n    // the document, so that it can be garbage-collected.\n    function unregister() {\n      for (var p = d.wrapper.parentNode; p && p != document.body; p = p.parentNode) {}\n      if (p) setTimeout(unregister, 5000);\n      else off(window, \"resize\", onResize);\n    }\n    setTimeout(unregister, 5000);\n\n    on(d.input, \"keyup\", operation(cm, function(e) {\n    
  if (cm.options.onKeyEvent && cm.options.onKeyEvent(cm, addStop(e))) return;\n      if (e.keyCode == 16) cm.doc.sel.shift = false;\n    }));\n    on(d.input, \"input\", bind(fastPoll, cm));\n    on(d.input, \"keydown\", operation(cm, onKeyDown));\n    on(d.input, \"keypress\", operation(cm, onKeyPress));\n    on(d.input, \"focus\", bind(onFocus, cm));\n    on(d.input, \"blur\", bind(onBlur, cm));\n\n    function drag_(e) {\n      if (cm.options.onDragEvent && cm.options.onDragEvent(cm, addStop(e))) return;\n      e_stop(e);\n    }\n    if (cm.options.dragDrop) {\n      on(d.scroller, \"dragstart\", function(e){onDragStart(cm, e);});\n      on(d.scroller, \"dragenter\", drag_);\n      on(d.scroller, \"dragover\", drag_);\n      on(d.scroller, \"drop\", operation(cm, onDrop));\n    }\n    on(d.scroller, \"paste\", function(e){\n      if (eventInWidget(d, e)) return;\n      focusInput(cm);\n      fastPoll(cm);\n    });\n    on(d.input, \"paste\", function() {\n      cm.state.pasteIncoming = true;\n      fastPoll(cm);\n    });\n\n    function prepareCopy() {\n      if (d.inaccurateSelection) {\n        d.prevInput = \"\";\n        d.inaccurateSelection = false;\n        d.input.value = cm.getSelection();\n        selectInput(d.input);\n      }\n    }\n    on(d.input, \"cut\", prepareCopy);\n    on(d.input, \"copy\", prepareCopy);\n\n    // Needed to handle Tab key in KHTML\n    if (khtml) on(d.sizer, \"mouseup\", function() {\n        if (document.activeElement == d.input) d.input.blur();\n        focusInput(cm);\n    });\n  }\n\n  function eventInWidget(display, e) {\n    for (var n = e_target(e); n != display.wrapper; n = n.parentNode) {\n      if (!n) return true;\n      if (/\\bCodeMirror-(?:line)?widget\\b/.test(n.className) ||\n          n.parentNode == display.sizer && n != display.mover) return true;\n    }\n  }\n\n  function posFromMouse(cm, e, liberal) {\n    var display = cm.display;\n    if (!liberal) {\n      var target = e_target(e);\n      if (target == display.scrollbarH || target == display.scrollbarH.firstChild ||\n          target == display.scrollbarV || target == display.scrollbarV.firstChild ||\n          target == display.scrollbarFiller) return null;\n    }\n    var x, y, space = getRect(display.lineSpace);\n    // Fails unpredictably on IE[67] when mouse is dragged around quickly.\n    try { x = e.clientX; y = e.clientY; } catch (e) { return null; }\n    return coordsChar(cm, x - space.left, y - space.top);\n  }\n\n  var lastClick, lastDoubleClick;\n  function onMouseDown(e) {\n    var cm = this, display = cm.display, doc = cm.doc, sel = doc.sel;\n    sel.shift = e.shiftKey;\n\n    if (eventInWidget(display, e)) {\n      if (!webkit) {\n        display.scroller.draggable = false;\n        setTimeout(function(){display.scroller.draggable = true;}, 100);\n      }\n      return;\n    }\n    if (clickInGutter(cm, e)) return;\n    var start = posFromMouse(cm, e);\n\n    switch (e_button(e)) {\n    case 3:\n      if (captureMiddleClick) onContextMenu.call(cm, cm, e);\n      return;\n    case 2:\n      if (start) extendSelection(cm.doc, start);\n      setTimeout(bind(focusInput, cm), 20);\n      e_preventDefault(e);\n      return;\n    }\n    // For button 1, if it was clicked inside the editor\n    // (posFromMouse returning non-null), we have to adjust the\n    // selection.\n    if (!start) {if (e_target(e) == display.scroller) e_preventDefault(e); return;}\n\n    if (!cm.state.focused) onFocus(cm);\n\n    var now = +new Date, type = \"single\";\n    if (lastDoubleClick 
&& lastDoubleClick.time > now - 400 && posEq(lastDoubleClick.pos, start)) {\n      type = \"triple\";\n      e_preventDefault(e);\n      setTimeout(bind(focusInput, cm), 20);\n      selectLine(cm, start.line);\n    } else if (lastClick && lastClick.time > now - 400 && posEq(lastClick.pos, start)) {\n      type = \"double\";\n      lastDoubleClick = {time: now, pos: start};\n      e_preventDefault(e);\n      var word = findWordAt(getLine(doc, start.line).text, start);\n      extendSelection(cm.doc, word.from, word.to);\n    } else { lastClick = {time: now, pos: start}; }\n\n    var last = start;\n    if (cm.options.dragDrop && dragAndDrop && !isReadOnly(cm) && !posEq(sel.from, sel.to) &&\n        !posLess(start, sel.from) && !posLess(sel.to, start) && type == \"single\") {\n      var dragEnd = operation(cm, function(e2) {\n        if (webkit) display.scroller.draggable = false;\n        cm.state.draggingText = false;\n        off(document, \"mouseup\", dragEnd);\n        off(display.scroller, \"drop\", dragEnd);\n        if (Math.abs(e.clientX - e2.clientX) + Math.abs(e.clientY - e2.clientY) < 10) {\n          e_preventDefault(e2);\n          extendSelection(cm.doc, start);\n          focusInput(cm);\n        }\n      });\n      // Let the drag handler handle this.\n      if (webkit) display.scroller.draggable = true;\n      cm.state.draggingText = dragEnd;\n      // IE's approach to draggable\n      if (display.scroller.dragDrop) display.scroller.dragDrop();\n      on(document, \"mouseup\", dragEnd);\n      on(display.scroller, \"drop\", dragEnd);\n      return;\n    }\n    e_preventDefault(e);\n    if (type == \"single\") extendSelection(cm.doc, clipPos(doc, start));\n\n    var startstart = sel.from, startend = sel.to;\n\n    function doSelect(cur) {\n      if (type == \"single\") {\n        extendSelection(cm.doc, clipPos(doc, start), cur);\n        return;\n      }\n\n      startstart = clipPos(doc, startstart);\n      startend = clipPos(doc, startend);\n      if (type == \"double\") {\n        var word = findWordAt(getLine(doc, cur.line).text, cur);\n        if (posLess(cur, startstart)) extendSelection(cm.doc, word.from, startend);\n        else extendSelection(cm.doc, startstart, word.to);\n      } else if (type == \"triple\") {\n        if (posLess(cur, startstart)) extendSelection(cm.doc, startend, clipPos(doc, Pos(cur.line, 0)));\n        else extendSelection(cm.doc, startstart, clipPos(doc, Pos(cur.line + 1, 0)));\n      }\n    }\n\n    var editorSize = getRect(display.wrapper);\n    // Used to ensure timeout re-tries don't fire when another extend\n    // happened in the meantime (clearTimeout isn't reliable -- at\n    // least on Chrome, the timeouts still happen even when cleared,\n    // if the clear happens after their scheduled firing time).\n    var counter = 0;\n\n    function extend(e) {\n      var curCount = ++counter;\n      var cur = posFromMouse(cm, e, true);\n      if (!cur) return;\n      if (!posEq(cur, last)) {\n        if (!cm.state.focused) onFocus(cm);\n        last = cur;\n        doSelect(cur);\n        var visible = visibleLines(display, doc);\n        if (cur.line >= visible.to || cur.line < visible.from)\n          setTimeout(operation(cm, function(){if (counter == curCount) extend(e);}), 150);\n      } else {\n        var outside = e.clientY < editorSize.top ? -20 : e.clientY > editorSize.bottom ? 
20 : 0;\n        if (outside) setTimeout(operation(cm, function() {\n          if (counter != curCount) return;\n          display.scroller.scrollTop += outside;\n          extend(e);\n        }), 50);\n      }\n    }\n\n    function done(e) {\n      counter = Infinity;\n      var cur = posFromMouse(cm, e);\n      if (cur) doSelect(cur);\n      e_preventDefault(e);\n      focusInput(cm);\n      off(document, \"mousemove\", move);\n      off(document, \"mouseup\", up);\n    }\n\n    var move = operation(cm, function(e) {\n      if (!ie && !e_button(e)) done(e);\n      else extend(e);\n    });\n    var up = operation(cm, done);\n    on(document, \"mousemove\", move);\n    on(document, \"mouseup\", up);\n  }\n\n  function onDrop(e) {\n    var cm = this;\n    if (eventInWidget(cm.display, e) || (cm.options.onDragEvent && cm.options.onDragEvent(cm, addStop(e))))\n      return;\n    e_preventDefault(e);\n    var pos = posFromMouse(cm, e, true), files = e.dataTransfer.files;\n    if (!pos || isReadOnly(cm)) return;\n    if (files && files.length && window.FileReader && window.File) {\n      var n = files.length, text = Array(n), read = 0;\n      var loadFile = function(file, i) {\n        var reader = new FileReader;\n        reader.onload = function() {\n          text[i] = reader.result;\n          if (++read == n) {\n            pos = clipPos(cm.doc, pos);\n            makeChange(cm.doc, {from: pos, to: pos, text: splitLines(text.join(\"\\n\")), origin: \"paste\"}, \"around\");\n          }\n        };\n        reader.readAsText(file);\n      };\n      for (var i = 0; i < n; ++i) loadFile(files[i], i);\n    } else {\n      // Don't do a replace if the drop happened inside of the selected text.\n      if (cm.state.draggingText && !(posLess(pos, cm.doc.sel.from) || posLess(cm.doc.sel.to, pos))) {\n        cm.state.draggingText(e);\n        // Ensure the editor is re-focused\n        setTimeout(bind(focusInput, cm), 20);\n        return;\n      }\n      try {\n        var text = e.dataTransfer.getData(\"Text\");\n        if (text) {\n          var curFrom = cm.doc.sel.from, curTo = cm.doc.sel.to;\n          setSelection(cm.doc, pos, pos);\n          if (cm.state.draggingText) replaceRange(cm.doc, \"\", curFrom, curTo, \"paste\");\n          cm.replaceSelection(text, null, \"paste\");\n          focusInput(cm);\n          onFocus(cm);\n        }\n      }\n      catch(e){}\n    }\n  }\n\n  function clickInGutter(cm, e) {\n    var display = cm.display;\n    try { var mX = e.clientX, mY = e.clientY; }\n    catch(e) { return false; }\n\n    if (mX >= Math.floor(getRect(display.gutters).right)) return false;\n    e_preventDefault(e);\n    if (!hasHandler(cm, \"gutterClick\")) return true;\n\n    var lineBox = getRect(display.lineDiv);\n    if (mY > lineBox.bottom) return true;\n    mY -= lineBox.top - display.viewOffset;\n\n    for (var i = 0; i < cm.options.gutters.length; ++i) {\n      var g = display.gutters.childNodes[i];\n      if (g && getRect(g).right >= mX) {\n        var line = lineAtHeight(cm.doc, mY);\n        var gutter = cm.options.gutters[i];\n        signalLater(cm, \"gutterClick\", cm, line, gutter, e);\n        break;\n      }\n    }\n    return true;\n  }\n\n  function onDragStart(cm, e) {\n    if (eventInWidget(cm.display, e)) return;\n\n    var txt = cm.getSelection();\n    e.dataTransfer.setData(\"Text\", txt);\n\n    // Use dummy image instead of default browsers image.\n    // Recent Safari (~6.0.2) have a tendency to segfault when this happens, so we don't do it there.\n    if 
(e.dataTransfer.setDragImage) {\n      var img = elt(\"img\", null, null, \"position: fixed; left: 0; top: 0;\");\n      if (opera) {\n        img.width = img.height = 1;\n        cm.display.wrapper.appendChild(img);\n        // Force a relayout, or Opera won't use our image for some obscure reason\n        img._top = img.offsetTop;\n      }\n      if (safari) {\n        if (cm.display.dragImg) {\n          img = cm.display.dragImg;\n        } else {\n          cm.display.dragImg = img;\n          img.src = \"data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==\";\n          cm.display.wrapper.appendChild(img);\n        }\n      }\n      e.dataTransfer.setDragImage(img, 0, 0);\n      if (opera) img.parentNode.removeChild(img);\n    }\n  }\n\n  function setScrollTop(cm, val) {\n    if (Math.abs(cm.doc.scrollTop - val) < 2) return;\n    cm.doc.scrollTop = val;\n    if (!gecko) updateDisplay(cm, [], val);\n    if (cm.display.scroller.scrollTop != val) cm.display.scroller.scrollTop = val;\n    if (cm.display.scrollbarV.scrollTop != val) cm.display.scrollbarV.scrollTop = val;\n    if (gecko) updateDisplay(cm, []);\n  }\n  function setScrollLeft(cm, val, isScroller) {\n    if (isScroller ? val == cm.doc.scrollLeft : Math.abs(cm.doc.scrollLeft - val) < 2) return;\n    val = Math.min(val, cm.display.scroller.scrollWidth - cm.display.scroller.clientWidth);\n    cm.doc.scrollLeft = val;\n    alignHorizontally(cm);\n    if (cm.display.scroller.scrollLeft != val) cm.display.scroller.scrollLeft = val;\n    if (cm.display.scrollbarH.scrollLeft != val) cm.display.scrollbarH.scrollLeft = val;\n  }\n\n  // Since the delta values reported on mouse wheel events are\n  // unstandardized between browsers and even browser versions, and\n  // generally horribly unpredictable, this code starts by measuring\n  // the scroll effect that the first few mouse wheel events have,\n  // and, from that, detects the way it can convert deltas to pixel\n  // offsets afterwards.\n  //\n  // The reason we want to know the amount a wheel event will scroll\n  // is that it gives us a chance to update the display before the\n  // actual scrolling happens, reducing flickering.\n\n  var wheelSamples = 0, wheelPixelsPerUnit = null;\n  // Fill in a browser-detected starting value on browsers where we\n  // know one. 
These don't have to be accurate -- the result of them\n  // being wrong would just be a slight flicker on the first wheel\n  // scroll (if it is large enough).\n  if (ie) wheelPixelsPerUnit = -.53;\n  else if (gecko) wheelPixelsPerUnit = 15;\n  else if (chrome) wheelPixelsPerUnit = -.7;\n  else if (safari) wheelPixelsPerUnit = -1/3;\n\n  function onScrollWheel(cm, e) {\n    var dx = e.wheelDeltaX, dy = e.wheelDeltaY;\n    if (dx == null && e.detail && e.axis == e.HORIZONTAL_AXIS) dx = e.detail;\n    if (dy == null && e.detail && e.axis == e.VERTICAL_AXIS) dy = e.detail;\n    else if (dy == null) dy = e.wheelDelta;\n\n    // Webkit browsers on OS X abort momentum scrolls when the target\n    // of the scroll event is removed from the scrollable element.\n    // This hack (see related code in patchDisplay) makes sure the\n    // element is kept around.\n    if (dy && mac && webkit) {\n      for (var cur = e.target; cur != scroll; cur = cur.parentNode) {\n        if (cur.lineObj) {\n          cm.display.currentWheelTarget = cur;\n          break;\n        }\n      }\n    }\n\n    var display = cm.display, scroll = display.scroller;\n    // On some browsers, horizontal scrolling will cause redraws to\n    // happen before the gutter has been realigned, causing it to\n    // wriggle around in a most unseemly way. When we have an\n    // estimated pixels/delta value, we just handle horizontal\n    // scrolling entirely here. It'll be slightly off from native, but\n    // better than glitching out.\n    if (dx && !gecko && !opera && wheelPixelsPerUnit != null) {\n      if (dy)\n        setScrollTop(cm, Math.max(0, Math.min(scroll.scrollTop + dy * wheelPixelsPerUnit, scroll.scrollHeight - scroll.clientHeight)));\n      setScrollLeft(cm, Math.max(0, Math.min(scroll.scrollLeft + dx * wheelPixelsPerUnit, scroll.scrollWidth - scroll.clientWidth)));\n      e_preventDefault(e);\n      display.wheelStartX = null; // Abort measurement, if in progress\n      return;\n    }\n\n    if (dy && wheelPixelsPerUnit != null) {\n      var pixels = dy * wheelPixelsPerUnit;\n      var top = cm.doc.scrollTop, bot = top + display.wrapper.clientHeight;\n      if (pixels < 0) top = Math.max(0, top + pixels - 50);\n      else bot = Math.min(cm.doc.height, bot + pixels + 50);\n      updateDisplay(cm, [], {top: top, bottom: bot});\n    }\n\n    if (wheelSamples < 20) {\n      if (display.wheelStartX == null) {\n        display.wheelStartX = scroll.scrollLeft; display.wheelStartY = scroll.scrollTop;\n        display.wheelDX = dx; display.wheelDY = dy;\n        setTimeout(function() {\n          if (display.wheelStartX == null) return;\n          var movedX = scroll.scrollLeft - display.wheelStartX;\n          var movedY = scroll.scrollTop - display.wheelStartY;\n          var sample = (movedY && display.wheelDY && movedY / display.wheelDY) ||\n            (movedX && display.wheelDX && movedX / display.wheelDX);\n          display.wheelStartX = display.wheelStartY = null;\n          if (!sample) return;\n          wheelPixelsPerUnit = (wheelPixelsPerUnit * wheelSamples + sample) / (wheelSamples + 1);\n          ++wheelSamples;\n        }, 200);\n      } else {\n        display.wheelDX += dx; display.wheelDY += dy;\n      }\n    }\n  }\n\n  function doHandleBinding(cm, bound, dropShift) {\n    if (typeof bound == \"string\") {\n      bound = commands[bound];\n      if (!bound) return false;\n    }\n    // Ensure previous input has been read, so that the handler sees a\n    // consistent view of the document\n    if 
(cm.display.pollingFast && readInput(cm)) cm.display.pollingFast = false;\n    var doc = cm.doc, prevShift = doc.sel.shift, done = false;\n    try {\n      if (isReadOnly(cm)) cm.state.suppressEdits = true;\n      if (dropShift) doc.sel.shift = false;\n      done = bound(cm) != Pass;\n    } finally {\n      doc.sel.shift = prevShift;\n      cm.state.suppressEdits = false;\n    }\n    return done;\n  }\n\n  function allKeyMaps(cm) {\n    var maps = cm.state.keyMaps.slice(0);\n    if (cm.options.extraKeys) maps.push(cm.options.extraKeys);\n    maps.push(cm.options.keyMap);\n    return maps;\n  }\n\n  var maybeTransition;\n  function handleKeyBinding(cm, e) {\n    // Handle auto keymap transitions\n    var startMap = getKeyMap(cm.options.keyMap), next = startMap.auto;\n    clearTimeout(maybeTransition);\n    if (next && !isModifierKey(e)) maybeTransition = setTimeout(function() {\n      if (getKeyMap(cm.options.keyMap) == startMap)\n        cm.options.keyMap = (next.call ? next.call(null, cm) : next);\n    }, 50);\n\n    var name = keyName(e, true), handled = false;\n    if (!name) return false;\n    var keymaps = allKeyMaps(cm);\n\n    if (e.shiftKey) {\n      // First try to resolve full name (including 'Shift-'). Failing\n      // that, see if there is a cursor-motion command (starting with\n      // 'go') bound to the keyname without 'Shift-'.\n      handled = lookupKey(\"Shift-\" + name, keymaps, function(b) {return doHandleBinding(cm, b, true);})\n             || lookupKey(name, keymaps, function(b) {\n                  if (typeof b == \"string\" && /^go[A-Z]/.test(b)) return doHandleBinding(cm, b);\n                });\n    } else {\n      handled = lookupKey(name, keymaps, function(b) { return doHandleBinding(cm, b); });\n    }\n    if (handled == \"stop\") handled = false;\n\n    if (handled) {\n      e_preventDefault(e);\n      restartBlink(cm);\n      if (ie_lt9) { e.oldKeyCode = e.keyCode; e.keyCode = 0; }\n    }\n    return handled;\n  }\n\n  function handleCharBinding(cm, e, ch) {\n    var handled = lookupKey(\"'\" + ch + \"'\", allKeyMaps(cm),\n                            function(b) { return doHandleBinding(cm, b, true); });\n    if (handled) {\n      e_preventDefault(e);\n      restartBlink(cm);\n    }\n    return handled;\n  }\n\n  var lastStoppedKey = null;\n  function onKeyDown(e) {\n    var cm = this;\n    if (!cm.state.focused) onFocus(cm);\n    if (ie && e.keyCode == 27) { e.returnValue = false; }\n    if (cm.options.onKeyEvent && cm.options.onKeyEvent(cm, addStop(e))) return;\n    var code = e.keyCode;\n    // IE does strange things with escape.\n    cm.doc.sel.shift = code == 16 || e.shiftKey;\n    // First give onKeyEvent option a chance to handle this.\n    var handled = handleKeyBinding(cm, e);\n    if (opera) {\n      lastStoppedKey = handled ? code : null;\n      // Opera has no cut event... we try to at least catch the key combo\n      if (!handled && code == 88 && !hasCopyEvent && (mac ? e.metaKey : e.ctrlKey))\n        cm.replaceSelection(\"\");\n    }\n  }\n\n  function onKeyPress(e) {\n    var cm = this;\n    if (cm.options.onKeyEvent && cm.options.onKeyEvent(cm, addStop(e))) return;\n    var keyCode = e.keyCode, charCode = e.charCode;\n    if (opera && keyCode == lastStoppedKey) {lastStoppedKey = null; e_preventDefault(e); return;}\n    if (((opera && (!e.which || e.which < 10)) || khtml) && handleKeyBinding(cm, e)) return;\n    var ch = String.fromCharCode(charCode == null ? 
keyCode : charCode);\n    if (this.options.electricChars && this.doc.mode.electricChars &&\n        this.options.smartIndent && !isReadOnly(this) &&\n        this.doc.mode.electricChars.indexOf(ch) > -1)\n      setTimeout(operation(cm, function() {indentLine(cm, cm.doc.sel.to.line, \"smart\");}), 75);\n    if (handleCharBinding(cm, e, ch)) return;\n    fastPoll(cm);\n  }\n\n  function onFocus(cm) {\n    if (cm.options.readOnly == \"nocursor\") return;\n    if (!cm.state.focused) {\n      signal(cm, \"focus\", cm);\n      cm.state.focused = true;\n      if (cm.display.wrapper.className.search(/\\bCodeMirror-focused\\b/) == -1)\n        cm.display.wrapper.className += \" CodeMirror-focused\";\n      resetInput(cm, true);\n    }\n    slowPoll(cm);\n    restartBlink(cm);\n  }\n  function onBlur(cm) {\n    if (cm.state.focused) {\n      signal(cm, \"blur\", cm);\n      cm.state.focused = false;\n      cm.display.wrapper.className = cm.display.wrapper.className.replace(\" CodeMirror-focused\", \"\");\n    }\n    clearInterval(cm.display.blinker);\n    setTimeout(function() {if (!cm.state.focused) cm.doc.sel.shift = false;}, 150);\n  }\n\n  var detectingSelectAll;\n  function onContextMenu(cm, e) {\n    var display = cm.display, sel = cm.doc.sel;\n    if (eventInWidget(display, e)) return;\n\n    var pos = posFromMouse(cm, e), scrollPos = display.scroller.scrollTop;\n    if (!pos || opera) return; // Opera is difficult.\n    if (posEq(sel.from, sel.to) || posLess(pos, sel.from) || !posLess(pos, sel.to))\n      operation(cm, setSelection)(cm.doc, pos, pos);\n\n    var oldCSS = display.input.style.cssText;\n    display.inputDiv.style.position = \"absolute\";\n    display.input.style.cssText = \"position: fixed; width: 30px; height: 30px; top: \" + (e.clientY - 5) +\n      \"px; left: \" + (e.clientX - 5) + \"px; z-index: 1000; background: white; outline: none;\" +\n      \"border-width: 0; outline: none; overflow: hidden; opacity: .05; -ms-opacity: .05; filter: alpha(opacity=5);\";\n    focusInput(cm);\n    resetInput(cm, true);\n    // Adds \"Select all\" to context menu in FF\n    if (posEq(sel.from, sel.to)) display.input.value = display.prevInput = \" \";\n\n    function rehide() {\n      display.inputDiv.style.position = \"relative\";\n      display.input.style.cssText = oldCSS;\n      if (ie_lt9) display.scrollbarV.scrollTop = display.scroller.scrollTop = scrollPos;\n      slowPoll(cm);\n\n      // Try to detect the user choosing select-all\n      if (display.input.selectionStart != null && (!ie || ie_lt9)) {\n        clearTimeout(detectingSelectAll);\n        var extval = display.input.value = \" \" + (posEq(sel.from, sel.to) ? 
\"\" : display.input.value), i = 0;\n        display.prevInput = \" \";\n        display.input.selectionStart = 1; display.input.selectionEnd = extval.length;\n        var poll = function(){\n          if (display.prevInput == \" \" && display.input.selectionStart == 0)\n            operation(cm, commands.selectAll)(cm);\n          else if (i++ < 10) detectingSelectAll = setTimeout(poll, 500);\n          else resetInput(cm);\n        };\n        detectingSelectAll = setTimeout(poll, 200);\n      }\n    }\n\n    if (captureMiddleClick) {\n      e_stop(e);\n      var mouseup = function() {\n        off(window, \"mouseup\", mouseup);\n        setTimeout(rehide, 20);\n      };\n      on(window, \"mouseup\", mouseup);\n    } else {\n      setTimeout(rehide, 50);\n    }\n  }\n\n  // UPDATING\n\n  function changeEnd(change) {\n    return Pos(change.from.line + change.text.length - 1,\n               lst(change.text).length + (change.text.length == 1 ? change.from.ch : 0));\n  }\n\n  // Make sure a position will be valid after the given change.\n  function clipPostChange(doc, change, pos) {\n    if (!posLess(change.from, pos)) return clipPos(doc, pos);\n    var diff = (change.text.length - 1) - (change.to.line - change.from.line);\n    if (pos.line > change.to.line + diff) {\n      var preLine = pos.line - diff, lastLine = doc.first + doc.size - 1;\n      if (preLine > lastLine) return Pos(lastLine, getLine(doc, lastLine).text.length);\n      return clipToLen(pos, getLine(doc, preLine).text.length);\n    }\n    if (pos.line == change.to.line + diff)\n      return clipToLen(pos, lst(change.text).length + (change.text.length == 1 ? change.from.ch : 0) +\n                       getLine(doc, change.to.line).text.length - change.to.ch);\n    var inside = pos.line - change.from.line;\n    return clipToLen(pos, change.text[inside].length + (inside ? 
0 : change.from.ch));\n  }\n\n  // Hint can be null|\"end\"|\"start\"|\"around\"|{anchor,head}\n  function computeSelAfterChange(doc, change, hint) {\n    if (hint && typeof hint == \"object\") // Assumed to be {anchor, head} object\n      return {anchor: clipPostChange(doc, change, hint.anchor),\n              head: clipPostChange(doc, change, hint.head)};\n\n    if (hint == \"start\") return {anchor: change.from, head: change.from};\n\n    var end = changeEnd(change);\n    if (hint == \"around\") return {anchor: change.from, head: end};\n    if (hint == \"end\") return {anchor: end, head: end};\n\n    // hint is null, leave the selection alone as much as possible\n    var adjustPos = function(pos) {\n      if (posLess(pos, change.from)) return pos;\n      if (!posLess(change.to, pos)) return end;\n\n      var line = pos.line + change.text.length - (change.to.line - change.from.line) - 1, ch = pos.ch;\n      if (pos.line == change.to.line) ch += end.ch - change.to.ch;\n      return Pos(line, ch);\n    };\n    return {anchor: adjustPos(doc.sel.anchor), head: adjustPos(doc.sel.head)};\n  }\n\n  function filterChange(doc, change) {\n    var obj = {\n      canceled: false,\n      from: change.from,\n      to: change.to,\n      text: change.text,\n      origin: change.origin,\n      update: function(from, to, text, origin) {\n        if (from) this.from = clipPos(doc, from);\n        if (to) this.to = clipPos(doc, to);\n        if (text) this.text = text;\n        if (origin !== undefined) this.origin = origin;\n      },\n      cancel: function() { this.canceled = true; }\n    };\n    signal(doc, \"beforeChange\", doc, obj);\n    if (doc.cm) signal(doc.cm, \"beforeChange\", doc.cm, obj);\n\n    if (obj.canceled) return null;\n    return {from: obj.from, to: obj.to, text: obj.text, origin: obj.origin};\n  }\n\n  // Replace the range from from to to by the strings in replacement.\n  // change is a {from, to, text [, origin]} object\n  function makeChange(doc, change, selUpdate, ignoreReadOnly) {\n    if (doc.cm) {\n      if (!doc.cm.curOp) return operation(doc.cm, makeChange)(doc, change, selUpdate, ignoreReadOnly);\n      if (doc.cm.state.suppressEdits) return;\n    }\n\n    if (hasHandler(doc, \"beforeChange\") || doc.cm && hasHandler(doc.cm, \"beforeChange\")) {\n      change = filterChange(doc, change);\n      if (!change) return;\n    }\n\n    // Possibly split or suppress the update based on the presence\n    // of read-only spans in its range.\n    var split = sawReadOnlySpans && !ignoreReadOnly && removeReadOnlyRanges(doc, change.from, change.to);\n    if (split) {\n      for (var i = split.length - 1; i >= 1; --i)\n        makeChangeNoReadonly(doc, {from: split[i].from, to: split[i].to, text: [\"\"]});\n      if (split.length)\n        makeChangeNoReadonly(doc, {from: split[0].from, to: split[0].to, text: change.text}, selUpdate);\n    } else {\n      makeChangeNoReadonly(doc, change, selUpdate);\n    }\n  }\n\n  function makeChangeNoReadonly(doc, change, selUpdate) {\n    var selAfter = computeSelAfterChange(doc, change, selUpdate);\n    addToHistory(doc, change, selAfter, doc.cm ? 
doc.cm.curOp.id : NaN);\n\n    makeChangeSingleDoc(doc, change, selAfter, stretchSpansOverChange(doc, change));\n    var rebased = [];\n\n    linkedDocs(doc, function(doc, sharedHist) {\n      if (!sharedHist && indexOf(rebased, doc.history) == -1) {\n        rebaseHist(doc.history, change);\n        rebased.push(doc.history);\n      }\n      makeChangeSingleDoc(doc, change, null, stretchSpansOverChange(doc, change));\n    });\n  }\n\n  function makeChangeFromHistory(doc, type) {\n    if (doc.cm && doc.cm.state.suppressEdits) return;\n\n    var hist = doc.history;\n    var event = (type == \"undo\" ? hist.done : hist.undone).pop();\n    if (!event) return;\n    hist.dirtyCounter += type == \"undo\" ? -1 : 1;\n\n    var anti = {changes: [], anchorBefore: event.anchorAfter, headBefore: event.headAfter,\n                anchorAfter: event.anchorBefore, headAfter: event.headBefore};\n    (type == \"undo\" ? hist.undone : hist.done).push(anti);\n\n    for (var i = event.changes.length - 1; i >= 0; --i) {\n      var change = event.changes[i];\n      change.origin = type;\n      anti.changes.push(historyChangeFromChange(doc, change));\n\n      var after = i ? computeSelAfterChange(doc, change, null)\n                    : {anchor: event.anchorBefore, head: event.headBefore};\n      makeChangeSingleDoc(doc, change, after, mergeOldSpans(doc, change));\n      var rebased = [];\n\n      linkedDocs(doc, function(doc, sharedHist) {\n        if (!sharedHist && indexOf(rebased, doc.history) == -1) {\n          rebaseHist(doc.history, change);\n          rebased.push(doc.history);\n        }\n        makeChangeSingleDoc(doc, change, null, mergeOldSpans(doc, change));\n      });\n    }\n  }\n\n  function shiftDoc(doc, distance) {\n    function shiftPos(pos) {return Pos(pos.line + distance, pos.ch);}\n    doc.first += distance;\n    if (doc.cm) regChange(doc.cm, doc.first, doc.first, distance);\n    doc.sel.head = shiftPos(doc.sel.head); doc.sel.anchor = shiftPos(doc.sel.anchor);\n    doc.sel.from = shiftPos(doc.sel.from); doc.sel.to = shiftPos(doc.sel.to);\n  }\n\n  function makeChangeSingleDoc(doc, change, selAfter, spans) {\n    if (doc.cm && !doc.cm.curOp)\n      return operation(doc.cm, makeChangeSingleDoc)(doc, change, selAfter, spans);\n\n    if (change.to.line < doc.first) {\n      shiftDoc(doc, change.text.length - 1 - (change.to.line - change.from.line));\n      return;\n    }\n    if (change.from.line > doc.lastLine()) return;\n\n    // Clip the change to the size of this doc\n    if (change.from.line < doc.first) {\n      var shift = change.text.length - 1 - (doc.first - change.from.line);\n      shiftDoc(doc, shift);\n      change = {from: Pos(doc.first, 0), to: Pos(change.to.line + shift, change.to.ch),\n                text: [lst(change.text)], origin: change.origin};\n    }\n    var last = doc.lastLine();\n    if (change.to.line > last) {\n      change = {from: change.from, to: Pos(last, getLine(doc, last).text.length),\n                text: [change.text[0]], origin: change.origin};\n    }\n\n    change.removed = getBetween(doc, change.from, change.to);\n\n    if (!selAfter) selAfter = computeSelAfterChange(doc, change, null);\n    if (doc.cm) makeChangeSingleDocInEditor(doc.cm, change, spans, selAfter);\n    else updateDoc(doc, change, spans, selAfter);\n  }\n\n  function makeChangeSingleDocInEditor(cm, change, spans, selAfter) {\n    var doc = cm.doc, display = cm.display, from = change.from, to = change.to;\n\n    var recomputeMaxLength = false, checkWidthStart = from.line;\n    if 
(!cm.options.lineWrapping) {\n      checkWidthStart = lineNo(visualLine(doc, getLine(doc, from.line)));\n      doc.iter(checkWidthStart, to.line + 1, function(line) {\n        if (line == display.maxLine) {\n          recomputeMaxLength = true;\n          return true;\n        }\n      });\n    }\n\n    updateDoc(doc, change, spans, selAfter, estimateHeight(cm));\n\n    if (!cm.options.lineWrapping) {\n      doc.iter(checkWidthStart, from.line + change.text.length, function(line) {\n        var len = lineLength(doc, line);\n        if (len > display.maxLineLength) {\n          display.maxLine = line;\n          display.maxLineLength = len;\n          display.maxLineChanged = true;\n          recomputeMaxLength = false;\n        }\n      });\n      if (recomputeMaxLength) cm.curOp.updateMaxLine = true;\n    }\n\n    // Adjust frontier, schedule worker\n    doc.frontier = Math.min(doc.frontier, from.line);\n    startWorker(cm, 400);\n\n    var lendiff = change.text.length - (to.line - from.line) - 1;\n    // Remember that these lines changed, for updating the display\n    regChange(cm, from.line, to.line + 1, lendiff);\n\n    if (hasHandler(cm, \"change\")) {\n      var changeObj = {from: from, to: to,\n                       text: change.text,\n                       removed: change.removed,\n                       origin: change.origin};\n      if (cm.curOp.textChanged) {\n        for (var cur = cm.curOp.textChanged; cur.next; cur = cur.next) {}\n        cur.next = changeObj;\n      } else cm.curOp.textChanged = changeObj;\n    }\n  }\n\n  function replaceRange(doc, code, from, to, origin) {\n    if (!to) to = from;\n    if (posLess(to, from)) { var tmp = to; to = from; from = tmp; }\n    if (typeof code == \"string\") code = splitLines(code);\n    makeChange(doc, {from: from, to: to, text: code, origin: origin}, null);\n  }\n\n  // POSITION OBJECT\n\n  function Pos(line, ch) {\n    if (!(this instanceof Pos)) return new Pos(line, ch);\n    this.line = line; this.ch = ch;\n  }\n  CodeMirror.Pos = Pos;\n\n  function posEq(a, b) {return a.line == b.line && a.ch == b.ch;}\n  function posLess(a, b) {return a.line < b.line || (a.line == b.line && a.ch < b.ch);}\n  function copyPos(x) {return Pos(x.line, x.ch);}\n\n  // SELECTION\n\n  function clipLine(doc, n) {return Math.max(doc.first, Math.min(n, doc.first + doc.size - 1));}\n  function clipPos(doc, pos) {\n    if (pos.line < doc.first) return Pos(doc.first, 0);\n    var last = doc.first + doc.size - 1;\n    if (pos.line > last) return Pos(last, getLine(doc, last).text.length);\n    return clipToLen(pos, getLine(doc, pos.line).text.length);\n  }\n  function clipToLen(pos, linelen) {\n    var ch = pos.ch;\n    if (ch == null || ch > linelen) return Pos(pos.line, linelen);\n    else if (ch < 0) return Pos(pos.line, 0);\n    else return pos;\n  }\n  function isLine(doc, l) {return l >= doc.first && l < doc.first + doc.size;}\n\n  // If shift is held, this will move the selection anchor. 
Otherwise,\n  // it'll set the whole selection.\n  function extendSelection(doc, pos, other, bias) {\n    if (doc.sel.shift || doc.sel.extend) {\n      var anchor = doc.sel.anchor;\n      if (other) {\n        var posBefore = posLess(pos, anchor);\n        if (posBefore != posLess(other, anchor)) {\n          anchor = pos;\n          pos = other;\n        } else if (posBefore != posLess(pos, other)) {\n          pos = other;\n        }\n      }\n      setSelection(doc, anchor, pos, bias);\n    } else {\n      setSelection(doc, pos, other || pos, bias);\n    }\n    if (doc.cm) doc.cm.curOp.userSelChange = true;\n  }\n\n  function filterSelectionChange(doc, anchor, head) {\n    var obj = {anchor: anchor, head: head};\n    signal(doc, \"beforeSelectionChange\", doc, obj);\n    if (doc.cm) signal(doc.cm, \"beforeSelectionChange\", doc.cm, obj);\n    obj.anchor = clipPos(doc, obj.anchor); obj.head = clipPos(doc, obj.head);\n    return obj;\n  }\n\n  // Update the selection. Last two args are only used by\n  // updateDoc, since they have to be expressed in the line\n  // numbers before the update.\n  function setSelection(doc, anchor, head, bias, checkAtomic) {\n    if (!checkAtomic && hasHandler(doc, \"beforeSelectionChange\") || doc.cm && hasHandler(doc.cm, \"beforeSelectionChange\")) {\n      var filtered = filterSelectionChange(doc, anchor, head);\n      head = filtered.head;\n      anchor = filtered.anchor;\n    }\n\n    var sel = doc.sel;\n    sel.goalColumn = null;\n    // Skip over atomic spans.\n    if (checkAtomic || !posEq(anchor, sel.anchor))\n      anchor = skipAtomic(doc, anchor, bias, checkAtomic != \"push\");\n    if (checkAtomic || !posEq(head, sel.head))\n      head = skipAtomic(doc, head, bias, checkAtomic != \"push\");\n\n    if (posEq(sel.anchor, anchor) && posEq(sel.head, head)) return;\n\n    sel.anchor = anchor; sel.head = head;\n    var inv = posLess(head, anchor);\n    sel.from = inv ? head : anchor;\n    sel.to = inv ? anchor : head;\n\n    if (doc.cm)\n      doc.cm.curOp.updateInput = doc.cm.curOp.selectionChanged = true;\n\n    signalLater(doc, \"cursorActivity\", doc);\n  }\n\n  function reCheckSelection(cm) {\n    setSelection(cm.doc, cm.doc.sel.from, cm.doc.sel.to, null, \"push\");\n  }\n\n  function skipAtomic(doc, pos, bias, mayClear) {\n    var flipped = false, curPos = pos;\n    var dir = bias || 1;\n    doc.cantEdit = false;\n    search: for (;;) {\n      var line = getLine(doc, curPos.line);\n      if (line.markedSpans) {\n        for (var i = 0; i < line.markedSpans.length; ++i) {\n          var sp = line.markedSpans[i], m = sp.marker;\n          if ((sp.from == null || (m.inclusiveLeft ? sp.from <= curPos.ch : sp.from < curPos.ch)) &&\n              (sp.to == null || (m.inclusiveRight ? sp.to >= curPos.ch : sp.to > curPos.ch))) {\n            if (mayClear) {\n              signal(m, \"beforeCursorEnter\");\n              if (m.explicitlyCleared) {\n                if (!line.markedSpans) break;\n                else {--i; continue;}\n              }\n            }\n            if (!m.atomic) continue;\n            var newPos = m.find()[dir < 0 ? 
\"from\" : \"to\"];\n            if (posEq(newPos, curPos)) {\n              newPos.ch += dir;\n              if (newPos.ch < 0) {\n                if (newPos.line > doc.first) newPos = clipPos(doc, Pos(newPos.line - 1));\n                else newPos = null;\n              } else if (newPos.ch > line.text.length) {\n                if (newPos.line < doc.first + doc.size - 1) newPos = Pos(newPos.line + 1, 0);\n                else newPos = null;\n              }\n              if (!newPos) {\n                if (flipped) {\n                  // Driven in a corner -- no valid cursor position found at all\n                  // -- try again *with* clearing, if we didn't already\n                  if (!mayClear) return skipAtomic(doc, pos, bias, true);\n                  // Otherwise, turn off editing until further notice, and return the start of the doc\n                  doc.cantEdit = true;\n                  return Pos(doc.first, 0);\n                }\n                flipped = true; newPos = pos; dir = -dir;\n              }\n            }\n            curPos = newPos;\n            continue search;\n          }\n        }\n      }\n      return curPos;\n    }\n  }\n\n  // SCROLLING\n\n  function scrollCursorIntoView(cm) {\n    var coords = scrollPosIntoView(cm, cm.doc.sel.head);\n    if (!cm.state.focused) return;\n    var display = cm.display, box = getRect(display.sizer), doScroll = null, pTop = paddingTop(cm.display);\n    if (coords.top + pTop + box.top < 0) doScroll = true;\n    else if (coords.bottom + pTop + box.top > (window.innerHeight || document.documentElement.clientHeight)) doScroll = false;\n    if (doScroll != null && !phantom) {\n      var hidden = display.cursor.style.display == \"none\";\n      if (hidden) {\n        display.cursor.style.display = \"\";\n        display.cursor.style.left = coords.left + \"px\";\n        display.cursor.style.top = (coords.top - display.viewOffset) + \"px\";\n      }\n      display.cursor.scrollIntoView(doScroll);\n      if (hidden) display.cursor.style.display = \"none\";\n    }\n  }\n\n  function scrollPosIntoView(cm, pos, margin) {\n    if (margin == null) margin = 0;\n    for (;;) {\n      var changed = false, coords = cursorCoords(cm, pos);\n      var scrollPos = calculateScrollPos(cm, coords.left, coords.top - margin, coords.left, coords.bottom + margin);\n      var startTop = cm.doc.scrollTop, startLeft = cm.doc.scrollLeft;\n      if (scrollPos.scrollTop != null) {\n        setScrollTop(cm, scrollPos.scrollTop);\n        if (Math.abs(cm.doc.scrollTop - startTop) > 1) changed = true;\n      }\n      if (scrollPos.scrollLeft != null) {\n        setScrollLeft(cm, scrollPos.scrollLeft);\n        if (Math.abs(cm.doc.scrollLeft - startLeft) > 1) changed = true;\n      }\n      if (!changed) return coords;\n    }\n  }\n\n  function scrollIntoView(cm, x1, y1, x2, y2) {\n    var scrollPos = calculateScrollPos(cm, x1, y1, x2, y2);\n    if (scrollPos.scrollTop != null) setScrollTop(cm, scrollPos.scrollTop);\n    if (scrollPos.scrollLeft != null) setScrollLeft(cm, scrollPos.scrollLeft);\n  }\n\n  function calculateScrollPos(cm, x1, y1, x2, y2) {\n    var display = cm.display, pt = paddingTop(display);\n    y1 += pt; y2 += pt;\n    var screen = display.scroller.clientHeight - scrollerCutOff, screentop = display.scroller.scrollTop, result = {};\n    var docBottom = cm.doc.height + paddingVert(display);\n    var atTop = y1 < pt + 10, atBottom = y2 + pt > docBottom - 10;\n    if (y1 < screentop) result.scrollTop = atTop ? 
0 : Math.max(0, y1);\n    else if (y2 > screentop + screen) result.scrollTop = (atBottom ? docBottom : y2) - screen;\n\n    var screenw = display.scroller.clientWidth - scrollerCutOff, screenleft = display.scroller.scrollLeft;\n    x1 += display.gutters.offsetWidth; x2 += display.gutters.offsetWidth;\n    var gutterw = display.gutters.offsetWidth;\n    var atLeft = x1 < gutterw + 10;\n    if (x1 < screenleft + gutterw || atLeft) {\n      if (atLeft) x1 = 0;\n      result.scrollLeft = Math.max(0, x1 - 10 - gutterw);\n    } else if (x2 > screenw + screenleft - 3) {\n      result.scrollLeft = x2 + 10 - screenw;\n    }\n    return result;\n  }\n\n  function updateScrollPos(cm, left, top) {\n    cm.curOp.updateScrollPos = {scrollLeft: left, scrollTop: top};\n  }\n\n  function addToScrollPos(cm, left, top) {\n    var pos = cm.curOp.updateScrollPos || (cm.curOp.updateScrollPos = {scrollLeft: cm.doc.scrollLeft, scrollTop: cm.doc.scrollTop});\n    var scroll = cm.display.scroller;\n    pos.scrollTop = Math.max(0, Math.min(scroll.scrollHeight - scroll.clientHeight, pos.scrollTop + top));\n    pos.scrollLeft = Math.max(0, Math.min(scroll.scrollWidth - scroll.clientWidth, pos.scrollLeft + left));\n  }\n\n  // API UTILITIES\n\n  function indentLine(cm, n, how, aggressive) {\n    var doc = cm.doc;\n    if (!how) how = \"add\";\n    if (how == \"smart\") {\n      if (!cm.doc.mode.indent) how = \"prev\";\n      else var state = getStateBefore(cm, n);\n    }\n\n    var tabSize = cm.options.tabSize;\n    var line = getLine(doc, n), curSpace = countColumn(line.text, null, tabSize);\n    var curSpaceString = line.text.match(/^\\s*/)[0], indentation;\n    if (how == \"smart\") {\n      indentation = cm.doc.mode.indent(state, line.text.slice(curSpaceString.length), line.text);\n      if (indentation == Pass) {\n        if (!aggressive) return;\n        how = \"prev\";\n      }\n    }\n    if (how == \"prev\") {\n      if (n > doc.first) indentation = countColumn(getLine(doc, n-1).text, null, tabSize);\n      else indentation = 0;\n    } else if (how == \"add\") {\n      indentation = curSpace + cm.options.indentUnit;\n    } else if (how == \"subtract\") {\n      indentation = curSpace - cm.options.indentUnit;\n    }\n    indentation = Math.max(0, indentation);\n\n    var indentString = \"\", pos = 0;\n    if (cm.options.indentWithTabs)\n      for (var i = Math.floor(indentation / tabSize); i; --i) {pos += tabSize; indentString += \"\\t\";}\n    if (pos < indentation) indentString += spaceStr(indentation - pos);\n\n    if (indentString != curSpaceString)\n      replaceRange(cm.doc, indentString, Pos(n, 0), Pos(n, curSpaceString.length), \"+input\");\n    line.stateAfter = null;\n  }\n\n  function changeLine(cm, handle, op) {\n    var no = handle, line = handle, doc = cm.doc;\n    if (typeof handle == \"number\") line = getLine(doc, clipLine(doc, handle));\n    else no = lineNo(handle);\n    if (no == null) return null;\n    if (op(line, no)) regChange(cm, no, no + 1);\n    else return null;\n    return line;\n  }\n\n  function findPosH(doc, pos, dir, unit, visually) {\n    var line = pos.line, ch = pos.ch;\n    var lineObj = getLine(doc, line);\n    var possible = true;\n    function findNextLine() {\n      var l = line + dir;\n      if (l < doc.first || l >= doc.first + doc.size) return (possible = false);\n      line = l;\n      return lineObj = getLine(doc, l);\n    }\n    function moveOnce(boundToLine) {\n      var next = (visually ? 
moveVisually : moveLogically)(lineObj, ch, dir, true);\n      if (next == null) {\n        if (!boundToLine && findNextLine()) {\n          if (visually) ch = (dir < 0 ? lineRight : lineLeft)(lineObj);\n          else ch = dir < 0 ? lineObj.text.length : 0;\n        } else return (possible = false);\n      } else ch = next;\n      return true;\n    }\n\n    if (unit == \"char\") moveOnce();\n    else if (unit == \"column\") moveOnce(true);\n    else if (unit == \"word\" || unit == \"group\") {\n      var sawType = null, group = unit == \"group\";\n      for (var first = true;; first = false) {\n        if (dir < 0 && !moveOnce(!first)) break;\n        var cur = lineObj.text.charAt(ch) || \"\\n\";\n        var type = isWordChar(cur) ? \"w\"\n          : !group ? null\n          : /\\s/.test(cur) ? null\n          : \"p\";\n        if (sawType && sawType != type) {\n          if (dir < 0) {dir = 1; moveOnce();}\n          break;\n        }\n        if (type) sawType = type;\n        if (dir > 0 && !moveOnce(!first)) break;\n      }\n    }\n    var result = skipAtomic(doc, Pos(line, ch), dir, true);\n    if (!possible) result.hitSide = true;\n    return result;\n  }\n\n  function findPosV(cm, pos, dir, unit) {\n    var doc = cm.doc, x = pos.left, y;\n    if (unit == \"page\") {\n      var pageSize = Math.min(cm.display.wrapper.clientHeight, window.innerHeight || document.documentElement.clientHeight);\n      y = pos.top + dir * (pageSize - (dir < 0 ? 1.5 : .5) * textHeight(cm.display));\n    } else if (unit == \"line\") {\n      y = dir > 0 ? pos.bottom + 3 : pos.top - 3;\n    }\n    for (;;) {\n      var target = coordsChar(cm, x, y);\n      if (!target.outside) break;\n      if (dir < 0 ? y <= 0 : y >= doc.height) { target.hitSide = true; break; }\n      y += dir * 5;\n    }\n    return target;\n  }\n\n  function findWordAt(line, pos) {\n    var start = pos.ch, end = pos.ch;\n    if (line) {\n      if (pos.after === false || end == line.length) --start; else ++end;\n      var startChar = line.charAt(start);\n      var check = isWordChar(startChar) ? isWordChar\n        : /\\s/.test(startChar) ? function(ch) {return /\\s/.test(ch);}\n        : function(ch) {return !/\\s/.test(ch) && !isWordChar(ch);};\n      while (start > 0 && check(line.charAt(start - 1))) --start;\n      while (end < line.length && check(line.charAt(end))) ++end;\n    }\n    return {from: Pos(pos.line, start), to: Pos(pos.line, end)};\n  }\n\n  function selectLine(cm, line) {\n    extendSelection(cm.doc, Pos(line, 0), clipPos(cm.doc, Pos(line + 1, 0)));\n  }\n\n  // PROTOTYPE\n\n  // The publicly visible API. Note that operation(null, f) means\n  // 'wrap f in an operation, performed on its `this` parameter'\n\n  CodeMirror.prototype = {\n    focus: function(){window.focus(); focusInput(this); onFocus(this); fastPoll(this);},\n\n    setOption: function(option, value) {\n      var options = this.options, old = options[option];\n      if (options[option] == value && option != \"mode\") return;\n      options[option] = value;\n      if (optionHandlers.hasOwnProperty(option))\n        operation(this, optionHandlers[option])(this, value, old);\n    },\n\n    getOption: function(option) {return this.options[option];},\n    getDoc: function() {return this.doc;},\n\n    addKeyMap: function(map, bottom) {\n      this.state.keyMaps[bottom ? 
\"push\" : \"unshift\"](map);\n    },\n    removeKeyMap: function(map) {\n      var maps = this.state.keyMaps;\n      for (var i = 0; i < maps.length; ++i)\n        if ((typeof map == \"string\" ? maps[i].name : maps[i]) == map) {\n          maps.splice(i, 1);\n          return true;\n        }\n    },\n\n    addOverlay: operation(null, function(spec, options) {\n      var mode = spec.token ? spec : CodeMirror.getMode(this.options, spec);\n      if (mode.startState) throw new Error(\"Overlays may not be stateful.\");\n      this.state.overlays.push({mode: mode, modeSpec: spec, opaque: options && options.opaque});\n      this.state.modeGen++;\n      regChange(this);\n    }),\n    removeOverlay: operation(null, function(spec) {\n      var overlays = this.state.overlays;\n      for (var i = 0; i < overlays.length; ++i) {\n        if (overlays[i].modeSpec == spec) {\n          overlays.splice(i, 1);\n          this.state.modeGen++;\n          regChange(this);\n          return;\n        }\n      }\n    }),\n\n    indentLine: operation(null, function(n, dir, aggressive) {\n      if (typeof dir != \"string\") {\n        if (dir == null) dir = this.options.smartIndent ? \"smart\" : \"prev\";\n        else dir = dir ? \"add\" : \"subtract\";\n      }\n      if (isLine(this.doc, n)) indentLine(this, n, dir, aggressive);\n    }),\n    indentSelection: operation(null, function(how) {\n      var sel = this.doc.sel;\n      if (posEq(sel.from, sel.to)) return indentLine(this, sel.from.line, how);\n      var e = sel.to.line - (sel.to.ch ? 0 : 1);\n      for (var i = sel.from.line; i <= e; ++i) indentLine(this, i, how);\n    }),\n\n    // Fetch the parser token for a given character. Useful for hacks\n    // that want to inspect the mode state (say, for completion).\n    getTokenAt: function(pos) {\n      var doc = this.doc;\n      pos = clipPos(doc, pos);\n      var state = getStateBefore(this, pos.line), mode = this.doc.mode;\n      var line = getLine(doc, pos.line);\n      var stream = new StringStream(line.text, this.options.tabSize);\n      while (stream.pos < pos.ch && !stream.eol()) {\n        stream.start = stream.pos;\n        var style = mode.token(stream, state);\n      }\n      return {start: stream.start,\n              end: stream.pos,\n              string: stream.current(),\n              className: style || null, // Deprecated, use 'type' instead\n              type: style || null,\n              state: state};\n    },\n\n    getStateAfter: function(line) {\n      var doc = this.doc;\n      line = clipLine(doc, line == null ? doc.first + doc.size - 1: line);\n      return getStateBefore(this, line + 1);\n    },\n\n    cursorCoords: function(start, mode) {\n      var pos, sel = this.doc.sel;\n      if (start == null) pos = sel.head;\n      else if (typeof start == \"object\") pos = clipPos(this.doc, start);\n      else pos = start ? 
sel.from : sel.to;\n      return cursorCoords(this, pos, mode || \"page\");\n    },\n\n    charCoords: function(pos, mode) {\n      return charCoords(this, clipPos(this.doc, pos), mode || \"page\");\n    },\n\n    coordsChar: function(coords, mode) {\n      coords = fromCoordSystem(this, coords, mode || \"page\");\n      return coordsChar(this, coords.left, coords.top);\n    },\n\n    defaultTextHeight: function() { return textHeight(this.display); },\n    defaultCharWidth: function() { return charWidth(this.display); },\n\n    setGutterMarker: operation(null, function(line, gutterID, value) {\n      return changeLine(this, line, function(line) {\n        var markers = line.gutterMarkers || (line.gutterMarkers = {});\n        markers[gutterID] = value;\n        if (!value && isEmpty(markers)) line.gutterMarkers = null;\n        return true;\n      });\n    }),\n\n    clearGutter: operation(null, function(gutterID) {\n      var cm = this, doc = cm.doc, i = doc.first;\n      doc.iter(function(line) {\n        if (line.gutterMarkers && line.gutterMarkers[gutterID]) {\n          line.gutterMarkers[gutterID] = null;\n          regChange(cm, i, i + 1);\n          if (isEmpty(line.gutterMarkers)) line.gutterMarkers = null;\n        }\n        ++i;\n      });\n    }),\n\n    addLineClass: operation(null, function(handle, where, cls) {\n      return changeLine(this, handle, function(line) {\n        var prop = where == \"text\" ? \"textClass\" : where == \"background\" ? \"bgClass\" : \"wrapClass\";\n        if (!line[prop]) line[prop] = cls;\n        else if (new RegExp(\"\\\\b\" + cls + \"\\\\b\").test(line[prop])) return false;\n        else line[prop] += \" \" + cls;\n        return true;\n      });\n    }),\n\n    removeLineClass: operation(null, function(handle, where, cls) {\n      return changeLine(this, handle, function(line) {\n        var prop = where == \"text\" ? \"textClass\" : where == \"background\" ? 
\"bgClass\" : \"wrapClass\";\n        var cur = line[prop];\n        if (!cur) return false;\n        else if (cls == null) line[prop] = null;\n        else {\n          var upd = cur.replace(new RegExp(\"^\" + cls + \"\\\\b\\\\s*|\\\\s*\\\\b\" + cls + \"\\\\b\"), \"\");\n          if (upd == cur) return false;\n          line[prop] = upd || null;\n        }\n        return true;\n      });\n    }),\n\n    addLineWidget: operation(null, function(handle, node, options) {\n      return addLineWidget(this, handle, node, options);\n    }),\n\n    removeLineWidget: function(widget) { widget.clear(); },\n\n    lineInfo: function(line) {\n      if (typeof line == \"number\") {\n        if (!isLine(this.doc, line)) return null;\n        var n = line;\n        line = getLine(this.doc, line);\n        if (!line) return null;\n      } else {\n        var n = lineNo(line);\n        if (n == null) return null;\n      }\n      return {line: n, handle: line, text: line.text, gutterMarkers: line.gutterMarkers,\n              textClass: line.textClass, bgClass: line.bgClass, wrapClass: line.wrapClass,\n              widgets: line.widgets};\n    },\n\n    getViewport: function() { return {from: this.display.showingFrom, to: this.display.showingTo};},\n\n    addWidget: function(pos, node, scroll, vert, horiz) {\n      var display = this.display;\n      pos = cursorCoords(this, clipPos(this.doc, pos));\n      var top = pos.bottom, left = pos.left;\n      node.style.position = \"absolute\";\n      display.sizer.appendChild(node);\n      if (vert == \"over\") {\n        top = pos.top;\n      } else if (vert == \"above\" || vert == \"near\") {\n        var vspace = Math.max(display.wrapper.clientHeight, this.doc.height),\n        hspace = Math.max(display.sizer.clientWidth, display.lineSpace.clientWidth);\n        // Default to positioning above (if specified and possible); otherwise default to positioning below\n        if ((vert == 'above' || pos.bottom + node.offsetHeight > vspace) && pos.top > node.offsetHeight)\n          top = pos.top - node.offsetHeight;\n        else if (pos.bottom + node.offsetHeight <= vspace)\n          top = pos.bottom;\n        if (left + node.offsetWidth > hspace)\n          left = hspace - node.offsetWidth;\n      }\n      node.style.top = (top + paddingTop(display)) + \"px\";\n      node.style.left = node.style.right = \"\";\n      if (horiz == \"right\") {\n        left = display.sizer.clientWidth - node.offsetWidth;\n        node.style.right = \"0px\";\n      } else {\n        if (horiz == \"left\") left = 0;\n        else if (horiz == \"middle\") left = (display.sizer.clientWidth - node.offsetWidth) / 2;\n        node.style.left = left + \"px\";\n      }\n      if (scroll)\n        scrollIntoView(this, left, top, left + node.offsetWidth, top + node.offsetHeight);\n    },\n\n    triggerOnKeyDown: operation(null, onKeyDown),\n\n    execCommand: function(cmd) {return commands[cmd](this);},\n\n    findPosH: function(from, amount, unit, visually) {\n      var dir = 1;\n      if (amount < 0) { dir = -1; amount = -amount; }\n      for (var i = 0, cur = clipPos(this.doc, from); i < amount; ++i) {\n        cur = findPosH(this.doc, cur, dir, unit, visually);\n        if (cur.hitSide) break;\n      }\n      return cur;\n    },\n\n    moveH: operation(null, function(dir, unit) {\n      var sel = this.doc.sel, pos;\n      if (sel.shift || sel.extend || posEq(sel.from, sel.to))\n        pos = findPosH(this.doc, sel.head, dir, unit, this.options.rtlMoveVisually);\n      else\n        pos = 
dir < 0 ? sel.from : sel.to;\n      extendSelection(this.doc, pos, pos, dir);\n    }),\n\n    deleteH: operation(null, function(dir, unit) {\n      var sel = this.doc.sel;\n      if (!posEq(sel.from, sel.to)) replaceRange(this.doc, \"\", sel.from, sel.to, \"+delete\");\n      else replaceRange(this.doc, \"\", sel.from, findPosH(this.doc, sel.head, dir, unit, false), \"+delete\");\n      this.curOp.userSelChange = true;\n    }),\n\n    findPosV: function(from, amount, unit, goalColumn) {\n      var dir = 1, x = goalColumn;\n      if (amount < 0) { dir = -1; amount = -amount; }\n      for (var i = 0, cur = clipPos(this.doc, from); i < amount; ++i) {\n        var coords = cursorCoords(this, cur, \"div\");\n        if (x == null) x = coords.left;\n        else coords.left = x;\n        cur = findPosV(this, coords, dir, unit);\n        if (cur.hitSide) break;\n      }\n      return cur;\n    },\n\n    moveV: operation(null, function(dir, unit) {\n      var sel = this.doc.sel;\n      var pos = cursorCoords(this, sel.head, \"div\");\n      if (sel.goalColumn != null) pos.left = sel.goalColumn;\n      var target = findPosV(this, pos, dir, unit);\n\n      if (unit == \"page\") addToScrollPos(this, 0, charCoords(this, target, \"div\").top - pos.top);\n      extendSelection(this.doc, target, target, dir);\n      sel.goalColumn = pos.left;\n    }),\n\n    toggleOverwrite: function() {\n      if (this.state.overwrite = !this.state.overwrite)\n        this.display.cursor.className += \" CodeMirror-overwrite\";\n      else\n        this.display.cursor.className = this.display.cursor.className.replace(\" CodeMirror-overwrite\", \"\");\n    },\n    hasFocus: function() { return this.state.focused; },\n\n    scrollTo: operation(null, function(x, y) {\n      updateScrollPos(this, x, y);\n    }),\n    getScrollInfo: function() {\n      var scroller = this.display.scroller, co = scrollerCutOff;\n      return {left: scroller.scrollLeft, top: scroller.scrollTop,\n              height: scroller.scrollHeight - co, width: scroller.scrollWidth - co,\n              clientHeight: scroller.clientHeight - co, clientWidth: scroller.clientWidth - co};\n    },\n\n    scrollIntoView: function(pos, margin) {\n      if (typeof pos == \"number\") pos = Pos(pos, 0);\n      if (!pos || pos.line != null) {\n        pos = pos ? clipPos(this.doc, pos) : this.doc.sel.head;\n        scrollPosIntoView(this, pos, margin);\n      } else {\n        scrollIntoView(this, pos.left, pos.top - margin, pos.right, pos.bottom + margin);\n      }\n    },\n\n    setSize: function(width, height) {\n      function interpret(val) {\n        return typeof val == \"number\" || /^\\d+$/.test(String(val)) ? 
val + \"px\" : val;\n      }\n      if (width != null) this.display.wrapper.style.width = interpret(width);\n      if (height != null) this.display.wrapper.style.height = interpret(height);\n      this.refresh();\n    },\n\n    on: function(type, f) {on(this, type, f);},\n    off: function(type, f) {off(this, type, f);},\n\n    operation: function(f){return runInOp(this, f);},\n\n    refresh: operation(null, function() {\n      clearCaches(this);\n      updateScrollPos(this, this.doc.scrollLeft, this.doc.scrollTop);\n      regChange(this);\n    }),\n\n    swapDoc: operation(null, function(doc) {\n      var old = this.doc;\n      old.cm = null;\n      attachDoc(this, doc);\n      clearCaches(this);\n      updateScrollPos(this, doc.scrollLeft, doc.scrollTop);\n      return old;\n    }),\n\n    getInputField: function(){return this.display.input;},\n    getWrapperElement: function(){return this.display.wrapper;},\n    getScrollerElement: function(){return this.display.scroller;},\n    getGutterElement: function(){return this.display.gutters;}\n  };\n\n  // OPTION DEFAULTS\n\n  var optionHandlers = CodeMirror.optionHandlers = {};\n\n  // The default configuration options.\n  var defaults = CodeMirror.defaults = {};\n\n  function option(name, deflt, handle, notOnInit) {\n    CodeMirror.defaults[name] = deflt;\n    if (handle) optionHandlers[name] =\n      notOnInit ? function(cm, val, old) {if (old != Init) handle(cm, val, old);} : handle;\n  }\n\n  var Init = CodeMirror.Init = {toString: function(){return \"CodeMirror.Init\";}};\n\n  // These two are, on init, called from the constructor because they\n  // have to be initialized before the editor can start at all.\n  option(\"value\", \"\", function(cm, val) {\n    cm.setValue(val);\n  }, true);\n  option(\"mode\", null, function(cm, val) {\n    cm.doc.modeOption = val;\n    loadMode(cm);\n  }, true);\n\n  option(\"indentUnit\", 2, loadMode, true);\n  option(\"indentWithTabs\", false);\n  option(\"smartIndent\", true);\n  option(\"tabSize\", 4, function(cm) {\n    loadMode(cm);\n    clearCaches(cm);\n    regChange(cm);\n  }, true);\n  option(\"electricChars\", true);\n  option(\"rtlMoveVisually\", !windows);\n\n  option(\"theme\", \"default\", function(cm) {\n    themeChanged(cm);\n    guttersChanged(cm);\n  }, true);\n  option(\"keyMap\", \"default\", keyMapChanged);\n  option(\"extraKeys\", null);\n\n  option(\"onKeyEvent\", null);\n  option(\"onDragEvent\", null);\n\n  option(\"lineWrapping\", false, wrappingChanged, true);\n  option(\"gutters\", [], function(cm) {\n    setGuttersForLineNumbers(cm.options);\n    guttersChanged(cm);\n  }, true);\n  option(\"fixedGutter\", true, function(cm, val) {\n    cm.display.gutters.style.left = val ? 
compensateForHScroll(cm.display) + \"px\" : \"0\";\n    cm.refresh();\n  }, true);\n  option(\"lineNumbers\", false, function(cm) {\n    setGuttersForLineNumbers(cm.options);\n    guttersChanged(cm);\n  }, true);\n  option(\"firstLineNumber\", 1, guttersChanged, true);\n  option(\"lineNumberFormatter\", function(integer) {return integer;}, guttersChanged, true);\n  option(\"showCursorWhenSelecting\", false, updateSelection, true);\n\n  option(\"readOnly\", false, function(cm, val) {\n    if (val == \"nocursor\") {onBlur(cm); cm.display.input.blur();}\n    else if (!val) resetInput(cm, true);\n  });\n  option(\"dragDrop\", true);\n\n  option(\"cursorBlinkRate\", 530);\n  option(\"cursorHeight\", 1);\n  option(\"workTime\", 100);\n  option(\"workDelay\", 100);\n  option(\"flattenSpans\", true);\n  option(\"pollInterval\", 100);\n  option(\"undoDepth\", 40, function(cm, val){cm.doc.history.undoDepth = val;});\n  option(\"viewportMargin\", 10, function(cm){cm.refresh();}, true);\n\n  option(\"tabindex\", null, function(cm, val) {\n    cm.display.input.tabIndex = val || \"\";\n  });\n  option(\"autofocus\", null);\n\n  // MODE DEFINITION AND QUERYING\n\n  // Known modes, by name and by MIME\n  var modes = CodeMirror.modes = {}, mimeModes = CodeMirror.mimeModes = {};\n\n  CodeMirror.defineMode = function(name, mode) {\n    if (!CodeMirror.defaults.mode && name != \"null\") CodeMirror.defaults.mode = name;\n    if (arguments.length > 2) {\n      mode.dependencies = [];\n      for (var i = 2; i < arguments.length; ++i) mode.dependencies.push(arguments[i]);\n    }\n    modes[name] = mode;\n  };\n\n  CodeMirror.defineMIME = function(mime, spec) {\n    mimeModes[mime] = spec;\n  };\n\n  CodeMirror.resolveMode = function(spec) {\n    if (typeof spec == \"string\" && mimeModes.hasOwnProperty(spec))\n      spec = mimeModes[spec];\n    else if (typeof spec == \"string\" && /^[\\w\\-]+\\/[\\w\\-]+\\+xml$/.test(spec))\n      return CodeMirror.resolveMode(\"application/xml\");\n    if (typeof spec == \"string\") return {name: spec};\n    else return spec || {name: \"null\"};\n  };\n\n  CodeMirror.getMode = function(options, spec) {\n    spec = CodeMirror.resolveMode(spec);\n    var mfactory = modes[spec.name];\n    if (!mfactory) return CodeMirror.getMode(options, \"text/plain\");\n    var modeObj = mfactory(options, spec);\n    if (modeExtensions.hasOwnProperty(spec.name)) {\n      var exts = modeExtensions[spec.name];\n      for (var prop in exts) {\n        if (!exts.hasOwnProperty(prop)) continue;\n        if (modeObj.hasOwnProperty(prop)) modeObj[\"_\" + prop] = modeObj[prop];\n        modeObj[prop] = exts[prop];\n      }\n    }\n    modeObj.name = spec.name;\n    return modeObj;\n  };\n\n  CodeMirror.defineMode(\"null\", function() {\n    return {token: function(stream) {stream.skipToEnd();}};\n  });\n  CodeMirror.defineMIME(\"text/plain\", \"null\");\n\n  var modeExtensions = CodeMirror.modeExtensions = {};\n  CodeMirror.extendMode = function(mode, properties) {\n    var exts = modeExtensions.hasOwnProperty(mode) ? modeExtensions[mode] : (modeExtensions[mode] = {});\n    copyObj(properties, exts);\n  };\n\n  // EXTENSIONS\n\n  CodeMirror.defineExtension = function(name, func) {\n    CodeMirror.prototype[name] = func;\n  };\n\n  CodeMirror.defineOption = option;\n\n  var initHooks = [];\n  CodeMirror.defineInitHook = function(f) {initHooks.push(f);};\n\n  // MODE STATE HANDLING\n\n  // Utility functions for working with state. 
Exported because modes\n  // sometimes need to do this.\n  function copyState(mode, state) {\n    if (state === true) return state;\n    if (mode.copyState) return mode.copyState(state);\n    var nstate = {};\n    for (var n in state) {\n      var val = state[n];\n      if (val instanceof Array) val = val.concat([]);\n      nstate[n] = val;\n    }\n    return nstate;\n  }\n  CodeMirror.copyState = copyState;\n\n  function startState(mode, a1, a2) {\n    return mode.startState ? mode.startState(a1, a2) : true;\n  }\n  CodeMirror.startState = startState;\n\n  CodeMirror.innerMode = function(mode, state) {\n    while (mode.innerMode) {\n      var info = mode.innerMode(state);\n      state = info.state;\n      mode = info.mode;\n    }\n    return info || {mode: mode, state: state};\n  };\n\n  // STANDARD COMMANDS\n\n  var commands = CodeMirror.commands = {\n    selectAll: function(cm) {cm.setSelection(Pos(cm.firstLine(), 0), Pos(cm.lastLine()));},\n    killLine: function(cm) {\n      var from = cm.getCursor(true), to = cm.getCursor(false), sel = !posEq(from, to);\n      if (!sel && cm.getLine(from.line).length == from.ch)\n        cm.replaceRange(\"\", from, Pos(from.line + 1, 0), \"+delete\");\n      else cm.replaceRange(\"\", from, sel ? to : Pos(from.line), \"+delete\");\n    },\n    deleteLine: function(cm) {\n      var l = cm.getCursor().line;\n      cm.replaceRange(\"\", Pos(l, 0), Pos(l), \"+delete\");\n    },\n    undo: function(cm) {cm.undo();},\n    redo: function(cm) {cm.redo();},\n    goDocStart: function(cm) {cm.extendSelection(Pos(cm.firstLine(), 0));},\n    goDocEnd: function(cm) {cm.extendSelection(Pos(cm.lastLine()));},\n    goLineStart: function(cm) {\n      cm.extendSelection(lineStart(cm, cm.getCursor().line));\n    },\n    goLineStartSmart: function(cm) {\n      var cur = cm.getCursor(), start = lineStart(cm, cur.line);\n      var line = cm.getLineHandle(start.line);\n      var order = getOrder(line);\n      if (!order || order[0].level == 0) {\n        var firstNonWS = Math.max(0, line.text.search(/\\S/));\n        var inWS = cur.line == start.line && cur.ch <= firstNonWS && cur.ch;\n        cm.extendSelection(Pos(start.line, inWS ? 
0 : firstNonWS));\n      } else cm.extendSelection(start);\n    },\n    goLineEnd: function(cm) {\n      cm.extendSelection(lineEnd(cm, cm.getCursor().line));\n    },\n    goLineRight: function(cm) {\n      var top = cm.charCoords(cm.getCursor(), \"div\").top + 5;\n      cm.extendSelection(cm.coordsChar({left: cm.display.lineDiv.offsetWidth + 100, top: top}, \"div\"));\n    },\n    goLineLeft: function(cm) {\n      var top = cm.charCoords(cm.getCursor(), \"div\").top + 5;\n      cm.extendSelection(cm.coordsChar({left: 0, top: top}, \"div\"));\n    },\n    goLineUp: function(cm) {cm.moveV(-1, \"line\");},\n    goLineDown: function(cm) {cm.moveV(1, \"line\");},\n    goPageUp: function(cm) {cm.moveV(-1, \"page\");},\n    goPageDown: function(cm) {cm.moveV(1, \"page\");},\n    goCharLeft: function(cm) {cm.moveH(-1, \"char\");},\n    goCharRight: function(cm) {cm.moveH(1, \"char\");},\n    goColumnLeft: function(cm) {cm.moveH(-1, \"column\");},\n    goColumnRight: function(cm) {cm.moveH(1, \"column\");},\n    goWordLeft: function(cm) {cm.moveH(-1, \"word\");},\n    goGroupRight: function(cm) {cm.moveH(1, \"group\");},\n    goGroupLeft: function(cm) {cm.moveH(-1, \"group\");},\n    goWordRight: function(cm) {cm.moveH(1, \"word\");},\n    delCharBefore: function(cm) {cm.deleteH(-1, \"char\");},\n    delCharAfter: function(cm) {cm.deleteH(1, \"char\");},\n    delWordBefore: function(cm) {cm.deleteH(-1, \"word\");},\n    delWordAfter: function(cm) {cm.deleteH(1, \"word\");},\n    delGroupBefore: function(cm) {cm.deleteH(-1, \"group\");},\n    delGroupAfter: function(cm) {cm.deleteH(1, \"group\");},\n    indentAuto: function(cm) {cm.indentSelection(\"smart\");},\n    indentMore: function(cm) {cm.indentSelection(\"add\");},\n    indentLess: function(cm) {cm.indentSelection(\"subtract\");},\n    insertTab: function(cm) {cm.replaceSelection(\"\\t\", \"end\", \"+input\");},\n    defaultTab: function(cm) {\n      if (cm.somethingSelected()) cm.indentSelection(\"add\");\n      else cm.replaceSelection(\"\\t\", \"end\", \"+input\");\n    },\n    transposeChars: function(cm) {\n      var cur = cm.getCursor(), line = cm.getLine(cur.line);\n      if (cur.ch > 0 && cur.ch < line.length - 1)\n        cm.replaceRange(line.charAt(cur.ch) + line.charAt(cur.ch - 1),\n                        Pos(cur.line, cur.ch - 1), Pos(cur.line, cur.ch + 1));\n    },\n    newlineAndIndent: function(cm) {\n      operation(cm, function() {\n        cm.replaceSelection(\"\\n\", \"end\", \"+input\");\n        cm.indentLine(cm.getCursor().line, null, true);\n      })();\n    },\n    toggleOverwrite: function(cm) {cm.toggleOverwrite();}\n  };\n\n  // STANDARD KEYMAPS\n\n  var keyMap = CodeMirror.keyMap = {};\n  keyMap.basic = {\n    \"Left\": \"goCharLeft\", \"Right\": \"goCharRight\", \"Up\": \"goLineUp\", \"Down\": \"goLineDown\",\n    \"End\": \"goLineEnd\", \"Home\": \"goLineStartSmart\", \"PageUp\": \"goPageUp\", \"PageDown\": \"goPageDown\",\n    \"Delete\": \"delCharAfter\", \"Backspace\": \"delCharBefore\", \"Tab\": \"defaultTab\", \"Shift-Tab\": \"indentAuto\",\n    \"Enter\": \"newlineAndIndent\", \"Insert\": \"toggleOverwrite\"\n  };\n  // Note that the save and find-related commands aren't defined by\n  // default. 
Unknown commands are simply ignored.\n  keyMap.pcDefault = {\n    \"Ctrl-A\": \"selectAll\", \"Ctrl-D\": \"deleteLine\", \"Ctrl-Z\": \"undo\", \"Shift-Ctrl-Z\": \"redo\", \"Ctrl-Y\": \"redo\",\n    \"Ctrl-Home\": \"goDocStart\", \"Alt-Up\": \"goDocStart\", \"Ctrl-End\": \"goDocEnd\", \"Ctrl-Down\": \"goDocEnd\",\n    \"Ctrl-Left\": \"goGroupLeft\", \"Ctrl-Right\": \"goGroupRight\", \"Alt-Left\": \"goLineStart\", \"Alt-Right\": \"goLineEnd\",\n    \"Ctrl-Backspace\": \"delGroupBefore\", \"Ctrl-Delete\": \"delGroupAfter\", \"Ctrl-S\": \"save\", \"Ctrl-F\": \"find\",\n    \"Ctrl-G\": \"findNext\", \"Shift-Ctrl-G\": \"findPrev\", \"Shift-Ctrl-F\": \"replace\", \"Shift-Ctrl-R\": \"replaceAll\",\n    \"Ctrl-[\": \"indentLess\", \"Ctrl-]\": \"indentMore\",\n    fallthrough: \"basic\"\n  };\n  keyMap.macDefault = {\n    \"Cmd-A\": \"selectAll\", \"Cmd-D\": \"deleteLine\", \"Cmd-Z\": \"undo\", \"Shift-Cmd-Z\": \"redo\", \"Cmd-Y\": \"redo\",\n    \"Cmd-Up\": \"goDocStart\", \"Cmd-End\": \"goDocEnd\", \"Cmd-Down\": \"goDocEnd\", \"Alt-Left\": \"goGroupLeft\",\n    \"Alt-Right\": \"goGroupRight\", \"Cmd-Left\": \"goLineStart\", \"Cmd-Right\": \"goLineEnd\", \"Alt-Backspace\": \"delGroupBefore\",\n    \"Ctrl-Alt-Backspace\": \"delGroupAfter\", \"Alt-Delete\": \"delGroupAfter\", \"Cmd-S\": \"save\", \"Cmd-F\": \"find\",\n    \"Cmd-G\": \"findNext\", \"Shift-Cmd-G\": \"findPrev\", \"Cmd-Alt-F\": \"replace\", \"Shift-Cmd-Alt-F\": \"replaceAll\",\n    \"Cmd-[\": \"indentLess\", \"Cmd-]\": \"indentMore\",\n    fallthrough: [\"basic\", \"emacsy\"]\n  };\n  keyMap[\"default\"] = mac ? keyMap.macDefault : keyMap.pcDefault;\n  keyMap.emacsy = {\n    \"Ctrl-F\": \"goCharRight\", \"Ctrl-B\": \"goCharLeft\", \"Ctrl-P\": \"goLineUp\", \"Ctrl-N\": \"goLineDown\",\n    \"Alt-F\": \"goWordRight\", \"Alt-B\": \"goWordLeft\", \"Ctrl-A\": \"goLineStart\", \"Ctrl-E\": \"goLineEnd\",\n    \"Ctrl-V\": \"goPageDown\", \"Shift-Ctrl-V\": \"goPageUp\", \"Ctrl-D\": \"delCharAfter\", \"Ctrl-H\": \"delCharBefore\",\n    \"Alt-D\": \"delWordAfter\", \"Alt-Backspace\": \"delWordBefore\", \"Ctrl-K\": \"killLine\", \"Ctrl-T\": \"transposeChars\"\n  };\n\n  // KEYMAP DISPATCH\n\n  function getKeyMap(val) {\n    if (typeof val == \"string\") return keyMap[val];\n    else return val;\n  }\n\n  function lookupKey(name, maps, handle) {\n    function lookup(map) {\n      map = getKeyMap(map);\n      var found = map[name];\n      if (found === false) return \"stop\";\n      if (found != null && handle(found)) return true;\n      if (map.nofallthrough) return \"stop\";\n\n      var fallthrough = map.fallthrough;\n      if (fallthrough == null) return false;\n      if (Object.prototype.toString.call(fallthrough) != \"[object Array]\")\n        return lookup(fallthrough);\n      for (var i = 0, e = fallthrough.length; i < e; ++i) {\n        var done = lookup(fallthrough[i]);\n        if (done) return done;\n      }\n      return false;\n    }\n\n    for (var i = 0; i < maps.length; ++i) {\n      var done = lookup(maps[i]);\n      if (done) return done;\n    }\n  }\n  function isModifierKey(event) {\n    var name = keyNames[event.keyCode];\n    return name == \"Ctrl\" || name == \"Alt\" || name == \"Shift\" || name == \"Mod\";\n  }\n  function keyName(event, noShift) {\n    if (opera && event.keyCode == 34 && event[\"char\"]) return false;\n    var name = keyNames[event.keyCode];\n    if (name == null || event.altGraphKey) return false;\n    if (event.altKey) name = \"Alt-\" + name;\n    if (flipCtrlCmd ? 
event.metaKey : event.ctrlKey) name = \"Ctrl-\" + name;\n    if (flipCtrlCmd ? event.ctrlKey : event.metaKey) name = \"Cmd-\" + name;\n    if (!noShift && event.shiftKey) name = \"Shift-\" + name;\n    return name;\n  }\n  CodeMirror.lookupKey = lookupKey;\n  CodeMirror.isModifierKey = isModifierKey;\n  CodeMirror.keyName = keyName;\n\n  // FROMTEXTAREA\n\n  CodeMirror.fromTextArea = function(textarea, options) {\n    if (!options) options = {};\n    options.value = textarea.value;\n    if (!options.tabindex && textarea.tabindex)\n      options.tabindex = textarea.tabindex;\n    if (!options.placeholder && textarea.placeholder)\n      options.placeholder = textarea.placeholder;\n    // Set autofocus to true if this textarea is focused, or if it has\n    // autofocus and no other element is focused.\n    if (options.autofocus == null) {\n      var hasFocus = document.body;\n      // doc.activeElement occasionally throws on IE\n      try { hasFocus = document.activeElement; } catch(e) {}\n      options.autofocus = hasFocus == textarea ||\n        textarea.getAttribute(\"autofocus\") != null && hasFocus == document.body;\n    }\n\n    function save() {textarea.value = cm.getValue();}\n    if (textarea.form) {\n      on(textarea.form, \"submit\", save);\n      // Deplorable hack to make the submit method do the right thing.\n      if (!options.leaveSubmitMethodAlone) {\n        var form = textarea.form, realSubmit = form.submit;\n        try {\n          var wrappedSubmit = form.submit = function() {\n            save();\n            form.submit = realSubmit;\n            form.submit();\n            form.submit = wrappedSubmit;\n          };\n        } catch(e) {}\n      }\n    }\n\n    textarea.style.display = \"none\";\n    var cm = CodeMirror(function(node) {\n      textarea.parentNode.insertBefore(node, textarea.nextSibling);\n    }, options);\n    cm.save = save;\n    cm.getTextArea = function() { return textarea; };\n    cm.toTextArea = function() {\n      save();\n      textarea.parentNode.removeChild(cm.getWrapperElement());\n      textarea.style.display = \"\";\n      if (textarea.form) {\n        off(textarea.form, \"submit\", save);\n        if (typeof textarea.form.submit == \"function\")\n          textarea.form.submit = realSubmit;\n      }\n    };\n    return cm;\n  };\n\n  // STRING STREAM\n\n  // Fed to the mode parsers, provides helper functions to make\n  // parsers more succinct.\n\n  // The character stream used by a mode's parser.\n  function StringStream(string, tabSize) {\n    this.pos = this.start = 0;\n    this.string = string;\n    this.tabSize = tabSize || 8;\n    this.lastColumnPos = this.lastColumnValue = 0;\n  }\n\n  StringStream.prototype = {\n    eol: function() {return this.pos >= this.string.length;},\n    sol: function() {return this.pos == 0;},\n    peek: function() {return this.string.charAt(this.pos) || undefined;},\n    next: function() {\n      if (this.pos < this.string.length)\n        return this.string.charAt(this.pos++);\n    },\n    eat: function(match) {\n      var ch = this.string.charAt(this.pos);\n      if (typeof match == \"string\") var ok = ch == match;\n      else var ok = ch && (match.test ? 
match.test(ch) : match(ch));\n      if (ok) {++this.pos; return ch;}\n    },\n    eatWhile: function(match) {\n      var start = this.pos;\n      while (this.eat(match)){}\n      return this.pos > start;\n    },\n    eatSpace: function() {\n      var start = this.pos;\n      while (/[\\s\\u00a0]/.test(this.string.charAt(this.pos))) ++this.pos;\n      return this.pos > start;\n    },\n    skipToEnd: function() {this.pos = this.string.length;},\n    skipTo: function(ch) {\n      var found = this.string.indexOf(ch, this.pos);\n      if (found > -1) {this.pos = found; return true;}\n    },\n    backUp: function(n) {this.pos -= n;},\n    column: function() {\n      if (this.lastColumnPos < this.start) {\n        this.lastColumnValue = countColumn(this.string, this.start, this.tabSize, this.lastColumnPos, this.lastColumnValue);\n        this.lastColumnPos = this.start;\n      }\n      return this.lastColumnValue;\n    },\n    indentation: function() {return countColumn(this.string, null, this.tabSize);},\n    match: function(pattern, consume, caseInsensitive) {\n      if (typeof pattern == \"string\") {\n        var cased = function(str) {return caseInsensitive ? str.toLowerCase() : str;};\n        var substr = this.string.substr(this.pos, pattern.length);\n        if (cased(substr) == cased(pattern)) {\n          if (consume !== false) this.pos += pattern.length;\n          return true;\n        }\n      } else {\n        var match = this.string.slice(this.pos).match(pattern);\n        if (match && match.index > 0) return null;\n        if (match && consume !== false) this.pos += match[0].length;\n        return match;\n      }\n    },\n    current: function(){return this.string.slice(this.start, this.pos);}\n  };\n  CodeMirror.StringStream = StringStream;\n\n  // TEXTMARKERS\n\n  function TextMarker(doc, type) {\n    this.lines = [];\n    this.type = type;\n    this.doc = doc;\n  }\n  CodeMirror.TextMarker = TextMarker;\n\n  TextMarker.prototype.clear = function() {\n    if (this.explicitlyCleared) return;\n    var cm = this.doc.cm, withOp = cm && !cm.curOp;\n    if (withOp) startOperation(cm);\n    var min = null, max = null;\n    for (var i = 0; i < this.lines.length; ++i) {\n      var line = this.lines[i];\n      var span = getMarkedSpanFor(line.markedSpans, this);\n      if (span.to != null) max = lineNo(line);\n      line.markedSpans = removeMarkedSpan(line.markedSpans, span);\n      if (span.from != null)\n        min = lineNo(line);\n      else if (this.collapsed && !lineIsHidden(this.doc, line) && cm)\n        updateLineHeight(line, textHeight(cm.display));\n    }\n    if (cm && this.collapsed && !cm.options.lineWrapping) for (var i = 0; i < this.lines.length; ++i) {\n      var visual = visualLine(cm.doc, this.lines[i]), len = lineLength(cm.doc, visual);\n      if (len > cm.display.maxLineLength) {\n        cm.display.maxLine = visual;\n        cm.display.maxLineLength = len;\n        cm.display.maxLineChanged = true;\n      }\n    }\n\n    if (min != null && cm) regChange(cm, min, max + 1);\n    this.lines.length = 0;\n    this.explicitlyCleared = true;\n    if (this.collapsed && this.doc.cantEdit) {\n      this.doc.cantEdit = false;\n      if (cm) reCheckSelection(cm);\n    }\n    if (withOp) endOperation(cm);\n    signalLater(this, \"clear\");\n  };\n\n  TextMarker.prototype.find = function() {\n    var from, to;\n    for (var i = 0; i < this.lines.length; ++i) {\n      var line = this.lines[i];\n      var span = getMarkedSpanFor(line.markedSpans, this);\n      if (span.from != null 
|| span.to != null) {\n        var found = lineNo(line);\n        if (span.from != null) from = Pos(found, span.from);\n        if (span.to != null) to = Pos(found, span.to);\n      }\n    }\n    if (this.type == \"bookmark\") return from;\n    return from && {from: from, to: to};\n  };\n\n  TextMarker.prototype.getOptions = function(copyWidget) {\n    var repl = this.replacedWith;\n    return {className: this.className,\n            inclusiveLeft: this.inclusiveLeft, inclusiveRight: this.inclusiveRight,\n            atomic: this.atomic,\n            collapsed: this.collapsed,\n            replacedWith: copyWidget ? repl && repl.cloneNode(true) : repl,\n            readOnly: this.readOnly,\n            startStyle: this.startStyle, endStyle: this.endStyle};\n  };\n\n  TextMarker.prototype.attachLine = function(line) {\n    if (!this.lines.length && this.doc.cm) {\n      var op = this.doc.cm.curOp;\n      if (!op.maybeHiddenMarkers || indexOf(op.maybeHiddenMarkers, this) == -1)\n        (op.maybeUnhiddenMarkers || (op.maybeUnhiddenMarkers = [])).push(this);\n    }\n    this.lines.push(line);\n  };\n  TextMarker.prototype.detachLine = function(line) {\n    this.lines.splice(indexOf(this.lines, line), 1);\n    if (!this.lines.length && this.doc.cm) {\n      var op = this.doc.cm.curOp;\n      (op.maybeHiddenMarkers || (op.maybeHiddenMarkers = [])).push(this);\n    }\n  };\n\n  function markText(doc, from, to, options, type) {\n    if (options && options.shared) return markTextShared(doc, from, to, options, type);\n    if (doc.cm && !doc.cm.curOp) return operation(doc.cm, markText)(doc, from, to, options, type);\n\n    var marker = new TextMarker(doc, type);\n    if (type == \"range\" && !posLess(from, to)) return marker;\n    if (options) copyObj(options, marker);\n    if (marker.replacedWith) {\n      marker.collapsed = true;\n      marker.replacedWith = elt(\"span\", [marker.replacedWith], \"CodeMirror-widget\");\n    }\n    if (marker.collapsed) sawCollapsedSpans = true;\n\n    var curLine = from.line, size = 0, collapsedAtStart, collapsedAtEnd, cm = doc.cm, updateMaxLine;\n    doc.iter(curLine, to.line + 1, function(line) {\n      if (cm && marker.collapsed && !cm.options.lineWrapping && visualLine(doc, line) == cm.display.maxLine)\n        updateMaxLine = true;\n      var span = {from: null, to: null, marker: marker};\n      size += line.text.length;\n      if (curLine == from.line) {span.from = from.ch; size -= from.ch;}\n      if (curLine == to.line) {span.to = to.ch; size -= line.text.length - to.ch;}\n      if (marker.collapsed) {\n        if (curLine == to.line) collapsedAtEnd = collapsedSpanAt(line, to.ch);\n        if (curLine == from.line) collapsedAtStart = collapsedSpanAt(line, from.ch);\n        else updateLineHeight(line, 0);\n      }\n      addMarkedSpan(line, span);\n      ++curLine;\n    });\n    if (marker.collapsed) doc.iter(from.line, to.line + 1, function(line) {\n      if (lineIsHidden(doc, line)) updateLineHeight(line, 0);\n    });\n\n    if (marker.clearOnEnter) on(marker, \"beforeCursorEnter\", function() { marker.clear(); });\n\n    if (marker.readOnly) {\n      sawReadOnlySpans = true;\n      if (doc.history.done.length || doc.history.undone.length)\n        doc.clearHistory();\n    }\n    if (marker.collapsed) {\n      if (collapsedAtStart != collapsedAtEnd)\n        throw new Error(\"Inserting collapsed marker overlapping an existing one\");\n      marker.size = size;\n      marker.atomic = true;\n    }\n    if (cm) {\n      if (updateMaxLine) 
cm.curOp.updateMaxLine = true;\n      if (marker.className || marker.startStyle || marker.endStyle || marker.collapsed)\n        regChange(cm, from.line, to.line + 1);\n      if (marker.atomic) reCheckSelection(cm);\n    }\n    return marker;\n  }\n\n  // SHARED TEXTMARKERS\n\n  function SharedTextMarker(markers, primary) {\n    this.markers = markers;\n    this.primary = primary;\n    for (var i = 0, me = this; i < markers.length; ++i) {\n      markers[i].parent = this;\n      on(markers[i], \"clear\", function(){me.clear();});\n    }\n  }\n  CodeMirror.SharedTextMarker = SharedTextMarker;\n\n  SharedTextMarker.prototype.clear = function() {\n    if (this.explicitlyCleared) return;\n    this.explicitlyCleared = true;\n    for (var i = 0; i < this.markers.length; ++i)\n      this.markers[i].clear();\n    signalLater(this, \"clear\");\n  };\n  SharedTextMarker.prototype.find = function() {\n    return this.primary.find();\n  };\n  SharedTextMarker.prototype.getOptions = function(copyWidget) {\n    var inner = this.primary.getOptions(copyWidget);\n    inner.shared = true;\n    return inner;\n  };\n\n  function markTextShared(doc, from, to, options, type) {\n    options = copyObj(options);\n    options.shared = false;\n    var markers = [markText(doc, from, to, options, type)], primary = markers[0];\n    var widget = options.replacedWith;\n    linkedDocs(doc, function(doc) {\n      if (widget) options.replacedWith = widget.cloneNode(true);\n      markers.push(markText(doc, clipPos(doc, from), clipPos(doc, to), options, type));\n      for (var i = 0; i < doc.linked.length; ++i)\n        if (doc.linked[i].isParent) return;\n      primary = lst(markers);\n    });\n    return new SharedTextMarker(markers, primary);\n  }\n\n  // TEXTMARKER SPANS\n\n  function getMarkedSpanFor(spans, marker) {\n    if (spans) for (var i = 0; i < spans.length; ++i) {\n      var span = spans[i];\n      if (span.marker == marker) return span;\n    }\n  }\n  function removeMarkedSpan(spans, span) {\n    for (var r, i = 0; i < spans.length; ++i)\n      if (spans[i] != span) (r || (r = [])).push(spans[i]);\n    return r;\n  }\n  function addMarkedSpan(line, span) {\n    line.markedSpans = line.markedSpans ? line.markedSpans.concat([span]) : [span];\n    span.marker.attachLine(line);\n  }\n\n  function markedSpansBefore(old, startCh, isInsert) {\n    if (old) for (var i = 0, nw; i < old.length; ++i) {\n      var span = old[i], marker = span.marker;\n      var startsBefore = span.from == null || (marker.inclusiveLeft ? span.from <= startCh : span.from < startCh);\n      if (startsBefore || marker.type == \"bookmark\" && span.from == startCh && (!isInsert || !span.marker.insertLeft)) {\n        var endsAfter = span.to == null || (marker.inclusiveRight ? span.to >= startCh : span.to > startCh);\n        (nw || (nw = [])).push({from: span.from,\n                                to: endsAfter ? null : span.to,\n                                marker: marker});\n      }\n    }\n    return nw;\n  }\n\n  function markedSpansAfter(old, endCh, isInsert) {\n    if (old) for (var i = 0, nw; i < old.length; ++i) {\n      var span = old[i], marker = span.marker;\n      var endsAfter = span.to == null || (marker.inclusiveRight ? span.to >= endCh : span.to > endCh);\n      if (endsAfter || marker.type == \"bookmark\" && span.from == endCh && (!isInsert || span.marker.insertLeft)) {\n        var startsBefore = span.from == null || (marker.inclusiveLeft ? 
span.from <= endCh : span.from < endCh);\n        (nw || (nw = [])).push({from: startsBefore ? null : span.from - endCh,\n                                to: span.to == null ? null : span.to - endCh,\n                                marker: marker});\n      }\n    }\n    return nw;\n  }\n\n  function stretchSpansOverChange(doc, change) {\n    var oldFirst = isLine(doc, change.from.line) && getLine(doc, change.from.line).markedSpans;\n    var oldLast = isLine(doc, change.to.line) && getLine(doc, change.to.line).markedSpans;\n    if (!oldFirst && !oldLast) return null;\n\n    var startCh = change.from.ch, endCh = change.to.ch, isInsert = posEq(change.from, change.to);\n    // Get the spans that 'stick out' on both sides\n    var first = markedSpansBefore(oldFirst, startCh, isInsert);\n    var last = markedSpansAfter(oldLast, endCh, isInsert);\n\n    // Next, merge those two ends\n    var sameLine = change.text.length == 1, offset = lst(change.text).length + (sameLine ? startCh : 0);\n    if (first) {\n      // Fix up .to properties of first\n      for (var i = 0; i < first.length; ++i) {\n        var span = first[i];\n        if (span.to == null) {\n          var found = getMarkedSpanFor(last, span.marker);\n          if (!found) span.to = startCh;\n          else if (sameLine) span.to = found.to == null ? null : found.to + offset;\n        }\n      }\n    }\n    if (last) {\n      // Fix up .from in last (or move them into first in case of sameLine)\n      for (var i = 0; i < last.length; ++i) {\n        var span = last[i];\n        if (span.to != null) span.to += offset;\n        if (span.from == null) {\n          var found = getMarkedSpanFor(first, span.marker);\n          if (!found) {\n            span.from = offset;\n            if (sameLine) (first || (first = [])).push(span);\n          }\n        } else {\n          span.from += offset;\n          if (sameLine) (first || (first = [])).push(span);\n        }\n      }\n    }\n\n    var newMarkers = [first];\n    if (!sameLine) {\n      // Fill gap with whole-line-spans\n      var gap = change.text.length - 2, gapMarkers;\n      if (gap > 0 && first)\n        for (var i = 0; i < first.length; ++i)\n          if (first[i].to == null)\n            (gapMarkers || (gapMarkers = [])).push({from: null, to: null, marker: first[i].marker});\n      for (var i = 0; i < gap; ++i)\n        newMarkers.push(gapMarkers);\n      newMarkers.push(last);\n    }\n    return newMarkers;\n  }\n\n  function mergeOldSpans(doc, change) {\n    var old = getOldSpans(doc, change);\n    var stretched = stretchSpansOverChange(doc, change);\n    if (!old) return stretched;\n    if (!stretched) return old;\n\n    for (var i = 0; i < old.length; ++i) {\n      var oldCur = old[i], stretchCur = stretched[i];\n      if (oldCur && stretchCur) {\n        spans: for (var j = 0; j < stretchCur.length; ++j) {\n          var span = stretchCur[j];\n          for (var k = 0; k < oldCur.length; ++k)\n            if (oldCur[k].marker == span.marker) continue spans;\n          oldCur.push(span);\n        }\n      } else if (stretchCur) {\n        old[i] = stretchCur;\n      }\n    }\n    return old;\n  }\n\n  function removeReadOnlyRanges(doc, from, to) {\n    var markers = null;\n    doc.iter(from.line, to.line + 1, function(line) {\n      if (line.markedSpans) for (var i = 0; i < line.markedSpans.length; ++i) {\n        var mark = line.markedSpans[i].marker;\n        if (mark.readOnly && (!markers || indexOf(markers, mark) == -1))\n          (markers || (markers = 
[])).push(mark);\n      }\n    });\n    if (!markers) return null;\n    var parts = [{from: from, to: to}];\n    for (var i = 0; i < markers.length; ++i) {\n      var mk = markers[i], m = mk.find();\n      for (var j = 0; j < parts.length; ++j) {\n        var p = parts[j];\n        if (posLess(p.to, m.from) || posLess(m.to, p.from)) continue;\n        var newParts = [j, 1];\n        if (posLess(p.from, m.from) || !mk.inclusiveLeft && posEq(p.from, m.from))\n          newParts.push({from: p.from, to: m.from});\n        if (posLess(m.to, p.to) || !mk.inclusiveRight && posEq(p.to, m.to))\n          newParts.push({from: m.to, to: p.to});\n        parts.splice.apply(parts, newParts);\n        j += newParts.length - 1;\n      }\n    }\n    return parts;\n  }\n\n  function collapsedSpanAt(line, ch) {\n    var sps = sawCollapsedSpans && line.markedSpans, found;\n    if (sps) for (var sp, i = 0; i < sps.length; ++i) {\n      sp = sps[i];\n      if (!sp.marker.collapsed) continue;\n      if ((sp.from == null || sp.from < ch) &&\n          (sp.to == null || sp.to > ch) &&\n          (!found || found.width < sp.marker.width))\n        found = sp.marker;\n    }\n    return found;\n  }\n  function collapsedSpanAtStart(line) { return collapsedSpanAt(line, -1); }\n  function collapsedSpanAtEnd(line) { return collapsedSpanAt(line, line.text.length + 1); }\n\n  function visualLine(doc, line) {\n    var merged;\n    while (merged = collapsedSpanAtStart(line))\n      line = getLine(doc, merged.find().from.line);\n    return line;\n  }\n\n  function lineIsHidden(doc, line) {\n    var sps = sawCollapsedSpans && line.markedSpans;\n    if (sps) for (var sp, i = 0; i < sps.length; ++i) {\n      sp = sps[i];\n      if (!sp.marker.collapsed) continue;\n      if (sp.from == null) return true;\n      if (sp.from == 0 && sp.marker.inclusiveLeft && lineIsHiddenInner(doc, line, sp))\n        return true;\n    }\n  }\n  function lineIsHiddenInner(doc, line, span) {\n    if (span.to == null) {\n      var end = span.marker.find().to, endLine = getLine(doc, end.line);\n      return lineIsHiddenInner(doc, endLine, getMarkedSpanFor(endLine.markedSpans, span.marker));\n    }\n    if (span.marker.inclusiveRight && span.to == line.text.length)\n      return true;\n    for (var sp, i = 0; i < line.markedSpans.length; ++i) {\n      sp = line.markedSpans[i];\n      if (sp.marker.collapsed && sp.from == span.to &&\n          (sp.marker.inclusiveLeft || span.marker.inclusiveRight) &&\n          lineIsHiddenInner(doc, line, sp)) return true;\n    }\n  }\n\n  function detachMarkedSpans(line) {\n    var spans = line.markedSpans;\n    if (!spans) return;\n    for (var i = 0; i < spans.length; ++i)\n      spans[i].marker.detachLine(line);\n    line.markedSpans = null;\n  }\n\n  function attachMarkedSpans(line, spans) {\n    if (!spans) return;\n    for (var i = 0; i < spans.length; ++i)\n      spans[i].marker.attachLine(line);\n    line.markedSpans = spans;\n  }\n\n  // LINE WIDGETS\n\n  var LineWidget = CodeMirror.LineWidget = function(cm, node, options) {\n    for (var opt in options) if (options.hasOwnProperty(opt))\n      this[opt] = options[opt];\n    this.cm = cm;\n    this.node = node;\n  };\n  function widgetOperation(f) {\n    return function() {\n      var withOp = !this.cm.curOp;\n      if (withOp) startOperation(this.cm);\n      try {var result = f.apply(this, arguments);}\n      finally {if (withOp) endOperation(this.cm);}\n      return result;\n    };\n  }\n  LineWidget.prototype.clear = widgetOperation(function() {\n    var 
ws = this.line.widgets, no = lineNo(this.line);\n    if (no == null || !ws) return;\n    for (var i = 0; i < ws.length; ++i) if (ws[i] == this) ws.splice(i--, 1);\n    if (!ws.length) this.line.widgets = null;\n    updateLineHeight(this.line, Math.max(0, this.line.height - widgetHeight(this)));\n    regChange(this.cm, no, no + 1);\n  });\n  LineWidget.prototype.changed = widgetOperation(function() {\n    var oldH = this.height;\n    this.height = null;\n    var diff = widgetHeight(this) - oldH;\n    if (!diff) return;\n    updateLineHeight(this.line, this.line.height + diff);\n    var no = lineNo(this.line);\n    regChange(this.cm, no, no + 1);\n  });\n\n  function widgetHeight(widget) {\n    if (widget.height != null) return widget.height;\n    if (!widget.node.parentNode || widget.node.parentNode.nodeType != 1)\n      removeChildrenAndAdd(widget.cm.display.measure, elt(\"div\", [widget.node], null, \"position: relative\"));\n    return widget.height = widget.node.offsetHeight;\n  }\n\n  function addLineWidget(cm, handle, node, options) {\n    var widget = new LineWidget(cm, node, options);\n    if (widget.noHScroll) cm.display.alignWidgets = true;\n    changeLine(cm, handle, function(line) {\n      (line.widgets || (line.widgets = [])).push(widget);\n      widget.line = line;\n      if (!lineIsHidden(cm.doc, line) || widget.showIfHidden) {\n        var aboveVisible = heightAtLine(cm, line) < cm.display.scroller.scrollTop;\n        updateLineHeight(line, line.height + widgetHeight(widget));\n        if (aboveVisible) addToScrollPos(cm, 0, widget.height);\n      }\n      return true;\n    });\n    return widget;\n  }\n\n  // LINE DATA STRUCTURE\n\n  // Line objects. These hold state related to a line, including\n  // highlighting info (the styles array).\n  function makeLine(text, markedSpans, estimateHeight) {\n    var line = {text: text};\n    attachMarkedSpans(line, markedSpans);\n    line.height = estimateHeight ? estimateHeight(line) : 1;\n    return line;\n  }\n\n  function updateLine(line, text, markedSpans, estimateHeight) {\n    line.text = text;\n    if (line.stateAfter) line.stateAfter = null;\n    if (line.styles) line.styles = null;\n    if (line.order != null) line.order = null;\n    detachMarkedSpans(line);\n    attachMarkedSpans(line, markedSpans);\n    var estHeight = estimateHeight ? 
estimateHeight(line) : 1;\n    if (estHeight != line.height) updateLineHeight(line, estHeight);\n  }\n\n  function cleanUpLine(line) {\n    line.parent = null;\n    detachMarkedSpans(line);\n  }\n\n  // Run the given mode's parser over a line, update the styles\n  // array, which contains alternating fragments of text and CSS\n  // classes.\n  function runMode(cm, text, mode, state, f) {\n    var flattenSpans = mode.flattenSpans;\n    if (flattenSpans == null) flattenSpans = cm.options.flattenSpans;\n    var curText = \"\", curStyle = null;\n    var stream = new StringStream(text, cm.options.tabSize);\n    if (text == \"\" && mode.blankLine) mode.blankLine(state);\n    while (!stream.eol()) {\n      var style = mode.token(stream, state);\n      if (stream.pos > 5000) {\n        flattenSpans = false;\n        // Webkit seems to refuse to render text nodes longer than 57444 characters\n        stream.pos = Math.min(text.length, stream.start + 50000);\n        style = null;\n      }\n      var substr = stream.current();\n      stream.start = stream.pos;\n      if (!flattenSpans || curStyle != style) {\n        if (curText) f(curText, curStyle);\n        curText = substr; curStyle = style;\n      } else curText = curText + substr;\n    }\n    if (curText) f(curText, curStyle);\n  }\n\n  function highlightLine(cm, line, state) {\n    // A styles array always starts with a number identifying the\n    // mode/overlays that it is based on (for easy invalidation).\n    var st = [cm.state.modeGen];\n    // Compute the base array of styles\n    runMode(cm, line.text, cm.doc.mode, state, function(txt, style) {st.push(txt, style);});\n\n    // Run overlays, adjust style array.\n    for (var o = 0; o < cm.state.overlays.length; ++o) {\n      var overlay = cm.state.overlays[o], i = 1;\n      runMode(cm, line.text, overlay.mode, true, function(txt, style) {\n        var start = i, len = txt.length;\n        // Ensure there's a token end at the current position, and that i points at it\n        while (len) {\n          var cur = st[i], len_ = cur.length;\n          if (len_ <= len) {\n            len -= len_;\n          } else {\n            st.splice(i, 1, cur.slice(0, len), st[i+1], cur.slice(len));\n            len = 0;\n          }\n          i += 2;\n        }\n        if (!style) return;\n        if (overlay.opaque) {\n          st.splice(start, i - start, txt, style);\n          i = start + 2;\n        } else {\n          for (; start < i; start += 2) {\n            var cur = st[start+1];\n            st[start+1] = cur ? 
cur + \" \" + style : style;\n          }\n        }\n      });\n    }\n\n    return st;\n  }\n\n  function getLineStyles(cm, line) {\n    if (!line.styles || line.styles[0] != cm.state.modeGen)\n      line.styles = highlightLine(cm, line, line.stateAfter = getStateBefore(cm, lineNo(line)));\n    return line.styles;\n  }\n\n  // Lightweight form of highlight -- proceed over this line and\n  // update state, but don't save a style array.\n  function processLine(cm, line, state) {\n    var mode = cm.doc.mode;\n    var stream = new StringStream(line.text, cm.options.tabSize);\n    if (line.text == \"\" && mode.blankLine) mode.blankLine(state);\n    while (!stream.eol() && stream.pos <= 5000) {\n      mode.token(stream, state);\n      stream.start = stream.pos;\n    }\n  }\n\n  var styleToClassCache = {};\n  function styleToClass(style) {\n    if (!style) return null;\n    return styleToClassCache[style] ||\n      (styleToClassCache[style] = \"cm-\" + style.replace(/ +/g, \" cm-\"));\n  }\n\n  function lineContent(cm, realLine, measure) {\n    var merged, line = realLine, lineBefore, sawBefore, simple = true;\n    while (merged = collapsedSpanAtStart(line)) {\n      simple = false;\n      line = getLine(cm.doc, merged.find().from.line);\n      if (!lineBefore) lineBefore = line;\n    }\n\n    var builder = {pre: elt(\"pre\"), col: 0, pos: 0, display: !measure,\n                   measure: null, addedOne: false, cm: cm};\n    if (line.textClass) builder.pre.className = line.textClass;\n\n    do {\n      builder.measure = line == realLine && measure;\n      builder.pos = 0;\n      builder.addToken = builder.measure ? buildTokenMeasure : buildToken;\n      if ((ie || webkit) && cm.getOption(\"lineWrapping\"))\n        builder.addToken = buildTokenSplitSpaces(builder.addToken);\n      if (measure && sawBefore && line != realLine && !builder.addedOne) {\n        measure[0] = builder.pre.appendChild(zeroWidthElement(cm.display.measure));\n        builder.addedOne = true;\n      }\n      var next = insertLineContent(line, builder, getLineStyles(cm, line));\n      sawBefore = line == lineBefore;\n      if (next) {\n        line = getLine(cm.doc, next.to.line);\n        simple = false;\n      }\n    } while (next);\n\n    if (measure && !builder.addedOne)\n      measure[0] = builder.pre.appendChild(simple ? elt(\"span\", \"\\u00a0\") : zeroWidthElement(cm.display.measure));\n    if (!builder.pre.firstChild && !lineIsHidden(cm.doc, realLine))\n      builder.pre.appendChild(document.createTextNode(\"\\u00a0\"));\n\n    var order;\n    // Work around problem with the reported dimensions of single-char\n    // direction spans on IE (issue #1129). 
See also the comment in\n    // cursorCoords.\n    if (measure && ie && (order = getOrder(line))) {\n      var l = order.length - 1;\n      if (order[l].from == order[l].to) --l;\n      var last = order[l], prev = order[l - 1];\n      if (last.from + 1 == last.to && prev && last.level < prev.level) {\n        var span = measure[builder.pos - 1];\n        if (span) span.parentNode.insertBefore(span.measureRight = zeroWidthElement(cm.display.measure),\n                                               span.nextSibling);\n      }\n    }\n\n    signal(cm, \"renderLine\", cm, realLine, builder.pre);\n    return builder.pre;\n  }\n\n  var tokenSpecialChars = /[\\t\\u0000-\\u0019\\u00ad\\u200b\\u2028\\u2029\\uFEFF]/g;\n  function buildToken(builder, text, style, startStyle, endStyle) {\n    if (!text) return;\n    if (!tokenSpecialChars.test(text)) {\n      builder.col += text.length;\n      var content = document.createTextNode(text);\n    } else {\n      var content = document.createDocumentFragment(), pos = 0;\n      while (true) {\n        tokenSpecialChars.lastIndex = pos;\n        var m = tokenSpecialChars.exec(text);\n        var skipped = m ? m.index - pos : text.length - pos;\n        if (skipped) {\n          content.appendChild(document.createTextNode(text.slice(pos, pos + skipped)));\n          builder.col += skipped;\n        }\n        if (!m) break;\n        pos += skipped + 1;\n        if (m[0] == \"\\t\") {\n          var tabSize = builder.cm.options.tabSize, tabWidth = tabSize - builder.col % tabSize;\n          content.appendChild(elt(\"span\", spaceStr(tabWidth), \"cm-tab\"));\n          builder.col += tabWidth;\n        } else {\n          var token = elt(\"span\", \"\\u2022\", \"cm-invalidchar\");\n          token.title = \"\\\\u\" + m[0].charCodeAt(0).toString(16);\n          content.appendChild(token);\n          builder.col += 1;\n        }\n      }\n    }\n    if (style || startStyle || endStyle || builder.measure) {\n      var fullStyle = style || \"\";\n      if (startStyle) fullStyle += startStyle;\n      if (endStyle) fullStyle += endStyle;\n      return builder.pre.appendChild(elt(\"span\", [content], fullStyle));\n    }\n    builder.pre.appendChild(content);\n  }\n\n  function buildTokenMeasure(builder, text, style, startStyle, endStyle) {\n    var wrapping = builder.cm.options.lineWrapping;\n    for (var i = 0; i < text.length; ++i) {\n      var ch = text.charAt(i), start = i == 0;\n      if (ch >= \"\\ud800\" && ch < \"\\udbff\" && i < text.length - 1) {\n        ch = text.slice(i, i + 2);\n        ++i;\n      } else if (i && wrapping &&\n                 spanAffectsWrapping.test(text.slice(i - 1, i + 1))) {\n        builder.pre.appendChild(elt(\"wbr\"));\n      }\n      var span = builder.measure[builder.pos] =\n        buildToken(builder, ch, style,\n                   start && startStyle, i == text.length - 1 && endStyle);\n      // In IE single-space nodes wrap differently than spaces\n      // embedded in larger text nodes, except when set to\n      // white-space: normal (issue #1268).\n      if (ie && wrapping && ch == \" \" && i && !/\\s/.test(text.charAt(i - 1)) &&\n          i < text.length - 1 && !/\\s/.test(text.charAt(i + 1)))\n        span.style.whiteSpace = \"normal\";\n      builder.pos += ch.length;\n    }\n    if (text.length) builder.addedOne = true;\n  }\n\n  function buildTokenSplitSpaces(inner) {\n    function split(old) {\n      var out = \" \";\n      for (var i = 0; i < old.length - 2; ++i) out += i % 2 ? 
\" \" : \"\\u00a0\";\n      out += \" \";\n      return out;\n    }\n    return function(builder, text, style, startStyle, endStyle) {\n      return inner(builder, text.replace(/ {3,}/, split), style, startStyle, endStyle);\n    };\n  }\n\n  function buildCollapsedSpan(builder, size, widget) {\n    if (widget) {\n      if (!builder.display) widget = widget.cloneNode(true);\n      builder.pre.appendChild(widget);\n      if (builder.measure && size) {\n        builder.measure[builder.pos] = widget;\n        builder.addedOne = true;\n      }\n    }\n    builder.pos += size;\n  }\n\n  // Outputs a number of spans to make up a line, taking highlighting\n  // and marked text into account.\n  function insertLineContent(line, builder, styles) {\n    var spans = line.markedSpans;\n    if (!spans) {\n      for (var i = 1; i < styles.length; i+=2)\n        builder.addToken(builder, styles[i], styleToClass(styles[i+1]));\n      return;\n    }\n\n    var allText = line.text, len = allText.length;\n    var pos = 0, i = 1, text = \"\", style;\n    var nextChange = 0, spanStyle, spanEndStyle, spanStartStyle, collapsed;\n    for (;;) {\n      if (nextChange == pos) { // Update current marker set\n        spanStyle = spanEndStyle = spanStartStyle = \"\";\n        collapsed = null; nextChange = Infinity;\n        var foundBookmark = null;\n        for (var j = 0; j < spans.length; ++j) {\n          var sp = spans[j], m = sp.marker;\n          if (sp.from <= pos && (sp.to == null || sp.to > pos)) {\n            if (sp.to != null && nextChange > sp.to) { nextChange = sp.to; spanEndStyle = \"\"; }\n            if (m.className) spanStyle += \" \" + m.className;\n            if (m.startStyle && sp.from == pos) spanStartStyle += \" \" + m.startStyle;\n            if (m.endStyle && sp.to == nextChange) spanEndStyle += \" \" + m.endStyle;\n            if (m.collapsed && (!collapsed || collapsed.marker.width < m.width))\n              collapsed = sp;\n          } else if (sp.from > pos && nextChange > sp.from) {\n            nextChange = sp.from;\n          }\n          if (m.type == \"bookmark\" && sp.from == pos && m.replacedWith)\n            foundBookmark = m.replacedWith;\n        }\n        if (collapsed && (collapsed.from || 0) == pos) {\n          buildCollapsedSpan(builder, (collapsed.to == null ? len : collapsed.to) - pos,\n                             collapsed.from != null && collapsed.marker.replacedWith);\n          if (collapsed.to == null) return collapsed.marker.find();\n        }\n        if (foundBookmark && !collapsed) buildCollapsedSpan(builder, 0, foundBookmark);\n      }\n      if (pos >= len) break;\n\n      var upto = Math.min(len, nextChange);\n      while (true) {\n        if (text) {\n          var end = pos + text.length;\n          if (!collapsed) {\n            var tokenText = end > upto ? text.slice(0, upto - pos) : text;\n            builder.addToken(builder, tokenText, style ? style + spanStyle : spanStyle,\n                             spanStartStyle, pos + tokenText.length == nextChange ? spanEndStyle : \"\");\n          }\n          if (end >= upto) {text = text.slice(upto - pos); pos = upto; break;}\n          pos = end;\n          spanStartStyle = \"\";\n        }\n        text = styles[i++]; style = styleToClass(styles[i++]);\n      }\n    }\n  }\n\n  // DOCUMENT DATA STRUCTURE\n\n  function updateDoc(doc, change, markedSpans, selAfter, estimateHeight) {\n    function spansFor(n) {return markedSpans ? 
markedSpans[n] : null;}\n    function update(line, text, spans) {\n      updateLine(line, text, spans, estimateHeight);\n      signalLater(line, \"change\", line, change);\n    }\n\n    var from = change.from, to = change.to, text = change.text;\n    var firstLine = getLine(doc, from.line), lastLine = getLine(doc, to.line);\n    var lastText = lst(text), lastSpans = spansFor(text.length - 1), nlines = to.line - from.line;\n\n    // First adjust the line structure\n    if (from.ch == 0 && to.ch == 0 && lastText == \"\") {\n      // This is a whole-line replace. Treated specially to make\n      // sure line objects move the way they are supposed to.\n      for (var i = 0, e = text.length - 1, added = []; i < e; ++i)\n        added.push(makeLine(text[i], spansFor(i), estimateHeight));\n      update(lastLine, lastLine.text, lastSpans);\n      if (nlines) doc.remove(from.line, nlines);\n      if (added.length) doc.insert(from.line, added);\n    } else if (firstLine == lastLine) {\n      if (text.length == 1) {\n        update(firstLine, firstLine.text.slice(0, from.ch) + lastText + firstLine.text.slice(to.ch), lastSpans);\n      } else {\n        for (var added = [], i = 1, e = text.length - 1; i < e; ++i)\n          added.push(makeLine(text[i], spansFor(i), estimateHeight));\n        added.push(makeLine(lastText + firstLine.text.slice(to.ch), lastSpans, estimateHeight));\n        update(firstLine, firstLine.text.slice(0, from.ch) + text[0], spansFor(0));\n        doc.insert(from.line + 1, added);\n      }\n    } else if (text.length == 1) {\n      update(firstLine, firstLine.text.slice(0, from.ch) + text[0] + lastLine.text.slice(to.ch), spansFor(0));\n      doc.remove(from.line + 1, nlines);\n    } else {\n      update(firstLine, firstLine.text.slice(0, from.ch) + text[0], spansFor(0));\n      update(lastLine, lastText + lastLine.text.slice(to.ch), lastSpans);\n      for (var i = 1, e = text.length - 1, added = []; i < e; ++i)\n        added.push(makeLine(text[i], spansFor(i), estimateHeight));\n      if (nlines > 1) doc.remove(from.line + 1, nlines - 1);\n      doc.insert(from.line + 1, added);\n    }\n\n    signalLater(doc, \"change\", doc, change);\n    setSelection(doc, selAfter.anchor, selAfter.head, null, true);\n  }\n\n  function LeafChunk(lines) {\n    this.lines = lines;\n    this.parent = null;\n    for (var i = 0, e = lines.length, height = 0; i < e; ++i) {\n      lines[i].parent = this;\n      height += lines[i].height;\n    }\n    this.height = height;\n  }\n\n  LeafChunk.prototype = {\n    chunkSize: function() { return this.lines.length; },\n    removeInner: function(at, n) {\n      for (var i = at, e = at + n; i < e; ++i) {\n        var line = this.lines[i];\n        this.height -= line.height;\n        cleanUpLine(line);\n        signalLater(line, \"delete\");\n      }\n      this.lines.splice(at, n);\n    },\n    collapse: function(lines) {\n      lines.splice.apply(lines, [lines.length, 0].concat(this.lines));\n    },\n    insertInner: function(at, lines, height) {\n      this.height += height;\n      this.lines = this.lines.slice(0, at).concat(lines).concat(this.lines.slice(at));\n      for (var i = 0, e = lines.length; i < e; ++i) lines[i].parent = this;\n    },\n    iterN: function(at, n, op) {\n      for (var e = at + n; at < e; ++at)\n        if (op(this.lines[at])) return true;\n    }\n  };\n\n  function BranchChunk(children) {\n    this.children = children;\n    var size = 0, height = 0;\n    for (var i = 0, e = children.length; i < e; ++i) {\n      var ch = 
children[i];\n      size += ch.chunkSize(); height += ch.height;\n      ch.parent = this;\n    }\n    this.size = size;\n    this.height = height;\n    this.parent = null;\n  }\n\n  BranchChunk.prototype = {\n    chunkSize: function() { return this.size; },\n    removeInner: function(at, n) {\n      this.size -= n;\n      for (var i = 0; i < this.children.length; ++i) {\n        var child = this.children[i], sz = child.chunkSize();\n        if (at < sz) {\n          var rm = Math.min(n, sz - at), oldHeight = child.height;\n          child.removeInner(at, rm);\n          this.height -= oldHeight - child.height;\n          if (sz == rm) { this.children.splice(i--, 1); child.parent = null; }\n          if ((n -= rm) == 0) break;\n          at = 0;\n        } else at -= sz;\n      }\n      if (this.size - n < 25) {\n        var lines = [];\n        this.collapse(lines);\n        this.children = [new LeafChunk(lines)];\n        this.children[0].parent = this;\n      }\n    },\n    collapse: function(lines) {\n      for (var i = 0, e = this.children.length; i < e; ++i) this.children[i].collapse(lines);\n    },\n    insertInner: function(at, lines, height) {\n      this.size += lines.length;\n      this.height += height;\n      for (var i = 0, e = this.children.length; i < e; ++i) {\n        var child = this.children[i], sz = child.chunkSize();\n        if (at <= sz) {\n          child.insertInner(at, lines, height);\n          if (child.lines && child.lines.length > 50) {\n            while (child.lines.length > 50) {\n              var spilled = child.lines.splice(child.lines.length - 25, 25);\n              var newleaf = new LeafChunk(spilled);\n              child.height -= newleaf.height;\n              this.children.splice(i + 1, 0, newleaf);\n              newleaf.parent = this;\n            }\n            this.maybeSpill();\n          }\n          break;\n        }\n        at -= sz;\n      }\n    },\n    maybeSpill: function() {\n      if (this.children.length <= 10) return;\n      var me = this;\n      do {\n        var spilled = me.children.splice(me.children.length - 5, 5);\n        var sibling = new BranchChunk(spilled);\n        if (!me.parent) { // Become the parent node\n          var copy = new BranchChunk(me.children);\n          copy.parent = me;\n          me.children = [copy, sibling];\n          me = copy;\n        } else {\n          me.size -= sibling.size;\n          me.height -= sibling.height;\n          var myIndex = indexOf(me.parent.children, me);\n          me.parent.children.splice(myIndex + 1, 0, sibling);\n        }\n        sibling.parent = me.parent;\n      } while (me.children.length > 10);\n      me.parent.maybeSpill();\n    },\n    iterN: function(at, n, op) {\n      for (var i = 0, e = this.children.length; i < e; ++i) {\n        var child = this.children[i], sz = child.chunkSize();\n        if (at < sz) {\n          var used = Math.min(n, sz - at);\n          if (child.iterN(at, used, op)) return true;\n          if ((n -= used) == 0) break;\n          at = 0;\n        } else at -= sz;\n      }\n    }\n  };\n\n  var nextDocId = 0;\n  var Doc = CodeMirror.Doc = function(text, mode, firstLine) {\n    if (!(this instanceof Doc)) return new Doc(text, mode, firstLine);\n    if (firstLine == null) firstLine = 0;\n\n    BranchChunk.call(this, [new LeafChunk([makeLine(\"\", null)])]);\n    this.first = firstLine;\n    this.scrollTop = this.scrollLeft = 0;\n    this.cantEdit = false;\n    this.history = makeHistory();\n    this.frontier = firstLine;\n    var start 
= Pos(firstLine, 0);\n    this.sel = {from: start, to: start, head: start, anchor: start, shift: false, extend: false, goalColumn: null};\n    this.id = ++nextDocId;\n    this.modeOption = mode;\n\n    if (typeof text == \"string\") text = splitLines(text);\n    updateDoc(this, {from: start, to: start, text: text}, null, {head: start, anchor: start});\n  };\n\n  Doc.prototype = createObj(BranchChunk.prototype, {\n    iter: function(from, to, op) {\n      if (op) this.iterN(from - this.first, to - from, op);\n      else this.iterN(this.first, this.first + this.size, from);\n    },\n\n    insert: function(at, lines) {\n      var height = 0;\n      for (var i = 0, e = lines.length; i < e; ++i) height += lines[i].height;\n      this.insertInner(at - this.first, lines, height);\n    },\n    remove: function(at, n) { this.removeInner(at - this.first, n); },\n\n    getValue: function(lineSep) {\n      var lines = getLines(this, this.first, this.first + this.size);\n      if (lineSep === false) return lines;\n      return lines.join(lineSep || \"\\n\");\n    },\n    setValue: function(code) {\n      var top = Pos(this.first, 0), last = this.first + this.size - 1;\n      makeChange(this, {from: top, to: Pos(last, getLine(this, last).text.length),\n                        text: splitLines(code), origin: \"setValue\"},\n                 {head: top, anchor: top}, true);\n    },\n    replaceRange: function(code, from, to, origin) {\n      from = clipPos(this, from);\n      to = to ? clipPos(this, to) : from;\n      replaceRange(this, code, from, to, origin);\n    },\n    getRange: function(from, to, lineSep) {\n      var lines = getBetween(this, clipPos(this, from), clipPos(this, to));\n      if (lineSep === false) return lines;\n      return lines.join(lineSep || \"\\n\");\n    },\n\n    getLine: function(line) {var l = this.getLineHandle(line); return l && l.text;},\n    setLine: function(line, text) {\n      if (isLine(this, line))\n        replaceRange(this, text, Pos(line, 0), clipPos(this, Pos(line)));\n    },\n    removeLine: function(line) {\n      if (line) replaceRange(this, \"\", clipPos(this, Pos(line - 1)), clipPos(this, Pos(line)));\n      else replaceRange(this, \"\", Pos(0, 0), clipPos(this, Pos(1, 0)));\n    },\n\n    getLineHandle: function(line) {if (isLine(this, line)) return getLine(this, line);},\n    getLineNumber: function(line) {return lineNo(line);},\n\n    lineCount: function() {return this.size;},\n    firstLine: function() {return this.first;},\n    lastLine: function() {return this.first + this.size - 1;},\n\n    clipPos: function(pos) {return clipPos(this, pos);},\n\n    getCursor: function(start) {\n      var sel = this.sel, pos;\n      if (start == null || start == \"head\") pos = sel.head;\n      else if (start == \"anchor\") pos = sel.anchor;\n      else if (start == \"end\" || start === false) pos = sel.to;\n      else pos = sel.from;\n      return copyPos(pos);\n    },\n    somethingSelected: function() {return !posEq(this.sel.head, this.sel.anchor);},\n\n    setCursor: docOperation(function(line, ch, extend) {\n      var pos = clipPos(this, typeof line == \"number\" ? 
Pos(line, ch || 0) : line);\n      if (extend) extendSelection(this, pos);\n      else setSelection(this, pos, pos);\n    }),\n    setSelection: docOperation(function(anchor, head) {\n      setSelection(this, clipPos(this, anchor), clipPos(this, head || anchor));\n    }),\n    extendSelection: docOperation(function(from, to) {\n      extendSelection(this, clipPos(this, from), to && clipPos(this, to));\n    }),\n\n    getSelection: function(lineSep) {return this.getRange(this.sel.from, this.sel.to, lineSep);},\n    replaceSelection: function(code, collapse, origin) {\n      makeChange(this, {from: this.sel.from, to: this.sel.to, text: splitLines(code), origin: origin}, collapse || \"around\");\n    },\n    undo: docOperation(function() {makeChangeFromHistory(this, \"undo\");}),\n    redo: docOperation(function() {makeChangeFromHistory(this, \"redo\");}),\n\n    setExtending: function(val) {this.sel.extend = val;},\n\n    historySize: function() {\n      var hist = this.history;\n      return {undo: hist.done.length, redo: hist.undone.length};\n    },\n    clearHistory: function() {this.history = makeHistory();},\n\n    markClean: function() {\n      this.history.dirtyCounter = 0;\n      this.history.lastOp = this.history.lastOrigin = null;\n    },\n    isClean: function () {return this.history.dirtyCounter == 0;},\n\n    getHistory: function() {\n      return {done: copyHistoryArray(this.history.done),\n              undone: copyHistoryArray(this.history.undone)};\n    },\n    setHistory: function(histData) {\n      var hist = this.history = makeHistory();\n      hist.done = histData.done.slice(0);\n      hist.undone = histData.undone.slice(0);\n    },\n\n    markText: function(from, to, options) {\n      return markText(this, clipPos(this, from), clipPos(this, to), options, \"range\");\n    },\n    setBookmark: function(pos, options) {\n      var realOpts = {replacedWith: options && (options.nodeType == null ? 
options.widget : options),\n                      insertLeft: options && options.insertLeft};\n      pos = clipPos(this, pos);\n      return markText(this, pos, pos, realOpts, \"bookmark\");\n    },\n    findMarksAt: function(pos) {\n      pos = clipPos(this, pos);\n      var markers = [], spans = getLine(this, pos.line).markedSpans;\n      if (spans) for (var i = 0; i < spans.length; ++i) {\n        var span = spans[i];\n        if ((span.from == null || span.from <= pos.ch) &&\n            (span.to == null || span.to >= pos.ch))\n          markers.push(span.marker.parent || span.marker);\n      }\n      return markers;\n    },\n    getAllMarks: function() {\n      var markers = [];\n      this.iter(function(line) {\n        var sps = line.markedSpans;\n        if (sps) for (var i = 0; i < sps.length; ++i)\n          if (sps[i].from != null) markers.push(sps[i].marker);\n      });\n      return markers;\n    },\n\n    posFromIndex: function(off) {\n      var ch, lineNo = this.first;\n      this.iter(function(line) {\n        var sz = line.text.length + 1;\n        if (sz > off) { ch = off; return true; }\n        off -= sz;\n        ++lineNo;\n      });\n      return clipPos(this, Pos(lineNo, ch));\n    },\n    indexFromPos: function (coords) {\n      coords = clipPos(this, coords);\n      var index = coords.ch;\n      if (coords.line < this.first || coords.ch < 0) return 0;\n      this.iter(this.first, coords.line, function (line) {\n        index += line.text.length + 1;\n      });\n      return index;\n    },\n\n    copy: function(copyHistory) {\n      var doc = new Doc(getLines(this, this.first, this.first + this.size), this.modeOption, this.first);\n      doc.scrollTop = this.scrollTop; doc.scrollLeft = this.scrollLeft;\n      doc.sel = {from: this.sel.from, to: this.sel.to, head: this.sel.head, anchor: this.sel.anchor,\n                 shift: this.sel.shift, extend: false, goalColumn: this.sel.goalColumn};\n      if (copyHistory) {\n        doc.history.undoDepth = this.history.undoDepth;\n        doc.setHistory(this.getHistory());\n      }\n      return doc;\n    },\n\n    linkedDoc: function(options) {\n      if (!options) options = {};\n      var from = this.first, to = this.first + this.size;\n      if (options.from != null && options.from > from) from = options.from;\n      if (options.to != null && options.to < to) to = options.to;\n      var copy = new Doc(getLines(this, from, to), options.mode || this.modeOption, from);\n      if (options.sharedHist) copy.history = this.history;\n      (this.linked || (this.linked = [])).push({doc: copy, sharedHist: options.sharedHist});\n      copy.linked = [{doc: this, isParent: true, sharedHist: options.sharedHist}];\n      return copy;\n    },\n    unlinkDoc: function(other) {\n      if (other instanceof CodeMirror) other = other.doc;\n      if (this.linked) for (var i = 0; i < this.linked.length; ++i) {\n        var link = this.linked[i];\n        if (link.doc != other) continue;\n        this.linked.splice(i, 1);\n        other.unlinkDoc(this);\n        break;\n      }\n      // If the histories were shared, split them again\n      if (other.history == this.history) {\n        var splitIds = [other.id];\n        linkedDocs(other, function(doc) {splitIds.push(doc.id);}, true);\n        other.history = makeHistory();\n        other.history.done = copyHistoryArray(this.history.done, splitIds);\n        other.history.undone = copyHistoryArray(this.history.undone, splitIds);\n      }\n    },\n    iterLinkedDocs: function(f) 
{linkedDocs(this, f);},\n\n    getMode: function() {return this.mode;},\n    getEditor: function() {return this.cm;}\n  });\n\n  Doc.prototype.eachLine = Doc.prototype.iter;\n\n  // The Doc methods that should be available on CodeMirror instances\n  var dontDelegate = \"iter insert remove copy getEditor\".split(\" \");\n  for (var prop in Doc.prototype) if (Doc.prototype.hasOwnProperty(prop) && indexOf(dontDelegate, prop) < 0)\n    CodeMirror.prototype[prop] = (function(method) {\n      return function() {return method.apply(this.doc, arguments);};\n    })(Doc.prototype[prop]);\n\n  function linkedDocs(doc, f, sharedHistOnly) {\n    function propagate(doc, skip, sharedHist) {\n      if (doc.linked) for (var i = 0; i < doc.linked.length; ++i) {\n        var rel = doc.linked[i];\n        if (rel.doc == skip) continue;\n        var shared = sharedHist && rel.sharedHist;\n        if (sharedHistOnly && !shared) continue;\n        f(rel.doc, shared);\n        propagate(rel.doc, doc, shared);\n      }\n    }\n    propagate(doc, null, true);\n  }\n\n  function attachDoc(cm, doc) {\n    if (doc.cm) throw new Error(\"This document is already in use.\");\n    cm.doc = doc;\n    doc.cm = cm;\n    estimateLineHeights(cm);\n    loadMode(cm);\n    if (!cm.options.lineWrapping) computeMaxLength(cm);\n    cm.options.mode = doc.modeOption;\n    regChange(cm);\n  }\n\n  // LINE UTILITIES\n\n  function getLine(chunk, n) {\n    n -= chunk.first;\n    while (!chunk.lines) {\n      for (var i = 0;; ++i) {\n        var child = chunk.children[i], sz = child.chunkSize();\n        if (n < sz) { chunk = child; break; }\n        n -= sz;\n      }\n    }\n    return chunk.lines[n];\n  }\n\n  function getBetween(doc, start, end) {\n    var out = [], n = start.line;\n    doc.iter(start.line, end.line + 1, function(line) {\n      var text = line.text;\n      if (n == end.line) text = text.slice(0, end.ch);\n      if (n == start.line) text = text.slice(start.ch);\n      out.push(text);\n      ++n;\n    });\n    return out;\n  }\n  function getLines(doc, from, to) {\n    var out = [];\n    doc.iter(from, to, function(line) { out.push(line.text); });\n    return out;\n  }\n\n  function updateLineHeight(line, height) {\n    var diff = height - line.height;\n    for (var n = line; n; n = n.parent) n.height += diff;\n  }\n\n  function lineNo(line) {\n    if (line.parent == null) return null;\n    var cur = line.parent, no = indexOf(cur.lines, line);\n    for (var chunk = cur.parent; chunk; cur = chunk, chunk = chunk.parent) {\n      for (var i = 0;; ++i) {\n        if (chunk.children[i] == cur) break;\n        no += chunk.children[i].chunkSize();\n      }\n    }\n    return no + cur.first;\n  }\n\n  function lineAtHeight(chunk, h) {\n    var n = chunk.first;\n    outer: do {\n      for (var i = 0, e = chunk.children.length; i < e; ++i) {\n        var child = chunk.children[i], ch = child.height;\n        if (h < ch) { chunk = child; continue outer; }\n        h -= ch;\n        n += child.chunkSize();\n      }\n      return n;\n    } while (!chunk.lines);\n    for (var i = 0, e = chunk.lines.length; i < e; ++i) {\n      var line = chunk.lines[i], lh = line.height;\n      if (h < lh) break;\n      h -= lh;\n    }\n    return n + i;\n  }\n\n  function heightAtLine(cm, lineObj) {\n    lineObj = visualLine(cm.doc, lineObj);\n\n    var h = 0, chunk = lineObj.parent;\n    for (var i = 0; i < chunk.lines.length; ++i) {\n      var line = chunk.lines[i];\n      if (line == lineObj) break;\n      else h += line.height;\n    }\n    for 
(var p = chunk.parent; p; chunk = p, p = chunk.parent) {\n      for (var i = 0; i < p.children.length; ++i) {\n        var cur = p.children[i];\n        if (cur == chunk) break;\n        else h += cur.height;\n      }\n    }\n    return h;\n  }\n\n  function getOrder(line) {\n    var order = line.order;\n    if (order == null) order = line.order = bidiOrdering(line.text);\n    return order;\n  }\n\n  // HISTORY\n\n  function makeHistory() {\n    return {\n      // Arrays of history events. Doing something adds an event to\n      // done and clears undo. Undoing moves events from done to\n      // undone, redoing moves them in the other direction.\n      done: [], undone: [], undoDepth: Infinity,\n      // Used to track when changes can be merged into a single undo\n      // event\n      lastTime: 0, lastOp: null, lastOrigin: null,\n      // Used by the isClean() method\n      dirtyCounter: 0\n    };\n  }\n\n  function attachLocalSpans(doc, change, from, to) {\n    var existing = change[\"spans_\" + doc.id], n = 0;\n    doc.iter(Math.max(doc.first, from), Math.min(doc.first + doc.size, to), function(line) {\n      if (line.markedSpans)\n        (existing || (existing = change[\"spans_\" + doc.id] = {}))[n] = line.markedSpans;\n      ++n;\n    });\n  }\n\n  function historyChangeFromChange(doc, change) {\n    var histChange = {from: change.from, to: changeEnd(change), text: getBetween(doc, change.from, change.to)};\n    attachLocalSpans(doc, histChange, change.from.line, change.to.line + 1);\n    linkedDocs(doc, function(doc) {attachLocalSpans(doc, histChange, change.from.line, change.to.line + 1);}, true);\n    return histChange;\n  }\n\n  function addToHistory(doc, change, selAfter, opId) {\n    var hist = doc.history;\n    hist.undone.length = 0;\n    var time = +new Date, cur = lst(hist.done);\n\n    if (cur &&\n        (hist.lastOp == opId ||\n         hist.lastOrigin == change.origin && change.origin &&\n         ((change.origin.charAt(0) == \"+\" && hist.lastTime > time - 600) || change.origin.charAt(0) == \"*\"))) {\n      // Merge this change into the last event\n      var last = lst(cur.changes);\n      if (posEq(change.from, change.to) && posEq(change.from, last.to)) {\n        // Optimized case for simple insertion -- don't want to add\n        // new changesets for every character typed\n        last.to = changeEnd(change);\n      } else {\n        // Add new sub-event\n        cur.changes.push(historyChangeFromChange(doc, change));\n      }\n      cur.anchorAfter = selAfter.anchor; cur.headAfter = selAfter.head;\n    } else {\n      // Can not be merged, start a new event.\n      cur = {changes: [historyChangeFromChange(doc, change)],\n             anchorBefore: doc.sel.anchor, headBefore: doc.sel.head,\n             anchorAfter: selAfter.anchor, headAfter: selAfter.head};\n      hist.done.push(cur);\n      while (hist.done.length > hist.undoDepth)\n        hist.done.shift();\n      if (hist.dirtyCounter < 0)\n        // The user has made a change after undoing past the last clean state.\n        // We can never get back to a clean state now until markClean() is called.\n        hist.dirtyCounter = NaN;\n      else\n        hist.dirtyCounter++;\n    }\n    hist.lastTime = time;\n    hist.lastOp = opId;\n    hist.lastOrigin = change.origin;\n  }\n\n  function removeClearedSpans(spans) {\n    if (!spans) return null;\n    for (var i = 0, out; i < spans.length; ++i) {\n      if (spans[i].marker.explicitlyCleared) { if (!out) out = spans.slice(0, i); }\n      else if (out) 
out.push(spans[i]);\n    }\n    return !out ? spans : out.length ? out : null;\n  }\n\n  function getOldSpans(doc, change) {\n    var found = change[\"spans_\" + doc.id];\n    if (!found) return null;\n    for (var i = 0, nw = []; i < change.text.length; ++i)\n      nw.push(removeClearedSpans(found[i]));\n    return nw;\n  }\n\n  // Used both to provide a JSON-safe object in .getHistory, and, when\n  // detaching a document, to split the history in two\n  function copyHistoryArray(events, newGroup) {\n    for (var i = 0, copy = []; i < events.length; ++i) {\n      var event = events[i], changes = event.changes, newChanges = [];\n      copy.push({changes: newChanges, anchorBefore: event.anchorBefore, headBefore: event.headBefore,\n                 anchorAfter: event.anchorAfter, headAfter: event.headAfter});\n      for (var j = 0; j < changes.length; ++j) {\n        var change = changes[j], m;\n        newChanges.push({from: change.from, to: change.to, text: change.text});\n        if (newGroup) for (var prop in change) if (m = prop.match(/^spans_(\\d+)$/)) {\n          if (indexOf(newGroup, Number(m[1])) > -1) {\n            lst(newChanges)[prop] = change[prop];\n            delete change[prop];\n          }\n        }\n      }\n    }\n    return copy;\n  }\n\n  // Rebasing/resetting history to deal with externally-sourced changes\n\n  function rebaseHistSel(pos, from, to, diff) {\n    if (to < pos.line) {\n      pos.line += diff;\n    } else if (from < pos.line) {\n      pos.line = from;\n      pos.ch = 0;\n    }\n  }\n\n  // Tries to rebase an array of history events given a change in the\n  // document. If the change touches the same lines as the event, the\n  // event, and everything 'behind' it, is discarded. If the change is\n  // before the event, the event's positions are updated. 
Uses a\n  // copy-on-write scheme for the positions, to avoid having to\n  // reallocate them all on every rebase, but also avoid problems with\n  // shared position objects being unsafely updated.\n  function rebaseHistArray(array, from, to, diff) {\n    for (var i = 0; i < array.length; ++i) {\n      var sub = array[i], ok = true;\n      for (var j = 0; j < sub.changes.length; ++j) {\n        var cur = sub.changes[j];\n        if (!sub.copied) { cur.from = copyPos(cur.from); cur.to = copyPos(cur.to); }\n        if (to < cur.from.line) {\n          cur.from.line += diff;\n          cur.to.line += diff;\n        } else if (from <= cur.to.line) {\n          ok = false;\n          break;\n        }\n      }\n      if (!sub.copied) {\n        sub.anchorBefore = copyPos(sub.anchorBefore); sub.headBefore = copyPos(sub.headBefore);\n        sub.anchorAfter = copyPos(sub.anchorAfter); sub.readAfter = copyPos(sub.headAfter);\n        sub.copied = true;\n      }\n      if (!ok) {\n        array.splice(0, i + 1);\n        i = 0;\n      } else {\n        rebaseHistSel(sub.anchorBefore); rebaseHistSel(sub.headBefore);\n        rebaseHistSel(sub.anchorAfter); rebaseHistSel(sub.headAfter);\n      }\n    }\n  }\n\n  function rebaseHist(hist, change) {\n    var from = change.from.line, to = change.to.line, diff = change.text.length - (to - from) - 1;\n    rebaseHistArray(hist.done, from, to, diff);\n    rebaseHistArray(hist.undone, from, to, diff);\n  }\n\n  // EVENT OPERATORS\n\n  function stopMethod() {e_stop(this);}\n  // Ensure an event has a stop method.\n  function addStop(event) {\n    if (!event.stop) event.stop = stopMethod;\n    return event;\n  }\n\n  function e_preventDefault(e) {\n    if (e.preventDefault) e.preventDefault();\n    else e.returnValue = false;\n  }\n  function e_stopPropagation(e) {\n    if (e.stopPropagation) e.stopPropagation();\n    else e.cancelBubble = true;\n  }\n  function e_stop(e) {e_preventDefault(e); e_stopPropagation(e);}\n  CodeMirror.e_stop = e_stop;\n  CodeMirror.e_preventDefault = e_preventDefault;\n  CodeMirror.e_stopPropagation = e_stopPropagation;\n\n  function e_target(e) {return e.target || e.srcElement;}\n  function e_button(e) {\n    var b = e.which;\n    if (b == null) {\n      if (e.button & 1) b = 1;\n      else if (e.button & 2) b = 3;\n      else if (e.button & 4) b = 2;\n    }\n    if (mac && e.ctrlKey && b == 1) b = 3;\n    return b;\n  }\n\n  // EVENT HANDLING\n\n  function on(emitter, type, f) {\n    if (emitter.addEventListener)\n      emitter.addEventListener(type, f, false);\n    else if (emitter.attachEvent)\n      emitter.attachEvent(\"on\" + type, f);\n    else {\n      var map = emitter._handlers || (emitter._handlers = {});\n      var arr = map[type] || (map[type] = []);\n      arr.push(f);\n    }\n  }\n\n  function off(emitter, type, f) {\n    if (emitter.removeEventListener)\n      emitter.removeEventListener(type, f, false);\n    else if (emitter.detachEvent)\n      emitter.detachEvent(\"on\" + type, f);\n    else {\n      var arr = emitter._handlers && emitter._handlers[type];\n      if (!arr) return;\n      for (var i = 0; i < arr.length; ++i)\n        if (arr[i] == f) { arr.splice(i, 1); break; }\n    }\n  }\n\n  function signal(emitter, type /*, values...*/) {\n    var arr = emitter._handlers && emitter._handlers[type];\n    if (!arr) return;\n    var args = Array.prototype.slice.call(arguments, 2);\n    for (var i = 0; i < arr.length; ++i) arr[i].apply(null, args);\n  }\n\n  var delayedCallbacks, delayedCallbackDepth = 0;\n  
function signalLater(emitter, type /*, values...*/) {\n    var arr = emitter._handlers && emitter._handlers[type];\n    if (!arr) return;\n    var args = Array.prototype.slice.call(arguments, 2);\n    if (!delayedCallbacks) {\n      ++delayedCallbackDepth;\n      delayedCallbacks = [];\n      setTimeout(fireDelayed, 0);\n    }\n    function bnd(f) {return function(){f.apply(null, args);};};\n    for (var i = 0; i < arr.length; ++i)\n      delayedCallbacks.push(bnd(arr[i]));\n  }\n\n  function fireDelayed() {\n    --delayedCallbackDepth;\n    var delayed = delayedCallbacks;\n    delayedCallbacks = null;\n    for (var i = 0; i < delayed.length; ++i) delayed[i]();\n  }\n\n  function hasHandler(emitter, type) {\n    var arr = emitter._handlers && emitter._handlers[type];\n    return arr && arr.length > 0;\n  }\n\n  CodeMirror.on = on; CodeMirror.off = off; CodeMirror.signal = signal;\n\n  // MISC UTILITIES\n\n  // Number of pixels added to scroller and sizer to hide scrollbar\n  var scrollerCutOff = 30;\n\n  // Returned or thrown by various protocols to signal 'I'm not\n  // handling this'.\n  var Pass = CodeMirror.Pass = {toString: function(){return \"CodeMirror.Pass\";}};\n\n  function Delayed() {this.id = null;}\n  Delayed.prototype = {set: function(ms, f) {clearTimeout(this.id); this.id = setTimeout(f, ms);}};\n\n  // Counts the column offset in a string, taking tabs into account.\n  // Used mostly to find indentation.\n  function countColumn(string, end, tabSize, startIndex, startValue) {\n    if (end == null) {\n      end = string.search(/[^\\s\\u00a0]/);\n      if (end == -1) end = string.length;\n    }\n    for (var i = startIndex || 0, n = startValue || 0; i < end; ++i) {\n      if (string.charAt(i) == \"\\t\") n += tabSize - (n % tabSize);\n      else ++n;\n    }\n    return n;\n  }\n  CodeMirror.countColumn = countColumn;\n\n  var spaceStrs = [\"\"];\n  function spaceStr(n) {\n    while (spaceStrs.length <= n)\n      spaceStrs.push(lst(spaceStrs) + \" \");\n    return spaceStrs[n];\n  }\n\n  function lst(arr) { return arr[arr.length-1]; }\n\n  function selectInput(node) {\n    if (ios) { // Mobile Safari apparently has a bug where select() is broken.\n      node.selectionStart = 0;\n      node.selectionEnd = node.value.length;\n    } else node.select();\n  }\n\n  function indexOf(collection, elt) {\n    if (collection.indexOf) return collection.indexOf(elt);\n    for (var i = 0, e = collection.length; i < e; ++i)\n      if (collection[i] == elt) return i;\n    return -1;\n  }\n\n  function createObj(base, props) {\n    function Obj() {}\n    Obj.prototype = base;\n    var inst = new Obj();\n    if (props) copyObj(props, inst);\n    return inst;\n  }\n\n  function copyObj(obj, target) {\n    if (!target) target = {};\n    for (var prop in obj) if (obj.hasOwnProperty(prop)) target[prop] = obj[prop];\n    return target;\n  }\n\n  function emptyArray(size) {\n    for (var a = [], i = 0; i < size; ++i) a.push(undefined);\n    return a;\n  }\n\n  function bind(f) {\n    var args = Array.prototype.slice.call(arguments, 1);\n    return function(){return f.apply(null, args);};\n  }\n\n  var nonASCIISingleCaseWordChar = /[\\u3040-\\u309f\\u30a0-\\u30ff\\u3400-\\u4db5\\u4e00-\\u9fcc]/;\n  function isWordChar(ch) {\n    return /\\w/.test(ch) || ch > \"\\x80\" &&\n      (ch.toUpperCase() != ch.toLowerCase() || nonASCIISingleCaseWordChar.test(ch));\n  }\n\n  function isEmpty(obj) {\n    for (var n in obj) if (obj.hasOwnProperty(n) && obj[n]) return false;\n    return true;\n  }\n\n  var 
isExtendingChar = /[\\u0300-\\u036F\\u0483-\\u0487\\u0488-\\u0489\\u0591-\\u05BD\\u05BF\\u05C1-\\u05C2\\u05C4-\\u05C5\\u05C7\\u0610-\\u061A\\u064B-\\u065F\\u0670\\u06D6-\\u06DC\\u06DF-\\u06E4\\u06E7-\\u06E8\\u06EA-\\u06ED\\uA66F\\uA670-\\uA672\\uA674-\\uA67D\\uA69F\\udc00-\\udfff]/;\n\n  // DOM UTILITIES\n\n  function elt(tag, content, className, style) {\n    var e = document.createElement(tag);\n    if (className) e.className = className;\n    if (style) e.style.cssText = style;\n    if (typeof content == \"string\") setTextContent(e, content);\n    else if (content) for (var i = 0; i < content.length; ++i) e.appendChild(content[i]);\n    return e;\n  }\n\n  function removeChildren(e) {\n    for (var count = e.childNodes.length; count > 0; --count)\n      e.removeChild(e.firstChild);\n    return e;\n  }\n\n  function removeChildrenAndAdd(parent, e) {\n    return removeChildren(parent).appendChild(e);\n  }\n\n  function setTextContent(e, str) {\n    if (ie_lt9) {\n      e.innerHTML = \"\";\n      e.appendChild(document.createTextNode(str));\n    } else e.textContent = str;\n  }\n\n  function getRect(node) {\n    return node.getBoundingClientRect();\n  }\n  CodeMirror.replaceGetRect = function(f) { getRect = f; };\n\n  // FEATURE DETECTION\n\n  // Detect drag-and-drop\n  var dragAndDrop = function() {\n    // There is *some* kind of drag-and-drop support in IE6-8, but I\n    // couldn't get it to work yet.\n    if (ie_lt9) return false;\n    var div = elt('div');\n    return \"draggable\" in div || \"dragDrop\" in div;\n  }();\n\n  // For a reason I have yet to figure out, some browsers disallow\n  // word wrapping between certain characters *only* if a new inline\n  // element is started between them. This makes it hard to reliably\n  // measure the position of things, since that requires inserting an\n  // extra span. This terribly fragile set of regexps matches the\n  // character combinations that suffer from this phenomenon on the\n  // various browsers.\n  var spanAffectsWrapping = /^$/; // Won't match any two-character string\n  if (gecko) spanAffectsWrapping = /$'/;\n  else if (safari && !/Version\\/([6-9]|\\d\\d)\\b/.test(navigator.userAgent)) spanAffectsWrapping = /\\-[^ \\-?]|\\?[^ !'\\\"\\),.\\-\\/:;\\?\\]\\}]/;\n  else if (webkit) spanAffectsWrapping = /[~!#%&*)=+}\\]|\\\"\\.>,:;][({[<]|-[^\\-?\\.]|\\?[\\w~`@#$%\\^&*(_=+{[|><]/;\n\n  var knownScrollbarWidth;\n  function scrollbarWidth(measure) {\n    if (knownScrollbarWidth != null) return knownScrollbarWidth;\n    var test = elt(\"div\", null, null, \"width: 50px; height: 50px; overflow-x: scroll\");\n    removeChildrenAndAdd(measure, test);\n    if (test.offsetWidth)\n      knownScrollbarWidth = test.offsetHeight - test.clientHeight;\n    return knownScrollbarWidth || 0;\n  }\n\n  var zwspSupported;\n  function zeroWidthElement(measure) {\n    if (zwspSupported == null) {\n      var test = elt(\"span\", \"\\u200b\");\n      removeChildrenAndAdd(measure, elt(\"span\", [test, document.createTextNode(\"x\")]));\n      if (measure.firstChild.offsetHeight != 0)\n        zwspSupported = test.offsetWidth <= 1 && test.offsetHeight > 2 && !ie_lt8;\n    }\n    if (zwspSupported) return elt(\"span\", \"\\u200b\");\n    else return elt(\"span\", \"\\u00a0\", null, \"display: inline-block; width: 1px; margin-right: -1px\");\n  }\n\n  // See if \"\".split is the broken IE version, if so, provide an\n  // alternative way to split lines.\n  var splitLines = \"\\n\\nb\".split(/\\n/).length != 3 ? 
function(string) {\n    var pos = 0, result = [], l = string.length;\n    while (pos <= l) {\n      var nl = string.indexOf(\"\\n\", pos);\n      if (nl == -1) nl = string.length;\n      var line = string.slice(pos, string.charAt(nl - 1) == \"\\r\" ? nl - 1 : nl);\n      var rt = line.indexOf(\"\\r\");\n      if (rt != -1) {\n        result.push(line.slice(0, rt));\n        pos += rt + 1;\n      } else {\n        result.push(line);\n        pos = nl + 1;\n      }\n    }\n    return result;\n  } : function(string){return string.split(/\\r\\n?|\\n/);};\n  CodeMirror.splitLines = splitLines;\n\n  var hasSelection = window.getSelection ? function(te) {\n    try { return te.selectionStart != te.selectionEnd; }\n    catch(e) { return false; }\n  } : function(te) {\n    try {var range = te.ownerDocument.selection.createRange();}\n    catch(e) {}\n    if (!range || range.parentElement() != te) return false;\n    return range.compareEndPoints(\"StartToEnd\", range) != 0;\n  };\n\n  var hasCopyEvent = (function() {\n    var e = elt(\"div\");\n    if (\"oncopy\" in e) return true;\n    e.setAttribute(\"oncopy\", \"return;\");\n    return typeof e.oncopy == 'function';\n  })();\n\n  // KEY NAMING\n\n  var keyNames = {3: \"Enter\", 8: \"Backspace\", 9: \"Tab\", 13: \"Enter\", 16: \"Shift\", 17: \"Ctrl\", 18: \"Alt\",\n                  19: \"Pause\", 20: \"CapsLock\", 27: \"Esc\", 32: \"Space\", 33: \"PageUp\", 34: \"PageDown\", 35: \"End\",\n                  36: \"Home\", 37: \"Left\", 38: \"Up\", 39: \"Right\", 40: \"Down\", 44: \"PrintScrn\", 45: \"Insert\",\n                  46: \"Delete\", 59: \";\", 91: \"Mod\", 92: \"Mod\", 93: \"Mod\", 109: \"-\", 107: \"=\", 127: \"Delete\",\n                  186: \";\", 187: \"=\", 188: \",\", 189: \"-\", 190: \".\", 191: \"/\", 192: \"`\", 219: \"[\", 220: \"\\\\\",\n                  221: \"]\", 222: \"'\", 63276: \"PageUp\", 63277: \"PageDown\", 63275: \"End\", 63273: \"Home\",\n                  63234: \"Left\", 63232: \"Up\", 63235: \"Right\", 63233: \"Down\", 63302: \"Insert\", 63272: \"Delete\"};\n  CodeMirror.keyNames = keyNames;\n  (function() {\n    // Number keys\n    for (var i = 0; i < 10; i++) keyNames[i + 48] = String(i);\n    // Alphabetic keys\n    for (var i = 65; i <= 90; i++) keyNames[i] = String.fromCharCode(i);\n    // Function keys\n    for (var i = 1; i <= 12; i++) keyNames[i + 111] = keyNames[i + 63235] = \"F\" + i;\n  })();\n\n  // BIDI HELPERS\n\n  function iterateBidiSections(order, from, to, f) {\n    if (!order) return f(from, to, \"ltr\");\n    for (var i = 0; i < order.length; ++i) {\n      var part = order[i];\n      if (part.from < to && part.to > from || from == to && part.to == from)\n        f(Math.max(part.from, from), Math.min(part.to, to), part.level == 1 ? \"rtl\" : \"ltr\");\n    }\n  }\n\n  function bidiLeft(part) { return part.level % 2 ? part.to : part.from; }\n  function bidiRight(part) { return part.level % 2 ? part.from : part.to; }\n\n  function lineLeft(line) { var order = getOrder(line); return order ? bidiLeft(order[0]) : 0; }\n  function lineRight(line) {\n    var order = getOrder(line);\n    if (!order) return line.text.length;\n    return bidiRight(lst(order));\n  }\n\n  function lineStart(cm, lineN) {\n    var line = getLine(cm.doc, lineN);\n    var visual = visualLine(cm.doc, line);\n    if (visual != line) lineN = lineNo(visual);\n    var order = getOrder(visual);\n    var ch = !order ? 0 : order[0].level % 2 ? 
lineRight(visual) : lineLeft(visual);\n    return Pos(lineN, ch);\n  }\n  function lineEnd(cm, lineN) {\n    var merged, line;\n    while (merged = collapsedSpanAtEnd(line = getLine(cm.doc, lineN)))\n      lineN = merged.find().to.line;\n    var order = getOrder(line);\n    var ch = !order ? line.text.length : order[0].level % 2 ? lineLeft(line) : lineRight(line);\n    return Pos(lineN, ch);\n  }\n\n  // This is somewhat involved. It is needed in order to move\n  // 'visually' through bi-directional text -- i.e., pressing left\n  // should make the cursor go left, even when in RTL text. The\n  // tricky part is the 'jumps', where RTL and LTR text touch each\n  // other. This often requires the cursor offset to move more than\n  // one unit, in order to visually move one unit.\n  function moveVisually(line, start, dir, byUnit) {\n    var bidi = getOrder(line);\n    if (!bidi) return moveLogically(line, start, dir, byUnit);\n    var moveOneUnit = byUnit ? function(pos, dir) {\n      do pos += dir;\n      while (pos > 0 && isExtendingChar.test(line.text.charAt(pos)));\n      return pos;\n    } : function(pos, dir) { return pos + dir; };\n    var linedir = bidi[0].level;\n    for (var i = 0; i < bidi.length; ++i) {\n      var part = bidi[i], sticky = part.level % 2 == linedir;\n      if ((part.from < start && part.to > start) ||\n          (sticky && (part.from == start || part.to == start))) break;\n    }\n    var target = moveOneUnit(start, part.level % 2 ? -dir : dir);\n\n    while (target != null) {\n      if (part.level % 2 == linedir) {\n        if (target < part.from || target > part.to) {\n          part = bidi[i += dir];\n          target = part && (dir > 0 == part.level % 2 ? moveOneUnit(part.to, -1) : moveOneUnit(part.from, 1));\n        } else break;\n      } else {\n        if (target == bidiLeft(part)) {\n          part = bidi[--i];\n          target = part && bidiRight(part);\n        } else if (target == bidiRight(part)) {\n          part = bidi[++i];\n          target = part && bidiLeft(part);\n        } else break;\n      }\n    }\n\n    return target < 0 || target > line.text.length ? null : target;\n  }\n\n  function moveLogically(line, start, dir, byUnit) {\n    var target = start + dir;\n    if (byUnit) while (target > 0 && isExtendingChar.test(line.text.charAt(target))) target += dir;\n    return target < 0 || target > line.text.length ? 
null : target;\n  }\n\n  // Bidirectional ordering algorithm\n  // See http://unicode.org/reports/tr9/tr9-13.html for the algorithm\n  // that this (partially) implements.\n\n  // One-char codes used for character types:\n  // L (L):   Left-to-Right\n  // R (R):   Right-to-Left\n  // r (AL):  Right-to-Left Arabic\n  // 1 (EN):  European Number\n  // + (ES):  European Number Separator\n  // % (ET):  European Number Terminator\n  // n (AN):  Arabic Number\n  // , (CS):  Common Number Separator\n  // m (NSM): Non-Spacing Mark\n  // b (BN):  Boundary Neutral\n  // s (B):   Paragraph Separator\n  // t (S):   Segment Separator\n  // w (WS):  Whitespace\n  // N (ON):  Other Neutrals\n\n  // Returns null if characters are ordered as they appear\n  // (left-to-right), or an array of sections ({from, to, level}\n  // objects) in the order in which they occur visually.\n  var bidiOrdering = (function() {\n    // Character types for codepoints 0 to 0xff\n    var lowTypes = \"bbbbbbbbbtstwsbbbbbbbbbbbbbbssstwNN%%%NNNNNN,N,N1111111111NNNNNNNLLLLLLLLLLLLLLLLLLLLLLLLLLNNNNNNLLLLLLLLLLLLLLLLLLLLLLLLLLNNNNbbbbbbsbbbbbbbbbbbbbbbbbbbbbbbbbb,N%%%%NNNNLNNNNN%%11NLNNN1LNNNNNLLLLLLLLLLLLLLLLLLLLLLLNLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLNLLLLLLLL\";\n    // Character types for codepoints 0x600 to 0x6ff\n    var arabicTypes = \"rrrrrrrrrrrr,rNNmmmmmmrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrmmmmmmmmmmmmmmrrrrrrrnnnnnnnnnn%nnrrrmrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrmmmmmmmmmmmmmmmmmmmNmmmmrrrrrrrrrrrrrrrrrr\";\n    function charType(code) {\n      if (code <= 0xff) return lowTypes.charAt(code);\n      else if (0x590 <= code && code <= 0x5f4) return \"R\";\n      else if (0x600 <= code && code <= 0x6ff) return arabicTypes.charAt(code - 0x600);\n      else if (0x700 <= code && code <= 0x8ac) return \"r\";\n      else return \"L\";\n    }\n\n    var bidiRE = /[\\u0590-\\u05f4\\u0600-\\u06ff\\u0700-\\u08ac]/;\n    var isNeutral = /[stwN]/, isStrong = /[LRr]/, countsAsLeft = /[Lb1n]/, countsAsNum = /[1n]/;\n    // Browsers seem to always treat the boundaries of block elements as being L.\n    var outerType = \"L\";\n\n    return function(str) {\n      if (!bidiRE.test(str)) return false;\n      var len = str.length, types = [];\n      for (var i = 0, type; i < len; ++i)\n        types.push(type = charType(str.charCodeAt(i)));\n\n      // W1. Examine each non-spacing mark (NSM) in the level run, and\n      // change the type of the NSM to the type of the previous\n      // character. If the NSM is at the start of the level run, it will\n      // get the type of sor.\n      for (var i = 0, prev = outerType; i < len; ++i) {\n        var type = types[i];\n        if (type == \"m\") types[i] = prev;\n        else prev = type;\n      }\n\n      // W2. Search backwards from each instance of a European number\n      // until the first strong type (R, L, AL, or sor) is found. If an\n      // AL is found, change the type of the European number to Arabic\n      // number.\n      // W3. Change all ALs to R.\n      for (var i = 0, cur = outerType; i < len; ++i) {\n        var type = types[i];\n        if (type == \"1\" && cur == \"r\") types[i] = \"n\";\n        else if (isStrong.test(type)) { cur = type; if (type == \"r\") types[i] = \"R\"; }\n      }\n\n      // W4. A single European separator between two European numbers\n      // changes to a European number. 
A single common separator between\n      // two numbers of the same type changes to that type.\n      for (var i = 1, prev = types[0]; i < len - 1; ++i) {\n        var type = types[i];\n        if (type == \"+\" && prev == \"1\" && types[i+1] == \"1\") types[i] = \"1\";\n        else if (type == \",\" && prev == types[i+1] &&\n                 (prev == \"1\" || prev == \"n\")) types[i] = prev;\n        prev = type;\n      }\n\n      // W5. A sequence of European terminators adjacent to European\n      // numbers changes to all European numbers.\n      // W6. Otherwise, separators and terminators change to Other\n      // Neutral.\n      for (var i = 0; i < len; ++i) {\n        var type = types[i];\n        if (type == \",\") types[i] = \"N\";\n        else if (type == \"%\") {\n          for (var end = i + 1; end < len && types[end] == \"%\"; ++end) {}\n          var replace = (i && types[i-1] == \"!\") || (end < len - 1 && types[end] == \"1\") ? \"1\" : \"N\";\n          for (var j = i; j < end; ++j) types[j] = replace;\n          i = end - 1;\n        }\n      }\n\n      // W7. Search backwards from each instance of a European number\n      // until the first strong type (R, L, or sor) is found. If an L is\n      // found, then change the type of the European number to L.\n      for (var i = 0, cur = outerType; i < len; ++i) {\n        var type = types[i];\n        if (cur == \"L\" && type == \"1\") types[i] = \"L\";\n        else if (isStrong.test(type)) cur = type;\n      }\n\n      // N1. A sequence of neutrals takes the direction of the\n      // surrounding strong text if the text on both sides has the same\n      // direction. European and Arabic numbers act as if they were R in\n      // terms of their influence on neutrals. Start-of-level-run (sor)\n      // and end-of-level-run (eor) are used at level run boundaries.\n      // N2. Any remaining neutrals take the embedding direction.\n      for (var i = 0; i < len; ++i) {\n        if (isNeutral.test(types[i])) {\n          for (var end = i + 1; end < len && isNeutral.test(types[end]); ++end) {}\n          var before = (i ? types[i-1] : outerType) == \"L\";\n          var after = (end < len - 1 ? types[end] : outerType) == \"L\";\n          var replace = before || after ? \"L\" : \"R\";\n          for (var j = i; j < end; ++j) types[j] = replace;\n          i = end - 1;\n        }\n      }\n\n      // Here we depart from the documented algorithm, in order to avoid\n      // building up an actual levels array. 
Since there are only three\n      // levels (0, 1, 2) in an implementation that doesn't take\n      // explicit embedding into account, we can build up the order on\n      // the fly, without following the level-based algorithm.\n      var order = [], m;\n      for (var i = 0; i < len;) {\n        if (countsAsLeft.test(types[i])) {\n          var start = i;\n          for (++i; i < len && countsAsLeft.test(types[i]); ++i) {}\n          order.push({from: start, to: i, level: 0});\n        } else {\n          var pos = i, at = order.length;\n          for (++i; i < len && types[i] != \"L\"; ++i) {}\n          for (var j = pos; j < i;) {\n            if (countsAsNum.test(types[j])) {\n              if (pos < j) order.splice(at, 0, {from: pos, to: j, level: 1});\n              var nstart = j;\n              for (++j; j < i && countsAsNum.test(types[j]); ++j) {}\n              order.splice(at, 0, {from: nstart, to: j, level: 2});\n              pos = j;\n            } else ++j;\n          }\n          if (pos < i) order.splice(at, 0, {from: pos, to: i, level: 1});\n        }\n      }\n      if (order[0].level == 1 && (m = str.match(/^\\s+/))) {\n        order[0].from = m[0].length;\n        order.unshift({from: 0, to: m[0].length, level: 0});\n      }\n      if (lst(order).level == 1 && (m = str.match(/\\s+$/))) {\n        lst(order).to -= m[0].length;\n        order.push({from: len - m[0].length, to: len, level: 0});\n      }\n      if (order[0].level != lst(order).level)\n        order.push({from: len, to: len, level: order[0].level});\n\n      return order;\n    };\n  })();\n\n  // THE END\n\n  CodeMirror.version = \"3.11 +\";\n\n  return CodeMirror;\n})();\n"
  },
  {
    "path": "tronweb/js/plugins.js",
    "content": "// Avoid `console` errors in browsers that lack a console.\n(function() {\n    var method;\n    var noop = function () {};\n    var methods = [\n        'assert', 'clear', 'count', 'debug', 'dir', 'dirxml', 'error',\n        'exception', 'group', 'groupCollapsed', 'groupEnd', 'info', 'log',\n        'markTimeline', 'profile', 'profileEnd', 'table', 'time', 'timeEnd',\n        'timeStamp', 'trace', 'warn'\n    ];\n    var length = methods.length;\n    var console = (window.console = window.console || {});\n\n    while (length--) {\n        method = methods[length];\n\n        // Only stub undefined methods.\n        if (!console[method]) {\n            console[method] = noop;\n        }\n    }\n}());\n\n// Place any jQuery/helper plugins in here.\n"
  },
  {
    "path": "tronweb/js/underscore-min.js",
    "content": "(function(){var n=this,t=n._,r={},e=Array.prototype,u=Object.prototype,i=Function.prototype,a=e.push,o=e.slice,c=e.concat,l=u.toString,f=u.hasOwnProperty,s=e.forEach,p=e.map,h=e.reduce,v=e.reduceRight,d=e.filter,g=e.every,m=e.some,y=e.indexOf,b=e.lastIndexOf,x=Array.isArray,_=Object.keys,j=i.bind,w=function(n){return n instanceof w?n:this instanceof w?(this._wrapped=n,void 0):new w(n)};\"undefined\"!=typeof exports?(\"undefined\"!=typeof module&&module.exports&&(exports=module.exports=w),exports._=w):n._=w,w.VERSION=\"1.4.4\";var A=w.each=w.forEach=function(n,t,e){if(null!=n)if(s&&n.forEach===s)n.forEach(t,e);else if(n.length===+n.length){for(var u=0,i=n.length;i>u;u++)if(t.call(e,n[u],u,n)===r)return}else for(var a in n)if(w.has(n,a)&&t.call(e,n[a],a,n)===r)return};w.map=w.collect=function(n,t,r){var e=[];return null==n?e:p&&n.map===p?n.map(t,r):(A(n,function(n,u,i){e[e.length]=t.call(r,n,u,i)}),e)};var O=\"Reduce of empty array with no initial value\";w.reduce=w.foldl=w.inject=function(n,t,r,e){var u=arguments.length>2;if(null==n&&(n=[]),h&&n.reduce===h)return e&&(t=w.bind(t,e)),u?n.reduce(t,r):n.reduce(t);if(A(n,function(n,i,a){u?r=t.call(e,r,n,i,a):(r=n,u=!0)}),!u)throw new TypeError(O);return r},w.reduceRight=w.foldr=function(n,t,r,e){var u=arguments.length>2;if(null==n&&(n=[]),v&&n.reduceRight===v)return e&&(t=w.bind(t,e)),u?n.reduceRight(t,r):n.reduceRight(t);var i=n.length;if(i!==+i){var a=w.keys(n);i=a.length}if(A(n,function(o,c,l){c=a?a[--i]:--i,u?r=t.call(e,r,n[c],c,l):(r=n[c],u=!0)}),!u)throw new TypeError(O);return r},w.find=w.detect=function(n,t,r){var e;return E(n,function(n,u,i){return t.call(r,n,u,i)?(e=n,!0):void 0}),e},w.filter=w.select=function(n,t,r){var e=[];return null==n?e:d&&n.filter===d?n.filter(t,r):(A(n,function(n,u,i){t.call(r,n,u,i)&&(e[e.length]=n)}),e)},w.reject=function(n,t,r){return w.filter(n,function(n,e,u){return!t.call(r,n,e,u)},r)},w.every=w.all=function(n,t,e){t||(t=w.identity);var u=!0;return null==n?u:g&&n.every===g?n.every(t,e):(A(n,function(n,i,a){return(u=u&&t.call(e,n,i,a))?void 0:r}),!!u)};var E=w.some=w.any=function(n,t,e){t||(t=w.identity);var u=!1;return null==n?u:m&&n.some===m?n.some(t,e):(A(n,function(n,i,a){return u||(u=t.call(e,n,i,a))?r:void 0}),!!u)};w.contains=w.include=function(n,t){return null==n?!1:y&&n.indexOf===y?n.indexOf(t)!=-1:E(n,function(n){return n===t})},w.invoke=function(n,t){var r=o.call(arguments,2),e=w.isFunction(t);return w.map(n,function(n){return(e?t:n[t]).apply(n,r)})},w.pluck=function(n,t){return w.map(n,function(n){return n[t]})},w.where=function(n,t,r){return w.isEmpty(t)?r?null:[]:w[r?\"find\":\"filter\"](n,function(n){for(var r in t)if(t[r]!==n[r])return!1;return!0})},w.findWhere=function(n,t){return w.where(n,t,!0)},w.max=function(n,t,r){if(!t&&w.isArray(n)&&n[0]===+n[0]&&65535>n.length)return Math.max.apply(Math,n);if(!t&&w.isEmpty(n))return-1/0;var e={computed:-1/0,value:-1/0};return A(n,function(n,u,i){var a=t?t.call(r,n,u,i):n;a>=e.computed&&(e={value:n,computed:a})}),e.value},w.min=function(n,t,r){if(!t&&w.isArray(n)&&n[0]===+n[0]&&65535>n.length)return Math.min.apply(Math,n);if(!t&&w.isEmpty(n))return 1/0;var e={computed:1/0,value:1/0};return A(n,function(n,u,i){var a=t?t.call(r,n,u,i):n;e.computed>a&&(e={value:n,computed:a})}),e.value},w.shuffle=function(n){var t,r=0,e=[];return A(n,function(n){t=w.random(r++),e[r-1]=e[t],e[t]=n}),e};var k=function(n){return w.isFunction(n)?n:function(t){return t[n]}};w.sortBy=function(n,t,r){var e=k(t);return 
w.pluck(w.map(n,function(n,t,u){return{value:n,index:t,criteria:e.call(r,n,t,u)}}).sort(function(n,t){var r=n.criteria,e=t.criteria;if(r!==e){if(r>e||r===void 0)return 1;if(e>r||e===void 0)return-1}return n.index<t.index?-1:1}),\"value\")};var F=function(n,t,r,e){var u={},i=k(t||w.identity);return A(n,function(t,a){var o=i.call(r,t,a,n);e(u,o,t)}),u};w.groupBy=function(n,t,r){return F(n,t,r,function(n,t,r){(w.has(n,t)?n[t]:n[t]=[]).push(r)})},w.countBy=function(n,t,r){return F(n,t,r,function(n,t){w.has(n,t)||(n[t]=0),n[t]++})},w.sortedIndex=function(n,t,r,e){r=null==r?w.identity:k(r);for(var u=r.call(e,t),i=0,a=n.length;a>i;){var o=i+a>>>1;u>r.call(e,n[o])?i=o+1:a=o}return i},w.toArray=function(n){return n?w.isArray(n)?o.call(n):n.length===+n.length?w.map(n,w.identity):w.values(n):[]},w.size=function(n){return null==n?0:n.length===+n.length?n.length:w.keys(n).length},w.first=w.head=w.take=function(n,t,r){return null==n?void 0:null==t||r?n[0]:o.call(n,0,t)},w.initial=function(n,t,r){return o.call(n,0,n.length-(null==t||r?1:t))},w.last=function(n,t,r){return null==n?void 0:null==t||r?n[n.length-1]:o.call(n,Math.max(n.length-t,0))},w.rest=w.tail=w.drop=function(n,t,r){return o.call(n,null==t||r?1:t)},w.compact=function(n){return w.filter(n,w.identity)};var R=function(n,t,r){return A(n,function(n){w.isArray(n)?t?a.apply(r,n):R(n,t,r):r.push(n)}),r};w.flatten=function(n,t){return R(n,t,[])},w.without=function(n){return w.difference(n,o.call(arguments,1))},w.uniq=w.unique=function(n,t,r,e){w.isFunction(t)&&(e=r,r=t,t=!1);var u=r?w.map(n,r,e):n,i=[],a=[];return A(u,function(r,e){(t?e&&a[a.length-1]===r:w.contains(a,r))||(a.push(r),i.push(n[e]))}),i},w.union=function(){return w.uniq(c.apply(e,arguments))},w.intersection=function(n){var t=o.call(arguments,1);return w.filter(w.uniq(n),function(n){return w.every(t,function(t){return w.indexOf(t,n)>=0})})},w.difference=function(n){var t=c.apply(e,o.call(arguments,1));return w.filter(n,function(n){return!w.contains(t,n)})},w.zip=function(){for(var n=o.call(arguments),t=w.max(w.pluck(n,\"length\")),r=Array(t),e=0;t>e;e++)r[e]=w.pluck(n,\"\"+e);return r},w.object=function(n,t){if(null==n)return{};for(var r={},e=0,u=n.length;u>e;e++)t?r[n[e]]=t[e]:r[n[e][0]]=n[e][1];return r},w.indexOf=function(n,t,r){if(null==n)return-1;var e=0,u=n.length;if(r){if(\"number\"!=typeof r)return e=w.sortedIndex(n,t),n[e]===t?e:-1;e=0>r?Math.max(0,u+r):r}if(y&&n.indexOf===y)return n.indexOf(t,r);for(;u>e;e++)if(n[e]===t)return e;return-1},w.lastIndexOf=function(n,t,r){if(null==n)return-1;var e=null!=r;if(b&&n.lastIndexOf===b)return e?n.lastIndexOf(t,r):n.lastIndexOf(t);for(var u=e?r:n.length;u--;)if(n[u]===t)return u;return-1},w.range=function(n,t,r){1>=arguments.length&&(t=n||0,n=0),r=arguments[2]||1;for(var e=Math.max(Math.ceil((t-n)/r),0),u=0,i=Array(e);e>u;)i[u++]=n,n+=r;return i},w.bind=function(n,t){if(n.bind===j&&j)return j.apply(n,o.call(arguments,1));var r=o.call(arguments,2);return function(){return n.apply(t,r.concat(o.call(arguments)))}},w.partial=function(n){var t=o.call(arguments,1);return function(){return n.apply(this,t.concat(o.call(arguments)))}},w.bindAll=function(n){var t=o.call(arguments,1);return 0===t.length&&(t=w.functions(n)),A(t,function(t){n[t]=w.bind(n[t],n)}),n},w.memoize=function(n,t){var r={};return t||(t=w.identity),function(){var e=t.apply(this,arguments);return w.has(r,e)?r[e]:r[e]=n.apply(this,arguments)}},w.delay=function(n,t){var r=o.call(arguments,2);return setTimeout(function(){return n.apply(null,r)},t)},w.defer=function(n){return 
w.delay.apply(w,[n,1].concat(o.call(arguments,1)))},w.throttle=function(n,t){var r,e,u,i,a=0,o=function(){a=new Date,u=null,i=n.apply(r,e)};return function(){var c=new Date,l=t-(c-a);return r=this,e=arguments,0>=l?(clearTimeout(u),u=null,a=c,i=n.apply(r,e)):u||(u=setTimeout(o,l)),i}},w.debounce=function(n,t,r){var e,u;return function(){var i=this,a=arguments,o=function(){e=null,r||(u=n.apply(i,a))},c=r&&!e;return clearTimeout(e),e=setTimeout(o,t),c&&(u=n.apply(i,a)),u}},w.once=function(n){var t,r=!1;return function(){return r?t:(r=!0,t=n.apply(this,arguments),n=null,t)}},w.wrap=function(n,t){return function(){var r=[n];return a.apply(r,arguments),t.apply(this,r)}},w.compose=function(){var n=arguments;return function(){for(var t=arguments,r=n.length-1;r>=0;r--)t=[n[r].apply(this,t)];return t[0]}},w.after=function(n,t){return 0>=n?t():function(){return 1>--n?t.apply(this,arguments):void 0}},w.keys=_||function(n){if(n!==Object(n))throw new TypeError(\"Invalid object\");var t=[];for(var r in n)w.has(n,r)&&(t[t.length]=r);return t},w.values=function(n){var t=[];for(var r in n)w.has(n,r)&&t.push(n[r]);return t},w.pairs=function(n){var t=[];for(var r in n)w.has(n,r)&&t.push([r,n[r]]);return t},w.invert=function(n){var t={};for(var r in n)w.has(n,r)&&(t[n[r]]=r);return t},w.functions=w.methods=function(n){var t=[];for(var r in n)w.isFunction(n[r])&&t.push(r);return t.sort()},w.extend=function(n){return A(o.call(arguments,1),function(t){if(t)for(var r in t)n[r]=t[r]}),n},w.pick=function(n){var t={},r=c.apply(e,o.call(arguments,1));return A(r,function(r){r in n&&(t[r]=n[r])}),t},w.omit=function(n){var t={},r=c.apply(e,o.call(arguments,1));for(var u in n)w.contains(r,u)||(t[u]=n[u]);return t},w.defaults=function(n){return A(o.call(arguments,1),function(t){if(t)for(var r in t)null==n[r]&&(n[r]=t[r])}),n},w.clone=function(n){return w.isObject(n)?w.isArray(n)?n.slice():w.extend({},n):n},w.tap=function(n,t){return t(n),n};var I=function(n,t,r,e){if(n===t)return 0!==n||1/n==1/t;if(null==n||null==t)return n===t;n instanceof w&&(n=n._wrapped),t instanceof w&&(t=t._wrapped);var u=l.call(n);if(u!=l.call(t))return!1;switch(u){case\"[object String]\":return n==t+\"\";case\"[object Number]\":return n!=+n?t!=+t:0==n?1/n==1/t:n==+t;case\"[object Date]\":case\"[object Boolean]\":return+n==+t;case\"[object RegExp]\":return n.source==t.source&&n.global==t.global&&n.multiline==t.multiline&&n.ignoreCase==t.ignoreCase}if(\"object\"!=typeof n||\"object\"!=typeof t)return!1;for(var i=r.length;i--;)if(r[i]==n)return e[i]==t;r.push(n),e.push(t);var a=0,o=!0;if(\"[object Array]\"==u){if(a=n.length,o=a==t.length)for(;a--&&(o=I(n[a],t[a],r,e)););}else{var c=n.constructor,f=t.constructor;if(c!==f&&!(w.isFunction(c)&&c instanceof c&&w.isFunction(f)&&f instanceof f))return!1;for(var s in n)if(w.has(n,s)&&(a++,!(o=w.has(t,s)&&I(n[s],t[s],r,e))))break;if(o){for(s in t)if(w.has(t,s)&&!a--)break;o=!a}}return r.pop(),e.pop(),o};w.isEqual=function(n,t){return I(n,t,[],[])},w.isEmpty=function(n){if(null==n)return!0;if(w.isArray(n)||w.isString(n))return 0===n.length;for(var t in n)if(w.has(n,t))return!1;return!0},w.isElement=function(n){return!(!n||1!==n.nodeType)},w.isArray=x||function(n){return\"[object Array]\"==l.call(n)},w.isObject=function(n){return n===Object(n)},A([\"Arguments\",\"Function\",\"String\",\"Number\",\"Date\",\"RegExp\"],function(n){w[\"is\"+n]=function(t){return l.call(t)==\"[object 
\"+n+\"]\"}}),w.isArguments(arguments)||(w.isArguments=function(n){return!(!n||!w.has(n,\"callee\"))}),\"function\"!=typeof/./&&(w.isFunction=function(n){return\"function\"==typeof n}),w.isFinite=function(n){return isFinite(n)&&!isNaN(parseFloat(n))},w.isNaN=function(n){return w.isNumber(n)&&n!=+n},w.isBoolean=function(n){return n===!0||n===!1||\"[object Boolean]\"==l.call(n)},w.isNull=function(n){return null===n},w.isUndefined=function(n){return n===void 0},w.has=function(n,t){return f.call(n,t)},w.noConflict=function(){return n._=t,this},w.identity=function(n){return n},w.times=function(n,t,r){for(var e=Array(n),u=0;n>u;u++)e[u]=t.call(r,u);return e},w.random=function(n,t){return null==t&&(t=n,n=0),n+Math.floor(Math.random()*(t-n+1))};var M={escape:{\"&\":\"&amp;\",\"<\":\"&lt;\",\">\":\"&gt;\",'\"':\"&quot;\",\"'\":\"&#x27;\",\"/\":\"&#x2F;\"}};M.unescape=w.invert(M.escape);var S={escape:RegExp(\"[\"+w.keys(M.escape).join(\"\")+\"]\",\"g\"),unescape:RegExp(\"(\"+w.keys(M.unescape).join(\"|\")+\")\",\"g\")};w.each([\"escape\",\"unescape\"],function(n){w[n]=function(t){return null==t?\"\":(\"\"+t).replace(S[n],function(t){return M[n][t]})}}),w.result=function(n,t){if(null==n)return null;var r=n[t];return w.isFunction(r)?r.call(n):r},w.mixin=function(n){A(w.functions(n),function(t){var r=w[t]=n[t];w.prototype[t]=function(){var n=[this._wrapped];return a.apply(n,arguments),D.call(this,r.apply(w,n))}})};var N=0;w.uniqueId=function(n){var t=++N+\"\";return n?n+t:t},w.templateSettings={evaluate:/<%([\\s\\S]+?)%>/g,interpolate:/<%=([\\s\\S]+?)%>/g,escape:/<%-([\\s\\S]+?)%>/g};var T=/(.)^/,q={\"'\":\"'\",\"\\\\\":\"\\\\\",\"\\r\":\"r\",\"\\n\":\"n\",\"\t\":\"t\",\"\\u2028\":\"u2028\",\"\\u2029\":\"u2029\"},B=/\\\\|'|\\r|\\n|\\t|\\u2028|\\u2029/g;w.template=function(n,t,r){var e;r=w.defaults({},r,w.templateSettings);var u=RegExp([(r.escape||T).source,(r.interpolate||T).source,(r.evaluate||T).source].join(\"|\")+\"|$\",\"g\"),i=0,a=\"__p+='\";n.replace(u,function(t,r,e,u,o){return a+=n.slice(i,o).replace(B,function(n){return\"\\\\\"+q[n]}),r&&(a+=\"'+\\n((__t=(\"+r+\"))==null?'':_.escape(__t))+\\n'\"),e&&(a+=\"'+\\n((__t=(\"+e+\"))==null?'':__t)+\\n'\"),u&&(a+=\"';\\n\"+u+\"\\n__p+='\"),i=o+t.length,t}),a+=\"';\\n\",r.variable||(a=\"with(obj||{}){\\n\"+a+\"}\\n\"),a=\"var __t,__p='',__j=Array.prototype.join,\"+\"print=function(){__p+=__j.call(arguments,'');};\\n\"+a+\"return __p;\\n\";try{e=Function(r.variable||\"obj\",\"_\",a)}catch(o){throw o.source=a,o}if(t)return e(t,w);var c=function(n){return e.call(this,n,w)};return c.source=\"function(\"+(r.variable||\"obj\")+\"){\\n\"+a+\"}\",c},w.chain=function(n){return w(n).chain()};var D=function(n){return this._chain?w(n).chain():n};w.mixin(w),A([\"pop\",\"push\",\"reverse\",\"shift\",\"sort\",\"splice\",\"unshift\"],function(n){var t=e[n];w.prototype[n]=function(){var r=this._wrapped;return t.apply(r,arguments),\"shift\"!=n&&\"splice\"!=n||0!==r.length||delete r[0],D.call(this,r)}}),A([\"concat\",\"join\",\"slice\"],function(n){var t=e[n];w.prototype[n]=function(){return D.call(this,t.apply(this._wrapped,arguments))}}),w.extend(w.prototype,{chain:function(){return this._chain=!0,this},value:function(){return this._wrapped}})}).call(this);\n"
  },
  {
    "path": "tronweb/js/underscore.extra.js",
    "content": "_.mixin({\n\n  /* take elements from list while callback condition is met */\n  takeWhile: function(list, callback, context) {\n    var xs = [];\n    _.any(list, function(item, index, list) {\n      var res = callback.call(context, item, index, list);\n      if (res) {\n        xs.push(item);\n        return false;\n      } else {\n        return true;\n      }\n    });\n    return xs;\n  },\n\n  /* Build an object with [key, value] from pair list or callback */\n  mash: function(list, callback, context) {\n    var pair_callback = callback || _.identity;\n    return _.reduce(list, function(obj, value, index, list) {\n      var pair = pair_callback.call(context, value, index, list);\n      if (typeof pair == \"object\" && pair.length == 2) {\n        obj[pair[0]] = pair[1];\n      }\n      return obj;\n    }, {});\n  },\n\n  /* Return pairs [key, value] of object */\n  pairs: function(object) {\n    return _.map(object, function(value, key) {\n      return [key, value];\n    });\n  },\n\n})\n"
  },
  {
    "path": "tronweb/js/underscore.string.js",
    "content": "//  Underscore.string\n//  (c) 2010 Esa-Matti Suuronen <esa-matti aet suuronen dot org>\n//  Underscore.string is freely distributable under the terms of the MIT license.\n//  Documentation: https://github.com/epeli/underscore.string\n//  Some code is borrowed from MooTools and Alexandru Marasteanu.\n//  Version '2.3.0'\n\n!function(root, String){\n  'use strict';\n\n  // Defining helper functions.\n\n  var nativeTrim = String.prototype.trim;\n  var nativeTrimRight = String.prototype.trimRight;\n  var nativeTrimLeft = String.prototype.trimLeft;\n\n  var parseNumber = function(source) { return source * 1 || 0; };\n\n  var strRepeat = function(str, qty){\n    if (qty < 1) return '';\n    var result = '';\n    while (qty > 0) {\n      if (qty & 1) result += str;\n      qty >>= 1, str += str;\n    }\n    return result;\n  };\n\n  var slice = [].slice;\n\n  var defaultToWhiteSpace = function(characters) {\n    if (characters == null)\n      return '\\\\s';\n    else if (characters.source)\n      return characters.source;\n    else\n      return '[' + _s.escapeRegExp(characters) + ']';\n  };\n\n  var escapeChars = {\n    lt: '<',\n    gt: '>',\n    quot: '\"',\n    amp: '&',\n    apos: \"'\"\n  };\n\n  var reversedEscapeChars = {};\n  for(var key in escapeChars) reversedEscapeChars[escapeChars[key]] = key;\n  reversedEscapeChars[\"'\"] = '#39';\n\n  // sprintf() for JavaScript 0.7-beta1\n  // http://www.diveintojavascript.com/projects/javascript-sprintf\n  //\n  // Copyright (c) Alexandru Marasteanu <alexaholic [at) gmail (dot] com>\n  // All rights reserved.\n\n  var sprintf = (function() {\n    function get_type(variable) {\n      return Object.prototype.toString.call(variable).slice(8, -1).toLowerCase();\n    }\n\n    var str_repeat = strRepeat;\n\n    var str_format = function() {\n      if (!str_format.cache.hasOwnProperty(arguments[0])) {\n        str_format.cache[arguments[0]] = str_format.parse(arguments[0]);\n      }\n      return str_format.format.call(null, str_format.cache[arguments[0]], arguments);\n    };\n\n    str_format.format = function(parse_tree, argv) {\n      var cursor = 1, tree_length = parse_tree.length, node_type = '', arg, output = [], i, k, match, pad, pad_character, pad_length;\n      for (i = 0; i < tree_length; i++) {\n        node_type = get_type(parse_tree[i]);\n        if (node_type === 'string') {\n          output.push(parse_tree[i]);\n        }\n        else if (node_type === 'array') {\n          match = parse_tree[i]; // convenience purposes only\n          if (match[2]) { // keyword argument\n            arg = argv[cursor];\n            for (k = 0; k < match[2].length; k++) {\n              if (!arg.hasOwnProperty(match[2][k])) {\n                throw new Error(sprintf('[_.sprintf] property \"%s\" does not exist', match[2][k]));\n              }\n              arg = arg[match[2][k]];\n            }\n          } else if (match[1]) { // positional argument (explicit)\n            arg = argv[match[1]];\n          }\n          else { // positional argument (implicit)\n            arg = argv[cursor++];\n          }\n\n          if (/[^s]/.test(match[8]) && (get_type(arg) != 'number')) {\n            throw new Error(sprintf('[_.sprintf] expecting number but found %s', get_type(arg)));\n          }\n          switch (match[8]) {\n            case 'b': arg = arg.toString(2); break;\n            case 'c': arg = String.fromCharCode(arg); break;\n            case 'd': arg = parseInt(arg, 10); break;\n            case 'e': arg = match[7] ? 
arg.toExponential(match[7]) : arg.toExponential(); break;\n            case 'f': arg = match[7] ? parseFloat(arg).toFixed(match[7]) : parseFloat(arg); break;\n            case 'o': arg = arg.toString(8); break;\n            case 's': arg = ((arg = String(arg)) && match[7] ? arg.substring(0, match[7]) : arg); break;\n            case 'u': arg = Math.abs(arg); break;\n            case 'x': arg = arg.toString(16); break;\n            case 'X': arg = arg.toString(16).toUpperCase(); break;\n          }\n          arg = (/[def]/.test(match[8]) && match[3] && arg >= 0 ? '+'+ arg : arg);\n          pad_character = match[4] ? match[4] == '0' ? '0' : match[4].charAt(1) : ' ';\n          pad_length = match[6] - String(arg).length;\n          pad = match[6] ? str_repeat(pad_character, pad_length) : '';\n          output.push(match[5] ? arg + pad : pad + arg);\n        }\n      }\n      return output.join('');\n    };\n\n    str_format.cache = {};\n\n    str_format.parse = function(fmt) {\n      var _fmt = fmt, match = [], parse_tree = [], arg_names = 0;\n      while (_fmt) {\n        if ((match = /^[^\\x25]+/.exec(_fmt)) !== null) {\n          parse_tree.push(match[0]);\n        }\n        else if ((match = /^\\x25{2}/.exec(_fmt)) !== null) {\n          parse_tree.push('%');\n        }\n        else if ((match = /^\\x25(?:([1-9]\\d*)\\$|\\(([^\\)]+)\\))?(\\+)?(0|'[^$])?(-)?(\\d+)?(?:\\.(\\d+))?([b-fosuxX])/.exec(_fmt)) !== null) {\n          if (match[2]) {\n            arg_names |= 1;\n            var field_list = [], replacement_field = match[2], field_match = [];\n            if ((field_match = /^([a-z_][a-z_\\d]*)/i.exec(replacement_field)) !== null) {\n              field_list.push(field_match[1]);\n              while ((replacement_field = replacement_field.substring(field_match[0].length)) !== '') {\n                if ((field_match = /^\\.([a-z_][a-z_\\d]*)/i.exec(replacement_field)) !== null) {\n                  field_list.push(field_match[1]);\n                }\n                else if ((field_match = /^\\[(\\d+)\\]/.exec(replacement_field)) !== null) {\n                  field_list.push(field_match[1]);\n                }\n                else {\n                  throw new Error('[_.sprintf] huh?');\n                }\n              }\n            }\n            else {\n              throw new Error('[_.sprintf] huh?');\n            }\n            match[2] = field_list;\n          }\n          else {\n            arg_names |= 2;\n          }\n          if (arg_names === 3) {\n            throw new Error('[_.sprintf] mixing positional and named placeholders is not (yet) supported');\n          }\n          parse_tree.push(match);\n        }\n        else {\n          throw new Error('[_.sprintf] huh?');\n        }\n        _fmt = _fmt.substring(match[0].length);\n      }\n      return parse_tree;\n    };\n\n    return str_format;\n  })();\n\n\n\n  // Defining underscore.string\n\n  var _s = {\n\n    VERSION: '2.3.0',\n\n    isBlank: function(str){\n      if (str == null) str = '';\n      return (/^\\s*$/).test(str);\n    },\n\n    stripTags: function(str){\n      if (str == null) return '';\n      return String(str).replace(/<\\/?[^>]+>/g, '');\n    },\n\n    capitalize : function(str){\n      str = str == null ? '' : String(str);\n      return str.charAt(0).toUpperCase() + str.slice(1);\n    },\n\n    chop: function(str, step){\n      if (str == null) return [];\n      str = String(str);\n      step = ~~step;\n      return step > 0 ? 
str.match(new RegExp('.{1,' + step + '}', 'g')) : [str];\n    },\n\n    clean: function(str){\n      return _s.strip(str).replace(/\\s+/g, ' ');\n    },\n\n    count: function(str, substr){\n      if (str == null || substr == null) return 0;\n\n      str = String(str);\n      substr = String(substr);\n\n      var count = 0,\n        pos = 0,\n        length = substr.length;\n\n      while (true) {\n        pos = str.indexOf(substr, pos);\n        if (pos === -1) break;\n        count++;\n        pos += length;\n      }\n\n      return count;\n    },\n\n    chars: function(str) {\n      if (str == null) return [];\n      return String(str).split('');\n    },\n\n    swapCase: function(str) {\n      if (str == null) return '';\n      return String(str).replace(/\\S/g, function(c){\n        return c === c.toUpperCase() ? c.toLowerCase() : c.toUpperCase();\n      });\n    },\n\n    escapeHTML: function(str) {\n      if (str == null) return '';\n      return String(str).replace(/[&<>\"']/g, function(m){ return '&' + reversedEscapeChars[m] + ';'; });\n    },\n\n    unescapeHTML: function(str) {\n      if (str == null) return '';\n      return String(str).replace(/\\&([^;]+);/g, function(entity, entityCode){\n        var match;\n\n        if (entityCode in escapeChars) {\n          return escapeChars[entityCode];\n        } else if (match = entityCode.match(/^#x([\\da-fA-F]+)$/)) {\n          return String.fromCharCode(parseInt(match[1], 16));\n        } else if (match = entityCode.match(/^#(\\d+)$/)) {\n          return String.fromCharCode(~~match[1]);\n        } else {\n          return entity;\n        }\n      });\n    },\n\n    escapeRegExp: function(str){\n      if (str == null) return '';\n      return String(str).replace(/([.*+?^=!:${}()|[\\]\\/\\\\])/g, '\\\\$1');\n    },\n\n    splice: function(str, i, howmany, substr){\n      var arr = _s.chars(str);\n      arr.splice(~~i, ~~howmany, substr);\n      return arr.join('');\n    },\n\n    insert: function(str, i, substr){\n      return _s.splice(str, i, 0, substr);\n    },\n\n    include: function(str, needle){\n      if (needle === '') return true;\n      if (str == null) return false;\n      return String(str).indexOf(needle) !== -1;\n    },\n\n    join: function() {\n      var args = slice.call(arguments),\n        separator = args.shift();\n\n      if (separator == null) separator = '';\n\n      return args.join(separator);\n    },\n\n    lines: function(str) {\n      if (str == null) return [];\n      return String(str).split(\"\\n\");\n    },\n\n    reverse: function(str){\n      return _s.chars(str).reverse().join('');\n    },\n\n    startsWith: function(str, starts){\n      if (starts === '') return true;\n      if (str == null || starts == null) return false;\n      str = String(str); starts = String(starts);\n      return str.length >= starts.length && str.slice(0, starts.length) === starts;\n    },\n\n    endsWith: function(str, ends){\n      if (ends === '') return true;\n      if (str == null || ends == null) return false;\n      str = String(str); ends = String(ends);\n      return str.length >= ends.length && str.slice(str.length - ends.length) === ends;\n    },\n\n    succ: function(str){\n      if (str == null) return '';\n      str = String(str);\n      return str.slice(0, -1) + String.fromCharCode(str.charCodeAt(str.length-1) + 1);\n    },\n\n    titleize: function(str){\n      if (str == null) return '';\n      return String(str).replace(/(?:^|\\s)\\S/g, function(c){ return c.toUpperCase(); });\n    },\n\n    camelize: 
function(str){\n      return _s.trim(str).replace(/[-_\\s]+(.)?/g, function(match, c){ return c.toUpperCase(); });\n    },\n\n    underscored: function(str){\n      return _s.trim(str).replace(/([a-z\\d])([A-Z]+)/g, '$1_$2').replace(/[-\\s]+/g, '_').toLowerCase();\n    },\n\n    dasherize: function(str){\n      return _s.trim(str).replace(/([A-Z])/g, '-$1').replace(/[-_\\s]+/g, '-').toLowerCase();\n    },\n\n    classify: function(str){\n      return _s.titleize(String(str).replace(/[\\W_]/g, ' ')).replace(/\\s/g, '');\n    },\n\n    humanize: function(str){\n      return _s.capitalize(_s.underscored(str).replace(/_id$/,'').replace(/_/g, ' '));\n    },\n\n    trim: function(str, characters){\n      if (str == null) return '';\n      if (!characters && nativeTrim) return nativeTrim.call(str);\n      characters = defaultToWhiteSpace(characters);\n      return String(str).replace(new RegExp('\\^' + characters + '+|' + characters + '+$', 'g'), '');\n    },\n\n    ltrim: function(str, characters){\n      if (str == null) return '';\n      if (!characters && nativeTrimLeft) return nativeTrimLeft.call(str);\n      characters = defaultToWhiteSpace(characters);\n      return String(str).replace(new RegExp('^' + characters + '+'), '');\n    },\n\n    rtrim: function(str, characters){\n      if (str == null) return '';\n      if (!characters && nativeTrimRight) return nativeTrimRight.call(str);\n      characters = defaultToWhiteSpace(characters);\n      return String(str).replace(new RegExp(characters + '+$'), '');\n    },\n\n    truncate: function(str, length, truncateStr){\n      if (str == null) return '';\n      str = String(str); truncateStr = truncateStr || '...';\n      length = ~~length;\n      return str.length > length ? str.slice(0, length) + truncateStr : str;\n    },\n\n    /**\n     * _s.prune: a more elegant version of truncate\n     * prune extra chars, never leaving a half-chopped word.\n     * @author github.com/rwz\n     */\n    prune: function(str, length, pruneStr){\n      if (str == null) return '';\n\n      str = String(str); length = ~~length;\n      pruneStr = pruneStr != null ? String(pruneStr) : '...';\n\n      if (str.length <= length) return str;\n\n      var tmpl = function(c){ return c.toUpperCase() !== c.toLowerCase() ? 'A' : ' '; },\n        template = str.slice(0, length+1).replace(/.(?=\\W*\\w*$)/g, tmpl); // 'Hello, world' -> 'HellAA AAAAA'\n\n      if (template.slice(template.length-2).match(/\\w\\w/))\n        template = template.replace(/\\s*\\S+$/, '');\n      else\n        template = _s.rtrim(template.slice(0, template.length-1));\n\n      return (template+pruneStr).length > str.length ? str : str.slice(0, template.length)+pruneStr;\n    },\n\n    words: function(str, delimiter) {\n      if (_s.isBlank(str)) return [];\n      return _s.trim(str, delimiter).split(delimiter || /\\s+/);\n    },\n\n    pad: function(str, length, padStr, type) {\n      str = str == null ? 
'' : String(str);\n      length = ~~length;\n\n      var padlen  = 0;\n\n      if (!padStr)\n        padStr = ' ';\n      else if (padStr.length > 1)\n        padStr = padStr.charAt(0);\n\n      switch(type) {\n        case 'right':\n          padlen = length - str.length;\n          return str + strRepeat(padStr, padlen);\n        case 'both':\n          padlen = length - str.length;\n          return strRepeat(padStr, Math.ceil(padlen/2)) + str\n                  + strRepeat(padStr, Math.floor(padlen/2));\n        default: // 'left'\n          padlen = length - str.length;\n          return strRepeat(padStr, padlen) + str;\n        }\n    },\n\n    lpad: function(str, length, padStr) {\n      return _s.pad(str, length, padStr);\n    },\n\n    rpad: function(str, length, padStr) {\n      return _s.pad(str, length, padStr, 'right');\n    },\n\n    lrpad: function(str, length, padStr) {\n      return _s.pad(str, length, padStr, 'both');\n    },\n\n    sprintf: sprintf,\n\n    vsprintf: function(fmt, argv){\n      argv.unshift(fmt);\n      return sprintf.apply(null, argv);\n    },\n\n    toNumber: function(str, decimals) {\n      if (!str) return 0;\n      str = _s.trim(str);\n      if (!str.match(/^-?\\d+(?:\\.\\d+)?$/)) return NaN;\n      return parseNumber(parseNumber(str).toFixed(~~decimals));\n    },\n\n    numberFormat : function(number, dec, dsep, tsep) {\n      if (isNaN(number) || number == null) return '';\n\n      number = number.toFixed(~~dec);\n      tsep = typeof tsep == 'string' ? tsep : ',';\n\n      var parts = number.split('.'), fnums = parts[0],\n        decimals = parts[1] ? (dsep || '.') + parts[1] : '';\n\n      return fnums.replace(/(\\d)(?=(?:\\d{3})+$)/g, '$1' + tsep) + decimals;\n    },\n\n    strRight: function(str, sep){\n      if (str == null) return '';\n      str = String(str); sep = sep != null ? String(sep) : sep;\n      var pos = !sep ? -1 : str.indexOf(sep);\n      return ~pos ? str.slice(pos+sep.length, str.length) : str;\n    },\n\n    strRightBack: function(str, sep){\n      if (str == null) return '';\n      str = String(str); sep = sep != null ? String(sep) : sep;\n      var pos = !sep ? -1 : str.lastIndexOf(sep);\n      return ~pos ? str.slice(pos+sep.length, str.length) : str;\n    },\n\n    strLeft: function(str, sep){\n      if (str == null) return '';\n      str = String(str); sep = sep != null ? String(sep) : sep;\n      var pos = !sep ? -1 : str.indexOf(sep);\n      return ~pos ? str.slice(0, pos) : str;\n    },\n\n    strLeftBack: function(str, sep){\n      if (str == null) return '';\n      str += ''; sep = sep != null ? ''+sep : sep;\n      var pos = str.lastIndexOf(sep);\n      return ~pos ? str.slice(0, pos) : str;\n    },\n\n    toSentence: function(array, separator, lastSeparator, serial) {\n      separator = separator || ', '\n      lastSeparator = lastSeparator || ' and '\n      var a = array.slice(), lastMember = a.pop();\n\n      if (array.length > 2 && serial) lastSeparator = _s.rtrim(separator) + lastSeparator;\n\n      return a.length ? 
a.join(separator) + lastSeparator + lastMember : lastMember;\n    },\n\n    toSentenceSerial: function() {\n      var args = slice.call(arguments);\n      args[3] = true;\n      return _s.toSentence.apply(_s, args);\n    },\n\n    slugify: function(str) {\n      if (str == null) return '';\n\n      var from  = \"ąàáäâãåæćęèéëêìíïîłńòóöôõøśùúüûñçżź\",\n          to    = \"aaaaaaaaceeeeeiiiilnoooooosuuuunczz\",\n          regex = new RegExp(defaultToWhiteSpace(from), 'g');\n\n      str = String(str).toLowerCase().replace(regex, function(c){\n        var index = from.indexOf(c);\n        return to.charAt(index) || '-';\n      });\n\n      return _s.dasherize(str.replace(/[^\\w\\s-]/g, ''));\n    },\n\n    surround: function(str, wrapper) {\n      return [wrapper, str, wrapper].join('');\n    },\n\n    quote: function(str) {\n      return _s.surround(str, '\"');\n    },\n\n    exports: function() {\n      var result = {};\n\n      for (var prop in this) {\n        if (!this.hasOwnProperty(prop) || prop.match(/^(?:include|contains|reverse)$/)) continue;\n        result[prop] = this[prop];\n      }\n\n      return result;\n    },\n\n    repeat: function(str, qty, separator){\n      if (str == null) return '';\n\n      qty = ~~qty;\n\n      // using faster implementation if separator is not needed;\n      if (separator == null) return strRepeat(String(str), qty);\n\n      // this one is about 300x slower in Google Chrome\n      for (var repeat = []; qty > 0; repeat[--qty] = str) {}\n      return repeat.join(separator);\n    },\n\n    naturalCmp: function(str1, str2){\n      if (str1 == str2) return 0;\n      if (!str1) return -1;\n      if (!str2) return 1;\n\n      var cmpRegex = /(\\.\\d+)|(\\d+)|(\\D+)/g,\n        tokens1 = String(str1).toLowerCase().match(cmpRegex),\n        tokens2 = String(str2).toLowerCase().match(cmpRegex),\n        count = Math.min(tokens1.length, tokens2.length);\n\n      for(var i = 0; i < count; i++) {\n        var a = tokens1[i], b = tokens2[i];\n\n        if (a !== b){\n          var num1 = parseInt(a, 10);\n          if (!isNaN(num1)){\n            var num2 = parseInt(b, 10);\n            if (!isNaN(num2) && num1 - num2)\n              return num1 - num2;\n          }\n          return a < b ? -1 : 1;\n        }\n      }\n\n      if (tokens1.length === tokens2.length)\n        return tokens1.length - tokens2.length;\n\n      return str1 < str2 ? 
-1 : 1;\n    },\n\n    levenshtein: function(str1, str2) {\n      if (str1 == null && str2 == null) return 0;\n      if (str1 == null) return String(str2).length;\n      if (str2 == null) return String(str1).length;\n\n      str1 = String(str1); str2 = String(str2);\n\n      var current = [], prev, value;\n\n      for (var i = 0; i <= str2.length; i++)\n        for (var j = 0; j <= str1.length; j++) {\n          if (i && j)\n            if (str1.charAt(j - 1) === str2.charAt(i - 1))\n              value = prev;\n            else\n              value = Math.min(current[j], current[j - 1], prev) + 1;\n          else\n            value = i + j;\n\n          prev = current[j];\n          current[j] = value;\n        }\n\n      return current.pop();\n    }\n  };\n\n  // Aliases\n\n  _s.strip    = _s.trim;\n  _s.lstrip   = _s.ltrim;\n  _s.rstrip   = _s.rtrim;\n  _s.center   = _s.lrpad;\n  _s.rjust    = _s.lpad;\n  _s.ljust    = _s.rpad;\n  _s.contains = _s.include;\n  _s.q        = _s.quote;\n\n  // Exporting\n\n  // CommonJS module is defined\n  if (typeof exports !== 'undefined') {\n    if (typeof module !== 'undefined' && module.exports)\n      module.exports = _s;\n\n    exports._s = _s;\n  }\n\n  // Register as a named module with AMD.\n  if (typeof define === 'function' && define.amd)\n    define('underscore.string', [], function(){ return _s; });\n\n\n  // Integrate with Underscore.js if defined\n  // or create our own underscore object.\n  root._ = root._ || {};\n  root._.string = root._.str = _s;\n}(this, String);\n"
  },
  {
    "path": "tronweb/js/yaml.js",
    "content": "CodeMirror.defineMode(\"yaml\", function() {\n\n\tvar cons = ['true', 'false', 'on', 'off', 'yes', 'no'];\n\tvar keywordRegex = new RegExp(\"\\\\b((\"+cons.join(\")|(\")+\"))$\", 'i');\n\n\treturn {\n\t\ttoken: function(stream, state) {\n\t\t\tvar ch = stream.peek();\n\t\t\tvar esc = state.escaped;\n\t\t\tstate.escaped = false;\n\t\t\t/* comments */\n\t\t\tif (ch == \"#\") { stream.skipToEnd(); return \"comment\"; }\n\t\t\tif (state.literal && stream.indentation() > state.keyCol) {\n\t\t\t\tstream.skipToEnd(); return \"string\";\n\t\t\t} else if (state.literal) { state.literal = false; }\n\t\t\tif (stream.sol()) {\n\t\t\t\tstate.keyCol = 0;\n\t\t\t\tstate.pair = false;\n\t\t\t\tstate.pairStart = false;\n\t\t\t\t/* document start */\n\t\t\t\tif(stream.match(/---/)) { return \"def\"; }\n\t\t\t\t/* document end */\n\t\t\t\tif (stream.match(/\\.\\.\\./)) { return \"def\"; }\n\t\t\t\t/* array list item */\n\t\t\t\tif (stream.match(/\\s*-\\s+/)) { return 'meta'; }\n\t\t\t}\n\t\t\t/* pairs (associative arrays) -> key */\n\t\t\tif (!state.pair && stream.match(/^\\s*([a-z0-9\\._-])+(?=\\s*:)/i)) {\n\t\t\t\tstate.pair = true;\n\t\t\t\tstate.keyCol = stream.indentation();\n\t\t\t\treturn \"atom\";\n\t\t\t}\n\t\t\tif (state.pair && stream.match(/^:\\s*/)) { state.pairStart = true; return 'meta'; }\n\n\t\t\t/* inline pairs/lists */\n\t\t\tif (stream.match(/^(\\{|\\}|\\[|\\])/)) {\n\t\t\t\tif (ch == '{')\n\t\t\t\t\tstate.inlinePairs++;\n\t\t\t\telse if (ch == '}')\n\t\t\t\t\tstate.inlinePairs--;\n\t\t\t\telse if (ch == '[')\n\t\t\t\t\tstate.inlineList++;\n\t\t\t\telse\n\t\t\t\t\tstate.inlineList--;\n\t\t\t\treturn 'meta';\n\t\t\t}\n\n\t\t\t/* list seperator */\n\t\t\tif (state.inlineList > 0 && !esc && ch == ',') {\n\t\t\t\tstream.next();\n\t\t\t\treturn 'meta';\n\t\t\t}\n\t\t\t/* pairs seperator */\n\t\t\tif (state.inlinePairs > 0 && !esc && ch == ',') {\n\t\t\t\tstate.keyCol = 0;\n\t\t\t\tstate.pair = false;\n\t\t\t\tstate.pairStart = false;\n\t\t\t\tstream.next();\n\t\t\t\treturn 'meta';\n\t\t\t}\n\n\t\t\t/* start of value of a pair */\n\t\t\tif (state.pairStart) {\n\t\t\t\t/* block literals */\n\t\t\t\tif (stream.match(/^\\s*(\\||\\>)\\s*/)) { state.literal = true; return 'meta'; };\n\t\t\t\t/* references */\n\t\t\t\tif (stream.match(/^\\s*(\\&|\\*)[a-z0-9\\._-]+\\b/i)) { return 'variable-2'; }\n\t\t\t\t/* numbers */\n\t\t\t\tif (state.inlinePairs == 0 && stream.match(/^\\s*-?[0-9\\.\\,]+\\s?$/)) { return 'number'; }\n\t\t\t\tif (state.inlinePairs > 0 && stream.match(/^\\s*-?[0-9\\.\\,]+\\s?(?=(,|}))/)) { return 'number'; }\n\t\t\t\t/* keywords */\n\t\t\t\tif (stream.match(keywordRegex)) { return 'keyword'; }\n\t\t\t}\n\n\t\t\t/* nothing found, continue */\n\t\t\tstate.pairStart = false;\n\t\t\tstate.escaped = (ch == '\\\\');\n\t\t\tstream.next();\n\t\t\treturn null;\n\t\t},\n\t\tstartState: function() {\n\t\t\treturn {\n\t\t\t\tpair: false,\n\t\t\t\tpairStart: false,\n\t\t\t\tkeyCol: 0,\n\t\t\t\tinlinePairs: 0,\n\t\t\t\tinlineList: 0,\n\t\t\t\tliteral: false,\n\t\t\t\tescaped: false\n\t\t\t};\n\t\t}\n\t};\n});\n\nCodeMirror.defineMIME(\"text/x-yaml\", \"yaml\");\n"
  },
  {
    "path": "tronweb_tests/SpecRunner.html",
    "content": "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n  \"http://www.w3.org/TR/html4/loose.dtd\">\n<html>\n\n<head>\n  <title>tronweb test runner</title>\n\n  <link rel=\"stylesheet\" type=\"text/css\" href=\"lib/jasmine-1.3.1/jasmine.css\">\n  <script type=\"text/javascript\" src=\"lib/jasmine-1.3.1/jasmine.js\"></script>\n  <script type=\"text/javascript\" src=\"lib/jasmine-1.3.1/jasmine-html.js\"></script>\n\n  <!-- depenedncies -->\n  <script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.9.0/jquery.min.js\"></script>\n  <script src=\"http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.2/jquery-ui.min.js\"></script>\n  <script src=\"../tronweb/js/underscore-min.js\"></script>\n  <script src=\"../tronweb/js/underscore.string.js\"></script>\n  <script src=\"../tronweb/js/underscore.extra.js\"></script>\n  <script src=\"../tronweb/js/backbone-min.js\"></script>\n  <script src=\"../tronweb/js/bootstrap.min.js\"></script>\n  <script src=\"../tronweb/js/codemirror.js\"></script>\n  <script src=\"../tronweb/js/yaml.js\"></script>\n  <script src=\"../tronweb/js/moment.min.js\"></script>\n  <script src=\"../tronweb/js/moment.tz.min.js\"></script>\n  <script src=\"../tronweb/js/d3.v3.min.js\"></script>\n\n  <!-- include source files here... -->\n  <script src=\"../tronweb/js/cs/models.js\"></script>\n  <script src=\"../tronweb/js/cs/views.js\"></script>\n  <script src=\"../tronweb/js/cs/nodes.js\"></script>\n  <script src=\"../tronweb/js/cs/graph.js\"></script>\n  <script src=\"../tronweb/js/cs/config.js\"></script>\n  <script src=\"../tronweb/js/cs/actionrun.js\"></script>\n  <script src=\"../tronweb/js/cs/timeline.js\"></script>\n  <script src=\"../tronweb/js/cs/job.js\"></script>\n  <script src=\"../tronweb/js/cs/dashboard.js\"></script>\n  <script src=\"../tronweb/js/cs/navbar.js\"></script>\n  <script src=\"../tronweb/js/cs/routes.js\"></script>\n\n  <!-- include spec files here... -->\n  <script src=\"spec/routes_test.js\"></script>\n  <script src=\"spec/actionrun_test.js\"></script>\n  <script src=\"spec/timeline_test.js\"></script>\n  <script src=\"spec/navbar_test.js\"></script>\n  <script src=\"spec/dashboard_test.js\"></script>\n\n  <script type=\"text/javascript\">\n    (function () {\n      var jasmineEnv = jasmine.getEnv();\n      jasmineEnv.updateInterval = 200;\n\n      var htmlReporter = new jasmine.HtmlReporter();\n\n      jasmineEnv.addReporter(htmlReporter);\n\n      jasmineEnv.specFilter = function (spec) {\n        return htmlReporter.specFilter(spec);\n      };\n\n      var currentWindowOnload = window.onload;\n\n      window.onload = function () {\n        execJasmine();\n      };\n\n      function execJasmine() {\n        jasmineEnv.execute();\n      }\n\n    })();\n  </script>\n\n</head>\n\n<body>\n</body>\n\n</html>\n"
  },
  {
    "path": "tronweb_tests/spec/README",
    "content": "\nCreate javascript specs from coffeescript by running:\n\ncoffee -w -o tronweb_tests/spec/ -c tronweb_tests/tests/\n\n\nThese tests require jasmine 1.3.1+ to be installed at:\n\ntronweb_tests/lib\n"
  },
  {
    "path": "tronweb_tests/tests/actionrun_test.coffee",
    "content": "\n\ndescribe \"actionrun.coffee\", ->\n    module = modules.actionrun\n\n    describe \"ActionRun Model\", ->\n        self = this\n\n        beforeEach ->\n            self.actionRun = new module.ActionRun\n                action_name: 'action_name'\n                job_name: 'job_name'\n                run_num: 'run_num'\n\n        it \"url creates the correct url\", ->\n            url = self.actionRun.url()\n            expect(url).toEqual('/jobs/job_name/run_num/action_name' +\n                self.actionRun.urlArgs)\n\n        it \"parse builds urls\", ->\n            resp = self.actionRun.parse {}\n            expect(resp['job_url']).toEqual('#job/job_name')\n            expect(resp['job_run_url']).toEqual('#job/job_name/run_num')\n            expect(resp['url']).toEqual('#job/job_name/run_num/action_name')\n\n    describe \"ActionRunHistory Model\", ->\n        self = this\n\n        beforeEach ->\n            self.collection = new module.ActionRunHistory [],\n                job_name: 'job_name'\n                action_name: 'action_name'\n\n        it \"url creates the correct url\", ->\n            expect(self.collection.url()).toEqual(\n                '/jobs/job_name/action_name/')\n"
  },
  {
    "path": "tronweb_tests/tests/dashboard_test.coffee",
    "content": "\n\ndescribe \"Dashboard module\", ->\n\n    describe \"JobStatusBoxView\", ->\n        test = @\n\n        beforeEach ->\n            test.model = new Job()\n            spyOn(test.model, 'get')\n            test.view = new modules.dashboard.JobStatusBoxView(model: test.model)\n\n        it \"count an empty list\", ->\n            test.model.get.andReturn([])\n            expect(test.view.count()).toEqual(0)\n\n        it \"count a non-empty list returns first items run number\", ->\n            runs = [{'run_num': 5}, {'run_num': 4}, {'run_num': 3}]\n            test.model.get.andReturn(runs)\n            expect(test.view.count()).toEqual(5)\n"
  },
  {
    "path": "tronweb_tests/tests/navbar_test.coffee",
    "content": "\ndescribe \"navbar module\", ->\n\n    describe \"NavView\", ->\n\n        describe \"sorter\", ->\n            test = @\n\n            beforeEach ->\n                test.view = new modules.navbar.NavView()\n                test.items = [\n                    \"three\",\n                    \"one\",\n                    \"one longer\",\n                    \"TWO\",\n                    \"a one\",\n                    \"longone with TwO ones\"]\n\n            it \"sorts shorter items first\", ->\n                mockThis = query: \"one\"\n                sortedItems = test.view.sorter.call(mockThis, test.items)\n                expected = [\n                    \"one\",\n                    \"one longer\",\n                    \"a one\",\n                    \"longone with TwO ones\"]\n                expect(sortedItems).toEqual(expected)\n\n            it \"sorts only matching items\", ->\n                mockThis = query: \"TWO\"\n                sortedItems = test.view.sorter.call(mockThis, test.items)\n                expect(sortedItems).toEqual([\"TWO\", \"longone with TwO ones\"])\n\n            it \"matches case insensitive\", ->\n                mockThis = query: \"two\"\n                sortedItems = test.view.sorter.call(mockThis, test.items)\n                expect(sortedItems).toEqual([\"TWO\", \"longone with TwO ones\"])\n"
  },
  {
    "path": "tronweb_tests/tests/routes_test.coffee",
    "content": "\n\ndescribe \"routes.coffee\", ->\n    module = window.modules.routes\n\n    it \"splitKeyValuePairs creates object from list\", ->\n        obj = module.splitKeyValuePairs ['one=two', 'three=four']\n        expect(obj).toEqual\n            one: 'two'\n            three: 'four'\n\n    it \"getParamsMap creates object from string\", ->\n        obj = module.getParamsMap \"a=nameThing;b=other\"\n        expect(obj).toEqual\n            a: 'nameThing'\n            b: 'other'\n\n\n    describe \"getLocationParams\", ->\n\n        beforeEach ->\n            spyOn(module, 'getLocationHash')\n\n        it \"returns location with params\", ->\n            location = \"#base;one=thing;another=what\"\n            module.getLocationHash.andReturn(location)\n            [base, params] = module.getLocationParams()\n            expect(base).toEqual(\"#base\")\n            expect(params).toEqual\n                 one: \"thing\"\n                 another: \"what\"\n\n        it \"returns location without params\", ->\n            module.getLocationHash.andReturn(\"#blah\")\n            [base, params] = module.getLocationParams()\n            expect(base).toEqual(\"#blah\")\n            expect(params).toEqual {}\n\n\n    it \"buildLocationString creates a location string\", ->\n        params =\n            thing: \"ok\"\n            bar: \"tmp\"\n        location = module.buildLocationString \"#base\", params\n        expect(location).toEqual(\"#base;thing=ok;bar=tmp\")\n\n\n    describe \"updateLocationParam\", ->\n\n        beforeEach ->\n            window.routes = jasmine.createSpyObj('routes', ['navigate'])\n            spyOn(module, 'getLocationHash')\n\n        it \"creates params when params is empty\", ->\n            module.getLocationHash.andReturn(\"#base\")\n            module.updateLocationParam('name', 'stars')\n            expected = \"#base;name=stars\"\n            expect(window.routes.navigate).toHaveBeenCalledWith(expected)\n\n        it \"updates existing param\",  ->\n            module.getLocationHash.andReturn(\"#base;name=foo\")\n            module.updateLocationParam('name', 'stars')\n            expected = \"#base;name=stars\"\n            expect(window.routes.navigate).toHaveBeenCalledWith(expected)\n\n        it \"adds new params\", ->\n            module.getLocationHash.andReturn(\"#base;what=why\")\n            module.updateLocationParam('name', 'stars')\n            expected = \"#base;what=why;name=stars\"\n            expect(window.routes.navigate).toHaveBeenCalledWith(expected)\n"
  },
  {
    "path": "tronweb_tests/tests/timeline_test.coffee",
    "content": "\nmodule = modules.timeline\n\ndescribe \"Timeline module\", ->\n\n    it \"padMaxDate adds padding to maxDate\", ->\n        dates = [new Date(\"2013-04-20 01:00:00\"),\n                 new Date(\"2013-04-20 02:30:30\")]\n        padded = module.padMaxDate(dates, 0.1)\n        expect(padded).toEqual([dates[0], new Date(\"2013-04-20 02:39:33\")])\n"
  },
  {
    "path": "yelp_package/extra_requirements_yelp.txt",
    "content": "asn1crypto==1.5.1          # vault-tools dependency\natomicfile==1.0.1          # vault-tools dependency\nclusterman-metrics==2.2.1  # used by tron for pre-scaling for Spark runs\ncrypto-lib==4.0.0          # vault-tools dependency\ngitdb==4.0.12              # vault-tools dependency\ngitpython==3.1.44          # vault-tools dependency\nhvac==1.2.1                # vault-tools dependency\nipaddress==1.0.23          # vault-tools dependency\nlogreader==1.2.0           # used by tron logreader\nndg-httpsclient==0.5.1     # vault-tools dependency\nokta-auth==1.1.1           # used for API auth\npygpgme==0.3               # vault-tools dependency\npyhcl==0.4.5               # vault-tools dependency\npyjwt==2.9.0               # required by okta-auth\nsaml-helper==2.5.3         # required by okta-auth\nservice-identity==24.2.0   # vault-tools dependency\nsmmap==5.0.2               # vault-tools dependency\nvault-tools==1.6.0         # used for API auth\nyelp-meteorite==2.1.1      # used by task-processing to emit metrics, clusterman-metrics dependency\n"
  },
  {
    "path": "yelp_package/jammy/Dockerfile",
    "content": "FROM ubuntu:jammy\n\nRUN apt-get update -yq && \\\n    apt-get install -yq \\\n        # needed to add a ppa\n        software-properties-common && \\\n    add-apt-repository ppa:deadsnakes/ppa\n\nRUN apt-get -q update && \\\n    DEBIAN_FRONTEND=noninteractive apt-get -q install -y --no-install-recommends \\\n        coffeescript \\\n        debhelper \\\n        devscripts \\\n        dh-virtualenv \\\n        dpkg-dev \\\n        gcc \\\n        gdebi-core \\\n        git \\\n        help2man \\\n        libffi-dev \\\n        libgpgme11 \\\n        libssl-dev \\\n        libdb5.3-dev \\\n        libyaml-dev \\\n        libssl-dev \\\n        libffi-dev \\\n        python3.10-dev \\\n        python3.10-distutils \\\n        python3-pip \\\n        rust-all \\\n        tox \\\n        wget \\\n        g++ \\\n        #  12.22, good enough\n        nodejs \\\n    && apt-get -q clean\n\nARG PIP_INDEX_URL\nENV PIP_INDEX_URL=${PIP_INDEX_URL:-https://pypi.python.org/simple}\n\nRUN pip3 install --trusted-host 169.254.255.254 --index-url ${PIP_INDEX_URL} virtualenv==20.17.1\nWORKDIR /work\n"
  }
]